/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg-be-ldst.h"

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "elf.h"

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_MULI  0x100
#define TCG_CT_CONST_ORI   0x200
#define TCG_CT_CONST_XORI  0x400
#define TCG_CT_CONST_CMPI  0x800
#define TCG_CT_CONST_ADLI  0x1000

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,
} S390Opcode;

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

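/* These values form the 4-bit condition mask of BRANCH ON CONDITION and
   friends: bit value 8 selects CC 0 (equal), 4 selects CC 1 (low),
   2 selects CC 2 (high) and 1 selects CC 3 (overflow).  For example,
   "not equal" must accept CC 1 or CC 2, hence the mask
   S390_CC_LT | S390_CC_GT = 6.  */
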
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variant; however, since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

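/* To see why the unsigned re-mapping works, note that the value has been
   compared against zero.  Viewed as an unsigned number, x < 0 is never
   true and x >= 0 is always true, while x <= 0 and x > 0 reduce to
   x == 0 and x != 0 respectively.  */
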
#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif

static tcg_insn_unit *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE   (1ULL << (63 - 2))
#define FACILITY_LONG_DISP      (1ULL << (63 - 18))
#define FACILITY_EXT_IMM        (1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT   (1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))

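/* The (63 - N) shifts mirror the big-endian bit numbering used by STORE
   FACILITY LIST EXTENDED: facility N is bit N counting from the most
   significant bit of the first doubleword of the facility list.  */
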
static uint64_t facilities;

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        tcg_abort();
        break;
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

static int tcg_match_ori(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}

/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(TCGType type, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (type == TCG_TYPE_I32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}

/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}

/* Immediates to be used with add2/sub2.  */

static int tcg_match_add2i(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            return 1;
        } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
            return 1;
        }
    }
    return 0;
}

/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

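/* For orientation: an RSY insn is 48 bits.  The second emit above packs,
   into one 32-bit word, the base register B2 (bits 28-31), the low 12
   displacement bits DL (bits 16-27), the high 8 displacement bits DH
   (bits 8-15) and the low opcode byte (bits 0-7), yielding the full
   20-bit signed displacement of the long-displacement facility.  */
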
#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)

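/* For example, tcg_out_insn(s, RIL, LARL, ret, off) expands to
   tcg_out_insn_RIL(s, RIL_LARL, ret, off).  Naming an FMT/OP pair that
   does not exist in S390Opcode fails to compile, which is the
   "type-checking".  */
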
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}

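/* A worked example: with the extended-immediate facility, loading
   0x0000123400005678 misses every single-insn case above (assuming the
   opportunistic LARL does not happen to hit), so we recurse to load
   0x5678 via LLILL and then insert 0x1234 into the high half via IIHL:
   two 4-byte instructions in total.  */
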
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}

/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}

static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

/* Accept bit patterns like these:
    0....01....1
    1....10....0
    1..10..01..1
    0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

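/* For instance, 0x0ff0 (a single contiguous run of ones) and
   0xf00000000000000f (a single run with wraparound) pass this test,
   while 0x0f0f (two separate runs) is rejected.  */
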
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}

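/* E.g. val = 0xffffffff000000ff (both end bits set): ~val has 8 trailing
   and 32 leading zeros, so msb = 56 and lsb = 31, and the wrapped
   selection 56..63,0..31 (in IBM bit numbering, MSB = bit 0) picks out
   exactly the mask's one bits.  */
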
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }

    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}

static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and mask in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}

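/* Note that "val > 0xffffffff" is just "high half non-zero", so e.g.
   val = 0x100000000 emits only XIHF with immediate 1; the ">> 31 >> 1"
   idiom (also used in tcg_out_movi above) presumably sidesteps an
   undefined shift-by-32 should the operand type ever be 32 bits.  */
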
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move.  */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT.  */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X.  */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}

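/* In the fallback above, BRC's offset counts halfwords from the branch
   itself, so (4 + 4) >> 1 = 4 halfwords skips the 4-byte BRC plus the
   following 4-byte LGHI that clears dest.  */
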
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}

bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}

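/* The translation accounts for RISBG numbering bits from the MSB down:
   depositing len = 8 bits at ofs = 16, say, gives lsb = 47 and msb = 40,
   i.e. IBM bits 40-47, which are exactly bits 16-23 counted from the
   LSB.  The z bit is left clear so the bits outside the field are
   taken from dest, as deposit requires.  */
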
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0x7ffff);

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
        a_off = 0;
        tlb_mask = TARGET_PAGE_MASK | s_mask;
    } else {
        a_off = s_mask;
        tlb_mask = TARGET_PAGE_MASK;
    }

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

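    /* Either way, R2 now holds (addr >> TARGET_PAGE_BITS) masked to
       CPU_TLB_BITS and pre-scaled by the TLB entry size, ready to be
       used as an offset into tlb_table, while R3 holds the page (and
       alignment) bits of the address for the comparison below.  */
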
    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    S390Opcode op;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            s->code_ptr += 2;
        } else {
            /* load address stored at s->tb_next + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;

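    /* In the direct-jump case above, tb_jmp_offset records the position
       of the BRCL's 4-byte displacement field (the code_ptr += 2 skips
       it), so that the TB-chaining code can patch in the real
       destination later.  */
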
1732 OP_32_64(ld8u):
1733 /* ??? LLC (RXY format) is only present with the extended-immediate
1734 facility, whereas LLGC is always present. */
1735 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1736 break;
1738 OP_32_64(ld8s):
1739 /* ??? LB is no smaller than LGB, so no point to using it. */
1740 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1741 break;
1743 OP_32_64(ld16u):
1744 /* ??? LLH (RXY format) is only present with the extended-immediate
1745 facility, whereas LLGH is always present. */
1746 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1747 break;
1749 case INDEX_op_ld16s_i32:
1750 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1751 break;
1753 case INDEX_op_ld_i32:
1754 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1755 break;
1757 OP_32_64(st8):
1758 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1759 TCG_REG_NONE, args[2]);
1760 break;
1762 OP_32_64(st16):
1763 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1764 TCG_REG_NONE, args[2]);
1765 break;
1767 case INDEX_op_st_i32:
1768 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1769 break;
1771 case INDEX_op_add_i32:
1772 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1773 if (const_args[2]) {
1774 do_addi_32:
1775 if (a0 == a1) {
1776 if (a2 == (int16_t)a2) {
1777 tcg_out_insn(s, RI, AHI, a0, a2);
1778 break;
1780 if (facilities & FACILITY_EXT_IMM) {
1781 tcg_out_insn(s, RIL, AFI, a0, a2);
1782 break;
1785 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1786 } else if (a0 == a1) {
1787 tcg_out_insn(s, RR, AR, a0, a2);
1788 } else {
1789 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1791 break;
1792 case INDEX_op_sub_i32:
1793 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1794 if (const_args[2]) {
1795 a2 = -a2;
1796 goto do_addi_32;
1798 tcg_out_insn(s, RR, SR, args[0], args[2]);
1799 break;
1801 case INDEX_op_and_i32:
1802 if (const_args[2]) {
1803 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1804 } else {
1805 tcg_out_insn(s, RR, NR, args[0], args[2]);
1807 break;
1808 case INDEX_op_or_i32:
1809 if (const_args[2]) {
1810 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1811 } else {
1812 tcg_out_insn(s, RR, OR, args[0], args[2]);
1814 break;
1815 case INDEX_op_xor_i32:
1816 if (const_args[2]) {
1817 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1818 } else {
1819 tcg_out_insn(s, RR, XR, args[0], args[2]);
1821 break;
1823 case INDEX_op_neg_i32:
1824 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1825 break;
1827 case INDEX_op_mul_i32:
1828 if (const_args[2]) {
1829 if ((int32_t)args[2] == (int16_t)args[2]) {
1830 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1831 } else {
1832 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1834 } else {
1835 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1837 break;
1839 case INDEX_op_div2_i32:
1840 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1841 break;
1842 case INDEX_op_divu2_i32:
1843 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1844 break;
1846 case INDEX_op_shl_i32:
1847 op = RS_SLL;
1848 do_shift32:
1849 if (const_args[2]) {
1850 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1851 } else {
1852 tcg_out_sh32(s, op, args[0], args[2], 0);
1854 break;
1855 case INDEX_op_shr_i32:
1856 op = RS_SRL;
1857 goto do_shift32;
1858 case INDEX_op_sar_i32:
1859 op = RS_SRA;
1860 goto do_shift32;
1862 case INDEX_op_rotl_i32:
1863 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1864 if (const_args[2]) {
1865 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1866 } else {
1867 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1869 break;
1870 case INDEX_op_rotr_i32:
1871 if (const_args[2]) {
1872 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1873 TCG_REG_NONE, (32 - args[2]) & 31);
1874 } else {
1875 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1876 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1878 break;
1880 case INDEX_op_ext8s_i32:
1881 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1882 break;
1883 case INDEX_op_ext16s_i32:
1884 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1885 break;
1886 case INDEX_op_ext8u_i32:
1887 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1888 break;
1889 case INDEX_op_ext16u_i32:
1890 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1891 break;
1893 OP_32_64(bswap16):
1894 /* The TCG bswap definition requires bits 0-47 already be zero.
1895 Thus we don't need the G-type insns to implement bswap16_i64. */
1896 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1897 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1898 break;
1899 OP_32_64(bswap32):
1900 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1901 break;
1903 case INDEX_op_add2_i32:
1904 if (const_args[4]) {
1905 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1906 } else {
1907 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1909 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1910 break;
1911 case INDEX_op_sub2_i32:
1912 if (const_args[4]) {
1913 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1914 } else {
1915 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1917 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1918 break;
1920 case INDEX_op_br:
1921 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
1922 break;
1924 case INDEX_op_brcond_i32:
1925 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1926 args[1], const_args[1], arg_label(args[3]));
1927 break;
1928 case INDEX_op_setcond_i32:
1929 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1930 args[2], const_args[2]);
1931 break;
1932 case INDEX_op_movcond_i32:
1933 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1934 args[2], const_args[2], args[3]);
1935 break;
1937 case INDEX_op_qemu_ld_i32:
1938 /* ??? Technically we can use a non-extending instruction. */
1939 case INDEX_op_qemu_ld_i64:
1940 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
1941 break;
1942 case INDEX_op_qemu_st_i32:
1943 case INDEX_op_qemu_st_i64:
1944 tcg_out_qemu_st(s, args[0], args[1], args[2]);
1945 break;
1947 case INDEX_op_ld16s_i64:
1948 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1949 break;
1950 case INDEX_op_ld32u_i64:
1951 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1952 break;
1953 case INDEX_op_ld32s_i64:
1954 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1955 break;
1956 case INDEX_op_ld_i64:
1957 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1958 break;
1960 case INDEX_op_st32_i64:
1961 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1962 break;
1963 case INDEX_op_st_i64:
1964 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1965 break;
1967 case INDEX_op_add_i64:
1968 a0 = args[0], a1 = args[1], a2 = args[2];
1969 if (const_args[2]) {
1970 do_addi_64:
1971 if (a0 == a1) {
1972 if (a2 == (int16_t)a2) {
1973 tcg_out_insn(s, RI, AGHI, a0, a2);
1974 break;
1976 if (facilities & FACILITY_EXT_IMM) {
1977 if (a2 == (int32_t)a2) {
1978 tcg_out_insn(s, RIL, AGFI, a0, a2);
1979 break;
1980 } else if (a2 == (uint32_t)a2) {
1981 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1982 break;
1983 } else if (-a2 == (uint32_t)-a2) {
1984 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1985 break;
1989 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1990 } else if (a0 == a1) {
1991 tcg_out_insn(s, RRE, AGR, a0, a2);
1992 } else {
1993 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1995 break;
1996 case INDEX_op_sub_i64:
1997 a0 = args[0], a1 = args[1], a2 = args[2];
1998 if (const_args[2]) {
1999 a2 = -a2;
2000 goto do_addi_64;
2001 } else {
2002 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
2004 break;
2006 case INDEX_op_and_i64:
2007 if (const_args[2]) {
2008 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2009 } else {
2010 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2012 break;
2013 case INDEX_op_or_i64:
2014 if (const_args[2]) {
2015 tgen64_ori(s, args[0], args[2]);
2016 } else {
2017 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2019 break;
2020 case INDEX_op_xor_i64:
2021 if (const_args[2]) {
2022 tgen64_xori(s, args[0], args[2]);
2023 } else {
2024 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2026 break;
2028 case INDEX_op_neg_i64:
2029 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2030 break;
2031 case INDEX_op_bswap64_i64:
2032 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2033 break;
2035 case INDEX_op_mul_i64:
2036 if (const_args[2]) {
2037 if (args[2] == (int16_t)args[2]) {
2038 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2039 } else {
2040 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2042 } else {
2043 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2045 break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but as we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead would require jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
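    /* DLGR, unlike DSGR above, consumes the full 128-bit dividend held
       in the even/odd pair R2:R3; both leave the remainder in R2 and
       the quotient in R3.  */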
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
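    /* MLGR R2,r multiplies R3 by r and leaves the 128-bit product in
       R2 (high half) and R3 (low half).  */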
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;
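
    /* Double-word add/sub: the low halves use the "logical" forms, which
       set the carry/borrow in the condition code; the high halves then
       consume it with ALCGR (add with carry) or SLBGR (subtract with
       borrow).  */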
    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
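
/* Constraint strings for each opcode: one element per argument, outputs
   first.  "r" is any register, a digit ties an output to the same
   register as that input, and the remaining letters select the register
   subsets and immediate ranges defined by target_parse_constraint
   earlier in this file.  */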
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { -1 },
};

static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0");
        register void *r1 __asm__("1");
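
        /* Per the STFLE definition, r0 holds the number of doublewords
           provided minus one (0 here, so one doubleword is stored at
           0(%r1)).  The .word pair below hand-assembles the instruction,
           avoiding any need for assembler support for stfle.  */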
        /* stfle 0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}

static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}

#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
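
/* The frame thus covers the ABI-mandated register save area
   (TCG_TARGET_CALL_STACK_OFFSET), room for outgoing helper-call
   arguments, and a scratch buffer for TCG temporaries, which is
   registered with tcg_set_frame below.  */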

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
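
    /* Everything below is the epilogue; exit_tb branches back here via
       tb_ret_addr to unwind the frame and return to the caller of the
       prologue.  */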
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
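
/* The CIE/FDE pair below is handed to the GDB JIT interface through
   tcg_register_jit so that debuggers can unwind through generated
   code.  */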
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
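/* Two uleb128 bytes carry 7 payload bits each, hence the 1 << 14 bound
   on FRAME_SIZE checked above.  */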

#define ELF_HOST_MACHINE  EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}