/* tcg/s390/tcg-target.c */

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "elf.h"

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_MULI  0x100
#define TCG_CT_CONST_ORI   0x200
#define TCG_CT_CONST_XORI  0x400
#define TCG_CT_CONST_CMPI  0x800
#define TCG_CT_CONST_ADLI  0x1000

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,
} S390Opcode;

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variant; however, since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

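/* For example: "x <u 0" is never true, hence LTU -> NEVER; "x >=u 0" is
   always true, hence GEU -> ALWAYS; "x <=u 0" holds exactly when x == 0,
   hence LEU -> EQ; and "x >u 0" holds exactly when x != 0, hence GTU -> NE.  */
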
#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif

static tcg_insn_unit *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE   (1ULL << (63 - 2))
#define FACILITY_LONG_DISP      (1ULL << (63 - 18))
#define FACILITY_EXT_IMM        (1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT   (1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))

static uint64_t facilities;

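/* A minimal sketch (illustrative addition; the rest of this file tests the
   bits directly): facility numbers count from the MSB of the first
   doubleword returned by STORE FACILITY LIST, hence the (63 - n) shifts in
   the masks above.  */
static inline bool have_facility(uint64_t facility_mask)
{
    /* True if every facility bit in the mask is present.  */
    return (facilities & facility_mask) == facility_mask;
}
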
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        tcg_abort();
        break;
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

static int tcg_match_ori(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}

/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(TCGType type, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (type == TCG_TYPE_I32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}

/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}

/* Immediates to be used with add2/sub2.  */

static int tcg_match_add2i(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            return 1;
        } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
            return 1;
        }
    }
    return 0;
}

/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)

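/* Worked example of the emitters above (illustrative): with RSY_SLLG = 0xeb0d,
   tcg_out_insn(s, RSY, SLLG, TCG_REG_R1, TCG_REG_NONE, TCG_REG_R2, 8) expands
   to tcg_out_insn_RSY(s, 0xeb0d, 1, 0, 2, 8) and emits the halfword 0xeb12
   followed by the word 0x0008000d -- the bytes eb 12 00 08 00 0d, which is
   SLLG %r1,%r2,8(%r0) with the 20-bit displacement split into DL=008 and
   DH=00 around the trailing opcode byte.  */
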
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}

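/* Worked example (illustrative): loading 0x12345678.  It is not a signed
   16-bit value and fits no single 16-bit chunk, so with the
   extended-immediate facility a single LGFI suffices (the value equals its
   own int32_t cast); without it, and assuming the PC-relative LARL case does
   not hit, it is loaded as the pair LLILL 0x5678 + IILH 0x1234.  */
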
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}

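/* Worked example (illustrative): ofs = 0x123456 is outside the signed 20-bit
   range, so low = ((0x23456 ^ 0x80000) - 0x80000) = 0x23456, TCG_TMP0 is
   loaded with ofs - low = 0x100000 and becomes (or is added into) the index,
   and the RXY-format insn is emitted with the in-range displacement 0x23456.  */
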
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}

static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

/* Accept bit patterns like these:
    0....01....1
    1....10....0
    1..10..01..1
    0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

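/* Examples (illustrative): c = 0x0ff0 has one 0->1 and one 1->0 transition;
   the final step leaves c equal to -lsb, so it is accepted.  c = 0x0f0f has
   more than two transitions; the leftover c is 0x0f00 while -lsb is
   ...ffffff00, so it is rejected.  */
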
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}

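/* Worked example (illustrative): val = 0x00000000ffffff00 takes the second
   branch: msb = clz64(val) = 32, lsb = 63 - ctz64(val) = 55, selecting bits
   32..55 in the MSB-first numbering.  A wraparound mask such as
   0xf00000000000000f takes the first branch: msb = 64 - ctz64(~val) = 60 and
   lsb = clz64(~val) - 1 = 3, selecting bits 60..63 and 0..3.  */
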
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}

static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and OR in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}

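/* For example (illustrative): val = 1 emits only XILF 1; val = 0x100000000
   has no low bits set, so only XIHF 1 is emitted; a value with bits in both
   halves emits both insns.  */
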
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move.  */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT.  */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X.  */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}

bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}

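/* Worked example (illustrative): a deposit at ofs = 8, len = 8 computes
   lsb = 63 - 8 = 55 and msb = 55 - 7 = 48.  The RISBG rotates src left by 8
   and inserts bits 48..55 (MSB-first numbering) of the result into dest,
   i.e. the low byte of src lands in bits 8..15 of dest, with z = 0 leaving
   the remaining dest bits intact.  */
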
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0x7ffff);

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
        a_off = 0;
        tlb_mask = TARGET_PAGE_MASK | s_mask;
    } else {
        a_off = s_mask;
        tlb_mask = TARGET_PAGE_MASK;
    }

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}

#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */

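/* For example (illustrative): a guest_base of 0x10000 fits the 20-bit
   displacement and needs no extra register, while a larger guest_base is
   assumed to have been loaded into TCG_GUEST_BASE_REG by the prologue
   (not part of this excerpt).  */
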
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

1700 # define OP_32_64(x) \
1701 case glue(glue(INDEX_op_,x),_i32): \
1702 case glue(glue(INDEX_op_,x),_i64)
1704 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1705 const TCGArg *args, const int *const_args)
1707 S390Opcode op;
1708 TCGArg a0, a1, a2;
1710 switch (opc) {
1711 case INDEX_op_exit_tb:
1712 /* return value */
1713 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1714 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1715 break;
1717 case INDEX_op_goto_tb:
1718 if (s->tb_jmp_offset) {
1719 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1720 s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
1721 s->code_ptr += 2;
1722 } else {
1723 /* load address stored at s->tb_next + args[0] */
1724 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1725 /* and go there */
1726 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1728 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1729 break;
1731 OP_32_64(ld8u):
1732 /* ??? LLC (RXY format) is only present with the extended-immediate
1733 facility, whereas LLGC is always present. */
1734 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1735 break;
1737 OP_32_64(ld8s):
1738 /* ??? LB is no smaller than LGB, so no point to using it. */
1739 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1740 break;
1742 OP_32_64(ld16u):
1743 /* ??? LLH (RXY format) is only present with the extended-immediate
1744 facility, whereas LLGH is always present. */
1745 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1746 break;
1748 case INDEX_op_ld16s_i32:
1749 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1750 break;
1752 case INDEX_op_ld_i32:
1753 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1754 break;
1756 OP_32_64(st8):
1757 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1758 TCG_REG_NONE, args[2]);
1759 break;
1761 OP_32_64(st16):
1762 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1763 TCG_REG_NONE, args[2]);
1764 break;
1766 case INDEX_op_st_i32:
1767 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1768 break;
1770 case INDEX_op_add_i32:
1771 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1772 if (const_args[2]) {
1773 do_addi_32:
1774 if (a0 == a1) {
1775 if (a2 == (int16_t)a2) {
1776 tcg_out_insn(s, RI, AHI, a0, a2);
1777 break;
1779 if (facilities & FACILITY_EXT_IMM) {
1780 tcg_out_insn(s, RIL, AFI, a0, a2);
1781 break;
1784 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1785 } else if (a0 == a1) {
1786 tcg_out_insn(s, RR, AR, a0, a2);
1787 } else {
1788 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1790 break;
1791 case INDEX_op_sub_i32:
1792 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1793 if (const_args[2]) {
1794 a2 = -a2;
1795 goto do_addi_32;
1797 tcg_out_insn(s, RR, SR, args[0], args[2]);
1798 break;
1800 case INDEX_op_and_i32:
1801 if (const_args[2]) {
1802 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1803 } else {
1804 tcg_out_insn(s, RR, NR, args[0], args[2]);
1806 break;
1807 case INDEX_op_or_i32:
1808 if (const_args[2]) {
1809 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1810 } else {
1811 tcg_out_insn(s, RR, OR, args[0], args[2]);
1813 break;
1814 case INDEX_op_xor_i32:
1815 if (const_args[2]) {
1816 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1817 } else {
1818 tcg_out_insn(s, RR, XR, args[0], args[2]);
1820 break;
1822 case INDEX_op_neg_i32:
1823 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1824 break;
1826 case INDEX_op_mul_i32:
1827 if (const_args[2]) {
1828 if ((int32_t)args[2] == (int16_t)args[2]) {
1829 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1830 } else {
1831 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1833 } else {
1834 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1836 break;
1838 case INDEX_op_div2_i32:
1839 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1840 break;
1841 case INDEX_op_divu2_i32:
1842 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1843 break;
1845 case INDEX_op_shl_i32:
1846 op = RS_SLL;
1847 do_shift32:
1848 if (const_args[2]) {
1849 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1850 } else {
1851 tcg_out_sh32(s, op, args[0], args[2], 0);
1853 break;
1854 case INDEX_op_shr_i32:
1855 op = RS_SRL;
1856 goto do_shift32;
1857 case INDEX_op_sar_i32:
1858 op = RS_SRA;
1859 goto do_shift32;
1861 case INDEX_op_rotl_i32:
1862 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1863 if (const_args[2]) {
1864 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1865 } else {
1866 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1868 break;
1869 case INDEX_op_rotr_i32:
1870 if (const_args[2]) {
1871 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1872 TCG_REG_NONE, (32 - args[2]) & 31);
1873 } else {
1874 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1875 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1877 break;
1879 case INDEX_op_ext8s_i32:
1880 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1881 break;
1882 case INDEX_op_ext16s_i32:
1883 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1884 break;
1885 case INDEX_op_ext8u_i32:
1886 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1887 break;
1888 case INDEX_op_ext16u_i32:
1889 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1890 break;
1892 OP_32_64(bswap16):
1893 /* The TCG bswap definition requires bits 0-47 already be zero.
1894 Thus we don't need the G-type insns to implement bswap16_i64. */
1895 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1896 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1897 break;
1898 OP_32_64(bswap32):
1899 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1900 break;
1902 case INDEX_op_add2_i32:
1903 if (const_args[4]) {
1904 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1905 } else {
1906 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1908 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1909 break;
1910 case INDEX_op_sub2_i32:
1911 if (const_args[4]) {
1912 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1913 } else {
1914 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1916 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1917 break;
1919 case INDEX_op_br:
1920 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
1921 break;
1923 case INDEX_op_brcond_i32:
1924 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1925 args[1], const_args[1], arg_label(args[3]));
1926 break;
1927 case INDEX_op_setcond_i32:
1928 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1929 args[2], const_args[2]);
1930 break;
1931 case INDEX_op_movcond_i32:
1932 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1933 args[2], const_args[2], args[3]);
1934 break;
1936 case INDEX_op_qemu_ld_i32:
1937 /* ??? Technically we can use a non-extending instruction. */
1938 case INDEX_op_qemu_ld_i64:
1939 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
1940 break;
1941 case INDEX_op_qemu_st_i32:
1942 case INDEX_op_qemu_st_i64:
1943 tcg_out_qemu_st(s, args[0], args[1], args[2]);
1944 break;
1946 case INDEX_op_ld16s_i64:
1947 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1948 break;
1949 case INDEX_op_ld32u_i64:
1950 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1951 break;
1952 case INDEX_op_ld32s_i64:
1953 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1954 break;
1955 case INDEX_op_ld_i64:
1956 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1957 break;
1959 case INDEX_op_st32_i64:
1960 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1961 break;
1962 case INDEX_op_st_i64:
1963 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1964 break;
1966 case INDEX_op_add_i64:
1967 a0 = args[0], a1 = args[1], a2 = args[2];
1968 if (const_args[2]) {
1969 do_addi_64:
1970 if (a0 == a1) {
1971 if (a2 == (int16_t)a2) {
1972 tcg_out_insn(s, RI, AGHI, a0, a2);
1973 break;
1975 if (facilities & FACILITY_EXT_IMM) {
1976 if (a2 == (int32_t)a2) {
1977 tcg_out_insn(s, RIL, AGFI, a0, a2);
1978 break;
1979 } else if (a2 == (uint32_t)a2) {
1980 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1981 break;
1982 } else if (-a2 == (uint32_t)-a2) {
1983 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1984 break;
1988 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1989 } else if (a0 == a1) {
1990 tcg_out_insn(s, RRE, AGR, a0, a2);
1991 } else {
1992 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1994 break;
1995 case INDEX_op_sub_i64:
1996 a0 = args[0], a1 = args[1], a2 = args[2];
1997 if (const_args[2]) {
1998 a2 = -a2;
1999 goto do_addi_64;
2000 } else {
2001 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
2003 break;
2005 case INDEX_op_and_i64:
2006 if (const_args[2]) {
2007 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2008 } else {
2009 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2011 break;
2012 case INDEX_op_or_i64:
2013 if (const_args[2]) {
2014 tgen64_ori(s, args[0], args[2]);
2015 } else {
2016 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2018 break;
2019 case INDEX_op_xor_i64:
2020 if (const_args[2]) {
2021 tgen64_xori(s, args[0], args[2]);
2022 } else {
2023 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2025 break;
2027 case INDEX_op_neg_i64:
2028 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2029 break;
2030 case INDEX_op_bswap64_i64:
2031 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2032 break;
2034 case INDEX_op_mul_i64:
2035 if (const_args[2]) {
2036 if (args[2] == (int16_t)args[2]) {
2037 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2038 } else {
2039 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2041 } else {
2042 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2044 break;
    case INDEX_op_div2_i64:
        /* ??? This definition needlessly sign-extends the dividend into
           R3; but since we always produce both quotient and remainder
           anyway, using INDEX_op_div_i64 instead would require jumping
           through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
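    /* There is no rotate-right instruction; rotate right by C is emitted
       as RLLG (rotate left) by (64 - C) & 63 -- e.g. a right-rotate by 8
       becomes a left-rotate by 56 -- or by a negated count register.  */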
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate count.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;
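
    /* For the double-word add/sub below, the low-part instruction (ALGFI,
       SLGFI, ALGR or SLGR) sets the carry/borrow flag, which ALCGR (add
       logical with carry) or SLBGR (subtract logical with borrow) then
       folds into the high part.  */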
    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
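
/* Register constraints used below: "r" is any general register, "L" one
   that survives the qemu_ld/st helper call setup, "a" and "b" pin an
   operand to R2 and R3 for the implicit-register-pair instructions, and
   "0"/"1" alias an output with the matching input.  The capital-letter
   constant constraints correspond to the TCG_CT_CONST_* classes defined
   at the top of this file.  */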
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { -1 },
};
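
/* Populate the file-scope "facilities" bitmap via STORE FACILITY LIST
   EXTENDED, so that code generation above can test for optional features
   such as FACILITY_EXT_IMM.  */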
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe it
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0");
        register void *r1 __asm__("1");
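
        /* STFLE is encoded by hand in case the assembler doesn't know it:
           0xb2b0 is the opcode and 0x1000 encodes base register %r1 with
           displacement 0.  On input, R0 holds the number of doublewords
           to store minus one (zero here, i.e. one doubleword); the
           facility bits are stored at the address in R1.  */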
        /* stfle 0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}

static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we had better avoid
       it for now.  */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}

#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
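
/* The frame thus covers the ABI-mandated register save and back-chain
   area (TCG_TARGET_CALL_STACK_OFFSET, 160 bytes on s390x), outgoing
   arguments for calls to TCG helpers, and a scratch buffer for TCG
   temporaries.  */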

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
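
    /* In the user-only configuration below, a guest_base value under
       0x80000 fits in the signed 20-bit displacement of the
       long-displacement memory instructions and costs nothing; larger
       values are instead kept in a reserved register.  */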
#ifndef CONFIG_SOFTMMU
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2-byte uleb128-encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
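
/* Two uleb128 bytes carry 14 payload bits: the first byte holds bits 0-6
   with the continuation bit (0x80) set, the second holds bits 7-13.
   Hence the check above, and the two-byte encoding of FRAME_SIZE in
   fde_def_cfa below.  */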

#define ELF_HOST_MACHINE  EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};
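
/* Each fde_reg_ofs pair is DW_CFA_offset (0x80 | regno) followed by a
   uleb128 factored offset; with data_align = 8, "0x86, 6" records %r6
   saved at CFA + 48, matching the stmg in the prologue above.  */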

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}