tcg/hppa/tcg-target.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#if TCG_TARGET_REG_BITS != 32
#error unsupported
#endif

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS.  */
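/* For example, bits == 11 accepts the signed range -1024..1023, which is
   the width of the 11-bit immediate fields used by ADDI/SUBI below.  */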
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
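/* In other words, the accepted masks are a single contiguous run of ones:
   adding the lowest set bit collapses that run into a single carry bit, so
   the sum is a power of two (or zero) exactly when the mask is one run.  */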
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
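/* PA-RISC encodes small immediates with "low sign extension": the sign bit
   is stored in the least significant bit of the field, below the value bits.
   For example, low_sign_ext(5, 5) == 0x0a and low_sign_ext(-1, 5) == 0x1f.  */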

static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
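/* The three helpers above scatter a 12-, 17- or 21-bit immediate into the
   non-contiguous bit positions that the PA-RISC branch and LDIL/ADDIL
   instruction formats use for their displacement and immediate fields.  */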

/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    intptr_t pcrel;

    value += addend;
    pcrel = (value - ((intptr_t)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(const void *);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
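/* Load a 32-bit constant: a value that fits in the signed 14-bit LDO field
   is materialized with a single LDO from %r0; otherwise LDIL deposits the
   high 21 bits (arg >> 11) and an optional LDO adds the low 11 bits.  */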
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}

static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}

/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
}

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert (ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
    }
}

static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);    /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);               /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);        /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                           /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);       /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);     /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);     /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, const void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}

static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
                     INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}

static const uint8_t tcg_cond_to_cmp_cond[] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};

static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
        tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
        break;
    default:
        tcg_out_brcond(s, tcg_high_cond(cond), ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    /* Note that the low parts are fully consumed before scratch is set.  */
    if (ret != ah && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LT:
    case TCG_COND_LTU:
        /* Optimize compares with low part zero.  */
        if (bl == 0) {
            tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
            return;
        }
        /* FALLTHRU */

    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond(cond)),
                       TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;

    default:
        tcg_abort();
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}

static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const,
                            TCGArg v1, int v1const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
    if (v1const) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
    }
}

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}

static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
{
    if (argno < 4) {
        if (vconst) {
            tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        }
    } else {
        if (vconst && v != 0) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
            v = TCG_REG_R20;
        }
        tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
    }
    return argno + 1;
}

static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
{
    /* 64-bit arguments must go in even reg pairs and stack slots.  */
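    /* For example, if argno is 1 on entry it is bumped to 2, so the low word
       lands in the third argument register and the high word in the fourth.  */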
    if (argno & 1) {
        argno++;
    }
    argno = tcg_out_arg_reg32(s, argno, vl, false);
    argno = tcg_out_arg_reg32(s, argno, vh, false);
    return argno;
}
#endif
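
/* The opc values used below encode the access size in the low two bits
   (log2 of the byte count) and request sign extension with bit 2,
   e.g. 1 | 4 is a sign-extended 16-bit load.  */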
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, next, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }

    next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
    switch (opc) {
    case 0:
        tcg_out_andi(s, next, datalo_reg, 0xff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 1:
        tcg_out_andi(s, next, datalo_reg, 0xffff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 2:
        argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
        break;
    case 3:
        argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
        break;
    default:
        tcg_abort();
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
                      TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
                        args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            tcg_out_depi(s, args[0], args[2], args[3], args[4]);
        } else {
            tcg_out_dep(s, args[0], args[2], args[3], args[4]);
        }
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    /* ??? We can actually support a signed 14-bit arg3, but we
       only have existing constraints for a signed 11-bit.  */
    { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env.  */
    TCG_REG_R18
};

#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
                     + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
                     + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
                     + CPU_TEMP_BUF_NLONGS * sizeof(long) \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
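/* The frame thus covers the fixed outgoing-argument area, the callee-saved
   registers stored by the prologue below, and the TCG temporary buffer,
   rounded up to the required stack alignment.  */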

static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    frame_size = FRAME_SIZE;

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Record the location of the TCG temps.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_ret_ofs[3];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE  EM_PARISC
#define ELF_HOST_FLAGS    EFA_PARISC_1_1

/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
   and other extensions.  We don't really care, but if we don't set this
   to *something* then the object file won't be properly matched.  */
#define ELF_OSABI         ELFOSABI_LINUX

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 1,
    .cie.return_column = 2,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        0x12, 30,                       /* DW_CFA_def_cfa_sf sp, ... */
        (-FRAME_SIZE & 0x7f) | 0x80,    /* ... sleb128 -FRAME_SIZE */
        (-FRAME_SIZE >> 7) & 0x7f
    },
    .fde_ret_ofs = {
        0x11, 2, (-20 / 4) & 0x7f       /* DW_CFA_offset_extended_sf r2, 20 */
    },
    .fde_reg_ofs = {
        /* This must match the ordering in tcg_target_callee_save_regs.  */
        0x80 + 4, 0,                    /* DW_CFA_offset r4, 0 */
        0x80 + 5, 4,                    /* DW_CFA_offset r5, 4 */
        0x80 + 6, 8,                    /* DW_CFA_offset r6, 8 */
        0x80 + 7, 12,                   /* ... */
        0x80 + 8, 16,
        0x80 + 9, 20,
        0x80 + 10, 24,
        0x80 + 11, 28,
        0x80 + 12, 32,
        0x80 + 13, 36,
        0x80 + 14, 40,
        0x80 + 15, 44,
        0x80 + 16, 48,
        0x80 + 17, 52,
        0x80 + 18, 56,
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}