/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
static const int tcg_target_reg_alloc_order[] = {
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};
/* True iff val fits a signed field of width BITS.  */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
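/* Illustrative example (not part of the original comments): with a 32-bit
   tcg_target_long, check_fit_tl(val, 12) shifts val left by 20 and then
   arithmetically back right by 20, so only the low 12 bits survive,
   sign-extended.  val = 0x7ff (2047) round-trips unchanged and is accepted,
   while val = 0x800 (2048) comes back as -2048 and is rejected.  */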
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}
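/* For illustration (assuming the reconstruction of or_mask_p above):
   or_mask_p accepts masks such as 0x000000ff or 0xffff0000, i.e. a single
   contiguous run of set bits, and rejects e.g. 0x00ff00ff.  and_mask_p is
   simply the complement view: 0xffffff00 and 0x0000ffff are accepted,
   0xff00ff00 is not.  */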
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
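/* Worked example: PA-RISC stores "low sign extended" immediates with the
   sign bit in the least significant bit of the field and the magnitude above
   it.  low_sign_ext(5, 5) yields 0x0a (magnitude shifted up, sign bit clear)
   and low_sign_ext(-1, 5) yields 0x1f (all magnitude bits set, sign bit set).  */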
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
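/* The reassemble_12/17/21 helpers model PA-RISC's permuted immediate fields:
   branch displacements and LDIL/ADDIL immediates are not stored as one
   contiguous bit string in the instruction word, so the value has to be
   scattered into the positions the hardware expects.  For instance, in
   reassemble_12 the sign bit of the displacement (bit 11 of the input) lands
   in instruction bit 0 and the low ten bits are placed starting at bit 3.  */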
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    *pct_str = ct_str + 1;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)
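/* As a sketch of how these field macros combine (illustrative only): an
   "addl %r26,%r25,%r20" would be emitted roughly as
       tcg_out32(s, INSN_ADDL | INSN_T(TCG_REG_R20)
                    | INSN_R1(TCG_REG_R26) | INSN_R2(TCG_REG_R25));
   i.e. the major opcode comes from INSN_OP, the sub-opcode from one of the
   INSN_EXTn fields (see the opcode patterns defined below), and the register
   numbers are dropped into the T/R1/R2 fields.  */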
#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))
/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(const void *);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
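/* Illustration: a constant such as 0x12345678 does not fit in the 14-bit LDO
   immediate, so it is built in two steps under the 21/11-bit split above:
   LDIL puts 0x12345678 >> 11 into the high part of the register, and the
   following LDO adds the low 11 bits (0x678).  */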
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}
static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}
static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}
static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}
/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}
/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}
static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}
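/* Note on the bit-numbering convention used by tcg_out_extr/dep/depi above:
   the PA-RISC manuals number bit 0 as the most significant bit, while TCG
   uses the usual little-endian numbering.  The "31 - ofs" conversions
   translate between the two; for example, depositing an 8-bit field at TCG
   offset 16 becomes position 31 - 16 = 15 in the instruction encoding.  */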
static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}
static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}
static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
}
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}
static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);   /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);              /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);       /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                          /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);      /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);    /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);    /* ret =  DCBA */
}
static void tcg_out_call(TCGContext *s, const void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}
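/* Rough sketch of the call strategy above: __canonicalize_funcptr_for_compare
   (from libgcc) turns a possible function descriptor into the actual code
   address.  If the PC-relative displacement fits in 17 bits a single BL,n
   suffices; otherwise the target is formed in %r20 with LDIL and called via
   BLE, after which %rp is recovered from %r31, the register BLE uses as its
   link register.  */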
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
                     INSN_LDW);
    }
}
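/* Why the FPU dance above: 32-bit PA-RISC has no general-register multiply
   instruction, so the operands are bounced through the stack temp slot into
   the two halves of %fr22, multiplied with XMPYU (the hard-coded word
   0x3ad64796 encodes "xmpyu fr22r,fr22,fr22"), and the 64-bit product is
   read back from the same stack slot.  */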
static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
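/* The table above maps each TCG condition onto a PA-RISC compare condition
   plus an optional COND_FALSE flag.  COND_FALSE selects the negated form of
   the comparison (the "f" variants such as COMBF/COMIBF, or the bit-12 negate
   in COMCLR below), which is how conditions like NE and GE are expressed
   using only the EQ/LT/LE/LTU/LEU primitives.  */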
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}
static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}
static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}
static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
   mmu_idx) */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
   int mmu_idx) */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address to
       catch unaligned accesses and force them onto the slow path.  Do this
       computation after having issued the load from the TLB slot to give the
       load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
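/* Rough shape of the code emitted above for a 32-bit guest: an EXTRU pulls
   the TLB index out of the address, it is scaled and added to env to get the
   address of the TLB entry, the tag (addr_read or addr_write) is loaded, the
   guest address is masked down to its page (plus alignment bits), and a
   compare-and-branch to lab_miss is emitted if the two differ.  The
   64-bit-guest variant also loads and compares the high word.  */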
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc & 3, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

#ifdef CONFIG_TCG_PASS_AREG0
    /* XXX/FIXME: suboptimal */
    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
                tcg_target_call_iarg_regs[1]);
    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
                tcg_target_call_iarg_regs[0]);
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
                TCG_AREG0);
#endif
    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

#ifdef CONFIG_TCG_PASS_AREG0
    /* XXX/FIXME: suboptimal */
    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
                tcg_target_call_iarg_regs[2]);
    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
                tcg_target_call_iarg_regs[1]);
    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
                tcg_target_call_iarg_regs[0]);
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
                TCG_AREG0);
#endif
    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do the add
       explicitly.  Careful to avoid R20, which is used for the bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        tcg_target_long hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            tcg_out_depi(s, args[0], args[2], args[3], args[4]);
        } else {
            tcg_out_dep(s, args[0], args[2], args[3], args[4]);
        }
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif

    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env.  */
    TCG_REG_R18
};
#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
                     + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
                     + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
                     + CPU_TEMP_BUF_NLONGS * sizeof(long) \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
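/* FRAME_SIZE is thus the fixed outgoing-argument area plus the callee-saved
   registers plus the TCG temp buffer, rounded up to the stack alignment.
   Recall that the PA-RISC stack grows upward, which is why the prologue
   below allocates the frame with STWM using a positive frame_size and then
   addresses the save area at negative offsets from the updated stack
   pointer.  */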
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    frame_size = FRAME_SIZE;

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Record the location of the TCG temps.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t ret_ofs[3];
    uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
#define ELF_HOST_MACHINE  EM_PARISC
#define ELF_HOST_FLAGS    EFA_PARISC_1_1

/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
   and other extensions.  We don't really care, but if we don't set this
   to *something* then the object file won't be properly matched.  */
#define ELF_OSABI         ELFOSABI_LINUX
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 1,
    .cie.return_column = 2,

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
        0x12, 30,                       /* DW_CFA_def_cfa_sf sp, ... */
        (-FRAME_SIZE & 0x7f) | 0x80,    /* ... sleb128 -FRAME_SIZE */
        (-FRAME_SIZE >> 7) & 0x7f
    },
    .fde.ret_ofs = {
        0x11, 2, (-20 / 4) & 0x7f       /* DW_CFA_offset_extended_sf r2, 20 */
    },
    .fde.reg_ofs = {
        /* This must match the ordering in tcg_target_callee_save_regs.  */
        0x80 + 4, 0,                    /* DW_CFA_offset r4, 0 */
        0x80 + 5, 4,                    /* DW_CFA_offset r5, 4 */
        0x80 + 6, 8,                    /* DW_CFA_offset r6, 8 */
        0x80 + 7, 12,                   /* ... */
        0x80 + 8, 16,
        0x80 + 9, 20,
        0x80 + 10, 24,
        0x80 + 11, 28,
        0x80 + 12, 32,
        0x80 + 13, 36,
        0x80 + 14, 40,
        0x80 + 15, 44,
        0x80 + 16, 48,
        0x80 + 17, 52,
        0x80 + 18, 56,
    }
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}