/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
static const int tcg_target_reg_alloc_order[] = {
    /* ... */
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};
/* True iff val fits a signed field of width BITS.  */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
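
/* For instance, values in [-1024, 1023] fit an 11-bit field: the left
   shift discards the high bits, and the arithmetic right shift restores
   the original value only when those bits were pure sign extension.  */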
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        /* Let this be handled by other ops.  */
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
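
/* The test works by adding the lowest set bit to the mask: a single
   contiguous run of ones (possibly touching bit 0 or bit 31) collapses
   to a lone carry bit or to zero, so the power-of-two check succeeds.
   A mask with two separate runs fails, as depi can only deposit one
   contiguous field of ones.  */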
/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
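
/* PA-RISC stores the sign bit of "low sign extended" immediates in the
   least significant bit of the field, so low_sign_ext moves the sign of
   VAL into bit 0 and shifts the magnitude up by one.  */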
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}
static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
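
/* The reassemble_* helpers scatter a contiguous displacement into the
   permuted bit positions that PA-RISC branch and LDIL-class instructions
   use for their immediate fields.  */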
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)
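
/* Instruction words are assembled by OR-ing an opcode with its operand
   fields; e.g. INSN_LDO | INSN_R1(t) | INSN_R2(b) | INSN_IM14(d)
   encodes "ldo d(b),t".  */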
#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD	(INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC	(INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI	(INSN_OP(0x2d))
#define INSN_ADDIL	(INSN_OP(0x0a))
#define INSN_ADDL	(INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND	(INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM	(INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR	(INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR	(INSN_OP(0x24))
#define INSN_DEP	(INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI	(INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS	(INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU	(INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL	(INSN_OP(0x08))
#define INSN_LDO	(INSN_OP(0x0d))
#define INSN_MTCTL	(INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR		(INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD	(INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB	(INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB	(INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI	(INSN_OP(0x25))
#define INSN_VEXTRS	(INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU	(INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD	(INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR	(INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP	(INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP	(INSN_OP(0x35) | INSN_EXT3SH(0))
#define INSN_BL		(INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N	(INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR	(INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV		(INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N	(INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4	(INSN_OP(0x39) | (1 << 13))
#define INSN_LDB	(INSN_OP(0x10))
#define INSN_LDH	(INSN_OP(0x11))
#define INSN_LDW	(INSN_OP(0x12))
#define INSN_LDWM	(INSN_OP(0x13))
#define INSN_FLDDS	(INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX	(INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX	(INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX	(INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB	(INSN_OP(0x18))
#define INSN_STH	(INSN_OP(0x19))
#define INSN_STW	(INSN_OP(0x1a))
#define INSN_STWM	(INSN_OP(0x1b))
#define INSN_FSTDS	(INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
#define INSN_COMBT	(INSN_OP(0x20))
#define INSN_COMBF	(INSN_OP(0x22))
#define INSN_COMIBT	(INSN_OP(0x21))
#define INSN_COMIBF	(INSN_OP(0x23))
/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);
static void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}
/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}
static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}
static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}
/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}
static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}
static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}
/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}
/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}
static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}
static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}
static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
              | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
}
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                  | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}
static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}
static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}
static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}
static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}
static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}
static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}
static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}
static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}
static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    assert(count < 32);
    tcg_out_shd(s, ret, arg, arg, count);
}
static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);  /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);             /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);      /* ret =  ..BA */
}
static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                                 /* arg  = ABCD */
    tcg_out_rotri(s, temp, arg, 16);             /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);           /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);           /* ret  = DCBA */
}
static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}
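
/* Note that on hppa a function pointer may address a procedure
   descriptor (plabel) rather than code; __canonicalize_funcptr_for_compare
   recovers the underlying code address, which is what BL/BLE need.  */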
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}
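
/* XMPYU is the FPU's 32x32->64 unsigned multiply, which is why the
   operands make a round trip through the stack temp slot: PA-RISC has
   no general-register multiply instruction.  */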
static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}
static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}
static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}
static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc & 3, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do the add
       explicitly.  Careful to avoid R20, which is used for the bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        tcg_target_long hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;

        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
        tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
        tcg_out_addi(s, TCG_REG_RET0, lo);
        return;
    }

    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif

    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save.  */
    TCG_REG_R18
};
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}