/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-ldst.h"

#if defined _CALL_DARWIN || defined __APPLE__
#define TCG_TARGET_CALL_DARWIN
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS   1
#endif

/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif
/* For the 64-bit target, we don't like the 5 insn sequence needed to build
   full 64-bit addresses.  Better to have a base register to which we can
   apply a 32-bit displacement.

   There are generally three items of interest:
   (1) helper functions in the main executable,
   (2) TranslationBlock data structures,
   (3) the return address in the epilogue.

   For user-only, we USE_STATIC_CODE_GEN_BUFFER, so the code_gen_buffer
   will be inside the main executable, and thus near enough to make a
   pointer to the epilogue be within 2GB of all helper functions.

   For softmmu, we'll let the kernel choose the address of code_gen_buffer,
   and odds are it'll be somewhere close to the main malloc arena, and so
   a pointer to the epilogue will be within 2GB of the TranslationBlocks.

   For --enable-pie, everything will be kinda near everything else,
   somewhere in high memory.

   Thus we choose to keep the return address in a call-saved register.  */
#define TCG_REG_RA     TCG_REG_R31
#define USE_REG_RA     (TCG_TARGET_REG_BITS == 64)

/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)
#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000

static tcg_insn_unit *tb_ret_addr;

static bool have_isa_2_06;
#define HAVE_ISA_2_06  have_isa_2_06
#define HAVE_ISEL      have_isa_2_06

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG 30
#else
#define TCG_GUEST_BASE_REG 0
#endif
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R10,  /* call clobbered, arguments */

static const int tcg_target_call_iarg_regs[] = {

static const int tcg_target_call_oarg_regs[] = {

static const int tcg_target_callee_save_regs[] = {
#ifdef TCG_TARGET_CALL_DARWIN
    TCG_REG_R27, /* currently used for the global env */
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}
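/* Worked example (editor's illustration, not from the original source):
   B and BC direct branches carry a signed 26-bit byte displacement (a
   24-bit word offset), so in_range_b(0x1ffffff) is true while
   in_range_b(0x2000000) is false; direct branches reach roughly +/-32MB. */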
static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(in_range_b(disp));
    return disp & 0x3fffffc;
}

static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target);
}

static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target);
}

static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0x3fffffc;
    tcg_out32(s, insn | retrans);
}

static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0xfffc;
    tcg_out32(s, insn | retrans);
}
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_insn_unit *target = (tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, target);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, target);
        break;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }
    return 0;
}
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))

#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LDX    XO31( 21)
#define LWAX   XO31(341)

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define TRAP   (TW | TO(31))

#define NOP    ORI  /* ori 0,0,0 */

#define RT(r)   ((r)<<21)
#define RS(r)   ((r)<<21)
#define RA(r)   ((r)<<16)
#define RB(r)   ((r)<<11)
#define TO(t)   ((t)<<21)
#define SH(s)   ((s)<<11)
#define MB(b)   ((b)<<6)
#define ME(e)   ((e)<<1)
#define BO(o)   ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b)  (1 << (19 - (b)))

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)     ((n)<<23)
#define BI(n, c)  (((c)+((n)*4))<<16)
#define BT(n, c)  (((c)+((n)*4))<<21)
#define BA(n, c)  (((c)+((n)*4))<<16)
#define BB(n, c)  (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (ret != arg) {
        tcg_out32(s, OR | SAB(arg, ret, arg));
    }
}

static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}
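/* Editor's note (illustration, not in the original source): for example,
   tcg_out_rld(s, RLDICL, dst, src, 0, 32) rotates by 0 and masks off the
   high 32 bits, which is exactly how tcg_out_ext32u() below zero-extends. */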
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
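/* Worked example (editor's illustration, not from the original source):
   tcg_out_movi32(s, ret, 0x12345678) emits
       addis ret, 0, 0x1234
       ori   ret, ret, 0x5678
   whereas a 16-bit signed constant such as -5 needs only "addi ret, 0, -5". */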
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high;

        if (USE_REG_RA) {
            intptr_t diff = arg - (intptr_t)tb_ret_addr;
            if (diff == (int32_t)diff) {
                tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_RA, diff);
                return;
            }
        }

        high = arg >> 31 >> 1;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}
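/* Worked example (editor's illustration, not from the original source):
   c = 0x00ffff00 is one contiguous run of set bits, so mask_operand()
   accepts it with *mb = 8 and *me = 23 -- the rlwinm mask operands MB=8,
   ME=23 that tcg_out_andi32() below passes to tcg_out_rlw(). */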
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }
    lsb = c & -c;

    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }

    return false;
}
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    assert(TCG_TARGET_REG_BITS == 64);
    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}

static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt != TCG_REG_R0) {
            rs = rt;
            break;
        }
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        tcg_debug_assert(!is_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}
838 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
,
839 TCGReg arg1
, intptr_t arg2
)
843 assert(TCG_TARGET_REG_BITS
== 64 || type
== TCG_TYPE_I32
);
844 if (type
== TCG_TYPE_I32
) {
845 opi
= LWZ
, opx
= LWZX
;
849 tcg_out_mem_long(s
, opi
, opx
, ret
, arg1
, arg2
);
852 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
853 TCGReg arg1
, intptr_t arg2
)
857 assert(TCG_TARGET_REG_BITS
== 64 || type
== TCG_TYPE_I32
);
858 if (type
== TCG_TYPE_I32
) {
859 opi
= STW
, opx
= STWX
;
861 opi
= STD
, opx
= STDX
;
863 tcg_out_mem_long(s
, opi
, opx
, arg
, arg1
, arg2
);
866 static void tcg_out_cmp(TCGContext
*s
, int cond
, TCGArg arg1
, TCGArg arg2
,
867 int const_arg2
, int cr
, TCGType type
)
872 tcg_debug_assert(TCG_TARGET_REG_BITS
== 64 || type
== TCG_TYPE_I32
);
874 /* Simplify the comparisons below wrt CMPI. */
875 if (type
== TCG_TYPE_I32
) {
876 arg2
= (int32_t)arg2
;
883 if ((int16_t) arg2
== arg2
) {
887 } else if ((uint16_t) arg2
== arg2
) {
902 if ((int16_t) arg2
== arg2
) {
917 if ((uint16_t) arg2
== arg2
) {
930 op
|= BF(cr
) | ((type
== TCG_TYPE_I64
) << 21);
933 tcg_out32(s
, op
| RA(arg1
) | (arg2
& 0xffff));
936 tcg_out_movi(s
, type
, TCG_REG_R0
, arg2
);
939 tcg_out32(s
, op
| RA(arg1
) | RB(arg2
));
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}

static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}
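/* Worked example (editor's illustration, not from the original source):
   for src = 5, "addic dst, src, -1" leaves dst = 4 with CA = 1, and
   "subfe dst, dst, src" computes ~4 + 5 + CA = 1; for src = 0 the addic
   produces CA = 0 and the subfe yields 0, so dst ends up as (src != 0). */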
968 static TCGReg
tcg_gen_setcond_xor(TCGContext
*s
, TCGReg arg1
, TCGArg arg2
,
972 if ((uint32_t)arg2
== arg2
) {
973 tcg_out_xori32(s
, TCG_REG_R0
, arg1
, arg2
);
975 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_REG_R0
, arg2
);
976 tcg_out32(s
, XOR
| SAB(arg1
, TCG_REG_R0
, TCG_REG_R0
));
979 tcg_out32(s
, XOR
| SAB(arg1
, TCG_REG_R0
, arg2
));
984 static void tcg_out_setcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
985 TCGArg arg0
, TCGArg arg1
, TCGArg arg2
,
990 assert(TCG_TARGET_REG_BITS
== 64 || type
== TCG_TYPE_I32
);
992 /* Ignore high bits of a potential constant arg2. */
993 if (type
== TCG_TYPE_I32
) {
994 arg2
= (uint32_t)arg2
;
997 /* Handle common and trivial cases before handling anything else. */
1001 tcg_out_setcond_eq0(s
, type
, arg0
, arg1
);
1004 if (TCG_TARGET_REG_BITS
== 64 && type
== TCG_TYPE_I32
) {
1005 tcg_out_ext32u(s
, TCG_REG_R0
, arg1
);
1008 tcg_out_setcond_ne0(s
, arg0
, arg1
);
1011 tcg_out32(s
, NOR
| SAB(arg1
, arg0
, arg1
));
1015 /* Extract the sign bit. */
1016 if (type
== TCG_TYPE_I32
) {
1017 tcg_out_shri32(s
, arg0
, arg1
, 31);
1019 tcg_out_shri64(s
, arg0
, arg1
, 63);
1027 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1028 All other cases below are also at least 3 insns, so speed up the
1029 code generator by not considering them and always using ISEL. */
1033 tcg_out_cmp(s
, cond
, arg1
, arg2
, const_arg2
, 7, type
);
1035 isel
= tcg_to_isel
[cond
];
1037 tcg_out_movi(s
, type
, arg0
, 1);
1039 /* arg0 = (bc ? 0 : 1) */
1040 tab
= TAB(arg0
, 0, arg0
);
1043 /* arg0 = (bc ? 1 : 0) */
1044 tcg_out_movi(s
, type
, TCG_REG_R0
, 0);
1045 tab
= TAB(arg0
, arg0
, TCG_REG_R0
);
1047 tcg_out32(s
, isel
| tab
);
1053 arg1
= tcg_gen_setcond_xor(s
, arg1
, arg2
, const_arg2
);
1054 tcg_out_setcond_eq0(s
, type
, arg0
, arg1
);
1058 arg1
= tcg_gen_setcond_xor(s
, arg1
, arg2
, const_arg2
);
1059 /* Discard the high bits only once, rather than both inputs. */
1060 if (TCG_TARGET_REG_BITS
== 64 && type
== TCG_TYPE_I32
) {
1061 tcg_out_ext32u(s
, TCG_REG_R0
, arg1
);
1064 tcg_out_setcond_ne0(s
, arg0
, arg1
);
1082 crop
= CRNOR
| BT(7, CR_EQ
) | BA(7, CR_LT
) | BB(7, CR_LT
);
1088 crop
= CRNOR
| BT(7, CR_EQ
) | BA(7, CR_GT
) | BB(7, CR_GT
);
1090 tcg_out_cmp(s
, cond
, arg1
, arg2
, const_arg2
, 7, type
);
1094 tcg_out32(s
, MFOCRF
| RT(TCG_REG_R0
) | FXM(7));
1095 tcg_out_rlw(s
, RLWINM
, arg0
, TCG_REG_R0
, sh
, 31, 31);
1103 static void tcg_out_bc(TCGContext
*s
, int bc
, int label_index
)
1105 TCGLabel
*l
= &s
->labels
[label_index
];
1108 tcg_out32(s
, bc
| reloc_pc14_val(s
->code_ptr
, l
->u
.value_ptr
));
1110 tcg_out_reloc(s
, s
->code_ptr
, R_PPC_REL14
, label_index
, 0);
1111 tcg_out_bc_noaddr(s
, bc
);
1115 static void tcg_out_brcond(TCGContext
*s
, TCGCond cond
,
1116 TCGArg arg1
, TCGArg arg2
, int const_arg2
,
1117 int label_index
, TCGType type
)
1119 tcg_out_cmp(s
, cond
, arg1
, arg2
, const_arg2
, 7, type
);
1120 tcg_out_bc(s
, tcg_to_bc
[cond
], label_index
);
1123 static void tcg_out_movcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
1124 TCGArg dest
, TCGArg c1
, TCGArg c2
, TCGArg v1
,
1125 TCGArg v2
, bool const_c2
)
1127 /* If for some reason both inputs are zero, don't produce bad code. */
1128 if (v1
== 0 && v2
== 0) {
1129 tcg_out_movi(s
, type
, dest
, 0);
1133 tcg_out_cmp(s
, cond
, c1
, c2
, const_c2
, 7, type
);
1136 int isel
= tcg_to_isel
[cond
];
1138 /* Swap the V operands if the operation indicates inversion. */
1145 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1147 tcg_out_movi(s
, type
, TCG_REG_R0
, 0);
1149 tcg_out32(s
, isel
| TAB(dest
, v1
, v2
));
1152 cond
= tcg_invert_cond(cond
);
1154 } else if (dest
!= v1
) {
1156 tcg_out_movi(s
, type
, dest
, 0);
1158 tcg_out_mov(s
, type
, dest
, v1
);
1161 /* Branch forward over one insn */
1162 tcg_out32(s
, tcg_to_bc
[cond
] | 8);
1164 tcg_out_movi(s
, type
, dest
, 0);
1166 tcg_out_mov(s
, type
, dest
, v2
);
1171 static void tcg_out_cmp2(TCGContext
*s
, const TCGArg
*args
,
1172 const int *const_args
)
1174 static const struct { uint8_t bit1
, bit2
; } bits
[] = {
1175 [TCG_COND_LT
] = { CR_LT
, CR_LT
},
1176 [TCG_COND_LE
] = { CR_LT
, CR_GT
},
1177 [TCG_COND_GT
] = { CR_GT
, CR_GT
},
1178 [TCG_COND_GE
] = { CR_GT
, CR_LT
},
1179 [TCG_COND_LTU
] = { CR_LT
, CR_LT
},
1180 [TCG_COND_LEU
] = { CR_LT
, CR_GT
},
1181 [TCG_COND_GTU
] = { CR_GT
, CR_GT
},
1182 [TCG_COND_GEU
] = { CR_GT
, CR_LT
},
1185 TCGCond cond
= args
[4], cond2
;
1186 TCGArg al
, ah
, bl
, bh
;
1187 int blconst
, bhconst
;
1194 blconst
= const_args
[2];
1195 bhconst
= const_args
[3];
1204 tcg_out_cmp(s
, cond
, al
, bl
, blconst
, 6, TCG_TYPE_I32
);
1205 tcg_out_cmp(s
, cond
, ah
, bh
, bhconst
, 7, TCG_TYPE_I32
);
1206 tcg_out32(s
, op
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, CR_EQ
));
1217 bit1
= bits
[cond
].bit1
;
1218 bit2
= bits
[cond
].bit2
;
1219 op
= (bit1
!= bit2
? CRANDC
: CRAND
);
1220 cond2
= tcg_unsigned_cond(cond
);
1222 tcg_out_cmp(s
, cond
, ah
, bh
, bhconst
, 6, TCG_TYPE_I32
);
1223 tcg_out_cmp(s
, cond2
, al
, bl
, blconst
, 7, TCG_TYPE_I32
);
1224 tcg_out32(s
, op
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, bit2
));
1225 tcg_out32(s
, CROR
| BT(7, CR_EQ
) | BA(6, bit1
) | BB(7, CR_EQ
));
1233 static void tcg_out_setcond2(TCGContext
*s
, const TCGArg
*args
,
1234 const int *const_args
)
1236 tcg_out_cmp2(s
, args
+ 1, const_args
+ 1);
1237 tcg_out32(s
, MFOCRF
| RT(TCG_REG_R0
) | FXM(7));
1238 tcg_out_rlw(s
, RLWINM
, args
[0], TCG_REG_R0
, 31, 31, 31);
1241 static void tcg_out_brcond2 (TCGContext
*s
, const TCGArg
*args
,
1242 const int *const_args
)
1244 tcg_out_cmp2(s
, args
, const_args
);
1245 tcg_out_bc(s
, BC
| BI(7, CR_EQ
) | BO_COND_TRUE
, args
[5]);
1248 void ppc_tb_set_jmp_target(uintptr_t jmp_addr
, uintptr_t addr
)
1252 s
.code_buf
= s
.code_ptr
= (tcg_insn_unit
*)jmp_addr
;
1253 tcg_out_b(&s
, 0, (tcg_insn_unit
*)addr
);
1254 flush_icache_range(jmp_addr
, jmp_addr
+ tcg_current_code_size(&s
));
1257 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*target
)
1260 /* Look through the descriptor. If the branch is in range, and we
1261 don't have to spend too much effort on building the toc. */
1262 void *tgt
= ((void **)target
)[0];
1263 uintptr_t toc
= ((uintptr_t *)target
)[1];
1264 intptr_t diff
= tcg_pcrel_diff(s
, tgt
);
1266 if (in_range_b(diff
) && toc
== (uint32_t)toc
) {
1267 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP1
, toc
);
1268 tcg_out_b(s
, LK
, tgt
);
1270 /* Fold the low bits of the constant into the addresses below. */
1271 intptr_t arg
= (intptr_t)target
;
1272 int ofs
= (int16_t)arg
;
1274 if (ofs
+ 8 < 0x8000) {
1279 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP1
, arg
);
1280 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_TMP1
, ofs
);
1281 tcg_out32(s
, MTSPR
| RA(TCG_REG_R0
) | CTR
);
1282 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_REG_TMP1
, ofs
+ SZP
);
1283 tcg_out32(s
, BCCTR
| BO_ALWAYS
| LK
);
1285 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1288 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1289 address, which the callee uses to compute its TOC address. */
1290 /* FIXME: when the branch is in range, we could avoid r12 load if we
1291 knew that the destination uses the same TOC, and what its local
1292 entry point offset is. */
1293 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R12
, (intptr_t)target
);
1295 diff
= tcg_pcrel_diff(s
, target
);
1296 if (in_range_b(diff
)) {
1297 tcg_out_b(s
, LK
, target
);
1299 tcg_out32(s
, MTSPR
| RS(TCG_REG_R12
) | CTR
);
1300 tcg_out32(s
, BCCTR
| BO_ALWAYS
| LK
);
1303 tcg_out_b(s
, LK
, target
);
1307 static const uint32_t qemu_ldx_opc
[16] = {
1314 [MO_BSWAP
| MO_UB
] = LBZX
,
1315 [MO_BSWAP
| MO_UW
] = LHBRX
,
1316 [MO_BSWAP
| MO_UL
] = LWBRX
,
1317 [MO_BSWAP
| MO_Q
] = LDBRX
,
1320 static const uint32_t qemu_stx_opc
[16] = {
1325 [MO_BSWAP
| MO_UB
] = STBX
,
1326 [MO_BSWAP
| MO_UW
] = STHBRX
,
1327 [MO_BSWAP
| MO_UL
] = STWBRX
,
1328 [MO_BSWAP
| MO_Q
] = STDBRX
,
1331 static const uint32_t qemu_exts_opc
[4] = {
1332 EXTSB
, EXTSH
, EXTSW
, 0
1335 #if defined (CONFIG_SOFTMMU)
1336 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1337 * int mmu_idx, uintptr_t ra)
1339 static void * const qemu_ld_helpers
[16] = {
1340 [MO_UB
] = helper_ret_ldub_mmu
,
1341 [MO_LEUW
] = helper_le_lduw_mmu
,
1342 [MO_LEUL
] = helper_le_ldul_mmu
,
1343 [MO_LEQ
] = helper_le_ldq_mmu
,
1344 [MO_BEUW
] = helper_be_lduw_mmu
,
1345 [MO_BEUL
] = helper_be_ldul_mmu
,
1346 [MO_BEQ
] = helper_be_ldq_mmu
,
1349 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1350 * uintxx_t val, int mmu_idx, uintptr_t ra)
1352 static void * const qemu_st_helpers
[16] = {
1353 [MO_UB
] = helper_ret_stb_mmu
,
1354 [MO_LEUW
] = helper_le_stw_mmu
,
1355 [MO_LEUL
] = helper_le_stl_mmu
,
1356 [MO_LEQ
] = helper_le_stq_mmu
,
1357 [MO_BEUW
] = helper_be_stw_mmu
,
1358 [MO_BEUL
] = helper_be_stl_mmu
,
1359 [MO_BEQ
] = helper_be_stq_mmu
,
1362 /* Perform the TLB load and compare. Places the result of the comparison
1363 in CR7, loads the addend of the TLB into R3, and returns the register
1364 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1366 static TCGReg
tcg_out_tlb_read(TCGContext
*s
, TCGMemOp s_bits
,
1367 TCGReg addrlo
, TCGReg addrhi
,
1368 int mem_index
, bool is_read
)
1372 ? offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
)
1373 : offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
));
1374 int add_off
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1375 TCGReg base
= TCG_AREG0
;
1377 /* Extract the page index, shifted into place for tlb index. */
1378 if (TCG_TARGET_REG_BITS
== 64) {
1379 if (TARGET_LONG_BITS
== 32) {
1380 /* Zero-extend the address into a place helpful for further use. */
1381 tcg_out_ext32u(s
, TCG_REG_R4
, addrlo
);
1382 addrlo
= TCG_REG_R4
;
1384 tcg_out_rld(s
, RLDICL
, TCG_REG_R3
, addrlo
,
1385 64 - TARGET_PAGE_BITS
, 64 - CPU_TLB_BITS
);
1389 /* Compensate for very large offsets. */
1390 if (add_off
>= 0x8000) {
1391 /* Most target env are smaller than 32k; none are larger than 64k.
1392 Simplify the logic here merely to offset by 0x7ff0, giving us a
1393 range just shy of 64k. Check this assumption. */
1394 QEMU_BUILD_BUG_ON(offsetof(CPUArchState
,
1395 tlb_table
[NB_MMU_MODES
- 1][1])
1397 tcg_out32(s
, ADDI
| TAI(TCG_REG_TMP1
, base
, 0x7ff0));
1398 base
= TCG_REG_TMP1
;
1403 /* Extraction and shifting, part 2. */
1404 if (TCG_TARGET_REG_BITS
== 32 || TARGET_LONG_BITS
== 32) {
1405 tcg_out_rlw(s
, RLWINM
, TCG_REG_R3
, addrlo
,
1406 32 - (TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
),
1407 32 - (CPU_TLB_BITS
+ CPU_TLB_ENTRY_BITS
),
1408 31 - CPU_TLB_ENTRY_BITS
);
1410 tcg_out_shli64(s
, TCG_REG_R3
, TCG_REG_R3
, CPU_TLB_ENTRY_BITS
);
1413 tcg_out32(s
, ADD
| TAB(TCG_REG_R3
, TCG_REG_R3
, base
));
1415 /* Load the tlb comparator. */
1416 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1417 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_R4
, TCG_REG_R3
, cmp_off
);
1418 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_TMP1
, TCG_REG_R3
, cmp_off
+ 4);
1420 tcg_out_ld(s
, TCG_TYPE_TL
, TCG_REG_TMP1
, TCG_REG_R3
, cmp_off
);
1423 /* Load the TLB addend for use on the fast path. Do this asap
1424 to minimize any load use delay. */
1425 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R3
, TCG_REG_R3
, add_off
);
1427 /* Clear the non-page, non-alignment bits from the address. */
1428 if (TCG_TARGET_REG_BITS
== 32 || TARGET_LONG_BITS
== 32) {
1429 tcg_out_rlw(s
, RLWINM
, TCG_REG_R0
, addrlo
, 0,
1430 (32 - s_bits
) & 31, 31 - TARGET_PAGE_BITS
);
1431 } else if (!s_bits
) {
1432 tcg_out_rld(s
, RLDICR
, TCG_REG_R0
, addrlo
,
1433 0, 63 - TARGET_PAGE_BITS
);
1435 tcg_out_rld(s
, RLDICL
, TCG_REG_R0
, addrlo
,
1436 64 - TARGET_PAGE_BITS
, TARGET_PAGE_BITS
- s_bits
);
1437 tcg_out_rld(s
, RLDICL
, TCG_REG_R0
, TCG_REG_R0
, TARGET_PAGE_BITS
, 0);
1440 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1441 tcg_out_cmp(s
, TCG_COND_EQ
, TCG_REG_R0
, TCG_REG_TMP1
,
1442 0, 7, TCG_TYPE_I32
);
1443 tcg_out_cmp(s
, TCG_COND_EQ
, addrhi
, TCG_REG_R4
, 0, 6, TCG_TYPE_I32
);
1444 tcg_out32(s
, CRAND
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, CR_EQ
));
1446 tcg_out_cmp(s
, TCG_COND_EQ
, TCG_REG_R0
, TCG_REG_TMP1
,
1453 /* Record the context of a call to the out of line helper code for the slow
1454 path for a load or store, so that we can later generate the correct
1456 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOp opc
,
1457 TCGReg datalo_reg
, TCGReg datahi_reg
,
1458 TCGReg addrlo_reg
, TCGReg addrhi_reg
,
1459 int mem_index
, tcg_insn_unit
*raddr
,
1460 tcg_insn_unit
*lptr
)
1462 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1464 label
->is_ld
= is_ld
;
1466 label
->datalo_reg
= datalo_reg
;
1467 label
->datahi_reg
= datahi_reg
;
1468 label
->addrlo_reg
= addrlo_reg
;
1469 label
->addrhi_reg
= addrhi_reg
;
1470 label
->mem_index
= mem_index
;
1471 label
->raddr
= raddr
;
1472 label
->label_ptr
[0] = lptr
;
1475 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1477 TCGMemOp opc
= lb
->opc
;
1478 TCGReg hi
, lo
, arg
= TCG_REG_R3
;
1480 reloc_pc14(lb
->label_ptr
[0], s
->code_ptr
);
1482 tcg_out_mov(s
, TCG_TYPE_PTR
, arg
++, TCG_AREG0
);
1484 lo
= lb
->addrlo_reg
;
1485 hi
= lb
->addrhi_reg
;
1486 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1487 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1490 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1491 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1493 /* If the address needed to be zero-extended, we'll have already
1494 placed it in R4. The only remaining case is 64-bit guest. */
1495 tcg_out_mov(s
, TCG_TYPE_TL
, arg
++, lo
);
1498 tcg_out_movi(s
, TCG_TYPE_I32
, arg
++, lb
->mem_index
);
1499 tcg_out32(s
, MFSPR
| RT(arg
) | LR
);
1501 tcg_out_call(s
, qemu_ld_helpers
[opc
& ~MO_SIGN
]);
1503 lo
= lb
->datalo_reg
;
1504 hi
= lb
->datahi_reg
;
1505 if (TCG_TARGET_REG_BITS
== 32 && (opc
& MO_SIZE
) == MO_64
) {
1506 tcg_out_mov(s
, TCG_TYPE_I32
, lo
, TCG_REG_R4
);
1507 tcg_out_mov(s
, TCG_TYPE_I32
, hi
, TCG_REG_R3
);
1508 } else if (opc
& MO_SIGN
) {
1509 uint32_t insn
= qemu_exts_opc
[opc
& MO_SIZE
];
1510 tcg_out32(s
, insn
| RA(lo
) | RS(TCG_REG_R3
));
1512 tcg_out_mov(s
, TCG_TYPE_REG
, lo
, TCG_REG_R3
);
1515 tcg_out_b(s
, 0, lb
->raddr
);
1518 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1520 TCGMemOp opc
= lb
->opc
;
1521 TCGMemOp s_bits
= opc
& MO_SIZE
;
1522 TCGReg hi
, lo
, arg
= TCG_REG_R3
;
1524 reloc_pc14(lb
->label_ptr
[0], s
->code_ptr
);
1526 tcg_out_mov(s
, TCG_TYPE_PTR
, arg
++, TCG_AREG0
);
1528 lo
= lb
->addrlo_reg
;
1529 hi
= lb
->addrhi_reg
;
1530 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1531 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1534 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1535 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1537 /* If the address needed to be zero-extended, we'll have already
1538 placed it in R4. The only remaining case is 64-bit guest. */
1539 tcg_out_mov(s
, TCG_TYPE_TL
, arg
++, lo
);
1542 lo
= lb
->datalo_reg
;
1543 hi
= lb
->datahi_reg
;
1544 if (TCG_TARGET_REG_BITS
== 32) {
1547 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1550 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1553 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1556 tcg_out_rlw(s
, RLWINM
, arg
++, lo
, 0, 32 - (8 << s_bits
), 31);
1560 if (s_bits
== MO_64
) {
1561 tcg_out_mov(s
, TCG_TYPE_I64
, arg
++, lo
);
1563 tcg_out_rld(s
, RLDICL
, arg
++, lo
, 0, 64 - (8 << s_bits
));
1567 tcg_out_movi(s
, TCG_TYPE_I32
, arg
++, lb
->mem_index
);
1568 tcg_out32(s
, MFSPR
| RT(arg
) | LR
);
1570 tcg_out_call(s
, qemu_st_helpers
[opc
]);
1572 tcg_out_b(s
, 0, lb
->raddr
);
1574 #endif /* SOFTMMU */
1576 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1578 TCGReg datalo
, datahi
, addrlo
, rbase
;
1579 TCGReg addrhi
__attribute__((unused
));
1580 TCGMemOp opc
, s_bits
;
1581 #ifdef CONFIG_SOFTMMU
1583 tcg_insn_unit
*label_ptr
;
1587 datahi
= (TCG_TARGET_REG_BITS
== 32 && is_64
? *args
++ : 0);
1589 addrhi
= (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
? *args
++ : 0);
1591 s_bits
= opc
& MO_SIZE
;
1593 #ifdef CONFIG_SOFTMMU
1595 addrlo
= tcg_out_tlb_read(s
, s_bits
, addrlo
, addrhi
, mem_index
, true);
1597 /* Load a pointer into the current opcode w/conditional branch-link. */
1598 label_ptr
= s
->code_ptr
;
1599 tcg_out_bc_noaddr(s
, BC
| BI(7, CR_EQ
) | BO_COND_FALSE
| LK
);
1602 #else /* !CONFIG_SOFTMMU */
1603 rbase
= GUEST_BASE
? TCG_GUEST_BASE_REG
: 0;
1604 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1605 tcg_out_ext32u(s
, TCG_REG_TMP1
, addrlo
);
1606 addrlo
= TCG_REG_TMP1
;
1610 if (TCG_TARGET_REG_BITS
== 32 && s_bits
== MO_64
) {
1611 if (opc
& MO_BSWAP
) {
1612 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1613 tcg_out32(s
, LWBRX
| TAB(datalo
, rbase
, addrlo
));
1614 tcg_out32(s
, LWBRX
| TAB(datahi
, rbase
, TCG_REG_R0
));
1615 } else if (rbase
!= 0) {
1616 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1617 tcg_out32(s
, LWZX
| TAB(datahi
, rbase
, addrlo
));
1618 tcg_out32(s
, LWZX
| TAB(datalo
, rbase
, TCG_REG_R0
));
1619 } else if (addrlo
== datahi
) {
1620 tcg_out32(s
, LWZ
| TAI(datalo
, addrlo
, 4));
1621 tcg_out32(s
, LWZ
| TAI(datahi
, addrlo
, 0));
1623 tcg_out32(s
, LWZ
| TAI(datahi
, addrlo
, 0));
1624 tcg_out32(s
, LWZ
| TAI(datalo
, addrlo
, 4));
1627 uint32_t insn
= qemu_ldx_opc
[opc
];
1628 if (!HAVE_ISA_2_06
&& insn
== LDBRX
) {
1629 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1630 tcg_out32(s
, LWBRX
| TAB(datalo
, rbase
, addrlo
));
1631 tcg_out32(s
, LWBRX
| TAB(TCG_REG_R0
, rbase
, TCG_REG_R0
));
1632 tcg_out_rld(s
, RLDIMI
, datalo
, TCG_REG_R0
, 32, 0);
1634 tcg_out32(s
, insn
| TAB(datalo
, rbase
, addrlo
));
1636 insn
= qemu_ldx_opc
[opc
& (MO_SIZE
| MO_BSWAP
)];
1637 tcg_out32(s
, insn
| TAB(datalo
, rbase
, addrlo
));
1638 insn
= qemu_exts_opc
[s_bits
];
1639 tcg_out32(s
, insn
| RA(datalo
) | RS(datalo
));
1643 #ifdef CONFIG_SOFTMMU
1644 add_qemu_ldst_label(s
, true, opc
, datalo
, datahi
, addrlo
, addrhi
,
1645 mem_index
, s
->code_ptr
, label_ptr
);
1649 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1651 TCGReg datalo
, datahi
, addrlo
, rbase
;
1652 TCGReg addrhi
__attribute__((unused
));
1653 TCGMemOp opc
, s_bits
;
1654 #ifdef CONFIG_SOFTMMU
1656 tcg_insn_unit
*label_ptr
;
1660 datahi
= (TCG_TARGET_REG_BITS
== 32 && is_64
? *args
++ : 0);
1662 addrhi
= (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
? *args
++ : 0);
1664 s_bits
= opc
& MO_SIZE
;
1666 #ifdef CONFIG_SOFTMMU
1668 addrlo
= tcg_out_tlb_read(s
, s_bits
, addrlo
, addrhi
, mem_index
, false);
1670 /* Load a pointer into the current opcode w/conditional branch-link. */
1671 label_ptr
= s
->code_ptr
;
1672 tcg_out_bc_noaddr(s
, BC
| BI(7, CR_EQ
) | BO_COND_FALSE
| LK
);
1675 #else /* !CONFIG_SOFTMMU */
1676 rbase
= GUEST_BASE
? TCG_GUEST_BASE_REG
: 0;
1677 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1678 tcg_out_ext32u(s
, TCG_REG_TMP1
, addrlo
);
1679 addrlo
= TCG_REG_TMP1
;
1683 if (TCG_TARGET_REG_BITS
== 32 && s_bits
== MO_64
) {
1684 if (opc
& MO_BSWAP
) {
1685 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1686 tcg_out32(s
, STWBRX
| SAB(datalo
, rbase
, addrlo
));
1687 tcg_out32(s
, STWBRX
| SAB(datahi
, rbase
, TCG_REG_R0
));
1688 } else if (rbase
!= 0) {
1689 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1690 tcg_out32(s
, STWX
| SAB(datahi
, rbase
, addrlo
));
1691 tcg_out32(s
, STWX
| SAB(datalo
, rbase
, TCG_REG_R0
));
1693 tcg_out32(s
, STW
| TAI(datahi
, addrlo
, 0));
1694 tcg_out32(s
, STW
| TAI(datalo
, addrlo
, 4));
1697 uint32_t insn
= qemu_stx_opc
[opc
];
1698 if (!HAVE_ISA_2_06
&& insn
== STDBRX
) {
1699 tcg_out32(s
, STWBRX
| SAB(datalo
, rbase
, addrlo
));
1700 tcg_out32(s
, ADDI
| TAI(TCG_REG_TMP1
, addrlo
, 4));
1701 tcg_out_shri64(s
, TCG_REG_R0
, datalo
, 32);
1702 tcg_out32(s
, STWBRX
| SAB(TCG_REG_R0
, rbase
, TCG_REG_TMP1
));
1704 tcg_out32(s
, insn
| SAB(datalo
, rbase
, addrlo
));
1708 #ifdef CONFIG_SOFTMMU
1709 add_qemu_ldst_label(s
, false, opc
, datalo
, datahi
, addrlo
, addrhi
,
1710 mem_index
, s
->code_ptr
, label_ptr
);
/* Parameters for function call generation, used in tcg.c.  */
#define TCG_TARGET_STACK_ALIGN       16
#define TCG_TARGET_EXTEND_ARGS       1

#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(TCG_TARGET_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled abi"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE     ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
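/* Frame layout sketch (editor's illustration, not from the original source),
   from the stack pointer upward:

       sp + 0                             link area / back chain
       sp + TCG_TARGET_CALL_STACK_OFFSET  TCG_STATIC_CALL_ARGS_SIZE bytes
       ...                                CPU_TEMP_BUF_SIZE scratch
       sp + REG_SAVE_BOT                  callee-saved register save area
       sp + FRAME_SIZE                    caller's frame

   FRAME_SIZE is rounded up to a multiple of TCG_TARGET_STACK_ALIGN. */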
1755 static void tcg_target_qemu_prologue(TCGContext
*s
)
1760 void **desc
= (void **)s
->code_ptr
;
1761 desc
[0] = desc
+ 2; /* entry point */
1762 desc
[1] = 0; /* environment pointer */
1763 s
->code_ptr
= (void *)(desc
+ 2); /* skip over descriptor */
1766 tcg_set_frame(s
, TCG_REG_CALL_STACK
, REG_SAVE_BOT
- CPU_TEMP_BUF_SIZE
,
1770 tcg_out32(s
, MFSPR
| RT(TCG_REG_R0
) | LR
);
1771 tcg_out32(s
, (SZR
== 8 ? STDU
: STWU
)
1772 | SAI(TCG_REG_R1
, TCG_REG_R1
, -FRAME_SIZE
));
1774 for (i
= 0; i
< ARRAY_SIZE(tcg_target_callee_save_regs
); ++i
) {
1775 tcg_out_st(s
, TCG_TYPE_REG
, tcg_target_callee_save_regs
[i
],
1776 TCG_REG_R1
, REG_SAVE_BOT
+ i
* SZR
);
1778 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_R1
, FRAME_SIZE
+LR_OFFSET
);
1780 #ifdef CONFIG_USE_GUEST_BASE
1782 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, GUEST_BASE
);
1783 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
1787 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
1788 tcg_out32(s
, MTSPR
| RS(tcg_target_call_iarg_regs
[1]) | CTR
);
1792 /* Make the caller load the value as the TOC into R2. */
1793 tb_ret_addr
= s
->code_ptr
+ 2;
1794 desc
[1] = tb_ret_addr
;
1795 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_RA
, TCG_REG_R2
);
1796 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1797 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1798 /* Compute from the incoming R12 value. */
1799 tb_ret_addr
= s
->code_ptr
+ 2;
1800 tcg_out32(s
, ADDI
| TAI(TCG_REG_RA
, TCG_REG_R12
,
1801 tcg_ptr_byte_diff(tb_ret_addr
, s
->code_buf
)));
1802 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1804 /* Reserve max 5 insns for the constant load. */
1805 tb_ret_addr
= s
->code_ptr
+ 6;
1806 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_RA
, (intptr_t)tb_ret_addr
);
1807 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1808 while (s
->code_ptr
< tb_ret_addr
) {
1813 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1814 tb_ret_addr
= s
->code_ptr
;
1818 assert(tb_ret_addr
== s
->code_ptr
);
1820 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_R1
, FRAME_SIZE
+LR_OFFSET
);
1821 for (i
= 0; i
< ARRAY_SIZE(tcg_target_callee_save_regs
); ++i
) {
1822 tcg_out_ld(s
, TCG_TYPE_REG
, tcg_target_callee_save_regs
[i
],
1823 TCG_REG_R1
, REG_SAVE_BOT
+ i
* SZR
);
1825 tcg_out32(s
, MTSPR
| RS(TCG_REG_R0
) | LR
);
1826 tcg_out32(s
, ADDI
| TAI(TCG_REG_R1
, TCG_REG_R1
, FRAME_SIZE
));
1827 tcg_out32(s
, BCLR
| BO_ALWAYS
);
1830 static void tcg_out_op(TCGContext
*s
, TCGOpcode opc
, const TCGArg
*args
,
1831 const int *const_args
)
1837 case INDEX_op_exit_tb
:
1839 ptrdiff_t disp
= tcg_pcrel_diff(s
, tb_ret_addr
);
1841 /* If we can use a direct branch, otherwise use the value in RA.
1842 Note that the direct branch is always forward. If it's in
1843 range now, it'll still be in range after the movi. Don't
1844 bother about the 20 bytes where the test here fails but it
1845 would succeed below. */
1846 if (!in_range_b(disp
)) {
1847 tcg_out32(s
, MTSPR
| RS(TCG_REG_RA
) | CTR
);
1848 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R3
, args
[0]);
1849 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1853 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R3
, args
[0]);
1854 tcg_out_b(s
, 0, tb_ret_addr
);
1856 case INDEX_op_goto_tb
:
1857 if (s
->tb_jmp_offset
) {
1858 /* Direct jump method. */
1859 s
->tb_jmp_offset
[args
[0]] = tcg_current_code_size(s
);
1862 /* Indirect jump method. */
1865 s
->tb_next_offset
[args
[0]] = tcg_current_code_size(s
);
1869 TCGLabel
*l
= &s
->labels
[args
[0]];
1872 tcg_out_b(s
, 0, l
->u
.value_ptr
);
1874 tcg_out_reloc(s
, s
->code_ptr
, R_PPC_REL24
, args
[0], 0);
1875 tcg_out_b_noaddr(s
, B
);
1879 case INDEX_op_ld8u_i32
:
1880 case INDEX_op_ld8u_i64
:
1881 tcg_out_mem_long(s
, LBZ
, LBZX
, args
[0], args
[1], args
[2]);
1883 case INDEX_op_ld8s_i32
:
1884 case INDEX_op_ld8s_i64
:
1885 tcg_out_mem_long(s
, LBZ
, LBZX
, args
[0], args
[1], args
[2]);
1886 tcg_out32(s
, EXTSB
| RS(args
[0]) | RA(args
[0]));
1888 case INDEX_op_ld16u_i32
:
1889 case INDEX_op_ld16u_i64
:
1890 tcg_out_mem_long(s
, LHZ
, LHZX
, args
[0], args
[1], args
[2]);
1892 case INDEX_op_ld16s_i32
:
1893 case INDEX_op_ld16s_i64
:
1894 tcg_out_mem_long(s
, LHA
, LHAX
, args
[0], args
[1], args
[2]);
1896 case INDEX_op_ld_i32
:
1897 case INDEX_op_ld32u_i64
:
1898 tcg_out_mem_long(s
, LWZ
, LWZX
, args
[0], args
[1], args
[2]);
1900 case INDEX_op_ld32s_i64
:
1901 tcg_out_mem_long(s
, LWA
, LWAX
, args
[0], args
[1], args
[2]);
1903 case INDEX_op_ld_i64
:
1904 tcg_out_mem_long(s
, LD
, LDX
, args
[0], args
[1], args
[2]);
1906 case INDEX_op_st8_i32
:
1907 case INDEX_op_st8_i64
:
1908 tcg_out_mem_long(s
, STB
, STBX
, args
[0], args
[1], args
[2]);
1910 case INDEX_op_st16_i32
:
1911 case INDEX_op_st16_i64
:
1912 tcg_out_mem_long(s
, STH
, STHX
, args
[0], args
[1], args
[2]);
1914 case INDEX_op_st_i32
:
1915 case INDEX_op_st32_i64
:
1916 tcg_out_mem_long(s
, STW
, STWX
, args
[0], args
[1], args
[2]);
1918 case INDEX_op_st_i64
:
1919 tcg_out_mem_long(s
, STD
, STDX
, args
[0], args
[1], args
[2]);
1922 case INDEX_op_add_i32
:
1923 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1924 if (const_args
[2]) {
1926 tcg_out_mem_long(s
, ADDI
, ADD
, a0
, a1
, (int32_t)a2
);
1928 tcg_out32(s
, ADD
| TAB(a0
, a1
, a2
));
1931 case INDEX_op_sub_i32
:
1932 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1933 if (const_args
[1]) {
1934 if (const_args
[2]) {
1935 tcg_out_movi(s
, TCG_TYPE_I32
, a0
, a1
- a2
);
1937 tcg_out32(s
, SUBFIC
| TAI(a0
, a2
, a1
));
1939 } else if (const_args
[2]) {
1943 tcg_out32(s
, SUBF
| TAB(a0
, a2
, a1
));
1947 case INDEX_op_and_i32
:
1948 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1949 if (const_args
[2]) {
1950 tcg_out_andi32(s
, a0
, a1
, a2
);
1952 tcg_out32(s
, AND
| SAB(a1
, a0
, a2
));
1955 case INDEX_op_and_i64
:
1956 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1957 if (const_args
[2]) {
1958 tcg_out_andi64(s
, a0
, a1
, a2
);
1960 tcg_out32(s
, AND
| SAB(a1
, a0
, a2
));
1963 case INDEX_op_or_i64
:
1964 case INDEX_op_or_i32
:
1965 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1966 if (const_args
[2]) {
1967 tcg_out_ori32(s
, a0
, a1
, a2
);
1969 tcg_out32(s
, OR
| SAB(a1
, a0
, a2
));
1972 case INDEX_op_xor_i64
:
1973 case INDEX_op_xor_i32
:
1974 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1975 if (const_args
[2]) {
1976 tcg_out_xori32(s
, a0
, a1
, a2
);
1978 tcg_out32(s
, XOR
| SAB(a1
, a0
, a2
));
1981 case INDEX_op_andc_i32
:
1982 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1983 if (const_args
[2]) {
1984 tcg_out_andi32(s
, a0
, a1
, ~a2
);
1986 tcg_out32(s
, ANDC
| SAB(a1
, a0
, a2
));
1989 case INDEX_op_andc_i64
:
1990 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1991 if (const_args
[2]) {
1992 tcg_out_andi64(s
, a0
, a1
, ~a2
);
1994 tcg_out32(s
, ANDC
| SAB(a1
, a0
, a2
));
1997 case INDEX_op_orc_i32
:
1998 if (const_args
[2]) {
1999 tcg_out_ori32(s
, args
[0], args
[1], ~args
[2]);
2003 case INDEX_op_orc_i64
:
2004 tcg_out32(s
, ORC
| SAB(args
[1], args
[0], args
[2]));
2006 case INDEX_op_eqv_i32
:
2007 if (const_args
[2]) {
2008 tcg_out_xori32(s
, args
[0], args
[1], ~args
[2]);
2012 case INDEX_op_eqv_i64
:
2013 tcg_out32(s
, EQV
| SAB(args
[1], args
[0], args
[2]));
2015 case INDEX_op_nand_i32
:
2016 case INDEX_op_nand_i64
:
2017 tcg_out32(s
, NAND
| SAB(args
[1], args
[0], args
[2]));
2019 case INDEX_op_nor_i32
:
2020 case INDEX_op_nor_i64
:
2021 tcg_out32(s
, NOR
| SAB(args
[1], args
[0], args
[2]));
2024 case INDEX_op_mul_i32
:
2025 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2026 if (const_args
[2]) {
2027 tcg_out32(s
, MULLI
| TAI(a0
, a1
, a2
));
2029 tcg_out32(s
, MULLW
| TAB(a0
, a1
, a2
));
2033 case INDEX_op_div_i32
:
2034 tcg_out32(s
, DIVW
| TAB(args
[0], args
[1], args
[2]));
2037 case INDEX_op_divu_i32
:
2038 tcg_out32(s
, DIVWU
| TAB(args
[0], args
[1], args
[2]));
2041 case INDEX_op_shl_i32
:
2042 if (const_args
[2]) {
2043 tcg_out_shli32(s
, args
[0], args
[1], args
[2]);
2045 tcg_out32(s
, SLW
| SAB(args
[1], args
[0], args
[2]));
2048 case INDEX_op_shr_i32
:
2049 if (const_args
[2]) {
2050 tcg_out_shri32(s
, args
[0], args
[1], args
[2]);
2052 tcg_out32(s
, SRW
| SAB(args
[1], args
[0], args
[2]));
2055 case INDEX_op_sar_i32
:
2056 if (const_args
[2]) {
2057 tcg_out32(s
, SRAWI
| RS(args
[1]) | RA(args
[0]) | SH(args
[2]));
2059 tcg_out32(s
, SRAW
| SAB(args
[1], args
[0], args
[2]));
2062 case INDEX_op_rotl_i32
:
2063 if (const_args
[2]) {
2064 tcg_out_rlw(s
, RLWINM
, args
[0], args
[1], args
[2], 0, 31);
2066 tcg_out32(s
, RLWNM
| SAB(args
[1], args
[0], args
[2])
2070 case INDEX_op_rotr_i32
:
2071 if (const_args
[2]) {
2072 tcg_out_rlw(s
, RLWINM
, args
[0], args
[1], 32 - args
[2], 0, 31);
2074 tcg_out32(s
, SUBFIC
| TAI(TCG_REG_R0
, args
[2], 32));
2075 tcg_out32(s
, RLWNM
| SAB(args
[1], args
[0], TCG_REG_R0
)
2080 case INDEX_op_brcond_i32
:
2081 tcg_out_brcond(s
, args
[2], args
[0], args
[1], const_args
[1],
2082 args
[3], TCG_TYPE_I32
);
2084 case INDEX_op_brcond_i64
:
2085 tcg_out_brcond(s
, args
[2], args
[0], args
[1], const_args
[1],
2086 args
[3], TCG_TYPE_I64
);
2088 case INDEX_op_brcond2_i32
:
2089 tcg_out_brcond2(s
, args
, const_args
);
2092 case INDEX_op_neg_i32
:
2093 case INDEX_op_neg_i64
:
2094 tcg_out32(s
, NEG
| RT(args
[0]) | RA(args
[1]));
2097 case INDEX_op_not_i32
:
2098 case INDEX_op_not_i64
:
2099 tcg_out32(s
, NOR
| SAB(args
[1], args
[0], args
[1]));
2102 case INDEX_op_add_i64
:
2103 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2104 if (const_args
[2]) {
2106 tcg_out_mem_long(s
, ADDI
, ADD
, a0
, a1
, a2
);
2108 tcg_out32(s
, ADD
| TAB(a0
, a1
, a2
));
2111 case INDEX_op_sub_i64
:
2112 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2113 if (const_args
[1]) {
2114 if (const_args
[2]) {
2115 tcg_out_movi(s
, TCG_TYPE_I64
, a0
, a1
- a2
);
2117 tcg_out32(s
, SUBFIC
| TAI(a0
, a2
, a1
));
2119 } else if (const_args
[2]) {
2123 tcg_out32(s
, SUBF
| TAB(a0
, a2
, a1
));
2127 case INDEX_op_shl_i64
:
2128 if (const_args
[2]) {
2129 tcg_out_shli64(s
, args
[0], args
[1], args
[2]);
2131 tcg_out32(s
, SLD
| SAB(args
[1], args
[0], args
[2]));
2134 case INDEX_op_shr_i64
:
2135 if (const_args
[2]) {
2136 tcg_out_shri64(s
, args
[0], args
[1], args
[2]);
2138 tcg_out32(s
, SRD
| SAB(args
[1], args
[0], args
[2]));
2141 case INDEX_op_sar_i64
:
2142 if (const_args
[2]) {
2143 int sh
= SH(args
[2] & 0x1f) | (((args
[2] >> 5) & 1) << 1);
2144 tcg_out32(s
, SRADI
| RA(args
[0]) | RS(args
[1]) | sh
);
2146 tcg_out32(s
, SRAD
| SAB(args
[1], args
[0], args
[2]));
2149 case INDEX_op_rotl_i64
:
2150 if (const_args
[2]) {
2151 tcg_out_rld(s
, RLDICL
, args
[0], args
[1], args
[2], 0);
2153 tcg_out32(s
, RLDCL
| SAB(args
[1], args
[0], args
[2]) | MB64(0));
2156 case INDEX_op_rotr_i64
:
2157 if (const_args
[2]) {
2158 tcg_out_rld(s
, RLDICL
, args
[0], args
[1], 64 - args
[2], 0);
2160 tcg_out32(s
, SUBFIC
| TAI(TCG_REG_R0
, args
[2], 64));
2161 tcg_out32(s
, RLDCL
| SAB(args
[1], args
[0], TCG_REG_R0
) | MB64(0));
2165 case INDEX_op_mul_i64
:
2166 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2167 if (const_args
[2]) {
2168 tcg_out32(s
, MULLI
| TAI(a0
, a1
, a2
));
2170 tcg_out32(s
, MULLD
| TAB(a0
, a1
, a2
));
2173 case INDEX_op_div_i64
:
2174 tcg_out32(s
, DIVD
| TAB(args
[0], args
[1], args
[2]));
2176 case INDEX_op_divu_i64
:
2177 tcg_out32(s
, DIVDU
| TAB(args
[0], args
[1], args
[2]));
2180 case INDEX_op_qemu_ld_i32
:
2181 tcg_out_qemu_ld(s
, args
, false);
2183 case INDEX_op_qemu_ld_i64
:
2184 tcg_out_qemu_ld(s
, args
, true);
2186 case INDEX_op_qemu_st_i32
:
2187 tcg_out_qemu_st(s
, args
, false);
2189 case INDEX_op_qemu_st_i64
:
2190 tcg_out_qemu_st(s
, args
, true);
2193 case INDEX_op_ext8s_i32
:
2194 case INDEX_op_ext8s_i64
:
2197 case INDEX_op_ext16s_i32
:
2198 case INDEX_op_ext16s_i64
:
2201 case INDEX_op_ext32s_i64
:
2205 tcg_out32(s
, c
| RS(args
[1]) | RA(args
[0]));
2208 case INDEX_op_setcond_i32
:
2209 tcg_out_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1], args
[2],
2212 case INDEX_op_setcond_i64
:
2213 tcg_out_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1], args
[2],
2216 case INDEX_op_setcond2_i32
:
2217 tcg_out_setcond2(s
, args
, const_args
);
2220 case INDEX_op_bswap16_i32
:
2221 case INDEX_op_bswap16_i64
:
2222 a0
= args
[0], a1
= args
[1];
2225 /* a0 = (a1 r<< 24) & 0xff # 000c */
2226 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 24, 24, 31);
2227 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
2228 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 8, 16, 23);
2230 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
2231 tcg_out_rlw(s
, RLWINM
, TCG_REG_R0
, a1
, 8, 16, 23);
2232 /* a0 = (a1 r<< 24) & 0xff # 000c */
2233 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 24, 24, 31);
2234 /* a0 = a0 | r0 # 00dc */
2235 tcg_out32(s
, OR
| SAB(TCG_REG_R0
, a0
, a0
));
2239 case INDEX_op_bswap32_i32
:
2240 case INDEX_op_bswap32_i64
:
2241 /* Stolen from gcc's builtin_bswap32 */
2243 a0
= args
[0] == a1
? TCG_REG_R0
: args
[0];
2245 /* a1 = args[1] # abcd */
2246 /* a0 = rotate_left (a1, 8) # bcda */
2247 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 8, 0, 31);
2248 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
2249 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 0, 7);
2250 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
2251 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 16, 23);
2253 if (a0
== TCG_REG_R0
) {
2254 tcg_out_mov(s
, TCG_TYPE_REG
, args
[0], a0
);
2258 case INDEX_op_bswap64_i64
:
2259 a0
= args
[0], a1
= args
[1], a2
= TCG_REG_R0
;
2265 /* a1 = # abcd efgh */
2266 /* a0 = rl32(a1, 8) # 0000 fghe */
2267 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 8, 0, 31);
2268 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
2269 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 0, 7);
2270 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
2271 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 16, 23);
2273 /* a0 = rl64(a0, 32) # hgfe 0000 */
2274 /* a2 = rl64(a1, 32) # efgh abcd */
2275 tcg_out_rld(s
, RLDICL
, a0
, a0
, 32, 0);
2276 tcg_out_rld(s
, RLDICL
, a2
, a1
, 32, 0);
2278 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
2279 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 8, 0, 31);
2280 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
2281 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 24, 0, 7);
2282 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
2283 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 24, 16, 23);
2286 tcg_out_mov(s
, TCG_TYPE_REG
, args
[0], a0
);
2290 case INDEX_op_deposit_i32
:
2291 if (const_args
[2]) {
2292 uint32_t mask
= ((2u << (args
[4] - 1)) - 1) << args
[3];
2293 tcg_out_andi32(s
, args
[0], args
[0], ~mask
);
2295 tcg_out_rlw(s
, RLWIMI
, args
[0], args
[2], args
[3],
2296 32 - args
[3] - args
[4], 31 - args
[3]);
2299 case INDEX_op_deposit_i64
:
2300 if (const_args
[2]) {
2301 uint64_t mask
= ((2ull << (args
[4] - 1)) - 1) << args
[3];
2302 tcg_out_andi64(s
, args
[0], args
[0], ~mask
);
2304 tcg_out_rld(s
, RLDIMI
, args
[0], args
[2], args
[3],
2305 64 - args
[3] - args
[4]);
2309 case INDEX_op_movcond_i32
:
2310 tcg_out_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1], args
[2],
2311 args
[3], args
[4], const_args
[2]);
2313 case INDEX_op_movcond_i64
:
2314 tcg_out_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1], args
[2],
2315 args
[3], args
[4], const_args
[2]);
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

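    /* The sub2 case mirrors add2 using the subtract-from forms: subfc and
       subfe compute RB - RA (borrowing through CA), and subfic computes
       SI - RA, so a constant low input can only appear as the minuend.
       A constant high input is restricted to 0 or -1 ("rZM" below) and is
       handled with subfze/subfme.  */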
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
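
/* Operand constraints for each opcode, decoded by this backend's
   constraint-parsing hook.  Roughly: "r" is any GPR and "0" ties an input
   to output operand 0; "L"/"S" are GPRs usable for qemu_ld/qemu_st
   addresses (excluding the registers needed for a slow-path helper call);
   "I"/"J" are signed/unsigned 16-bit constants, "T"/"U" signed/unsigned
   32-bit constants, "Z" the constant zero, "M" the constant -1, and "i"
   any constant.  */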
static const TCGTargetOpDef ppc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },

    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },
    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_muluh_i32, { "r", "r", "r" } },
    { INDEX_op_mulsh_i32, { "r", "r", "r" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_brcond_i64, { "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
    { INDEX_op_muluh_i64, { "r", "r", "r" } },
#endif

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } },
#else
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S" } },
#elif TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S", "S" } },
#else
    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } },
#endif

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa_2_06 = true;
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    if (USE_REG_RA) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);  /* return addr */
    }

    tcg_add_target_add_op_defs(ppc_op_defs);
}
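
/* DWARF unwind info describing the stack frame of the generated code.  It
   is handed to tcg_register_jit_int() below so that a debugger's JIT
   interface can unwind through translated code: a minimal CIE plus one FDE
   whose CFA is r1 + FRAME_SIZE and which records where the link register
   was saved.  */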
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),    /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */

static size_t dcache_bsize = 16;
static size_t icache_bsize = 16;

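/* PowerPC requires explicit coherency between the data and instruction
   caches after new code is written: flush each data cache block (dcbst),
   sync, invalidate each instruction cache block (icbi), then sync and
   isync before the new code may be executed.  */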
void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p, start1, stop1;
    size_t dsize = dcache_bsize;
    size_t isize = icache_bsize;

    start1 = start & ~(dsize - 1);
    stop1 = (stop + dsize - 1) & ~(dsize - 1);
    for (p = start1; p < stop1; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");

    start1 = start & ~(isize - 1);
    stop1 = (stop + isize - 1) & ~(isize - 1);
    for (p = start1; p < stop1; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}

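/* Probe the host's real cache line sizes at startup; the 16-byte defaults
   above are only a conservative fallback if no OS-specific method applies.  */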
#if defined _AIX
#include <sys/systemcfg.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    icache_bsize = _system_configuration.icache_line;
    dcache_bsize = _system_configuration.dcache_line;
}

#elif defined __linux__
static void __attribute__((constructor)) tcg_cache_init(void)
{
    unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE);
    unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE);

    if (dsize == 0 || isize == 0) {
        if (dsize == 0) {
            fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n");
        }
        if (isize == 0) {
            fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n");
        }
        exit(1);
    }
    dcache_bsize = dsize;
    icache_bsize = isize;
}

#elif defined __APPLE__
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    size_t len;
    unsigned cacheline;
    int name[2] = { CTL_HW, HW_CACHELINE };

    len = sizeof(cacheline);
    if (sysctl(name, 2, &cacheline, &len, NULL, 0)) {
        perror("sysctl CTL_HW HW_CACHELINE failed");
        exit(1);
    }
    dcache_bsize = cacheline;
    icache_bsize = cacheline;
}

#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    size_t len = 4;
    unsigned cacheline;

    if (sysctlbyname("machdep.cacheline_size", &cacheline, &len, NULL, 0)) {
        fprintf(stderr, "sysctlbyname machdep.cacheline_size failed: %s\n",
                strerror(errno));
        exit(1);
    }
    dcache_bsize = cacheline;
    icache_bsize = cacheline;
}
#endif