/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"
#if defined _CALL_DARWIN || defined __APPLE__
#define TCG_TARGET_CALL_DARWIN
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS   1
#endif

/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif
/* For the 64-bit target, we don't like the 5 insn sequence needed to build
   full 64-bit addresses.  Better to have a base register to which we can
   apply a 32-bit displacement.

   There are generally three items of interest:
   (1) helper functions in the main executable,
   (2) TranslationBlock data structures,
   (3) the return address in the epilogue.

   For user-only, we USE_STATIC_CODE_GEN_BUFFER, so the code_gen_buffer
   will be inside the main executable, and thus near enough to make a
   pointer to the epilogue be within 2GB of all helper functions.

   For softmmu, we'll let the kernel choose the address of code_gen_buffer,
   and odds are it'll be somewhere close to the main malloc arena, and so
   a pointer to the epilogue will be within 2GB of the TranslationBlocks.

   For --enable-pie, everything will be kinda near everything else,
   somewhere in high memory.

   Thus we choose to keep the return address in a call-saved register.  */
#define TCG_REG_RA     TCG_REG_R31
#define USE_REG_RA     (TCG_TARGET_REG_BITS == 64)

/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)
#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000

static tcg_insn_unit *tb_ret_addr;

static bool have_isa_2_06;
#define HAVE_ISA_2_06  have_isa_2_06
#define HAVE_ISEL      have_isa_2_06

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG 30
#else
#define TCG_GUEST_BASE_REG 0
#endif
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R10,  /* call clobbered, arguments */

static const int tcg_target_call_iarg_regs[] = {

static const int tcg_target_call_oarg_regs[] = {

static const int tcg_target_callee_save_regs[] = {
#ifdef TCG_TARGET_CALL_DARWIN
    TCG_REG_R27, /* currently used for the global env */
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(in_range_b(disp));
    return disp & 0x3fffffc;
}
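
/* Worked example (illustrative note, not from the original source):
   reloc_pc24_val() packs a pc-relative byte displacement into the LI field
   of a PowerPC I-form branch -- a 26-bit signed, word-aligned displacement
   whose low two bits are always zero.  E.g. a target 0x1230 bytes after the
   branch gives disp = 0x1230, which passes in_range_b(), and the value OR'd
   into the opcode is 0x1230 & 0x3fffffc == 0x1230.  */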
static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target);
}

static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target);
}
static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0x3fffffc;
    tcg_out32(s, insn | retrans);
}

static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0xfffc;
    tcg_out32(s, insn | retrans);
}
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_insn_unit *target = (tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, target);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, target);
        break;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }
    return 0;
}
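
/* Worked example (illustrative note, not from the original source): with
   ct = TCG_CT_CONST_S16, val = 0x7fff matches because (int16_t)0x7fff ==
   0x7fff, while val = 0x8000 does not because (int16_t)0x8000 == -0x8000.
   TCG_CT_CONST_MONE accepts only -1, which the add2/sub2 expansions later
   in this file exploit to pick the addme/subfme forms.  */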
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))

#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LDX    XO31( 21)

#define LWAX   XO31(341)

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define SPR(a, b) ((((a)<<5)|(b))<<11)

#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define TRAP   (TW | TO(31))

#define NOP    ORI  /* ori 0,0,0 */

#define RT(r)   ((r)<<21)
#define RS(r)   ((r)<<21)
#define RA(r)   ((r)<<16)
#define RB(r)   ((r)<<11)
#define TO(t)   ((t)<<21)
#define SH(s)   ((s)<<11)
#define MB(b)   ((b)<<6)
#define ME(e)   ((e)<<1)
#define BO(o)   ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b)  (1 << (19 - (b)))

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)     ((n)<<23)
#define BI(n, c)  (((c)+((n)*4))<<16)
#define BT(n, c)  (((c)+((n)*4))<<21)
#define BA(n, c)  (((c)+((n)*4))<<16)
#define BB(n, c)  (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};
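
/* Illustrative note (not from the original source): each entry above is a
   bc opcode with only the displacement missing.  E.g. the TCG_COND_LT entry
   is BC | BI(7, CR_LT) | BO_COND_TRUE: BO = 12 branches when the selected
   CR bit is 1, and BI selects the LT bit of cr7 -- the same encoding an
   assembler emits for "blt cr7, target" before the offset is patched in.  */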
/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (ret != arg) {
        tcg_out32(s, OR | SAB(arg, ret, arg));
    }
}
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}

static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
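
/* Illustrative note (not from the original source): the shift helpers are
   rotate-and-mask instructions.  tcg_out_shli32(s, dst, src, 8) emits
   "rlwinm dst,src,8,0,23": rotate left by 8 and keep mask bits 0..23
   (IBM bit numbering), which is exactly a logical shift left by 8.
   Likewise tcg_out_shri64(s, dst, src, 8) is "rldicl dst,src,56,8".  */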
static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
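
/* Illustrative note (not from the original source): tcg_out_movi32 needs at
   most two insns.  Loading 0x12345678 emits "addis ret,0,0x1234" followed by
   "ori ret,ret,0x5678"; a 16-bit signed value such as -5 needs only the
   single "addi ret,0,-5".  */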
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high;

        if (USE_REG_RA) {
            intptr_t diff = arg - (intptr_t)tb_ret_addr;
            if (diff == (int32_t)diff) {
                tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_RA, diff);
                return;
            }
        }

        high = arg >> 31 >> 1;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
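
/* Worked example (illustrative note, not from the original source):
   mask_operand(0x00ffff00, &mb, &me) succeeds with mb = 8 and me = 23:
   lsb = 0x100, test = c + lsb = 0x01000000 has a single bit set, so the
   mask is contiguous; clz32(lsb) = 23 and clz32(test) + 1 = 8, matching the
   MB/ME operands of an rlwinm that keeps bits 8..23 (IBM numbering).  */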
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    assert(TCG_TARGET_REG_BITS == 64);
    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
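
/* Illustrative note (not from the original source): tcg_out_ori32 splits the
   constant into halves, e.g. c = 0x00050003 becomes "oris dst,src,5" then
   "ori dst,dst,3", while c = 0x3 needs only the single "ori dst,src,3".  */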
static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    default:
        if (rt != TCG_REG_R0) {
            rs = rt;
            break;
        }
        break;
    case STB: case STH: case STW:
        is_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        tcg_debug_assert(!is_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}
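
/* Worked example (illustrative note, not from the original source): for an
   aligned displacement such as offset = 0x12345678, the split above gives
   l0 = 0x5678 and l1 = 0x1234, so the output is "addis scratch,base,0x1234"
   followed by the D-form access with displacement 0x5678.  Offsets that are
   unaligned for the opcode, or that do not fit in 32 bits, instead load the
   full constant and use the indexed (X-form) instruction.  */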
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32) {
        opi = LWZ, opx = LWZX;
    } else {
        opi = LD, opx = LDX;
    }
    tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32) {
        opi = STW, opx = STWX;
    } else {
        opi = STD, opx = STDX;
    }
    tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}
static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}
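
/* Worked example (illustrative note, not from the original source), for the
   carry trick above with dst != src: for X = 5, "addic dst,X,-1" leaves
   dst = 4 and CA = 1 (5 + 0xffffffff carries out), and "subfe dst,dst,X"
   computes X + ~dst + CA = 5 - 5 + 1 = 1.  For X = 0, addic leaves dst = -1
   with CA = 0, and subfe computes 0 + ~(-1) + 0 = 0.  So dst = (X != 0).  */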
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  int const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}
984 static void tcg_out_setcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
985 TCGArg arg0
, TCGArg arg1
, TCGArg arg2
,
990 assert(TCG_TARGET_REG_BITS
== 64 || type
== TCG_TYPE_I32
);
992 /* Ignore high bits of a potential constant arg2. */
993 if (type
== TCG_TYPE_I32
) {
994 arg2
= (uint32_t)arg2
;
997 /* Handle common and trivial cases before handling anything else. */
1001 tcg_out_setcond_eq0(s
, type
, arg0
, arg1
);
1004 if (TCG_TARGET_REG_BITS
== 64 && type
== TCG_TYPE_I32
) {
1005 tcg_out_ext32u(s
, TCG_REG_R0
, arg1
);
1008 tcg_out_setcond_ne0(s
, arg0
, arg1
);
1011 tcg_out32(s
, NOR
| SAB(arg1
, arg0
, arg1
));
1015 /* Extract the sign bit. */
1016 if (type
== TCG_TYPE_I32
) {
1017 tcg_out_shri32(s
, arg0
, arg1
, 31);
1019 tcg_out_shri64(s
, arg0
, arg1
, 63);
1027 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1028 All other cases below are also at least 3 insns, so speed up the
1029 code generator by not considering them and always using ISEL. */
1033 tcg_out_cmp(s
, cond
, arg1
, arg2
, const_arg2
, 7, type
);
1035 isel
= tcg_to_isel
[cond
];
1037 tcg_out_movi(s
, type
, arg0
, 1);
1039 /* arg0 = (bc ? 0 : 1) */
1040 tab
= TAB(arg0
, 0, arg0
);
1043 /* arg0 = (bc ? 1 : 0) */
1044 tcg_out_movi(s
, type
, TCG_REG_R0
, 0);
1045 tab
= TAB(arg0
, arg0
, TCG_REG_R0
);
1047 tcg_out32(s
, isel
| tab
);
1053 arg1
= tcg_gen_setcond_xor(s
, arg1
, arg2
, const_arg2
);
1054 tcg_out_setcond_eq0(s
, type
, arg0
, arg1
);
1058 arg1
= tcg_gen_setcond_xor(s
, arg1
, arg2
, const_arg2
);
1059 /* Discard the high bits only once, rather than both inputs. */
1060 if (TCG_TARGET_REG_BITS
== 64 && type
== TCG_TYPE_I32
) {
1061 tcg_out_ext32u(s
, TCG_REG_R0
, arg1
);
1064 tcg_out_setcond_ne0(s
, arg0
, arg1
);
1082 crop
= CRNOR
| BT(7, CR_EQ
) | BA(7, CR_LT
) | BB(7, CR_LT
);
1088 crop
= CRNOR
| BT(7, CR_EQ
) | BA(7, CR_GT
) | BB(7, CR_GT
);
1090 tcg_out_cmp(s
, cond
, arg1
, arg2
, const_arg2
, 7, type
);
1094 tcg_out32(s
, MFOCRF
| RT(TCG_REG_R0
) | FXM(7));
1095 tcg_out_rlw(s
, RLWINM
, arg0
, TCG_REG_R0
, sh
, 31, 31);
static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
        tcg_out_bc_noaddr(s, bc);
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], l);
}
1121 static void tcg_out_movcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
1122 TCGArg dest
, TCGArg c1
, TCGArg c2
, TCGArg v1
,
1123 TCGArg v2
, bool const_c2
)
1125 /* If for some reason both inputs are zero, don't produce bad code. */
1126 if (v1
== 0 && v2
== 0) {
1127 tcg_out_movi(s
, type
, dest
, 0);
1131 tcg_out_cmp(s
, cond
, c1
, c2
, const_c2
, 7, type
);
1134 int isel
= tcg_to_isel
[cond
];
1136 /* Swap the V operands if the operation indicates inversion. */
1143 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1145 tcg_out_movi(s
, type
, TCG_REG_R0
, 0);
1147 tcg_out32(s
, isel
| TAB(dest
, v1
, v2
));
1150 cond
= tcg_invert_cond(cond
);
1152 } else if (dest
!= v1
) {
1154 tcg_out_movi(s
, type
, dest
, 0);
1156 tcg_out_mov(s
, type
, dest
, v1
);
1159 /* Branch forward over one insn */
1160 tcg_out32(s
, tcg_to_bc
[cond
] | 8);
1162 tcg_out_movi(s
, type
, dest
, 0);
1164 tcg_out_mov(s
, type
, dest
, v2
);
1169 static void tcg_out_cmp2(TCGContext
*s
, const TCGArg
*args
,
1170 const int *const_args
)
1172 static const struct { uint8_t bit1
, bit2
; } bits
[] = {
1173 [TCG_COND_LT
] = { CR_LT
, CR_LT
},
1174 [TCG_COND_LE
] = { CR_LT
, CR_GT
},
1175 [TCG_COND_GT
] = { CR_GT
, CR_GT
},
1176 [TCG_COND_GE
] = { CR_GT
, CR_LT
},
1177 [TCG_COND_LTU
] = { CR_LT
, CR_LT
},
1178 [TCG_COND_LEU
] = { CR_LT
, CR_GT
},
1179 [TCG_COND_GTU
] = { CR_GT
, CR_GT
},
1180 [TCG_COND_GEU
] = { CR_GT
, CR_LT
},
1183 TCGCond cond
= args
[4], cond2
;
1184 TCGArg al
, ah
, bl
, bh
;
1185 int blconst
, bhconst
;
1192 blconst
= const_args
[2];
1193 bhconst
= const_args
[3];
1202 tcg_out_cmp(s
, cond
, al
, bl
, blconst
, 6, TCG_TYPE_I32
);
1203 tcg_out_cmp(s
, cond
, ah
, bh
, bhconst
, 7, TCG_TYPE_I32
);
1204 tcg_out32(s
, op
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, CR_EQ
));
1215 bit1
= bits
[cond
].bit1
;
1216 bit2
= bits
[cond
].bit2
;
1217 op
= (bit1
!= bit2
? CRANDC
: CRAND
);
1218 cond2
= tcg_unsigned_cond(cond
);
1220 tcg_out_cmp(s
, cond
, ah
, bh
, bhconst
, 6, TCG_TYPE_I32
);
1221 tcg_out_cmp(s
, cond2
, al
, bl
, blconst
, 7, TCG_TYPE_I32
);
1222 tcg_out32(s
, op
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, bit2
));
1223 tcg_out32(s
, CROR
| BT(7, CR_EQ
) | BA(6, bit1
) | BB(7, CR_EQ
));
1231 static void tcg_out_setcond2(TCGContext
*s
, const TCGArg
*args
,
1232 const int *const_args
)
1234 tcg_out_cmp2(s
, args
+ 1, const_args
+ 1);
1235 tcg_out32(s
, MFOCRF
| RT(TCG_REG_R0
) | FXM(7));
1236 tcg_out_rlw(s
, RLWINM
, args
[0], TCG_REG_R0
, 31, 31, 31);
1239 static void tcg_out_brcond2 (TCGContext
*s
, const TCGArg
*args
,
1240 const int *const_args
)
1242 tcg_out_cmp2(s
, args
, const_args
);
1243 tcg_out_bc(s
, BC
| BI(7, CR_EQ
) | BO_COND_TRUE
, arg_label(args
[5]));
1246 void ppc_tb_set_jmp_target(uintptr_t jmp_addr
, uintptr_t addr
)
1250 s
.code_buf
= s
.code_ptr
= (tcg_insn_unit
*)jmp_addr
;
1251 tcg_out_b(&s
, 0, (tcg_insn_unit
*)addr
);
1252 flush_icache_range(jmp_addr
, jmp_addr
+ tcg_current_code_size(&s
));
1255 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*target
)
1258 /* Look through the descriptor. If the branch is in range, and we
1259 don't have to spend too much effort on building the toc. */
1260 void *tgt
= ((void **)target
)[0];
1261 uintptr_t toc
= ((uintptr_t *)target
)[1];
1262 intptr_t diff
= tcg_pcrel_diff(s
, tgt
);
1264 if (in_range_b(diff
) && toc
== (uint32_t)toc
) {
1265 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP1
, toc
);
1266 tcg_out_b(s
, LK
, tgt
);
1268 /* Fold the low bits of the constant into the addresses below. */
1269 intptr_t arg
= (intptr_t)target
;
1270 int ofs
= (int16_t)arg
;
1272 if (ofs
+ 8 < 0x8000) {
1277 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_TMP1
, arg
);
1278 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_TMP1
, ofs
);
1279 tcg_out32(s
, MTSPR
| RA(TCG_REG_R0
) | CTR
);
1280 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_REG_TMP1
, ofs
+ SZP
);
1281 tcg_out32(s
, BCCTR
| BO_ALWAYS
| LK
);
1283 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1286 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1287 address, which the callee uses to compute its TOC address. */
1288 /* FIXME: when the branch is in range, we could avoid r12 load if we
1289 knew that the destination uses the same TOC, and what its local
1290 entry point offset is. */
1291 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R12
, (intptr_t)target
);
1293 diff
= tcg_pcrel_diff(s
, target
);
1294 if (in_range_b(diff
)) {
1295 tcg_out_b(s
, LK
, target
);
1297 tcg_out32(s
, MTSPR
| RS(TCG_REG_R12
) | CTR
);
1298 tcg_out32(s
, BCCTR
| BO_ALWAYS
| LK
);
1301 tcg_out_b(s
, LK
, target
);
1305 static const uint32_t qemu_ldx_opc
[16] = {
1312 [MO_BSWAP
| MO_UB
] = LBZX
,
1313 [MO_BSWAP
| MO_UW
] = LHBRX
,
1314 [MO_BSWAP
| MO_UL
] = LWBRX
,
1315 [MO_BSWAP
| MO_Q
] = LDBRX
,
1318 static const uint32_t qemu_stx_opc
[16] = {
1323 [MO_BSWAP
| MO_UB
] = STBX
,
1324 [MO_BSWAP
| MO_UW
] = STHBRX
,
1325 [MO_BSWAP
| MO_UL
] = STWBRX
,
1326 [MO_BSWAP
| MO_Q
] = STDBRX
,
1329 static const uint32_t qemu_exts_opc
[4] = {
1330 EXTSB
, EXTSH
, EXTSW
, 0
1333 #if defined (CONFIG_SOFTMMU)
1334 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1335 * int mmu_idx, uintptr_t ra)
1337 static void * const qemu_ld_helpers
[16] = {
1338 [MO_UB
] = helper_ret_ldub_mmu
,
1339 [MO_LEUW
] = helper_le_lduw_mmu
,
1340 [MO_LEUL
] = helper_le_ldul_mmu
,
1341 [MO_LEQ
] = helper_le_ldq_mmu
,
1342 [MO_BEUW
] = helper_be_lduw_mmu
,
1343 [MO_BEUL
] = helper_be_ldul_mmu
,
1344 [MO_BEQ
] = helper_be_ldq_mmu
,
1347 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1348 * uintxx_t val, int mmu_idx, uintptr_t ra)
1350 static void * const qemu_st_helpers
[16] = {
1351 [MO_UB
] = helper_ret_stb_mmu
,
1352 [MO_LEUW
] = helper_le_stw_mmu
,
1353 [MO_LEUL
] = helper_le_stl_mmu
,
1354 [MO_LEQ
] = helper_le_stq_mmu
,
1355 [MO_BEUW
] = helper_be_stw_mmu
,
1356 [MO_BEUL
] = helper_be_stl_mmu
,
1357 [MO_BEQ
] = helper_be_stq_mmu
,
1360 /* Perform the TLB load and compare. Places the result of the comparison
1361 in CR7, loads the addend of the TLB into R3, and returns the register
1362 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1364 static TCGReg
tcg_out_tlb_read(TCGContext
*s
, TCGMemOp s_bits
,
1365 TCGReg addrlo
, TCGReg addrhi
,
1366 int mem_index
, bool is_read
)
1370 ? offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
)
1371 : offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
));
1372 int add_off
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1373 TCGReg base
= TCG_AREG0
;
1375 /* Extract the page index, shifted into place for tlb index. */
1376 if (TCG_TARGET_REG_BITS
== 64) {
1377 if (TARGET_LONG_BITS
== 32) {
1378 /* Zero-extend the address into a place helpful for further use. */
1379 tcg_out_ext32u(s
, TCG_REG_R4
, addrlo
);
1380 addrlo
= TCG_REG_R4
;
1382 tcg_out_rld(s
, RLDICL
, TCG_REG_R3
, addrlo
,
1383 64 - TARGET_PAGE_BITS
, 64 - CPU_TLB_BITS
);
1387 /* Compensate for very large offsets. */
1388 if (add_off
>= 0x8000) {
1389 /* Most target env are smaller than 32k; none are larger than 64k.
1390 Simplify the logic here merely to offset by 0x7ff0, giving us a
1391 range just shy of 64k. Check this assumption. */
1392 QEMU_BUILD_BUG_ON(offsetof(CPUArchState
,
1393 tlb_table
[NB_MMU_MODES
- 1][1])
1395 tcg_out32(s
, ADDI
| TAI(TCG_REG_TMP1
, base
, 0x7ff0));
1396 base
= TCG_REG_TMP1
;
1401 /* Extraction and shifting, part 2. */
1402 if (TCG_TARGET_REG_BITS
== 32 || TARGET_LONG_BITS
== 32) {
1403 tcg_out_rlw(s
, RLWINM
, TCG_REG_R3
, addrlo
,
1404 32 - (TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
),
1405 32 - (CPU_TLB_BITS
+ CPU_TLB_ENTRY_BITS
),
1406 31 - CPU_TLB_ENTRY_BITS
);
1408 tcg_out_shli64(s
, TCG_REG_R3
, TCG_REG_R3
, CPU_TLB_ENTRY_BITS
);
1411 tcg_out32(s
, ADD
| TAB(TCG_REG_R3
, TCG_REG_R3
, base
));
1413 /* Load the tlb comparator. */
1414 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1415 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_R4
, TCG_REG_R3
, cmp_off
);
1416 tcg_out_ld(s
, TCG_TYPE_I32
, TCG_REG_TMP1
, TCG_REG_R3
, cmp_off
+ 4);
1418 tcg_out_ld(s
, TCG_TYPE_TL
, TCG_REG_TMP1
, TCG_REG_R3
, cmp_off
);
1421 /* Load the TLB addend for use on the fast path. Do this asap
1422 to minimize any load use delay. */
1423 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R3
, TCG_REG_R3
, add_off
);
1425 /* Clear the non-page, non-alignment bits from the address. */
1426 if (TCG_TARGET_REG_BITS
== 32 || TARGET_LONG_BITS
== 32) {
1427 tcg_out_rlw(s
, RLWINM
, TCG_REG_R0
, addrlo
, 0,
1428 (32 - s_bits
) & 31, 31 - TARGET_PAGE_BITS
);
1429 } else if (!s_bits
) {
1430 tcg_out_rld(s
, RLDICR
, TCG_REG_R0
, addrlo
,
1431 0, 63 - TARGET_PAGE_BITS
);
1433 tcg_out_rld(s
, RLDICL
, TCG_REG_R0
, addrlo
,
1434 64 - TARGET_PAGE_BITS
, TARGET_PAGE_BITS
- s_bits
);
1435 tcg_out_rld(s
, RLDICL
, TCG_REG_R0
, TCG_REG_R0
, TARGET_PAGE_BITS
, 0);
1438 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1439 tcg_out_cmp(s
, TCG_COND_EQ
, TCG_REG_R0
, TCG_REG_TMP1
,
1440 0, 7, TCG_TYPE_I32
);
1441 tcg_out_cmp(s
, TCG_COND_EQ
, addrhi
, TCG_REG_R4
, 0, 6, TCG_TYPE_I32
);
1442 tcg_out32(s
, CRAND
| BT(7, CR_EQ
) | BA(6, CR_EQ
) | BB(7, CR_EQ
));
1444 tcg_out_cmp(s
, TCG_COND_EQ
, TCG_REG_R0
, TCG_REG_TMP1
,
1451 /* Record the context of a call to the out of line helper code for the slow
1452 path for a load or store, so that we can later generate the correct
1454 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1455 TCGReg datalo_reg
, TCGReg datahi_reg
,
1456 TCGReg addrlo_reg
, TCGReg addrhi_reg
,
1457 tcg_insn_unit
*raddr
, tcg_insn_unit
*lptr
)
1459 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1461 label
->is_ld
= is_ld
;
1463 label
->datalo_reg
= datalo_reg
;
1464 label
->datahi_reg
= datahi_reg
;
1465 label
->addrlo_reg
= addrlo_reg
;
1466 label
->addrhi_reg
= addrhi_reg
;
1467 label
->raddr
= raddr
;
1468 label
->label_ptr
[0] = lptr
;
1471 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1473 TCGMemOpIdx oi
= lb
->oi
;
1474 TCGMemOp opc
= get_memop(oi
);
1475 TCGReg hi
, lo
, arg
= TCG_REG_R3
;
1477 reloc_pc14(lb
->label_ptr
[0], s
->code_ptr
);
1479 tcg_out_mov(s
, TCG_TYPE_PTR
, arg
++, TCG_AREG0
);
1481 lo
= lb
->addrlo_reg
;
1482 hi
= lb
->addrhi_reg
;
1483 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1484 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1487 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1488 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1490 /* If the address needed to be zero-extended, we'll have already
1491 placed it in R4. The only remaining case is 64-bit guest. */
1492 tcg_out_mov(s
, TCG_TYPE_TL
, arg
++, lo
);
1495 tcg_out_movi(s
, TCG_TYPE_I32
, arg
++, oi
);
1496 tcg_out32(s
, MFSPR
| RT(arg
) | LR
);
1498 tcg_out_call(s
, qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1500 lo
= lb
->datalo_reg
;
1501 hi
= lb
->datahi_reg
;
1502 if (TCG_TARGET_REG_BITS
== 32 && (opc
& MO_SIZE
) == MO_64
) {
1503 tcg_out_mov(s
, TCG_TYPE_I32
, lo
, TCG_REG_R4
);
1504 tcg_out_mov(s
, TCG_TYPE_I32
, hi
, TCG_REG_R3
);
1505 } else if (opc
& MO_SIGN
) {
1506 uint32_t insn
= qemu_exts_opc
[opc
& MO_SIZE
];
1507 tcg_out32(s
, insn
| RA(lo
) | RS(TCG_REG_R3
));
1509 tcg_out_mov(s
, TCG_TYPE_REG
, lo
, TCG_REG_R3
);
1512 tcg_out_b(s
, 0, lb
->raddr
);
1515 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1517 TCGMemOpIdx oi
= lb
->oi
;
1518 TCGMemOp opc
= get_memop(oi
);
1519 TCGMemOp s_bits
= opc
& MO_SIZE
;
1520 TCGReg hi
, lo
, arg
= TCG_REG_R3
;
1522 reloc_pc14(lb
->label_ptr
[0], s
->code_ptr
);
1524 tcg_out_mov(s
, TCG_TYPE_PTR
, arg
++, TCG_AREG0
);
1526 lo
= lb
->addrlo_reg
;
1527 hi
= lb
->addrhi_reg
;
1528 if (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
) {
1529 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1532 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1533 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1535 /* If the address needed to be zero-extended, we'll have already
1536 placed it in R4. The only remaining case is 64-bit guest. */
1537 tcg_out_mov(s
, TCG_TYPE_TL
, arg
++, lo
);
1540 lo
= lb
->datalo_reg
;
1541 hi
= lb
->datahi_reg
;
1542 if (TCG_TARGET_REG_BITS
== 32) {
1545 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1548 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, hi
);
1551 tcg_out_mov(s
, TCG_TYPE_I32
, arg
++, lo
);
1554 tcg_out_rlw(s
, RLWINM
, arg
++, lo
, 0, 32 - (8 << s_bits
), 31);
1558 if (s_bits
== MO_64
) {
1559 tcg_out_mov(s
, TCG_TYPE_I64
, arg
++, lo
);
1561 tcg_out_rld(s
, RLDICL
, arg
++, lo
, 0, 64 - (8 << s_bits
));
1565 tcg_out_movi(s
, TCG_TYPE_I32
, arg
++, oi
);
1566 tcg_out32(s
, MFSPR
| RT(arg
) | LR
);
1568 tcg_out_call(s
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1570 tcg_out_b(s
, 0, lb
->raddr
);
1572 #endif /* SOFTMMU */
1574 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1576 TCGReg datalo
, datahi
, addrlo
, rbase
;
1577 TCGReg addrhi
__attribute__((unused
));
1579 TCGMemOp opc
, s_bits
;
1580 #ifdef CONFIG_SOFTMMU
1582 tcg_insn_unit
*label_ptr
;
1586 datahi
= (TCG_TARGET_REG_BITS
== 32 && is_64
? *args
++ : 0);
1588 addrhi
= (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
? *args
++ : 0);
1590 opc
= get_memop(oi
);
1591 s_bits
= opc
& MO_SIZE
;
1593 #ifdef CONFIG_SOFTMMU
1594 mem_index
= get_mmuidx(oi
);
1595 addrlo
= tcg_out_tlb_read(s
, s_bits
, addrlo
, addrhi
, mem_index
, true);
1597 /* Load a pointer into the current opcode w/conditional branch-link. */
1598 label_ptr
= s
->code_ptr
;
1599 tcg_out_bc_noaddr(s
, BC
| BI(7, CR_EQ
) | BO_COND_FALSE
| LK
);
1602 #else /* !CONFIG_SOFTMMU */
1603 rbase
= GUEST_BASE
? TCG_GUEST_BASE_REG
: 0;
1604 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1605 tcg_out_ext32u(s
, TCG_REG_TMP1
, addrlo
);
1606 addrlo
= TCG_REG_TMP1
;
1610 if (TCG_TARGET_REG_BITS
== 32 && s_bits
== MO_64
) {
1611 if (opc
& MO_BSWAP
) {
1612 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1613 tcg_out32(s
, LWBRX
| TAB(datalo
, rbase
, addrlo
));
1614 tcg_out32(s
, LWBRX
| TAB(datahi
, rbase
, TCG_REG_R0
));
1615 } else if (rbase
!= 0) {
1616 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1617 tcg_out32(s
, LWZX
| TAB(datahi
, rbase
, addrlo
));
1618 tcg_out32(s
, LWZX
| TAB(datalo
, rbase
, TCG_REG_R0
));
1619 } else if (addrlo
== datahi
) {
1620 tcg_out32(s
, LWZ
| TAI(datalo
, addrlo
, 4));
1621 tcg_out32(s
, LWZ
| TAI(datahi
, addrlo
, 0));
1623 tcg_out32(s
, LWZ
| TAI(datahi
, addrlo
, 0));
1624 tcg_out32(s
, LWZ
| TAI(datalo
, addrlo
, 4));
1627 uint32_t insn
= qemu_ldx_opc
[opc
& (MO_BSWAP
| MO_SSIZE
)];
1628 if (!HAVE_ISA_2_06
&& insn
== LDBRX
) {
1629 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1630 tcg_out32(s
, LWBRX
| TAB(datalo
, rbase
, addrlo
));
1631 tcg_out32(s
, LWBRX
| TAB(TCG_REG_R0
, rbase
, TCG_REG_R0
));
1632 tcg_out_rld(s
, RLDIMI
, datalo
, TCG_REG_R0
, 32, 0);
1634 tcg_out32(s
, insn
| TAB(datalo
, rbase
, addrlo
));
1636 insn
= qemu_ldx_opc
[opc
& (MO_SIZE
| MO_BSWAP
)];
1637 tcg_out32(s
, insn
| TAB(datalo
, rbase
, addrlo
));
1638 insn
= qemu_exts_opc
[s_bits
];
1639 tcg_out32(s
, insn
| RA(datalo
) | RS(datalo
));
1643 #ifdef CONFIG_SOFTMMU
1644 add_qemu_ldst_label(s
, true, oi
, datalo
, datahi
, addrlo
, addrhi
,
1645 s
->code_ptr
, label_ptr
);
1649 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1651 TCGReg datalo
, datahi
, addrlo
, rbase
;
1652 TCGReg addrhi
__attribute__((unused
));
1654 TCGMemOp opc
, s_bits
;
1655 #ifdef CONFIG_SOFTMMU
1657 tcg_insn_unit
*label_ptr
;
1661 datahi
= (TCG_TARGET_REG_BITS
== 32 && is_64
? *args
++ : 0);
1663 addrhi
= (TCG_TARGET_REG_BITS
< TARGET_LONG_BITS
? *args
++ : 0);
1665 opc
= get_memop(oi
);
1666 s_bits
= opc
& MO_SIZE
;
1668 #ifdef CONFIG_SOFTMMU
1669 mem_index
= get_mmuidx(oi
);
1670 addrlo
= tcg_out_tlb_read(s
, s_bits
, addrlo
, addrhi
, mem_index
, false);
1672 /* Load a pointer into the current opcode w/conditional branch-link. */
1673 label_ptr
= s
->code_ptr
;
1674 tcg_out_bc_noaddr(s
, BC
| BI(7, CR_EQ
) | BO_COND_FALSE
| LK
);
1677 #else /* !CONFIG_SOFTMMU */
1678 rbase
= GUEST_BASE
? TCG_GUEST_BASE_REG
: 0;
1679 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1680 tcg_out_ext32u(s
, TCG_REG_TMP1
, addrlo
);
1681 addrlo
= TCG_REG_TMP1
;
1685 if (TCG_TARGET_REG_BITS
== 32 && s_bits
== MO_64
) {
1686 if (opc
& MO_BSWAP
) {
1687 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1688 tcg_out32(s
, STWBRX
| SAB(datalo
, rbase
, addrlo
));
1689 tcg_out32(s
, STWBRX
| SAB(datahi
, rbase
, TCG_REG_R0
));
1690 } else if (rbase
!= 0) {
1691 tcg_out32(s
, ADDI
| TAI(TCG_REG_R0
, addrlo
, 4));
1692 tcg_out32(s
, STWX
| SAB(datahi
, rbase
, addrlo
));
1693 tcg_out32(s
, STWX
| SAB(datalo
, rbase
, TCG_REG_R0
));
1695 tcg_out32(s
, STW
| TAI(datahi
, addrlo
, 0));
1696 tcg_out32(s
, STW
| TAI(datalo
, addrlo
, 4));
1699 uint32_t insn
= qemu_stx_opc
[opc
& (MO_BSWAP
| MO_SIZE
)];
1700 if (!HAVE_ISA_2_06
&& insn
== STDBRX
) {
1701 tcg_out32(s
, STWBRX
| SAB(datalo
, rbase
, addrlo
));
1702 tcg_out32(s
, ADDI
| TAI(TCG_REG_TMP1
, addrlo
, 4));
1703 tcg_out_shri64(s
, TCG_REG_R0
, datalo
, 32);
1704 tcg_out32(s
, STWBRX
| SAB(TCG_REG_R0
, rbase
, TCG_REG_TMP1
));
1706 tcg_out32(s
, insn
| SAB(datalo
, rbase
, addrlo
));
1710 #ifdef CONFIG_SOFTMMU
1711 add_qemu_ldst_label(s
, false, oi
, datalo
, datahi
, addrlo
, addrhi
,
1712 s
->code_ptr
, label_ptr
);
1716 /* Parameters for function call generation, used in tcg.c. */
1717 #define TCG_TARGET_STACK_ALIGN 16
1718 #define TCG_TARGET_EXTEND_ARGS 1
1721 # define LINK_AREA_SIZE (6 * SZR)
1722 # define LR_OFFSET (1 * SZR)
1723 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
1724 #elif defined(TCG_TARGET_CALL_DARWIN)
1725 # define LINK_AREA_SIZE (6 * SZR)
1726 # define LR_OFFSET (2 * SZR)
1727 #elif TCG_TARGET_REG_BITS == 64
1728 # if defined(_CALL_ELF) && _CALL_ELF == 2
1729 # define LINK_AREA_SIZE (4 * SZR)
1730 # define LR_OFFSET (1 * SZR)
1732 #else /* TCG_TARGET_REG_BITS == 32 */
1733 # if defined(_CALL_SYSV)
1734 # define LINK_AREA_SIZE (2 * SZR)
1735 # define LR_OFFSET (1 * SZR)
1739 # error "Unhandled abi"
1741 #ifndef TCG_TARGET_CALL_STACK_OFFSET
1742 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
1745 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
1746 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
1748 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
1749 + TCG_STATIC_CALL_ARGS_SIZE \
1750 + CPU_TEMP_BUF_SIZE \
1752 + TCG_TARGET_STACK_ALIGN - 1) \
1753 & -TCG_TARGET_STACK_ALIGN)
1755 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
1757 static void tcg_target_qemu_prologue(TCGContext
*s
)
1762 void **desc
= (void **)s
->code_ptr
;
1763 desc
[0] = desc
+ 2; /* entry point */
1764 desc
[1] = 0; /* environment pointer */
1765 s
->code_ptr
= (void *)(desc
+ 2); /* skip over descriptor */
1768 tcg_set_frame(s
, TCG_REG_CALL_STACK
, REG_SAVE_BOT
- CPU_TEMP_BUF_SIZE
,
1772 tcg_out32(s
, MFSPR
| RT(TCG_REG_R0
) | LR
);
1773 tcg_out32(s
, (SZR
== 8 ? STDU
: STWU
)
1774 | SAI(TCG_REG_R1
, TCG_REG_R1
, -FRAME_SIZE
));
1776 for (i
= 0; i
< ARRAY_SIZE(tcg_target_callee_save_regs
); ++i
) {
1777 tcg_out_st(s
, TCG_TYPE_REG
, tcg_target_callee_save_regs
[i
],
1778 TCG_REG_R1
, REG_SAVE_BOT
+ i
* SZR
);
1780 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_R1
, FRAME_SIZE
+LR_OFFSET
);
1782 #ifdef CONFIG_USE_GUEST_BASE
1784 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, GUEST_BASE
);
1785 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
1789 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
1790 tcg_out32(s
, MTSPR
| RS(tcg_target_call_iarg_regs
[1]) | CTR
);
1794 /* Make the caller load the value as the TOC into R2. */
1795 tb_ret_addr
= s
->code_ptr
+ 2;
1796 desc
[1] = tb_ret_addr
;
1797 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_RA
, TCG_REG_R2
);
1798 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1799 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1800 /* Compute from the incoming R12 value. */
1801 tb_ret_addr
= s
->code_ptr
+ 2;
1802 tcg_out32(s
, ADDI
| TAI(TCG_REG_RA
, TCG_REG_R12
,
1803 tcg_ptr_byte_diff(tb_ret_addr
, s
->code_buf
)));
1804 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1806 /* Reserve max 5 insns for the constant load. */
1807 tb_ret_addr
= s
->code_ptr
+ 6;
1808 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_RA
, (intptr_t)tb_ret_addr
);
1809 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1810 while (s
->code_ptr
< tb_ret_addr
) {
1815 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1816 tb_ret_addr
= s
->code_ptr
;
1820 assert(tb_ret_addr
== s
->code_ptr
);
1822 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_R0
, TCG_REG_R1
, FRAME_SIZE
+LR_OFFSET
);
1823 for (i
= 0; i
< ARRAY_SIZE(tcg_target_callee_save_regs
); ++i
) {
1824 tcg_out_ld(s
, TCG_TYPE_REG
, tcg_target_callee_save_regs
[i
],
1825 TCG_REG_R1
, REG_SAVE_BOT
+ i
* SZR
);
1827 tcg_out32(s
, MTSPR
| RS(TCG_REG_R0
) | LR
);
1828 tcg_out32(s
, ADDI
| TAI(TCG_REG_R1
, TCG_REG_R1
, FRAME_SIZE
));
1829 tcg_out32(s
, BCLR
| BO_ALWAYS
);
1832 static void tcg_out_op(TCGContext
*s
, TCGOpcode opc
, const TCGArg
*args
,
1833 const int *const_args
)
1839 case INDEX_op_exit_tb
:
1841 ptrdiff_t disp
= tcg_pcrel_diff(s
, tb_ret_addr
);
1843 /* If we can use a direct branch, otherwise use the value in RA.
1844 Note that the direct branch is always forward. If it's in
1845 range now, it'll still be in range after the movi. Don't
1846 bother about the 20 bytes where the test here fails but it
1847 would succeed below. */
1848 if (!in_range_b(disp
)) {
1849 tcg_out32(s
, MTSPR
| RS(TCG_REG_RA
) | CTR
);
1850 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R3
, args
[0]);
1851 tcg_out32(s
, BCCTR
| BO_ALWAYS
);
1855 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R3
, args
[0]);
1856 tcg_out_b(s
, 0, tb_ret_addr
);
1858 case INDEX_op_goto_tb
:
1859 if (s
->tb_jmp_offset
) {
1860 /* Direct jump method. */
1861 s
->tb_jmp_offset
[args
[0]] = tcg_current_code_size(s
);
1864 /* Indirect jump method. */
1867 s
->tb_next_offset
[args
[0]] = tcg_current_code_size(s
);
1871 TCGLabel
*l
= arg_label(args
[0]);
1874 tcg_out_b(s
, 0, l
->u
.value_ptr
);
1876 tcg_out_reloc(s
, s
->code_ptr
, R_PPC_REL24
, l
, 0);
1877 tcg_out_b_noaddr(s
, B
);
1881 case INDEX_op_ld8u_i32
:
1882 case INDEX_op_ld8u_i64
:
1883 tcg_out_mem_long(s
, LBZ
, LBZX
, args
[0], args
[1], args
[2]);
1885 case INDEX_op_ld8s_i32
:
1886 case INDEX_op_ld8s_i64
:
1887 tcg_out_mem_long(s
, LBZ
, LBZX
, args
[0], args
[1], args
[2]);
1888 tcg_out32(s
, EXTSB
| RS(args
[0]) | RA(args
[0]));
1890 case INDEX_op_ld16u_i32
:
1891 case INDEX_op_ld16u_i64
:
1892 tcg_out_mem_long(s
, LHZ
, LHZX
, args
[0], args
[1], args
[2]);
1894 case INDEX_op_ld16s_i32
:
1895 case INDEX_op_ld16s_i64
:
1896 tcg_out_mem_long(s
, LHA
, LHAX
, args
[0], args
[1], args
[2]);
1898 case INDEX_op_ld_i32
:
1899 case INDEX_op_ld32u_i64
:
1900 tcg_out_mem_long(s
, LWZ
, LWZX
, args
[0], args
[1], args
[2]);
1902 case INDEX_op_ld32s_i64
:
1903 tcg_out_mem_long(s
, LWA
, LWAX
, args
[0], args
[1], args
[2]);
1905 case INDEX_op_ld_i64
:
1906 tcg_out_mem_long(s
, LD
, LDX
, args
[0], args
[1], args
[2]);
1908 case INDEX_op_st8_i32
:
1909 case INDEX_op_st8_i64
:
1910 tcg_out_mem_long(s
, STB
, STBX
, args
[0], args
[1], args
[2]);
1912 case INDEX_op_st16_i32
:
1913 case INDEX_op_st16_i64
:
1914 tcg_out_mem_long(s
, STH
, STHX
, args
[0], args
[1], args
[2]);
1916 case INDEX_op_st_i32
:
1917 case INDEX_op_st32_i64
:
1918 tcg_out_mem_long(s
, STW
, STWX
, args
[0], args
[1], args
[2]);
1920 case INDEX_op_st_i64
:
1921 tcg_out_mem_long(s
, STD
, STDX
, args
[0], args
[1], args
[2]);
1924 case INDEX_op_add_i32
:
1925 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1926 if (const_args
[2]) {
1928 tcg_out_mem_long(s
, ADDI
, ADD
, a0
, a1
, (int32_t)a2
);
1930 tcg_out32(s
, ADD
| TAB(a0
, a1
, a2
));
1933 case INDEX_op_sub_i32
:
1934 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1935 if (const_args
[1]) {
1936 if (const_args
[2]) {
1937 tcg_out_movi(s
, TCG_TYPE_I32
, a0
, a1
- a2
);
1939 tcg_out32(s
, SUBFIC
| TAI(a0
, a2
, a1
));
1941 } else if (const_args
[2]) {
1945 tcg_out32(s
, SUBF
| TAB(a0
, a2
, a1
));
1949 case INDEX_op_and_i32
:
1950 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1951 if (const_args
[2]) {
1952 tcg_out_andi32(s
, a0
, a1
, a2
);
1954 tcg_out32(s
, AND
| SAB(a1
, a0
, a2
));
1957 case INDEX_op_and_i64
:
1958 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1959 if (const_args
[2]) {
1960 tcg_out_andi64(s
, a0
, a1
, a2
);
1962 tcg_out32(s
, AND
| SAB(a1
, a0
, a2
));
1965 case INDEX_op_or_i64
:
1966 case INDEX_op_or_i32
:
1967 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1968 if (const_args
[2]) {
1969 tcg_out_ori32(s
, a0
, a1
, a2
);
1971 tcg_out32(s
, OR
| SAB(a1
, a0
, a2
));
1974 case INDEX_op_xor_i64
:
1975 case INDEX_op_xor_i32
:
1976 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1977 if (const_args
[2]) {
1978 tcg_out_xori32(s
, a0
, a1
, a2
);
1980 tcg_out32(s
, XOR
| SAB(a1
, a0
, a2
));
1983 case INDEX_op_andc_i32
:
1984 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1985 if (const_args
[2]) {
1986 tcg_out_andi32(s
, a0
, a1
, ~a2
);
1988 tcg_out32(s
, ANDC
| SAB(a1
, a0
, a2
));
1991 case INDEX_op_andc_i64
:
1992 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1993 if (const_args
[2]) {
1994 tcg_out_andi64(s
, a0
, a1
, ~a2
);
1996 tcg_out32(s
, ANDC
| SAB(a1
, a0
, a2
));
1999 case INDEX_op_orc_i32
:
2000 if (const_args
[2]) {
2001 tcg_out_ori32(s
, args
[0], args
[1], ~args
[2]);
2005 case INDEX_op_orc_i64
:
2006 tcg_out32(s
, ORC
| SAB(args
[1], args
[0], args
[2]));
2008 case INDEX_op_eqv_i32
:
2009 if (const_args
[2]) {
2010 tcg_out_xori32(s
, args
[0], args
[1], ~args
[2]);
2014 case INDEX_op_eqv_i64
:
2015 tcg_out32(s
, EQV
| SAB(args
[1], args
[0], args
[2]));
2017 case INDEX_op_nand_i32
:
2018 case INDEX_op_nand_i64
:
2019 tcg_out32(s
, NAND
| SAB(args
[1], args
[0], args
[2]));
2021 case INDEX_op_nor_i32
:
2022 case INDEX_op_nor_i64
:
2023 tcg_out32(s
, NOR
| SAB(args
[1], args
[0], args
[2]));
2026 case INDEX_op_mul_i32
:
2027 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2028 if (const_args
[2]) {
2029 tcg_out32(s
, MULLI
| TAI(a0
, a1
, a2
));
2031 tcg_out32(s
, MULLW
| TAB(a0
, a1
, a2
));
2035 case INDEX_op_div_i32
:
2036 tcg_out32(s
, DIVW
| TAB(args
[0], args
[1], args
[2]));
2039 case INDEX_op_divu_i32
:
2040 tcg_out32(s
, DIVWU
| TAB(args
[0], args
[1], args
[2]));
2043 case INDEX_op_shl_i32
:
2044 if (const_args
[2]) {
2045 tcg_out_shli32(s
, args
[0], args
[1], args
[2]);
2047 tcg_out32(s
, SLW
| SAB(args
[1], args
[0], args
[2]));
2050 case INDEX_op_shr_i32
:
2051 if (const_args
[2]) {
2052 tcg_out_shri32(s
, args
[0], args
[1], args
[2]);
2054 tcg_out32(s
, SRW
| SAB(args
[1], args
[0], args
[2]));
2057 case INDEX_op_sar_i32
:
2058 if (const_args
[2]) {
2059 tcg_out32(s
, SRAWI
| RS(args
[1]) | RA(args
[0]) | SH(args
[2]));
2061 tcg_out32(s
, SRAW
| SAB(args
[1], args
[0], args
[2]));
2064 case INDEX_op_rotl_i32
:
2065 if (const_args
[2]) {
2066 tcg_out_rlw(s
, RLWINM
, args
[0], args
[1], args
[2], 0, 31);
2068 tcg_out32(s
, RLWNM
| SAB(args
[1], args
[0], args
[2])
2072 case INDEX_op_rotr_i32
:
2073 if (const_args
[2]) {
2074 tcg_out_rlw(s
, RLWINM
, args
[0], args
[1], 32 - args
[2], 0, 31);
2076 tcg_out32(s
, SUBFIC
| TAI(TCG_REG_R0
, args
[2], 32));
2077 tcg_out32(s
, RLWNM
| SAB(args
[1], args
[0], TCG_REG_R0
)
2082 case INDEX_op_brcond_i32
:
2083 tcg_out_brcond(s
, args
[2], args
[0], args
[1], const_args
[1],
2084 arg_label(args
[3]), TCG_TYPE_I32
);
2086 case INDEX_op_brcond_i64
:
2087 tcg_out_brcond(s
, args
[2], args
[0], args
[1], const_args
[1],
2088 arg_label(args
[3]), TCG_TYPE_I64
);
2090 case INDEX_op_brcond2_i32
:
2091 tcg_out_brcond2(s
, args
, const_args
);
2094 case INDEX_op_neg_i32
:
2095 case INDEX_op_neg_i64
:
2096 tcg_out32(s
, NEG
| RT(args
[0]) | RA(args
[1]));
2099 case INDEX_op_not_i32
:
2100 case INDEX_op_not_i64
:
2101 tcg_out32(s
, NOR
| SAB(args
[1], args
[0], args
[1]));
2104 case INDEX_op_add_i64
:
2105 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2106 if (const_args
[2]) {
2108 tcg_out_mem_long(s
, ADDI
, ADD
, a0
, a1
, a2
);
2110 tcg_out32(s
, ADD
| TAB(a0
, a1
, a2
));
2113 case INDEX_op_sub_i64
:
2114 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2115 if (const_args
[1]) {
2116 if (const_args
[2]) {
2117 tcg_out_movi(s
, TCG_TYPE_I64
, a0
, a1
- a2
);
2119 tcg_out32(s
, SUBFIC
| TAI(a0
, a2
, a1
));
2121 } else if (const_args
[2]) {
2125 tcg_out32(s
, SUBF
| TAB(a0
, a2
, a1
));
2129 case INDEX_op_shl_i64
:
2130 if (const_args
[2]) {
2131 tcg_out_shli64(s
, args
[0], args
[1], args
[2]);
2133 tcg_out32(s
, SLD
| SAB(args
[1], args
[0], args
[2]));
2136 case INDEX_op_shr_i64
:
2137 if (const_args
[2]) {
2138 tcg_out_shri64(s
, args
[0], args
[1], args
[2]);
2140 tcg_out32(s
, SRD
| SAB(args
[1], args
[0], args
[2]));
2143 case INDEX_op_sar_i64
:
2144 if (const_args
[2]) {
2145 int sh
= SH(args
[2] & 0x1f) | (((args
[2] >> 5) & 1) << 1);
2146 tcg_out32(s
, SRADI
| RA(args
[0]) | RS(args
[1]) | sh
);
2148 tcg_out32(s
, SRAD
| SAB(args
[1], args
[0], args
[2]));
2151 case INDEX_op_rotl_i64
:
2152 if (const_args
[2]) {
2153 tcg_out_rld(s
, RLDICL
, args
[0], args
[1], args
[2], 0);
2155 tcg_out32(s
, RLDCL
| SAB(args
[1], args
[0], args
[2]) | MB64(0));
2158 case INDEX_op_rotr_i64
:
2159 if (const_args
[2]) {
2160 tcg_out_rld(s
, RLDICL
, args
[0], args
[1], 64 - args
[2], 0);
2162 tcg_out32(s
, SUBFIC
| TAI(TCG_REG_R0
, args
[2], 64));
2163 tcg_out32(s
, RLDCL
| SAB(args
[1], args
[0], TCG_REG_R0
) | MB64(0));
2167 case INDEX_op_mul_i64
:
2168 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2169 if (const_args
[2]) {
2170 tcg_out32(s
, MULLI
| TAI(a0
, a1
, a2
));
2172 tcg_out32(s
, MULLD
| TAB(a0
, a1
, a2
));
2175 case INDEX_op_div_i64
:
2176 tcg_out32(s
, DIVD
| TAB(args
[0], args
[1], args
[2]));
2178 case INDEX_op_divu_i64
:
2179 tcg_out32(s
, DIVDU
| TAB(args
[0], args
[1], args
[2]));
2182 case INDEX_op_qemu_ld_i32
:
2183 tcg_out_qemu_ld(s
, args
, false);
2185 case INDEX_op_qemu_ld_i64
:
2186 tcg_out_qemu_ld(s
, args
, true);
2188 case INDEX_op_qemu_st_i32
:
2189 tcg_out_qemu_st(s
, args
, false);
2191 case INDEX_op_qemu_st_i64
:
2192 tcg_out_qemu_st(s
, args
, true);
2195 case INDEX_op_ext8s_i32
:
2196 case INDEX_op_ext8s_i64
:
2199 case INDEX_op_ext16s_i32
:
2200 case INDEX_op_ext16s_i64
:
2203 case INDEX_op_ext32s_i64
:
2207 tcg_out32(s
, c
| RS(args
[1]) | RA(args
[0]));
2210 case INDEX_op_setcond_i32
:
2211 tcg_out_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1], args
[2],
2214 case INDEX_op_setcond_i64
:
2215 tcg_out_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1], args
[2],
2218 case INDEX_op_setcond2_i32
:
2219 tcg_out_setcond2(s
, args
, const_args
);
2222 case INDEX_op_bswap16_i32
:
2223 case INDEX_op_bswap16_i64
:
2224 a0
= args
[0], a1
= args
[1];
2227 /* a0 = (a1 r<< 24) & 0xff # 000c */
2228 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 24, 24, 31);
2229 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
2230 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 8, 16, 23);
2232 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
2233 tcg_out_rlw(s
, RLWINM
, TCG_REG_R0
, a1
, 8, 16, 23);
2234 /* a0 = (a1 r<< 24) & 0xff # 000c */
2235 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 24, 24, 31);
2236 /* a0 = a0 | r0 # 00dc */
2237 tcg_out32(s
, OR
| SAB(TCG_REG_R0
, a0
, a0
));
2241 case INDEX_op_bswap32_i32
:
2242 case INDEX_op_bswap32_i64
:
2243 /* Stolen from gcc's builtin_bswap32 */
2245 a0
= args
[0] == a1
? TCG_REG_R0
: args
[0];
2247 /* a1 = args[1] # abcd */
2248 /* a0 = rotate_left (a1, 8) # bcda */
2249 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 8, 0, 31);
2250 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
2251 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 0, 7);
2252 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
2253 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 16, 23);
2255 if (a0
== TCG_REG_R0
) {
2256 tcg_out_mov(s
, TCG_TYPE_REG
, args
[0], a0
);
2260 case INDEX_op_bswap64_i64
:
2261 a0
= args
[0], a1
= args
[1], a2
= TCG_REG_R0
;
2267 /* a1 = # abcd efgh */
2268 /* a0 = rl32(a1, 8) # 0000 fghe */
2269 tcg_out_rlw(s
, RLWINM
, a0
, a1
, 8, 0, 31);
2270 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
2271 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 0, 7);
2272 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
2273 tcg_out_rlw(s
, RLWIMI
, a0
, a1
, 24, 16, 23);
2275 /* a0 = rl64(a0, 32) # hgfe 0000 */
2276 /* a2 = rl64(a1, 32) # efgh abcd */
2277 tcg_out_rld(s
, RLDICL
, a0
, a0
, 32, 0);
2278 tcg_out_rld(s
, RLDICL
, a2
, a1
, 32, 0);
2280 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
2281 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 8, 0, 31);
2282 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
2283 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 24, 0, 7);
2284 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
2285 tcg_out_rlw(s
, RLWIMI
, a0
, a2
, 24, 16, 23);
2288 tcg_out_mov(s
, TCG_TYPE_REG
, args
[0], a0
);
2292 case INDEX_op_deposit_i32
:
2293 if (const_args
[2]) {
2294 uint32_t mask
= ((2u << (args
[4] - 1)) - 1) << args
[3];
2295 tcg_out_andi32(s
, args
[0], args
[0], ~mask
);
2297 tcg_out_rlw(s
, RLWIMI
, args
[0], args
[2], args
[3],
2298 32 - args
[3] - args
[4], 31 - args
[3]);
2301 case INDEX_op_deposit_i64
:
2302 if (const_args
[2]) {
2303 uint64_t mask
= ((2ull << (args
[4] - 1)) - 1) << args
[3];
2304 tcg_out_andi64(s
, args
[0], args
[0], ~mask
);
2306 tcg_out_rld(s
, RLDIMI
, args
[0], args
[2], args
[3],
2307 64 - args
[3] - args
[4]);
2311 case INDEX_op_movcond_i32
:
2312 tcg_out_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1], args
[2],
2313 args
[3], args
[4], const_args
[2]);
2315 case INDEX_op_movcond_i64
:
2316 tcg_out_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1], args
[2],
2317 args
[3], args
[4], const_args
[2]);

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
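
    /* A note on the temporary above: the low-part result must not clobber
       the high-part inputs (args[3], and args[5] when it is a register),
       since those are still needed by the following adde/addme/addze, so in
       that case the low part is built in R0 and copied out afterwards.  */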

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
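
/* Operand constraint strings below use the generic TCG letters: "r" for any
   general register, "i" for any immediate, and "0" to tie an input to output
   operand 0.  The capital letters ("I", "T", "U", "Z", "M", "L", "S") are
   backend-specific and are interpreted by target_parse_constraint() earlier
   in this file, roughly matching the TCG_CT_CONST_* masks and the softmmu
   address-register restrictions.  */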
static const TCGTargetOpDef ppc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },

    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },
    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_muluh_i32, { "r", "r", "r" } },
    { INDEX_op_mulsh_i32, { "r", "r", "r" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_brcond_i64, { "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
    { INDEX_op_muluh_i64, { "r", "r", "r" } },
#endif

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } },
#else
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S" } },
#elif TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S", "S" } },
#else
    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S", "S" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } },
#endif

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa_2_06 = true;
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);  /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    if (USE_REG_RA) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return addr */
    }

    tcg_add_target_add_op_defs(ppc_op_defs);
}

#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
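
/* For illustration: FRAME_SIZE is emitted below as a two-byte uleb128 --
   byte 0 is (FRAME_SIZE & 0x7f) with the continuation bit 0x80 set, byte 1
   is FRAME_SIZE >> 7.  A frame of 0x1a0 bytes would encode as 0xa0 0x03.
   Two bytes only cover values below 1 << 14, hence the assertion above.  */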

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }
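
    /* Each pair written above is a DW_CFA_offset entry: 0x80 plus the
       register number, then the save slot as a factored offset, i.e. the
       CFA-relative offset divided by the CIE data alignment of -SZR, which
       is why the byte ends up as the positive (FRAME_SIZE - slot) / SZR.  */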

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */

static size_t dcache_bsize = 16;
static size_t icache_bsize = 16;

void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p, start1, stop1;
    size_t dsize = dcache_bsize;
    size_t isize = icache_bsize;

    start1 = start & ~(dsize - 1);
    stop1 = (stop + dsize - 1) & ~(dsize - 1);
    for (p = start1; p < stop1; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");

    start1 = start & ~(isize - 1);
    stop1 = (stop + isize - 1) & ~(isize - 1);
    for (p = start1; p < stop1; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
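
/* The sequence above follows the usual PowerPC self-modifying-code recipe:
   dcbst pushes the newly written instructions out of the data cache, sync
   makes those stores visible, icbi invalidates any stale instruction-cache
   lines over the same range, and the final sync/isync pair forces the
   processor to refetch instructions after the invalidation.  */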

#if defined _AIX
#include <sys/systemcfg.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    icache_bsize = _system_configuration.icache_line;
    dcache_bsize = _system_configuration.dcache_line;
}

#elif defined __linux__
static void __attribute__((constructor)) tcg_cache_init(void)
{
    unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE);
    unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE);

    if (dsize == 0 || isize == 0) {
        if (dsize == 0) {
            fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n");
        }
        if (isize == 0) {
            fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n");
        }
        exit(1);
    }
    dcache_bsize = dsize;
    icache_bsize = isize;
}

#elif defined __APPLE__
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    size_t len;
    unsigned cacheline;
    int name[2] = { CTL_HW, HW_CACHELINE };

    len = sizeof(cacheline);
    if (sysctl(name, 2, &cacheline, &len, NULL, 0)) {
        perror("sysctl CTL_HW HW_CACHELINE failed");
        exit(1);
    }
    dcache_bsize = cacheline;
    icache_bsize = cacheline;
}

#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
{
    size_t len = 4;
    unsigned cacheline;

    if (sysctlbyname ("machdep.cacheline_size", &cacheline, &len, NULL, 0)) {
        fprintf(stderr, "sysctlbyname machdep.cacheline_size failed: %s\n",
                strerror(errno));
        exit(1);
    }
    dcache_bsize = cacheline;
    icache_bsize = cacheline;
}
#endif