/* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "cfgcleanup.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "insn-codes.h"
#include "fold-const.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "diagnostic-core.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "hw-doloop.h"

/* This file should be included last.  */
#include "target-def.h"
/* Enumeration for all of the relational tests, so that we can build
   arrays indexed by the test type, and not worry about the order
   of EQ, NE, etc.  */

enum internal_test
{
  ITEST_EQ,
  ITEST_NE,
  ITEST_GT,
  ITEST_GE,
  ITEST_LT,
  ITEST_LE,
  ITEST_GTU,
  ITEST_GEU,
  ITEST_LTU,
  ITEST_LEU,
  ITEST_MAX
};
/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char xtensa_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
/* Current frame size calculated by compute_frame_size.  */
unsigned xtensa_current_frame_size;

/* Callee-save area size in the current frame calculated by
   compute_frame_size.  */
int xtensa_callee_save_size;
/* Largest block move to handle in-line.  */
#define LARGEST_MOVE_RATIO 15
/* Define the structure for the machine field in struct function.  */
struct GTY(()) machine_function
{
  int accesses_prev_frame;
  bool need_a7_copy;
  bool vararg_a7;
  rtx vararg_a7_copy;
  rtx_insn *set_frame_ptr_insn;
};
/* Vector, indexed by hard register number, which contains 1 for a
   register that is allowable in a candidate for leaf function
   treatment.  */

const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
{
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1
};
static void xtensa_option_override (void);
static enum internal_test map_test_to_internal_test (enum rtx_code);
static rtx gen_int_relational (enum rtx_code, rtx, rtx, int *);
static rtx gen_float_relational (enum rtx_code, rtx, rtx);
static rtx gen_conditional_move (enum rtx_code, machine_mode, rtx, rtx);
static rtx fixup_subreg_mem (rtx);
static struct machine_function * xtensa_init_machine_status (void);
static rtx xtensa_legitimize_tls_address (rtx);
static rtx xtensa_legitimize_address (rtx, rtx, machine_mode);
static bool xtensa_mode_dependent_address_p (const_rtx, addr_space_t);
static bool xtensa_return_in_msb (const_tree);
static void printx (FILE *, signed int);
static rtx xtensa_builtin_saveregs (void);
static bool xtensa_legitimate_address_p (machine_mode, rtx, bool);
static unsigned int xtensa_multibss_section_type_flags (tree, const char *,
							int) ATTRIBUTE_UNUSED;
static section *xtensa_select_rtx_section (machine_mode, rtx,
					   unsigned HOST_WIDE_INT);
static bool xtensa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int xtensa_register_move_cost (machine_mode, reg_class_t,
				      reg_class_t);
static int xtensa_memory_move_cost (machine_mode, reg_class_t, bool);
static tree xtensa_build_builtin_va_list (void);
static bool xtensa_return_in_memory (const_tree, const_tree);
static tree xtensa_gimplify_va_arg_expr (tree, tree, gimple_seq *,
					 gimple_seq *);
static void xtensa_function_arg_advance (cumulative_args_t, machine_mode,
					 const_tree, bool);
static rtx xtensa_function_arg (cumulative_args_t, machine_mode,
				const_tree, bool);
static rtx xtensa_function_incoming_arg (cumulative_args_t,
					 machine_mode, const_tree, bool);
static rtx xtensa_function_value (const_tree, const_tree, bool);
static rtx xtensa_libcall_value (machine_mode, const_rtx);
static bool xtensa_function_value_regno_p (const unsigned int);
static unsigned int xtensa_function_arg_boundary (machine_mode,
						  const_tree);
static void xtensa_init_builtins (void);
static tree xtensa_fold_builtin (tree, int, tree *, bool);
static rtx xtensa_expand_builtin (tree, rtx, rtx, machine_mode, int);
static void xtensa_va_start (tree, rtx);
static bool xtensa_frame_pointer_required (void);
static rtx xtensa_static_chain (const_tree, bool);
static void xtensa_asm_trampoline_template (FILE *);
static void xtensa_trampoline_init (rtx, tree, rtx);
static bool xtensa_output_addr_const_extra (FILE *, rtx);
static bool xtensa_cannot_force_const_mem (machine_mode, rtx);

static reg_class_t xtensa_preferred_reload_class (rtx, reg_class_t);
static reg_class_t xtensa_preferred_output_reload_class (rtx, reg_class_t);
static reg_class_t xtensa_secondary_reload (bool, rtx, reg_class_t,
					    machine_mode,
					    struct secondary_reload_info *);

static bool constantpool_address_p (const_rtx addr);
static bool xtensa_legitimate_constant_p (machine_mode, rtx);
static void xtensa_reorg (void);
static bool xtensa_can_use_doloop_p (const widest_int &, const widest_int &,
				     unsigned int, bool);
static const char *xtensa_invalid_within_doloop (const rtx_insn *);

static bool xtensa_member_type_forces_blk (const_tree,
					   machine_mode mode);

static void xtensa_conditional_register_usage (void);
/* These hooks specify assembly directives for creating certain kinds
   of integer object.  */

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION  xtensa_select_rtx_section

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS xtensa_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P xtensa_mode_dependent_address_p

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST xtensa_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST xtensa_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS xtensa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK xtensa_member_type_forces_blk

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST xtensa_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START xtensa_va_start

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY xtensa_return_in_memory
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE xtensa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE xtensa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P xtensa_function_value_regno_p

#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE xtensa_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG xtensa_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG xtensa_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY xtensa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS xtensa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR xtensa_gimplify_va_arg_expr

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB xtensa_return_in_msb

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS xtensa_init_builtins
#undef  TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN xtensa_fold_builtin
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN xtensa_expand_builtin

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS xtensa_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS xtensa_preferred_output_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD xtensa_secondary_reload

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (TARGET_THREADPTR && HAVE_AS_TLS)

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM xtensa_cannot_force_const_mem

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P	xtensa_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED xtensa_frame_pointer_required

#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN xtensa_static_chain
#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE xtensa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT xtensa_trampoline_init

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE xtensa_option_override

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA xtensa_output_addr_const_extra

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P xtensa_legitimate_constant_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG xtensa_reorg

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P xtensa_can_use_doloop_p

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP xtensa_invalid_within_doloop

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE xtensa_conditional_register_usage
struct gcc_target targetm = TARGET_INITIALIZER;
/* Functions to test Xtensa immediate operand validity.  */

bool
xtensa_simm8 (HOST_WIDE_INT v)
{
  return v >= -128 && v <= 127;
}


bool
xtensa_simm8x256 (HOST_WIDE_INT v)
{
  return (v & 255) == 0 && (v >= -32768 && v <= 32512);
}


bool
xtensa_simm12b (HOST_WIDE_INT v)
{
  return v >= -2048 && v <= 2047;
}


static bool
xtensa_uimm8 (HOST_WIDE_INT v)
{
  return v >= 0 && v <= 255;
}


static bool
xtensa_uimm8x2 (HOST_WIDE_INT v)
{
  return (v & 1) == 0 && (v >= 0 && v <= 510);
}


static bool
xtensa_uimm8x4 (HOST_WIDE_INT v)
{
  return (v & 3) == 0 && (v >= 0 && v <= 1020);
}
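
/* Return true if V is one of the handful of constants encodable in the
   4-bit immediate field of the conditional branch instructions (the
   "b4const" table).  */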
static bool
xtensa_b4const (HOST_WIDE_INT v)
{
  switch (v)
    {
    case -1:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 10:
    case 12:
    case 16:
    case 32:
    case 64:
    case 128:
    case 256:
      return true;
    }
  return false;
}


bool
xtensa_b4const_or_zero (HOST_WIDE_INT v)
{
  if (v == 0)
    return true;
  return xtensa_b4const (v);
}


bool
xtensa_b4constu (HOST_WIDE_INT v)
{
  switch (v)
    {
    case 32768:
    case 65536:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 10:
    case 12:
    case 16:
    case 32:
    case 64:
    case 128:
    case 256:
      return true;
    }
  return false;
}
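
/* Return true if V is a mask of N low-order one bits, 1 <= N <= 16,
   i.e. a value usable as the mask immediate of an EXTUI instruction.  */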
bool
xtensa_mask_immediate (HOST_WIDE_INT v)
{
#define MAX_MASK_SIZE 16
  int mask_size;

  for (mask_size = 1; mask_size <= MAX_MASK_SIZE; mask_size++)
    {
      if ((v & 1) == 0)
	return false;
      v = v >> 1;
      if (v == 0)
	return true;
    }

  return false;
}
/* This is just like the standard true_regnum() function except that it
   works even when reg_renumber is not initialized.  */

int
xt_true_regnum (rtx x)
{
  if (GET_CODE (x) == REG)
    {
      if (reg_renumber
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && reg_renumber[REGNO (x)] >= 0)
	return reg_renumber[REGNO (x)];
      return REGNO (x);
    }
  if (GET_CODE (x) == SUBREG)
    {
      int base = xt_true_regnum (SUBREG_REG (x));
      if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
	return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
					   GET_MODE (SUBREG_REG (x)),
					   SUBREG_BYTE (x), GET_MODE (x));
    }
  return -1;
}
bool
xtensa_valid_move (machine_mode mode, rtx *operands)
{
  /* Either the destination or source must be a register, and the
     MAC16 accumulator doesn't count.  */

  if (register_operand (operands[0], mode))
    {
      int dst_regnum = xt_true_regnum (operands[0]);

      /* The stack pointer can only be assigned with a MOVSP opcode.  */
      if (dst_regnum == STACK_POINTER_REGNUM)
	return !TARGET_WINDOWED_ABI
	  || (mode == SImode
	      && register_operand (operands[1], mode)
	      && !ACC_REG_P (xt_true_regnum (operands[1])));

      if (!ACC_REG_P (dst_regnum))
	return true;
    }
  if (register_operand (operands[1], mode))
    {
      int src_regnum = xt_true_regnum (operands[1]);
      if (!ACC_REG_P (src_regnum))
	return true;
    }
  return FALSE;
}
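
/* Return true if OP is a MEM whose address is a base register, or a base
   register plus a word-aligned offset in the range 0..60, i.e. an address
   reachable by the short "density" load/store forms.  */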
bool
smalloffset_mem_p (rtx op)
{
  if (GET_CODE (op) == MEM)
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == REG)
	return BASE_REG_P (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  rtx offset = XEXP (addr, 0);
	  HOST_WIDE_INT val;
	  if (GET_CODE (offset) != CONST_INT)
	    offset = XEXP (addr, 1);
	  if (GET_CODE (offset) != CONST_INT)
	    return FALSE;

	  val = INTVAL (offset);
	  return (val & 3) == 0 && (val >= 0 && val <= 60);
	}
    }
  return FALSE;
}
static bool
constantpool_address_p (const_rtx addr)
{
  const_rtx sym = addr;

  if (GET_CODE (addr) == CONST)
    {
      rtx offset;

      /* Only handle (PLUS (SYM, OFFSET)) form.  */
      addr = XEXP (addr, 0);
      if (GET_CODE (addr) != PLUS)
	return false;

      /* Make sure the address is word aligned.  */
      offset = XEXP (addr, 1);
      if ((!CONST_INT_P (offset))
	  || ((INTVAL (offset) & 3) != 0))
	return false;

      sym = XEXP (addr, 0);
    }

  if ((GET_CODE (sym) == SYMBOL_REF)
      && CONSTANT_POOL_ADDRESS_P (sym))
    return true;

  return false;
}
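
/* Return true if OP is a MEM, possibly wrapped in a SUBREG, whose address
   refers to the constant pool.  */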
bool
constantpool_mem_p (rtx op)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == MEM)
    return constantpool_address_p (XEXP (op, 0));
  return FALSE;
}
/* Return TRUE if X is a thread-local symbol.  */

static bool
xtensa_tls_symbol_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}
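
/* Sign-extend the low part of SRC into DST by shifting it left and then
   arithmetically right by the same bit count.  */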
void
xtensa_extend_reg (rtx dst, rtx src)
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));

  /* Generate paradoxical subregs as needed so that the modes match.  */
  src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
  dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);

  emit_insn (gen_ashlsi3 (temp, src, shift));
  emit_insn (gen_ashrsi3 (dst, temp, shift));
}
bool
xtensa_mem_offset (unsigned v, machine_mode mode)
{
  switch (mode)
    {
    case BLKmode:
      /* Handle the worst case for block moves.  See xtensa_expand_block_move
	 where we emit an optimized block move operation if the block can be
	 moved in < "move_ratio" pieces.  The worst case is when the block is
	 aligned but has a size of (3 mod 4) (does this happen?) so that the
	 last piece requires a byte load/store.  */
      return (xtensa_uimm8 (v)
	      && xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));

    case QImode:
      return xtensa_uimm8 (v);

    case HImode:
      return xtensa_uimm8x2 (v);

    case DFmode:
      return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));

    default:
      break;
    }

  return xtensa_uimm8x4 (v);
}
/* Make normal rtx_code into something we can index from an array.  */

static enum internal_test
map_test_to_internal_test (enum rtx_code test_code)
{
  enum internal_test test = ITEST_MAX;

  switch (test_code)
    {
    default:			break;
    case EQ:  test = ITEST_EQ;  break;
    case NE:  test = ITEST_NE;  break;
    case GT:  test = ITEST_GT;  break;
    case GE:  test = ITEST_GE;  break;
    case LT:  test = ITEST_LT;  break;
    case LE:  test = ITEST_LE;  break;
    case GTU: test = ITEST_GTU; break;
    case GEU: test = ITEST_GEU; break;
    case LTU: test = ITEST_LTU; break;
    case LEU: test = ITEST_LEU; break;
    }

  return test;
}
/* Generate the code to compare two integer values.  The return value is
   the comparison expression.  */

static rtx
gen_int_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
		    rtx cmp0, /* first operand to compare */
		    rtx cmp1, /* second operand to compare */
		    int *p_invert /* whether branch needs to reverse test */)
{
  struct cmp_info
  {
    enum rtx_code test_code;	/* test code to use in insn */
    bool (*const_range_p) (HOST_WIDE_INT); /* range check function */
    int const_add;		/* constant to add (convert LE -> LT) */
    int reverse_regs;		/* reverse registers in test */
    int invert_const;		/* != 0 if invert value if cmp1 is constant */
    int invert_reg;		/* != 0 if invert value if cmp1 is register */
    int unsignedp;		/* != 0 for unsigned comparisons.  */
  };

  static struct cmp_info info[ (int)ITEST_MAX ] = {

    { EQ,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* EQ  */
    { NE,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* NE  */

    { LT,	xtensa_b4const_or_zero,	1, 1, 1, 0, 0 },	/* GT  */
    { GE,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* GE  */
    { LT,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* LT  */
    { GE,	xtensa_b4const_or_zero,	1, 1, 1, 0, 0 },	/* LE  */

    { LTU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* GTU */
    { GEU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* GEU */
    { LTU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* LTU */
    { GEU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* LEU */
  };

  enum internal_test test;
  machine_mode mode;
  struct cmp_info *p_info;

  test = map_test_to_internal_test (test_code);
  gcc_assert (test != ITEST_MAX);

  p_info = &info[ (int)test ];

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);
      unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;

      /* if the immediate overflows or does not fit in the immediate field,
	 spill it to a register */

      if ((p_info->unsignedp ?
	   (uvalue + p_info->const_add > uvalue) :
	   (value + p_info->const_add > value)) != (p_info->const_add > 0))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
      else if (!(p_info->const_range_p) (value + p_info->const_add))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
    }
  else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
    {
      cmp1 = force_reg (mode, cmp1);
    }

  /* See if we need to invert the result.  */
  *p_invert = ((GET_CODE (cmp1) == CONST_INT)
	       ? p_info->invert_const
	       : p_info->invert_reg);

  /* Comparison to constants, may involve adding 1 to change a LT into LE.
     Comparison between two registers, may involve switching operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
	cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);
    }
  else if (p_info->reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  return gen_rtx_fmt_ee (p_info->test_code, VOIDmode, cmp0, cmp1);
}
/* Generate the code to compare two float values.  The return value is
   the comparison expression.  */

static rtx
gen_float_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
		      rtx cmp0, /* first operand to compare */
		      rtx cmp1 /* second operand to compare */)
{
  rtx (*gen_fn) (rtx, rtx, rtx);
  rtx brtmp;
  int reverse_regs, invert;

  switch (test_code)
    {
    case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
    case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
    case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
    case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
    case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
    case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
    case UNEQ: reverse_regs = 0; invert = 0; gen_fn = gen_suneq_sf; break;
    case LTGT: reverse_regs = 0; invert = 1; gen_fn = gen_suneq_sf; break;
    case UNLE: reverse_regs = 0; invert = 0; gen_fn = gen_sunle_sf; break;
    case UNGT: reverse_regs = 1; invert = 0; gen_fn = gen_sunlt_sf; break;
    case UNLT: reverse_regs = 0; invert = 0; gen_fn = gen_sunlt_sf; break;
    case UNGE: reverse_regs = 1; invert = 0; gen_fn = gen_sunle_sf; break;
    case UNORDERED:
      reverse_regs = 0; invert = 0; gen_fn = gen_sunordered_sf; break;
    case ORDERED:
      reverse_regs = 0; invert = 1; gen_fn = gen_sunordered_sf; break;
    default:
      fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
      reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
    }

  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx_fmt_ee (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}
void
xtensa_expand_conditional_branch (rtx *operands, machine_mode mode)
{
  enum rtx_code test_code = GET_CODE (operands[0]);
  rtx cmp0 = operands[1];
  rtx cmp1 = operands[2];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (mode)
    {
    case DFmode:
    default:
      fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));

    case SImode:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case SFmode:
      if (!TARGET_HARD_FLOAT)
	fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode,
						cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */

  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  label2 = pc_rtx;

  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
						     label1,
						     label2)));
}
static rtx
gen_conditional_move (enum rtx_code code, machine_mode mode,
		      rtx op0, rtx op1)
{
  if (mode == SImode)
    {
      rtx cmp;

      /* Jump optimization calls get_condition() which canonicalizes
	 comparisons like (GE x <const>) to (GT x <const-1>).
	 Transform those comparisons back to GE, since that is the
	 comparison supported in Xtensa.  We shouldn't have to
	 transform <LE x const> comparisons, because neither
	 xtensa_expand_conditional_branch() nor get_condition() will
	 produce them.  */

      if ((code == GT) && (op1 == constm1_rtx))
	{
	  code = GE;
	  op1 = const0_rtx;
	}
      cmp = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
	{
	  /* Swap the operands to make const0 second.  */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;
	    }

	  /* If not comparing against zero, emit a comparison (subtract).  */
	  if (op1 != const0_rtx)
	    {
	      op0 = expand_binop (SImode, sub_optab, op0, op1,
				  0, 0, OPTAB_LIB_WIDEN);
	      op1 = const0_rtx;
	    }
	}
      else if (branch_operator (cmp, VOIDmode))
	{
	  /* Swap the operands to make const0 second.  */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;

	      switch (code)
		{
		case LT: code = GE; break;
		case GE: code = LT; break;
		default: gcc_unreachable ();
		}
	    }

	  if (op1 != const0_rtx)
	    return 0;
	}
      else
	return 0;

      return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && mode == SFmode)
    return gen_float_relational (code, op0, op1);

  return 0;
}
int
xtensa_expand_conditional_move (rtx *operands, int isflt)
{
  rtx dest = operands[0];
  rtx cmp = operands[1];
  machine_mode cmp_mode = GET_MODE (XEXP (cmp, 0));
  rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);

  if (!(cmp = gen_conditional_move (GET_CODE (cmp), cmp_mode,
				    XEXP (cmp, 0), XEXP (cmp, 1))))
    return 0;

  if (isflt)
    gen_fn = (cmp_mode == SImode
	      ? gen_movsfcc_internal0
	      : gen_movsfcc_internal1);
  else
    gen_fn = (cmp_mode == SImode
	      ? gen_movsicc_internal0
	      : gen_movsicc_internal1);

  emit_insn (gen_fn (dest, XEXP (cmp, 0), operands[2], operands[3], cmp));
  return 1;
}
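
/* Expand a "store condition code" operation: set the SImode destination
   to 1 or 0 according to the comparison described by operands[1..3].  */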
int
xtensa_expand_scc (rtx operands[4], machine_mode cmp_mode)
{
  rtx dest = operands[0];
  rtx cmp;
  rtx one_tmp, zero_tmp;
  rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);

  if (!(cmp = gen_conditional_move (GET_CODE (operands[1]), cmp_mode,
				    operands[2], operands[3])))
    return 0;

  one_tmp = gen_reg_rtx (SImode);
  zero_tmp = gen_reg_rtx (SImode);
  emit_insn (gen_movsi (one_tmp, const_true_rtx));
  emit_insn (gen_movsi (zero_tmp, const0_rtx));

  gen_fn = (cmp_mode == SImode
	    ? gen_movsicc_internal0
	    : gen_movsicc_internal1);
  emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
  return 1;
}
/* Split OP[1] into OP[2,3] and likewise for OP[0] into OP[0,1].  MODE is
   for the output, i.e., the input operands are twice as big as MODE.  */

void
xtensa_split_operand_pair (rtx operands[4], machine_mode mode)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (mode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (mode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], mode, GET_MODE_SIZE (mode));
      operands[2] = adjust_address (operands[1], mode, 0);
      break;

    case CONST_DOUBLE:
    case CONST_INT:
      split_double (operands[1], &operands[2], &operands[3]);
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (mode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (mode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], mode, GET_MODE_SIZE (mode));
      operands[0] = adjust_address (operands[0], mode, 0);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Emit insns to move operands[1] into operands[0].
   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (rtx *operands, machine_mode mode)
{
  rtx src = operands[1];

  if (CONSTANT_P (src)
      && (GET_CODE (src) != CONST_INT || ! xtensa_simm12b (INTVAL (src))))
    {
      rtx dst = operands[0];

      if (xtensa_tls_referenced_p (src))
	{
	  rtx addend = NULL;

	  if (GET_CODE (src) == CONST && GET_CODE (XEXP (src, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (src, 0), 1);
	      src = XEXP (XEXP (src, 0), 0);
	    }

	  src = xtensa_legitimize_tls_address (src);
	  if (addend)
	    {
	      src = gen_rtx_PLUS (mode, src, addend);
	      src = force_operand (src, dst);
	    }
	  emit_move_insn (dst, src);
	  return 1;
	}

      if (! TARGET_CONST16)
	{
	  src = force_const_mem (SImode, src);
	  operands[1] = src;
	}

      /* PC-relative loads are always SImode, and CONST16 is only
	 supported in the movsi pattern, so add a SUBREG for any other
	 (smaller) mode.  */

      if (mode != SImode)
	{
	  if (register_operand (dst, mode))
	    {
	      emit_move_insn (simplify_gen_subreg (SImode, dst, mode, 0), src);
	      return 1;
	    }
	  else
	    {
	      src = force_reg (SImode, src);
	      src = gen_lowpart_SUBREG (mode, src);
	      operands[1] = src;
	    }
	}
    }

  if (!(reload_in_progress | reload_completed)
      && !xtensa_valid_move (mode, operands))
    operands[1] = force_reg (mode, operands[1]);

  operands[1] = xtensa_copy_incoming_a7 (operands[1]);

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload, so we remove the
     subreg and adjust mem accordingly.  */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }

  return 0;
}
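
/* Rewrite a SUBREG of a pseudo that did not get a hard register into a
   SUBREG of its equivalent memory location, as described in the comment
   in xtensa_emit_move_sequence above.  */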
static rtx
fixup_subreg_mem (rtx x)
{
  if (GET_CODE (x) == SUBREG
      && GET_CODE (SUBREG_REG (x)) == REG
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      rtx temp =
	gen_rtx_SUBREG (GET_MODE (x),
			reg_equiv_mem (REGNO (SUBREG_REG (x))),
			SUBREG_BYTE (x));
      x = alter_subreg (&temp, true);
    }
  return x;
}
/* Check if an incoming argument in a7 is expected to be used soon and
   if OPND is a register or register pair that includes a7.  If so,
   create a new pseudo and copy a7 into that pseudo at the very
   beginning of the function, followed by the special "set_frame_ptr"
   unspec_volatile insn.  The return value is either the original
   operand, if it is not a7, or the new pseudo containing a copy of
   the incoming argument.  This is necessary because the register
   allocator will ignore conflicts with a7 and may either assign some
   other pseudo to a7 or use a7 as the hard_frame_pointer, clobbering
   the incoming argument in a7.  By copying the argument out of a7 as
   the very first thing, and then immediately following that with an
   unspec_volatile to keep the scheduler away, we should avoid any
   problems.  Putting the set_frame_ptr insn at the beginning, with
   only the a7 copy before it, also makes it easier for the prologue
   expander to initialize the frame pointer after the a7 copy and to
   fix up the a7 copy to use the stack pointer instead of the frame
   pointer.  */

rtx
xtensa_copy_incoming_a7 (rtx opnd)
{
  rtx entry_insns = 0;
  rtx reg, tmp;
  machine_mode mode;

  if (!cfun->machine->need_a7_copy)
    return opnd;

  /* This function should never be called again once a7 has been copied.  */
  gcc_assert (!cfun->machine->set_frame_ptr_insn);

  mode = GET_MODE (opnd);

  /* The operand using a7 may come in a later instruction, so just return
     the original operand if it doesn't use a7.  */
  reg = opnd;
  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }
  if (GET_CODE (reg) != REG
      || REGNO (reg) > A7_REG
      || REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) <= A7_REG)
    return opnd;

  /* 1-word args will always be in a7; 2-word args in a6/a7.  */
  gcc_assert (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 == A7_REG);

  cfun->machine->need_a7_copy = false;

  /* Copy a7 to a new pseudo at the function entry.  Use gen_raw_REG to
     create the REG for a7 so that hard_frame_pointer_rtx is not used.  */

  start_sequence ();
  tmp = gen_reg_rtx (mode);

  switch (mode)
    {
    case DFmode:
    case DImode:
      /* Copy the value out of A7 here but keep the first word in A6 until
	 after the set_frame_ptr insn.  Otherwise, the register allocator
	 may decide to put "subreg (tmp, 0)" in A7 and clobber the incoming
	 value.  */
      emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 4),
				     gen_raw_REG (SImode, A7_REG)));
      break;
    case SFmode:
      emit_insn (gen_movsf_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case SImode:
      emit_insn (gen_movsi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case HImode:
      emit_insn (gen_movhi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case QImode:
      emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    default:
      gcc_unreachable ();
    }

  cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());

  /* For DF and DI mode arguments, copy the incoming value in A6 now.  */
  if (mode == DFmode || mode == DImode)
    emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 0),
				   gen_rtx_REG (SImode, A7_REG - 1)));
  entry_insns = get_insns ();
  end_sequence ();

  if (cfun->machine->vararg_a7)
    {
      /* This is called from within builtin_saveregs, which will insert the
	 saveregs code at the function entry, ahead of anything placed at
	 the function entry now.  Instead, save the sequence to be inserted
	 at the beginning of the saveregs code.  */
      cfun->machine->vararg_a7_copy = entry_insns;
    }
  else
    {
      /* Put entry_insns after the NOTE that starts the function.  If
	 this is inside a start_sequence, make the outer-level insn
	 chain current, so the code is placed at the start of the
	 function.  */
      push_topmost_sequence ();
      /* Do not use entry_of_function() here.  This is called from within
	 expand_function_start, when the CFG still holds GIMPLE.  */
      emit_insn_after (entry_insns, get_insns ());
      pop_topmost_sequence ();
    }

  return tmp;
}
/* Try to expand a block move operation to a sequence of RTL move
   instructions.  If not optimizing, or if the block size is not a
   constant, or if the block is too large, the expansion fails and GCC
   falls back to calling memcpy().

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

int
xtensa_expand_block_move (rtx *operands)
{
  static const machine_mode mode_from_align[] =
  {
    VOIDmode, QImode, HImode, VOIDmode, SImode,
  };

  rtx dst_mem = operands[0];
  rtx src_mem = operands[1];
  HOST_WIDE_INT bytes, align;
  int num_pieces, move_ratio;
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  /* If this is not a fixed size move, just call memcpy.  */
  if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
    return 0;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  /* Anything to move?  */
  if (bytes <= 0)
    return 0;

  if (align > MOVE_MAX)
    align = MOVE_MAX;

  /* Decide whether to expand inline based on the optimization level.  */
  move_ratio = 4;
  if (optimize > 2)
    move_ratio = LARGEST_MOVE_RATIO;
  num_pieces = (bytes / align) + (bytes % align); /* Close enough anyway.  */
  if (num_pieces > move_ratio)
    return 0;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (bytes > 0)
	{
	  int next_amount;

	  next_amount = (bytes >= 4 ? 4 : (bytes >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (temp[next], x));

	  offset_ld += next_amount;
	  bytes -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);

  return 1;
}
void
xtensa_expand_nonlocal_goto (rtx *operands)
{
  rtx goto_handler = operands[1];
  rtx containing_fp = operands[3];

  /* Generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
     is too big to generate in-line.  */

  if (GET_CODE (containing_fp) != REG)
    containing_fp = force_reg (Pmode, containing_fp);

  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
		     LCT_NORMAL, VOIDmode, 2,
		     containing_fp, Pmode,
		     goto_handler, Pmode);
}
static struct machine_function *
xtensa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* Shift VAL of mode MODE left by COUNT bits.  */

static rtx
xtensa_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
			     NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
			      NULL_RTX, 1, OPTAB_DIRECT);
}
/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;	  /* SI aligned memory location.  */
  rtx shift;	  /* Bit offset with regard to lsb.  */
  rtx modemask;	  /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
};


/* Initialize structure AC for word access to HI and QI mode memory.  */

static void
init_alignment_context (struct alignment_context *ac, rtx mem)
{
  machine_mode mode = GET_MODE (mem);
  rtx byteoffset = NULL_RTX;
  bool aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
  else
    {
      /* Alignment is unknown.  */
      rtx addr, align;

      /* Force the address into a register.  */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode.  */
      align = expand_simple_binop (Pmode, AND, addr,
				   GEN_INT (-GET_MODE_SIZE (SImode)),
				   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM.  */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      byteoffset = expand_simple_binop (Pmode, AND, addr,
					GEN_INT (GET_MODE_SIZE (SImode) - 1),
					NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Calculate shiftcount.  */
  if (TARGET_BIG_ENDIAN)
    {
      ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
      if (!aligned)
	ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
					 NULL_RTX, 1, OPTAB_DIRECT);
    }
  else
    {
      if (aligned)
	ac->shift = NULL_RTX;
      else
	ac->shift = byteoffset;
    }

  if (ac->shift != NULL_RTX)
    {
      /* Shift is the byte count, but we need the bitcount.  */
      gcc_assert (exact_log2 (BITS_PER_UNIT) >= 0);
      ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift,
				       GEN_INT (exact_log2 (BITS_PER_UNIT)),
				       NULL_RTX, 1, OPTAB_DIRECT);
      ac->modemask = expand_simple_binop (SImode, ASHIFT,
					  GEN_INT (GET_MODE_MASK (mode)),
					  ac->shift,
					  NULL_RTX, 1, OPTAB_DIRECT);
    }
  else
    ac->modemask = GEN_INT (GET_MODE_MASK (mode));

  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
}
/* Expand an atomic compare and swap operation for HImode and QImode.
   MEM is the memory location, CMP the old value to compare MEM with
   and NEW_RTX the value to set if CMP == MEM.  */

void
xtensa_expand_compare_and_swap (rtx target, rtx mem, rtx cmp, rtx new_rtx)
{
  machine_mode mode = GET_MODE (mem);
  struct alignment_context ac;
  rtx tmp, cmpv, newv, val;
  rtx oldval = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx_code_label *csloop = gen_label_rtx ();
  rtx_code_label *csend = gen_label_rtx ();

  init_alignment_context (&ac, mem);

  if (ac.shift != NULL_RTX)
    {
      cmp = xtensa_expand_mask_and_shift (cmp, mode, ac.shift);
      new_rtx = xtensa_expand_mask_and_shift (new_rtx, mode, ac.shift);
    }

  /* Load the surrounding word into VAL with the MEM value masked out.  */
  val = force_reg (SImode, expand_simple_binop (SImode, AND, ac.memsi,
						ac.modemaski, NULL_RTX, 1,
						OPTAB_DIRECT));
  emit_label (csloop);

  /* Patch CMP and NEW_RTX into VAL at correct position.  */
  cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
						 NULL_RTX, 1, OPTAB_DIRECT));
  newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
						 NULL_RTX, 1, OPTAB_DIRECT));

  /* Jump to end if we're done.  */
  emit_insn (gen_sync_compare_and_swapsi (res, ac.memsi, cmpv, newv));
  emit_cmp_and_jump_insns (res, cmpv, EQ, const0_rtx, SImode, true, csend);

  /* Check for changes outside mode.  */
  emit_move_insn (oldval, val);
  tmp = expand_simple_binop (SImode, AND, res, ac.modemaski,
			     val, 1, OPTAB_DIRECT);
  if (tmp != val)
    emit_move_insn (val, tmp);

  /* Loop internal if so.  */
  emit_cmp_and_jump_insns (oldval, val, NE, const0_rtx, SImode, true, csloop);

  emit_label (csend);

  /* Return the correct part of the bitfield.  */
  convert_move (target,
		(ac.shift == NULL_RTX ? res
		 : expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
					NULL_RTX, 1, OPTAB_DIRECT)),
		1);
}
/* Expand an atomic operation CODE of mode MODE (either HImode or QImode --
   the default expansion works fine for SImode).  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value
   MEM holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */

void
xtensa_expand_atomic (enum rtx_code code, rtx target, rtx mem, rtx val,
		      bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct alignment_context ac;
  rtx_code_label *csloop = gen_label_rtx ();
  rtx cmp, tmp;
  rtx old = gen_reg_rtx (SImode);
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = NULL_RTX;

  init_alignment_context (&ac, mem);

  /* Prepare values before the compare-and-swap loop.  */
  if (ac.shift != NULL_RTX)
    val = xtensa_expand_mask_and_shift (val, mode, ac.shift);
  switch (code)
    {
    case PLUS:
    case MINUS:
      orig = gen_reg_rtx (SImode);
      convert_move (orig, val, 1);
      break;

    case SET:
    case IOR:
    case XOR:
      break;

    case MULT: /* NAND */
    case AND:
      /* val = "11..1<val>11..1" */
      val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
				 NULL_RTX, 1, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }

  /* Load full word.  Subsequent loads are performed by S32C1I.  */
  cmp = force_reg (SImode, ac.memsi);

  emit_label (csloop);
  emit_move_insn (old, cmp);

  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, old, orig,
				 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      tmp = expand_simple_binop (SImode, AND, old, ac.modemaski,
				 NULL_RTX, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (SImode, IOR, tmp, val,
				 new_rtx, 1, OPTAB_DIRECT);
      break;

    case AND:
    case IOR:
    case XOR:
      tmp = expand_simple_binop (SImode, code, old, val,
				 new_rtx, 1, OPTAB_DIRECT);
      break;

    case MULT: /* NAND */
      tmp = expand_simple_binop (SImode, XOR, old, ac.modemask,
				 NULL_RTX, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (SImode, AND, tmp, val,
				 new_rtx, 1, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }

  if (tmp != new_rtx)
    emit_move_insn (new_rtx, tmp);
  emit_insn (gen_sync_compare_and_swapsi (cmp, ac.memsi, old, new_rtx));
  emit_cmp_and_jump_insns (cmp, old, NE, const0_rtx, SImode, true, csloop);

  if (target)
    {
      tmp = (after ? new_rtx : cmp);
      convert_move (target,
		    (ac.shift == NULL_RTX ? tmp
		     : expand_simple_binop (SImode, LSHIFTRT, tmp, ac.shift,
					    NULL_RTX, 1, OPTAB_DIRECT)),
		    1);
    }
}
void
xtensa_setup_frame_addresses (void)
{
  /* Set flag to cause TARGET_FRAME_POINTER_REQUIRED to return true.  */
  cfun->machine->accesses_prev_frame = 1;

  if (TARGET_WINDOWED_ABI)
    emit_library_call
      (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
       LCT_NORMAL, VOIDmode, 0);
}
/* Emit the assembly for the end of a zero-cost loop.  Normally we just emit
   a comment showing where the end of the loop is.  However, if there is a
   label or a branch at the end of the loop then we need to place a nop
   there.  If the loop ends with a label we need the nop so that branches
   targeting that label will target the nop (and thus remain in the loop),
   instead of targeting the instruction after the loop (and thus exiting
   the loop).  If the loop ends with a branch, we need the nop in case the
   branch is targeting a location inside the loop.  When the branch
   executes it will cause the loop count to be decremented even if it is
   taken (because it is the last instruction in the loop), so we need to
   nop after the branch to prevent the loop count from being decremented
   when the branch is taken.  */

void
xtensa_emit_loop_end (rtx_insn *insn, rtx *operands)
{
  char done = 0;

  for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
    {
      switch (GET_CODE (insn))
	{
	case NOTE:
	case BARRIER:
	  break;

	case CODE_LABEL:
	  output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
	  done = 1;
	  break;

	default:
	  {
	    rtx body = PATTERN (insn);

	    if (JUMP_P (insn))
	      {
		output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
		done = 1;
	      }
	    else if ((GET_CODE (body) != USE)
		     && (GET_CODE (body) != CLOBBER))
	      done = 1;
	  }
	  break;
	}
    }

  output_asm_insn ("%1_LEND:", operands);
}
char *
xtensa_emit_branch (bool inverted, bool immed, rtx *operands)
{
  static char result[64];
  enum rtx_code code;
  const char *op;

  code = GET_CODE (operands[3]);
  switch (code)
    {
    case EQ:	op = inverted ? "ne" : "eq"; break;
    case NE:	op = inverted ? "eq" : "ne"; break;
    case LT:	op = inverted ? "ge" : "lt"; break;
    case GE:	op = inverted ? "lt" : "ge"; break;
    case LTU:	op = inverted ? "geu" : "ltu"; break;
    case GEU:	op = inverted ? "ltu" : "geu"; break;
    default:	gcc_unreachable ();
    }

  if (immed)
    {
      if (INTVAL (operands[1]) == 0)
	sprintf (result, "b%sz%s\t%%0, %%2", op,
		 (TARGET_DENSITY && (code == EQ || code == NE)) ? ".n" : "");
      else
	sprintf (result, "b%si\t%%0, %%d1, %%2", op);
    }
  else
    sprintf (result, "b%s\t%%0, %%1, %%2", op);

  return result;
}
char *
xtensa_emit_bit_branch (bool inverted, bool immed, rtx *operands)
{
  static char result[64];
  const char *op;

  switch (GET_CODE (operands[3]))
    {
    case EQ:	op = inverted ? "bs" : "bc"; break;
    case NE:	op = inverted ? "bc" : "bs"; break;
    default:	gcc_unreachable ();
    }

  if (immed)
    {
      unsigned bitnum = INTVAL (operands[1]) & 0x1f;
      operands[1] = GEN_INT (bitnum);
      sprintf (result, "b%si\t%%0, %%d1, %%2", op);
    }
  else
    sprintf (result, "b%s\t%%0, %%1, %%2", op);

  return result;
}
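
/* Assemble a conditional move.  ISBOOL selects a boolean-register
   condition (the MOVT/MOVF forms), ISFP a floating-point destination
   (the ".s" forms); INVERTED selects the inverse condition.  */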
char *
xtensa_emit_movcc (bool inverted, bool isfp, bool isbool, rtx *operands)
{
  static char result[64];
  enum rtx_code code;
  const char *op;

  code = GET_CODE (operands[4]);
  if (isbool)
    {
      switch (code)
	{
	case EQ:	op = inverted ? "t" : "f"; break;
	case NE:	op = inverted ? "f" : "t"; break;
	default:	gcc_unreachable ();
	}
    }
  else
    {
      switch (code)
	{
	case EQ:	op = inverted ? "nez" : "eqz"; break;
	case NE:	op = inverted ? "eqz" : "nez"; break;
	case LT:	op = inverted ? "gez" : "ltz"; break;
	case GE:	op = inverted ? "ltz" : "gez"; break;
	default:	gcc_unreachable ();
	}
    }

  sprintf (result, "mov%s%s\t%%0, %%%d, %%1",
	   op, isfp ? ".s" : "", inverted ? 3 : 2);
  return result;
}
char *
xtensa_emit_call (int callop, rtx *operands)
{
  static char result[64];
  rtx tgt = operands[callop];

  if (GET_CODE (tgt) == CONST_INT)
    sprintf (result, "call%d\t0x%lx", WINDOW_SIZE, INTVAL (tgt));
  else if (register_operand (tgt, VOIDmode))
    sprintf (result, "callx%d\t%%%d", WINDOW_SIZE, callop);
  else
    sprintf (result, "call%d\t%%%d", WINDOW_SIZE, callop);

  return result;
}
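
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */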
static bool
xtensa_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
{
  /* Allow constant pool addresses.  */
  if (mode != BLKmode && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
      && ! TARGET_CONST16 && constantpool_address_p (addr)
      && ! xtensa_tls_referenced_p (addr))
    return true;

  while (GET_CODE (addr) == SUBREG)
    addr = SUBREG_REG (addr);

  /* Allow base registers.  */
  if (GET_CODE (addr) == REG && BASE_REG_P (addr, strict))
    return true;

  /* Check for "register + offset" addressing.  */
  if (GET_CODE (addr) == PLUS)
    {
      rtx xplus0 = XEXP (addr, 0);
      rtx xplus1 = XEXP (addr, 1);
      enum rtx_code code0;
      enum rtx_code code1;

      while (GET_CODE (xplus0) == SUBREG)
	xplus0 = SUBREG_REG (xplus0);
      code0 = GET_CODE (xplus0);

      while (GET_CODE (xplus1) == SUBREG)
	xplus1 = SUBREG_REG (xplus1);
      code1 = GET_CODE (xplus1);

      /* Swap operands if necessary so the register is first.  */
      if (code0 != REG && code1 == REG)
	{
	  xplus0 = XEXP (addr, 1);
	  xplus1 = XEXP (addr, 0);
	  code0 = GET_CODE (xplus0);
	  code1 = GET_CODE (xplus1);
	}

      if (code0 == REG && BASE_REG_P (xplus0, strict)
	  && code1 == CONST_INT
	  && xtensa_mem_offset (INTVAL (xplus1), mode))
	return true;
    }

  return false;
}
/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */

static GTY(()) rtx xtensa_tls_module_base_symbol;

static rtx
xtensa_tls_module_base (void)
{
  if (! xtensa_tls_module_base_symbol)
    {
      xtensa_tls_module_base_symbol =
	gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");
      SYMBOL_REF_FLAGS (xtensa_tls_module_base_symbol)
	|= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
    }

  return xtensa_tls_module_base_symbol;
}
static rtx_insn *
xtensa_call_tls_desc (rtx sym, rtx *retp)
{
  rtx fn, arg, a10;
  rtx_insn *call_insn, *insns;

  start_sequence ();
  fn = gen_reg_rtx (Pmode);
  arg = gen_reg_rtx (Pmode);
  a10 = gen_rtx_REG (Pmode, 10);

  emit_insn (gen_tls_func (fn, sym));
  emit_insn (gen_tls_arg (arg, sym));
  emit_move_insn (a10, arg);
  call_insn = emit_call_insn (gen_tls_call (a10, fn, sym, const1_rtx));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), a10);
  insns = get_insns ();
  end_sequence ();

  *retp = a10;
  return insns;
}
static rtx
xtensa_legitimize_tls_address (rtx x)
{
  unsigned int model = SYMBOL_REF_TLS_MODEL (x);
  rtx dest, tp, ret, modbase, base, addend;
  rtx_insn *insns;

  dest = gen_reg_rtx (Pmode);
  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      insns = xtensa_call_tls_desc (x, &ret);
      emit_libcall_block (insns, dest, ret, x);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      base = gen_reg_rtx (Pmode);
      modbase = xtensa_tls_module_base ();
      insns = xtensa_call_tls_desc (modbase, &ret);
      emit_libcall_block (insns, base, ret, modbase);
      addend = force_reg (SImode, gen_sym_DTPOFF (x));
      emit_insn (gen_addsi3 (dest, base, addend));
      break;

    case TLS_MODEL_INITIAL_EXEC:
    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (SImode);
      emit_insn (gen_get_thread_pointersi (tp));
      addend = force_reg (SImode, gen_sym_TPOFF (x));
      emit_insn (gen_addsi3 (dest, tp, addend));
      break;

    default:
      gcc_unreachable ();
    }

  return dest;
}
static rtx
xtensa_legitimize_address (rtx x,
			   rtx oldx ATTRIBUTE_UNUSED,
			   machine_mode mode)
{
  if (xtensa_tls_symbol_p (x))
    return xtensa_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      rtx plus0 = XEXP (x, 0);
      rtx plus1 = XEXP (x, 1);

      if (GET_CODE (plus0) != REG && GET_CODE (plus1) == REG)
	{
	  plus0 = XEXP (x, 1);
	  plus1 = XEXP (x, 0);
	}

      /* Try to split up the offset to use an ADDMI instruction.  */
      if (GET_CODE (plus0) == REG
	  && GET_CODE (plus1) == CONST_INT
	  && !xtensa_mem_offset (INTVAL (plus1), mode)
	  && !xtensa_simm8 (INTVAL (plus1))
	  && xtensa_mem_offset (INTVAL (plus1) & 0xff, mode)
	  && xtensa_simm8x256 (INTVAL (plus1) & ~0xff))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx addmi_offset = GEN_INT (INTVAL (plus1) & ~0xff);
	  emit_insn (gen_rtx_SET (temp, gen_rtx_PLUS (Pmode, plus0,
						      addmi_offset)));
	  return gen_rtx_PLUS (Pmode, temp, GEN_INT (INTVAL (plus1) & 0xff));
	}
    }

  return x;
}
/* Worker function for TARGET_MODE_DEPENDENT_ADDRESS_P.

   Treat constant-pool references as "mode dependent" since they can
   only be accessed with SImode loads.  This works around a bug in the
   combiner where a constant pool reference is temporarily converted
   to an HImode load, which is then assumed to zero-extend based on
   our definition of LOAD_EXTEND_OP.  This is wrong because the high
   bits of a 16-bit value in the constant pool are now sign-extended
   by default.  */

static bool
xtensa_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return constantpool_address_p (addr);
}
/* Return TRUE if X contains any TLS symbol references.  */

bool
xtensa_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    {
      const_rtx x = *iter;
      if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	return true;

      /* Ignore TLS references that have already been legitimized.  */
      if (GET_CODE (x) == UNSPEC)
	switch (XINT (x, 1))
	  {
	  case UNSPEC_TPOFF:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_TLS_FUNC:
	  case UNSPEC_TLS_ARG:
	  case UNSPEC_TLS_CALL:
	    iter.skip_subrtxes ();
	    break;
	  default:
	    break;
	  }
    }
  return false;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
xtensa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return xtensa_tls_referenced_p (x);
}
/* Return the debugger register number to use for 'regno'.  */

int
xtensa_dbx_register_number (int regno)
{
  int first = -1;

  if (GP_REG_P (regno))
    {
      regno -= GP_REG_FIRST;
      first = 0;
    }
  else if (BR_REG_P (regno))
    {
      regno -= BR_REG_FIRST;
      first = 16;
    }
  else if (FP_REG_P (regno))
    {
      regno -= FP_REG_FIRST;
      first = 48;
    }
  else if (ACC_REG_P (regno))
    {
      first = 0x200;	/* Start of Xtensa special registers.  */
      regno = 16;	/* ACCLO is special register 16.  */
    }

  /* When optimizing, we sometimes get asked about pseudo-registers
     that don't represent hard registers.  Return 0 for these.  */
  if (first == -1)
    return 0;

  return first + regno;
}
/* Argument support functions.  */

/* Initialize CUMULATIVE_ARGS for a function.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, int incoming)
{
  cum->arg_words = 0;
  cum->incoming = incoming;
}
/* Advance the argument to the next argument position.  */

static void
xtensa_function_arg_advance (cumulative_args_t cum, machine_mode mode,
			     const_tree type, bool named ATTRIBUTE_UNUSED)
{
  int words, max;
  int *arg_words;

  arg_words = &get_cumulative_args (cum)->arg_words;
  max = MAX_ARGS_IN_REGISTERS;

  words = (((mode != BLKmode)
	    ? (int) GET_MODE_SIZE (mode)
	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (*arg_words < max
      && (targetm.calls.must_pass_in_stack (mode, type)
	  || *arg_words + words > max))
    *arg_words = max;

  *arg_words += words;
}
/* Return an RTL expression containing the register for the given mode,
   or 0 if the argument is to be passed on the stack.  INCOMING_P is nonzero
   if this is an incoming argument to the current function.  */

static rtx
xtensa_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
		       const_tree type, bool incoming_p)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regbase, words, max;
  int *arg_words;
  int regno;

  arg_words = &cum->arg_words;
  regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
  max = MAX_ARGS_IN_REGISTERS;

  words = (((mode != BLKmode)
	    ? (int) GET_MODE_SIZE (mode)
	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
    {
      int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_WORD;
      *arg_words = (*arg_words + align - 1) & -align;
    }

  if (*arg_words + words > max)
    return (rtx)0;

  regno = regbase + *arg_words;

  if (cum->incoming && regno <= A7_REG && regno + words > A7_REG)
    cfun->machine->need_a7_copy = TARGET_WINDOWED_ABI;

  return gen_rtx_REG (mode, regno);
}
/* Implement TARGET_FUNCTION_ARG.  */

static rtx
xtensa_function_arg (cumulative_args_t cum, machine_mode mode,
		     const_tree type, bool named ATTRIBUTE_UNUSED)
{
  return xtensa_function_arg_1 (cum, mode, type, false);
}

/* Implement TARGET_FUNCTION_INCOMING_ARG.  */

static rtx
xtensa_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  return xtensa_function_arg_1 (cum, mode, type, true);
}
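
/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  */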
static unsigned int
xtensa_function_arg_boundary (machine_mode mode, const_tree type)
{
  unsigned int alignment;

  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
  if (alignment < PARM_BOUNDARY)
    alignment = PARM_BOUNDARY;
  if (alignment > STACK_BOUNDARY)
    alignment = STACK_BOUNDARY;
  return alignment;
}
static bool
xtensa_return_in_msb (const_tree valtype)
{
  return (TARGET_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && int_size_in_bytes (valtype) >= UNITS_PER_WORD);
}
static void
xtensa_option_override (void)
{
  int regno;
  machine_mode mode;

  if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
    error ("boolean registers required for the floating-point option");

  /* Set up array giving whether a given register can hold a given mode.  */
  for (mode = VOIDmode;
       mode != MAX_MACHINE_MODE;
       mode = (machine_mode) ((int) mode + 1))
    {
      int size = GET_MODE_SIZE (mode);
      enum mode_class mclass = GET_MODE_CLASS (mode);

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	{
	  int temp;

	  if (ACC_REG_P (regno))
	    temp = (TARGET_MAC16
		    && (mclass == MODE_INT) && (size <= UNITS_PER_WORD));
	  else if (GP_REG_P (regno))
	    temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
	  else if (FP_REG_P (regno))
	    temp = (TARGET_HARD_FLOAT && (mode == SFmode));
	  else if (BR_REG_P (regno))
	    temp = (TARGET_BOOLEANS && (mode == CCmode));
	  else
	    temp = FALSE;

	  xtensa_hard_regno_mode_ok[(int) mode][regno] = temp;
	}
    }

  init_machine_status = xtensa_init_machine_status;

  /* Check PIC settings.  PIC is only supported when using L32R
     instructions, and some targets need to always use PIC.  */
  if (flag_pic && TARGET_CONST16)
    error ("-f%s is not supported with CONST16 instructions",
	   (flag_pic > 1 ? "PIC" : "pic"));
  else if (TARGET_FORCE_NO_PIC)
    flag_pic = 0;
  else if (XTENSA_ALWAYS_PIC)
    {
      if (TARGET_CONST16)
	error ("PIC is required but not supported with CONST16 instructions");
      flag_pic = 1;
    }
  /* There's no need for -fPIC (as opposed to -fpic) on Xtensa.  */
  if (flag_pic > 1)
    flag_pic = 1;
  if (flag_pic && !flag_pie)
    flag_shlib = 1;

  /* Hot/cold partitioning does not work on this architecture, because of
     constant pools (the load instruction cannot necessarily reach that far).
     Therefore disable it on this architecture.  */
  if (flag_reorder_blocks_and_partition)
    {
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }
}
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the '%' specification that was used to request
   printing of the operand.  If the specification was just '%DIGIT'
   then CODE is 0; if the specification was '%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array 'reg_names' whose type is
   'char *[]'.  'reg_names' is initialized from 'REGISTER_NAMES'.

   When the machine description has a specification '%PUNCT' (a '%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   'a', 'c', 'l', and 'n' are reserved.

   The Xtensa specific codes are:

   'd'  CONST_INT, print as signed decimal
   'x'  CONST_INT, print as signed hexadecimal
   'K'  CONST_INT, print number of bits in mask for EXTUI
   'R'  CONST_INT, print (X & 0x1f)
   'L'  CONST_INT, print ((32 - X) & 0x1f)
   'D'  REG, print second register of double-word register operand
   'N'  MEM, print address of next word following a memory operand
   'v'  MEM, if memory reference is volatile, output a MEMW before it
   't'  any constant, add "@h" suffix for top 16 bits
   'b'  any constant, add "@l" suffix for bottom 16 bits
*/

static void
printx (FILE *file, signed int val)
{
  /* Print a hexadecimal value in a nice way.  */
  if ((val > -0xa) && (val < 0xa))
    fprintf (file, "%d", val);
  else if (val < 0)
    fprintf (file, "-0x%x", -val);
  else
    fprintf (file, "0x%x", val);
}
void
print_operand (FILE *file, rtx x, int letter)
{
  if (!x)
    error ("PRINT_OPERAND null pointer");

  switch (letter)
    {
    case 'D':
      if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	fprintf (file, "%s", reg_names[xt_true_regnum (x) + 1]);
      else
	output_operand_lossage ("invalid %%D value");
      break;

    case 'v':
      if (GET_CODE (x) == MEM)
	{
	  /* For a volatile memory reference, emit a MEMW before the
	     load or store.  */
	  if (MEM_VOLATILE_P (x) && TARGET_SERIALIZE_VOLATILE)
	    fprintf (file, "memw\n\t");
	}
      else
	output_operand_lossage ("invalid %%v value");
      break;

    case 'N':
      if (GET_CODE (x) == MEM
	  && (GET_MODE (x) == DFmode || GET_MODE (x) == DImode))
	{
	  x = adjust_address (x, GET_MODE (x) == DFmode ? SFmode : SImode, 4);
	  output_address (XEXP (x, 0));
	}
      else
	output_operand_lossage ("invalid %%N value");
      break;

    case 'K':
      if (GET_CODE (x) == CONST_INT)
	{
	  int num_bits = 0;
	  unsigned val = INTVAL (x);

	  while (val & 1)
	    {
	      num_bits += 1;
	      val = val >> 1;
	    }

	  if ((val != 0) || (num_bits == 0) || (num_bits > 16))
	    fatal_insn ("invalid mask", x);

	  fprintf (file, "%d", num_bits);
	}
      else
	output_operand_lossage ("invalid %%K value");
      break;

    case 'L':
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, "%ld", (32 - INTVAL (x)) & 0x1f);
      else
	output_operand_lossage ("invalid %%L value");
      break;

    case 'R':
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, "%ld", INTVAL (x) & 0x1f);
      else
	output_operand_lossage ("invalid %%R value");
      break;

    case 'x':
      if (GET_CODE (x) == CONST_INT)
	printx (file, INTVAL (x));
      else
	output_operand_lossage ("invalid %%x value");
      break;

    case 'd':
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, "%ld", INTVAL (x));
      else
	output_operand_lossage ("invalid %%d value");
      break;

    case 't':
    case 'b':
      if (GET_CODE (x) == CONST_INT)
	{
	  printx (file, INTVAL (x));
	  fputs (letter == 't' ? "@h" : "@l", file);
	}
      else if (GET_CODE (x) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE r;
	  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
	  if (GET_MODE (x) == SFmode)
	    {
	      long l;
	      REAL_VALUE_TO_TARGET_SINGLE (r, l);
	      fprintf (file, "0x%08lx@%c", l, letter == 't' ? 'h' : 'l');
	    }
	  else
	    output_operand_lossage ("invalid %%t/%%b value");
	}
      else if (GET_CODE (x) == CONST)
	{
	  /* X must be a symbolic constant on ELF.  Write an expression
	     suitable for 'const16' that sets the high or low 16 bits.  */
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%t/%%b value");
	  print_operand (file, XEXP (XEXP (x, 0), 0), 0);
	  fputs (letter == 't' ? "@h" : "@l", file);
	  /* There must be a non-alphanumeric character between 'h' or 'l'
	     and the number.  The '-' is added by print_operand() already.  */
	  if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
	    fputs ("+", file);
	  print_operand (file, XEXP (XEXP (x, 0), 1), 0);
	}
      else
	{
	  output_addr_const (file, x);
	  fputs (letter == 't' ? "@h" : "@l", file);
	}
      break;

    default:
      if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	fprintf (file, "%s", reg_names[xt_true_regnum (x)]);
      else if (GET_CODE (x) == MEM)
	output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST_INT)
	fprintf (file, "%ld", INTVAL (x));
      else
	output_addr_const (file, x);
    }
}
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand that is a memory
   reference whose address is ADDR.  ADDR is an RTL expression.  */

void
print_operand_address (FILE *file, rtx addr)
{
  if (!addr)
    error ("PRINT_OPERAND_ADDRESS, null pointer");

  switch (GET_CODE (addr))
    {
    default:
      fatal_insn ("invalid address", addr);
      break;

    case REG:
      fprintf (file, "%s, 0", reg_names[REGNO (addr)]);
      break;

    case PLUS:
      {
	rtx reg = (rtx)0;
	rtx offset = (rtx)0;
	rtx arg0 = XEXP (addr, 0);
	rtx arg1 = XEXP (addr, 1);

	if (GET_CODE (arg0) == REG)
	  {
	    reg = arg0;
	    offset = arg1;
	  }
	else if (GET_CODE (arg1) == REG)
	  {
	    reg = arg1;
	    offset = arg0;
	  }
	else
	  fatal_insn ("no register in address", addr);

	if (CONSTANT_P (offset))
	  {
	    fprintf (file, "%s, ", reg_names[REGNO (reg)]);
	    output_addr_const (file, offset);
	  }
	else
	  fatal_insn ("address offset not a constant", addr);
      }
      break;

    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
      output_addr_const (file, addr);
      break;
    }
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
xtensa_output_addr_const_extra (FILE *fp, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_TPOFF:
	  output_addr_const (fp, XVECEXP (x, 0, 0));
	  fputs ("@TPOFF", fp);
	  return true;
	case UNSPEC_DTPOFF:
	  output_addr_const (fp, XVECEXP (x, 0, 0));
	  fputs ("@DTPOFF", fp);
	  return true;
	case UNSPEC_PLT:
	  if (flag_pic)
	    {
	      output_addr_const (fp, XVECEXP (x, 0, 0));
	      fputs ("@PLT", fp);
	      return true;
	    }
	  break;
	default:
	  break;
	}
    }
  return false;
}
void
xtensa_output_literal (FILE *file, rtx x, machine_mode mode, int labelno)
{
  long value_long[2];
  REAL_VALUE_TYPE r;
  int size;
  rtx first, second;

  fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
      gcc_assert (GET_CODE (x) == CONST_DOUBLE);

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      switch (mode)
	{
	case SFmode:
	  REAL_VALUE_TO_TARGET_SINGLE (r, value_long[0]);
	  if (HOST_BITS_PER_LONG > 32)
	    value_long[0] &= 0xffffffff;
	  fprintf (file, "0x%08lx\n", value_long[0]);
	  break;

	case DFmode:
	  REAL_VALUE_TO_TARGET_DOUBLE (r, value_long);
	  if (HOST_BITS_PER_LONG > 32)
	    {
	      value_long[0] &= 0xffffffff;
	      value_long[1] &= 0xffffffff;
	    }
	  fprintf (file, "0x%08lx, 0x%08lx\n",
		   value_long[0], value_long[1]);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case MODE_INT:
    case MODE_PARTIAL_INT:
      size = GET_MODE_SIZE (mode);
      switch (size)
	{
	case 4:
	  output_addr_const (file, x);
	  fputs ("\n", file);
	  break;

	case 8:
	  split_double (x, &first, &second);
	  output_addr_const (file, first);
	  fputs (", ", file);
	  output_addr_const (file, second);
	  fputs ("\n", file);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }
}
static bool
xtensa_call_save_reg (int regno)
{
  if (TARGET_WINDOWED_ABI)
    return false;

  if (regno == A0_REG)
    return crtl->profile || !crtl->is_leaf || crtl->calls_eh_return ||
      df_regs_ever_live_p (regno);

  if (crtl->calls_eh_return && regno >= 2 && regno < 4)
    return true;

  return !fixed_regs[regno] && !call_used_regs[regno] &&
    df_regs_ever_live_p (regno);
}
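/* Illustrative: in a call0-ABI function, a clobbered callee-saved
   register such as a12 (not fixed, not call-used, live at some point)
   must be saved, while a0 is saved only when profiling, a non-leaf
   body, an EH return, or an explicit use keeps it live.  */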
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.  */

#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
#define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
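/* Illustrative: assuming the usual Xtensa STACK_BOUNDARY of 128 bits,
   STACK_BYTES is 16, so XTENSA_STACK_ALIGN (20) == 32 and
   XTENSA_STACK_ALIGN (48) == 48.  */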
long
compute_frame_size (int size)
{
  int regno;

  /* Add space for the incoming static chain value.  */
  if (cfun->static_chain_decl != NULL)
    size += (1 * UNITS_PER_WORD);

  xtensa_callee_save_size = 0;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
    {
      if (xtensa_call_save_reg (regno))
	xtensa_callee_save_size += UNITS_PER_WORD;
    }

  xtensa_current_frame_size =
    XTENSA_STACK_ALIGN (size
			+ xtensa_callee_save_size
			+ crtl->outgoing_args_size
			+ (WINDOW_SIZE * UNITS_PER_WORD));
  xtensa_callee_save_size = XTENSA_STACK_ALIGN (xtensa_callee_save_size);
  return xtensa_current_frame_size;
}
bool
xtensa_frame_pointer_required (void)
{
  /* The code to expand builtin_frame_addr and builtin_return_addr
     currently uses the hard_frame_pointer instead of frame_pointer.
     This seems wrong but maybe it's necessary for other architectures.
     This function is derived from the i386 code.  */

  if (cfun->machine->accesses_prev_frame)
    return true;

  return false;
}


/* minimum frame = reg save area (4 words) plus static chain (1 word)
   and the total number of words must be a multiple of 128 bits.  */
#define MIN_FRAME_SIZE (8 * UNITS_PER_WORD)
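/* With UNITS_PER_WORD == 4 this is 32 bytes: 4 words of register save
   area plus 1 word of static chain, rounded up from 5 words to 8 so
   that the total is a multiple of 128 bits (16 bytes).  */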
void
xtensa_expand_prologue (void)
{
  HOST_WIDE_INT total_size;
  rtx_insn *insn = NULL;
  rtx note_rtx;

  total_size = compute_frame_size (get_frame_size ());

  if (TARGET_WINDOWED_ABI)
    {
      if (total_size < (1 << (12+3)))
	insn = emit_insn (gen_entry (GEN_INT (total_size)));
      else
	{
	  /* Use a8 as a temporary since a0-a7 may be live.  */
	  rtx tmp_reg = gen_rtx_REG (Pmode, A8_REG);
	  emit_insn (gen_entry (GEN_INT (MIN_FRAME_SIZE)));
	  emit_move_insn (tmp_reg, GEN_INT (total_size - MIN_FRAME_SIZE));
	  emit_insn (gen_subsi3 (tmp_reg, stack_pointer_rtx, tmp_reg));
	  insn = emit_insn (gen_movsi (stack_pointer_rtx, tmp_reg));
	}
    }
  else
    {
      int regno;
      HOST_WIDE_INT offset = 0;

      /* -128 is the limit of a single addi instruction.  */
      if (total_size > 0 && total_size <= 128)
	{
	  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
					GEN_INT (-total_size)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  note_rtx = gen_rtx_SET (stack_pointer_rtx,
				  plus_constant (Pmode, stack_pointer_rtx,
						 -total_size));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
	  offset = total_size - UNITS_PER_WORD;
	}
      else if (xtensa_callee_save_size)
	{
	  /* 1020 is the maximal s32i offset; if the frame is bigger than
	     that, move sp to the end of the callee-saved save area, do the
	     saves, and then move sp to its final location.  */
	  if (total_size > 1024)
	    {
	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					    stack_pointer_rtx,
					    GEN_INT (-xtensa_callee_save_size)));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      note_rtx = gen_rtx_SET (stack_pointer_rtx,
				      plus_constant (Pmode, stack_pointer_rtx,
						     -xtensa_callee_save_size));
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
	      offset = xtensa_callee_save_size - UNITS_PER_WORD;
	    }
	  else
	    {
	      rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
	      emit_move_insn (tmp_reg, GEN_INT (total_size));
	      insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					    stack_pointer_rtx, tmp_reg));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      note_rtx = gen_rtx_SET (stack_pointer_rtx,
				      plus_constant (Pmode, stack_pointer_rtx,
						     -total_size));
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
	      offset = total_size - UNITS_PER_WORD;
	    }
	}

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
	{
	  if (xtensa_call_save_reg (regno))
	    {
	      rtx x = gen_rtx_PLUS (Pmode,
				    stack_pointer_rtx, GEN_INT (offset));
	      rtx mem = gen_frame_mem (SImode, x);
	      rtx reg = gen_rtx_REG (SImode, regno);

	      offset -= UNITS_PER_WORD;
	      insn = emit_move_insn (mem, reg);
	      RTX_FRAME_RELATED_P (insn) = 1;
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			    gen_rtx_SET (mem, reg));
	    }
	}
      if (total_size > 1024)
	{
	  rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
	  emit_move_insn (tmp_reg, GEN_INT (total_size -
					    xtensa_callee_save_size));
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx, tmp_reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  note_rtx = gen_rtx_SET (stack_pointer_rtx,
				  plus_constant (Pmode, stack_pointer_rtx,
						 xtensa_callee_save_size -
						 total_size));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
	}
    }

  if (frame_pointer_needed)
    {
      if (cfun->machine->set_frame_ptr_insn)
	{
	  rtx_insn *first;

	  push_topmost_sequence ();
	  first = get_insns ();
	  pop_topmost_sequence ();

	  /* For all instructions prior to set_frame_ptr_insn, replace
	     hard_frame_pointer references with stack_pointer.  */
	  for (insn = first;
	       insn != cfun->machine->set_frame_ptr_insn;
	       insn = NEXT_INSN (insn))
	    {
	      if (INSN_P (insn))
		{
		  PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
						hard_frame_pointer_rtx,
						stack_pointer_rtx);
		  df_insn_rescan (insn);
		}
	    }
	}
      else
	{
	  insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
				       stack_pointer_rtx));
	  if (!TARGET_WINDOWED_ABI)
	    {
	      note_rtx = gen_rtx_SET (hard_frame_pointer_rtx,
				      stack_pointer_rtx);
	      RTX_FRAME_RELATED_P (insn) = 1;
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
	    }
	}
    }

  if (TARGET_WINDOWED_ABI)
    {
      /* Create a note to describe the CFA.  Because this is only used to set
	 DW_AT_frame_base for debug info, don't bother tracking changes through
	 each instruction in the prologue.  It just takes up space.  */
      note_rtx = gen_rtx_SET ((frame_pointer_needed
			       ? hard_frame_pointer_rtx
			       : stack_pointer_rtx),
			      plus_constant (Pmode, stack_pointer_rtx,
					     -total_size));
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
    }
}
void
xtensa_expand_epilogue (void)
{
  if (!TARGET_WINDOWED_ABI)
    {
      int regno;
      HOST_WIDE_INT offset;

      if (xtensa_current_frame_size > (frame_pointer_needed ? 127 : 1024))
	{
	  rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
	  emit_move_insn (tmp_reg, GEN_INT (xtensa_current_frame_size -
					    xtensa_callee_save_size));
	  emit_insn (gen_addsi3 (stack_pointer_rtx, frame_pointer_needed ?
				 hard_frame_pointer_rtx : stack_pointer_rtx,
				 tmp_reg));
	  offset = xtensa_callee_save_size - UNITS_PER_WORD;
	}
      else
	{
	  if (frame_pointer_needed)
	    emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
	  offset = xtensa_current_frame_size - UNITS_PER_WORD;
	}

      /* Prevent reordering of saved a0 update and loading it back from
	 the save area.  */
      if (crtl->calls_eh_return)
	emit_insn (gen_blockage ());

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
	{
	  if (xtensa_call_save_reg (regno))
	    {
	      rtx x = gen_rtx_PLUS (Pmode,
				    stack_pointer_rtx, GEN_INT (offset));

	      offset -= UNITS_PER_WORD;
	      emit_move_insn (gen_rtx_REG (SImode, regno),
			      gen_frame_mem (SImode, x));
	    }
	}

      if (xtensa_current_frame_size > 0)
	{
	  if (frame_pointer_needed || /* always reachable with addi */
	      xtensa_current_frame_size > 1024 ||
	      xtensa_current_frame_size <= 127)
	    {
	      if (xtensa_current_frame_size <= 127)
		offset = xtensa_current_frame_size;
	      else
		offset = xtensa_callee_save_size;

	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (offset)));
	    }
	  else
	    {
	      rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
	      emit_move_insn (tmp_reg, GEN_INT (xtensa_current_frame_size));
	      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
				     tmp_reg));
	    }
	}

      if (crtl->calls_eh_return)
	emit_insn (gen_add3_insn (stack_pointer_rtx,
				  stack_pointer_rtx,
				  EH_RETURN_STACKADJ_RTX));
    }
  xtensa_current_frame_size = 0;
  xtensa_callee_save_size = 0;
  emit_jump_insn (gen_return ());
}
void
xtensa_set_return_address (rtx address, rtx scratch)
{
  HOST_WIDE_INT total_size = compute_frame_size (get_frame_size ());
  rtx frame = frame_pointer_needed ?
    hard_frame_pointer_rtx : stack_pointer_rtx;
  rtx a0_addr = plus_constant (Pmode, frame,
			       total_size - UNITS_PER_WORD);
  rtx note = gen_rtx_SET (gen_frame_mem (SImode, a0_addr),
			  gen_rtx_REG (SImode, A0_REG));
  rtx insn;

  if (total_size > 1024)
    {
      emit_move_insn (scratch, GEN_INT (total_size - UNITS_PER_WORD));
      emit_insn (gen_addsi3 (scratch, frame, scratch));
      a0_addr = scratch;
    }

  insn = emit_move_insn (gen_frame_mem (SImode, a0_addr), address);
  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
}
rtx
xtensa_return_addr (int count, rtx frame)
{
  rtx result, retaddr, curaddr, label;

  if (!TARGET_WINDOWED_ABI)
    {
      if (count != 0)
	return const0_rtx;

      return get_hard_reg_initial_val (Pmode, A0_REG);
    }

  if (count == -1)
    retaddr = gen_rtx_REG (Pmode, A0_REG);
  else
    {
      rtx addr = plus_constant (Pmode, frame, -4 * UNITS_PER_WORD);
      addr = memory_address (Pmode, addr);
      retaddr = gen_reg_rtx (Pmode);
      emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
    }

  /* The 2 most-significant bits of the return address on Xtensa hold
     the register window size.  To get the real return address, these
     bits must be replaced with the high bits from some address in the
     code.  */

  /* Get the 2 high bits of a local label in the code.  */
  curaddr = gen_reg_rtx (Pmode);
  label = gen_label_rtx ();
  emit_label (label);
  LABEL_PRESERVE_P (label) = 1;
  emit_move_insn (curaddr, gen_rtx_LABEL_REF (Pmode, label));
  emit_insn (gen_lshrsi3 (curaddr, curaddr, GEN_INT (30)));
  emit_insn (gen_ashlsi3 (curaddr, curaddr, GEN_INT (30)));

  /* Clear the 2 high bits of the return address.  */
  result = gen_reg_rtx (Pmode);
  emit_insn (gen_ashlsi3 (result, retaddr, GEN_INT (2)));
  emit_insn (gen_lshrsi3 (result, result, GEN_INT (2)));

  /* Combine them to get the result.  */
  emit_insn (gen_iorsi3 (result, result, curaddr));
  return result;
}
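/* Worked example (illustrative): if the saved a0 is 0x9ffe0010 (the top
   two bits, binary 10, encode a call8 window increment) and the local
   label sits at 0x40001000, the reconstructed return address is
   (0x9ffe0010 & 0x3fffffff) | (0x40001000 & 0xc0000000) == 0x5ffe0010.  */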
/* Disable the use of word-sized or smaller complex modes for structures,
   and for function arguments in particular, where they cause problems with
   register a7.  The xtensa_copy_incoming_a7 function assumes that there is
   a single reference to an argument in a7, but with small complex modes the
   real and imaginary components may be extracted separately, leading to two
   uses of the register, only one of which would be replaced.  */

static bool
xtensa_member_type_forces_blk (const_tree, machine_mode mode)
{
  return mode == CQImode || mode == CHImode;
}
/* Create the va_list data type.

   This structure is set up by __builtin_saveregs.  The __va_reg field
   points to a stack-allocated region holding the contents of the
   incoming argument registers.  The __va_ndx field is an index
   initialized to the position of the first unnamed (variable)
   argument.  This same index is also used to address the arguments
   passed in memory.  Thus, the __va_stk field is initialized to point
   to the position of the first argument in memory offset to account
   for the arguments passed in registers and to account for the size
   of the argument registers not being 16-byte aligned.  E.g., there
   are 6 argument registers of 4 bytes each, but we want the __va_ndx
   for the first stack argument to have the maximal alignment of 16
   bytes, so we offset the __va_stk address by 32 bytes so that
   __va_stk[32] references the first argument on the stack.  */
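/* In C terms the layout is roughly the following (a sketch, not an
   authoritative definition; the fields are built as trees below):

     typedef struct __va_list_tag
     {
       void *__va_stk;    address of stack args, biased by -32 bytes
       void *__va_reg;    save area for incoming argument registers
       int   __va_ndx;    byte index of the next argument
     } __va_list_tag;  */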
static tree
xtensa_build_builtin_va_list (void)
{
  tree f_stk, f_reg, f_ndx, record, type_decl;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_stk = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__va_stk"),
		      ptr_type_node);
  f_reg = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__va_reg"),
		      ptr_type_node);
  f_ndx = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__va_ndx"),
		      integer_type_node);

  DECL_FIELD_CONTEXT (f_stk) = record;
  DECL_FIELD_CONTEXT (f_reg) = record;
  DECL_FIELD_CONTEXT (f_ndx) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_stk;
  DECL_CHAIN (f_stk) = f_reg;
  DECL_CHAIN (f_reg) = f_ndx;

  layout_type (record);
  return record;
}
/* Save the incoming argument registers on the stack.  Returns the
   address of the saved registers.  */

static rtx
xtensa_builtin_saveregs (void)
{
  rtx gp_regs;
  int arg_words = crtl->args.info.arg_words;
  int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;

  if (gp_left <= 0)
    return const0_rtx;

  /* Allocate the general-purpose register space.  */
  gp_regs = assign_stack_local
    (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
  set_mem_alias_set (gp_regs, get_varargs_alias_set ());

  /* Now store the incoming registers.  */
  cfun->machine->need_a7_copy = TARGET_WINDOWED_ABI;
  cfun->machine->vararg_a7 = true;
  move_block_from_reg (GP_ARG_FIRST + arg_words,
		       adjust_address (gp_regs, BLKmode,
				       arg_words * UNITS_PER_WORD),
		       gp_left);
  if (cfun->machine->vararg_a7_copy != 0)
    emit_insn_before (cfun->machine->vararg_a7_copy, get_insns ());

  return XEXP (gp_regs, 0);
}
/* Implement `va_start' for varargs and stdarg.  We look at the
   current function to fill in an initial va_list.  */

static void
xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree t, u;
  int arg_words;

  arg_words = crtl->args.info.arg_words;

  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = DECL_CHAIN (f_stk);
  f_ndx = DECL_CHAIN (f_reg);

  stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
  reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
		f_reg, NULL_TREE);
  ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
		f_ndx, NULL_TREE);

  /* Call __builtin_saveregs; save the result in __va_reg.  */
  u = make_tree (sizetype, expand_builtin_saveregs ());
  u = fold_convert (ptr_type_node, u);
  t = build2 (MODIFY_EXPR, ptr_type_node, reg, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_stk member to ($arg_ptr - 32).  */
  u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
  u = fold_build_pointer_plus_hwi (u, -32);
  t = build2 (MODIFY_EXPR, ptr_type_node, stk, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_ndx member.  If the first variable argument is on
     the stack, adjust __va_ndx by 2 words to account for the extra
     alignment offset for __va_stk.  */
  if (arg_words >= MAX_ARGS_IN_REGISTERS)
    arg_words += 2;
  t = build2 (MODIFY_EXPR, integer_type_node, ndx,
	      build_int_cst (integer_type_node, arg_words * UNITS_PER_WORD));
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
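/* Illustrative example: for "int f (int a, int b, ...)" arg_words is 2,
   so __va_ndx starts at 8; if all 6 argument registers are taken by
   named arguments, arg_words >= MAX_ARGS_IN_REGISTERS and __va_ndx
   starts at (6 + 2) * 4 == 32, matching the 32-byte bias applied to
   __va_stk above.  */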
/* Implement `va_arg'.  */

static tree
xtensa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			     gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree type_size, array, orig_ndx, addr, size, va_size, t;
  tree lab_false, lab_over, lab_false2;
  bool indirect;

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  /* Handle complex values as separate real and imaginary parts.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part;

      real_part = xtensa_gimplify_va_arg_expr (valist, TREE_TYPE (type),
					       pre_p, NULL);
      real_part = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = xtensa_gimplify_va_arg_expr (unshare_expr (valist),
					       TREE_TYPE (type),
					       pre_p, NULL);
      imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);

      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    }

  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = DECL_CHAIN (f_stk);
  f_ndx = DECL_CHAIN (f_reg);

  stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist,
		f_stk, NULL_TREE);
  reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
		f_reg, NULL_TREE);
  ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
		f_ndx, NULL_TREE);

  type_size = size_in_bytes (type);
  va_size = round_up (type_size, UNITS_PER_WORD);
  gimplify_expr (&va_size, pre_p, NULL, is_gimple_val, fb_rvalue);


  /* First align __va_ndx if necessary for this arg:

     orig_ndx = (AP).__va_ndx;
     if (__alignof__ (TYPE) > 4 )
       orig_ndx = ((orig_ndx + __alignof__ (TYPE) - 1)
			& -__alignof__ (TYPE)); */

  orig_ndx = get_initialized_tmp_var (ndx, pre_p, NULL);

  if (TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_UNIT;

      t = build2 (PLUS_EXPR, integer_type_node, unshare_expr (orig_ndx),
		  build_int_cst (integer_type_node, align - 1));
      t = build2 (BIT_AND_EXPR, integer_type_node, t,
		  build_int_cst (integer_type_node, -align));
      gimplify_assign (unshare_expr (orig_ndx), t, pre_p);
    }


  /* Increment __va_ndx to point past the argument:

     (AP).__va_ndx = orig_ndx + __va_size (TYPE); */

  t = fold_convert (integer_type_node, va_size);
  t = build2 (PLUS_EXPR, integer_type_node, orig_ndx, t);
  gimplify_assign (unshare_expr (ndx), t, pre_p);


  /* Check if the argument is in registers:

     if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
         && !must_pass_in_stack (type))
        __array = (AP).__va_reg; */

  array = create_tmp_var (ptr_type_node);

  lab_over = NULL;
  if (!targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      t = build2 (GT_EXPR, boolean_type_node, unshare_expr (ndx),
		  build_int_cst (integer_type_node,
				 MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
      t = build3 (COND_EXPR, void_type_node, t,
		  build1 (GOTO_EXPR, void_type_node, lab_false),
		  NULL_TREE);
      gimplify_and_add (t, pre_p);

      gimplify_assign (unshare_expr (array), reg, pre_p);

      t = build1 (GOTO_EXPR, void_type_node, lab_over);
      gimplify_and_add (t, pre_p);

      t = build1 (LABEL_EXPR, void_type_node, lab_false);
      gimplify_and_add (t, pre_p);
    }


  /* ...otherwise, the argument is on the stack (never split between
     registers and the stack -- change __va_ndx if necessary):

     else
       {
	 if (orig_ndx <= __MAX_ARGS_IN_REGISTERS * 4)
	     (AP).__va_ndx = 32 + __va_size (TYPE);
	 __array = (AP).__va_stk;
       } */

  lab_false2 = create_artificial_label (UNKNOWN_LOCATION);

  t = build2 (GT_EXPR, boolean_type_node, unshare_expr (orig_ndx),
	      build_int_cst (integer_type_node,
			     MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
  t = build3 (COND_EXPR, void_type_node, t,
	      build1 (GOTO_EXPR, void_type_node, lab_false2),
	      NULL_TREE);
  gimplify_and_add (t, pre_p);

  t = size_binop (PLUS_EXPR, unshare_expr (va_size), size_int (32));
  t = fold_convert (integer_type_node, t);
  gimplify_assign (unshare_expr (ndx), t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, lab_false2);
  gimplify_and_add (t, pre_p);

  gimplify_assign (array, stk, pre_p);

  if (lab_over)
    {
      t = build1 (LABEL_EXPR, void_type_node, lab_over);
      gimplify_and_add (t, pre_p);
    }


  /* Given the base array pointer (__array) and index to the subsequent
     argument (__va_ndx), find the address:

     __array + (AP).__va_ndx - (BYTES_BIG_ENDIAN && sizeof (TYPE) < 4
				? sizeof (TYPE)
				: __va_size (TYPE))

     The results are endian-dependent because values smaller than one word
     are aligned differently.  */

  if (BYTES_BIG_ENDIAN && TREE_CODE (type_size) == INTEGER_CST)
    {
      t = fold_build2 (GE_EXPR, boolean_type_node, unshare_expr (type_size),
		       size_int (PARM_BOUNDARY / BITS_PER_UNIT));
      t = fold_build3 (COND_EXPR, sizetype, t, unshare_expr (va_size),
		       unshare_expr (type_size));
      size = t;
    }
  else
    size = unshare_expr (va_size);

  t = fold_convert (sizetype, unshare_expr (ndx));
  t = build2 (MINUS_EXPR, sizetype, t, size);
  addr = fold_build_pointer_plus (unshare_expr (array), t);

  addr = fold_convert (build_pointer_type (type), addr);
  if (indirect)
    addr = build_va_arg_indirect_ref (addr);
  return build_va_arg_indirect_ref (addr);
}
/* Builtins.  */

enum xtensa_builtin
{
  XTENSA_BUILTIN_UMULSIDI3,
  XTENSA_BUILTIN_max
};


static void
xtensa_init_builtins (void)
{
  tree ftype, decl;

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    unsigned_intSI_type_node,
				    unsigned_intSI_type_node, NULL_TREE);

  decl = add_builtin_function ("__builtin_umulsidi3", ftype,
			       XTENSA_BUILTIN_UMULSIDI3, BUILT_IN_MD,
			       "__umulsidi3", NULL_TREE);
  TREE_NOTHROW (decl) = 1;
  TREE_READONLY (decl) = 1;
}
static tree
xtensa_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
		     bool ignore ATTRIBUTE_UNUSED)
{
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;

  switch (fcode)
    {
    case XTENSA_BUILTIN_UMULSIDI3:
      arg0 = args[0];
      arg1 = args[1];
      if ((TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	  || TARGET_MUL32_HIGH)
	return fold_build2 (MULT_EXPR, unsigned_intDI_type_node,
			    fold_convert (unsigned_intDI_type_node, arg0),
			    fold_convert (unsigned_intDI_type_node, arg1));
      break;

    default:
      internal_error ("bad builtin code");
      break;
    }

  return NULL;
}
static rtx
xtensa_expand_builtin (tree exp, rtx target,
		       rtx subtarget ATTRIBUTE_UNUSED,
		       machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case XTENSA_BUILTIN_UMULSIDI3:
      /* The umulsidi3 builtin is just a mechanism to avoid calling the real
	 __umulsidi3 function when the Xtensa configuration can directly
	 implement it.  If not, just call the function.  */
      return expand_call (exp, target, ignore);

    default:
      internal_error ("bad builtin code");
    }
  return NULL_RTX;
}
/* Worker function for TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
xtensa_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (CONSTANT_P (x) && CONST_DOUBLE_P (x))
    return NO_REGS;

  /* Don't use the stack pointer or hard frame pointer for reloads!
     The hard frame pointer would normally be OK except that it may
     briefly hold an incoming argument in the prologue, and reload
     won't know that it is live because the hard frame pointer is
     treated specially.  */

  if (rclass == AR_REGS || rclass == GR_REGS)
    return RL_REGS;

  return rclass;
}

/* Worker function for TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
xtensa_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
				      reg_class_t rclass)
{
  /* Don't use the stack pointer or hard frame pointer for reloads!
     The hard frame pointer would normally be OK except that it may
     briefly hold an incoming argument in the prologue, and reload
     won't know that it is live because the hard frame pointer is
     treated specially.  */

  if (rclass == AR_REGS || rclass == GR_REGS)
    return RL_REGS;

  return rclass;
}
/* Worker function for TARGET_SECONDARY_RELOAD.  */

static reg_class_t
xtensa_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
			 machine_mode mode, secondary_reload_info *sri)
{
  int regno;

  if (in_p && constantpool_mem_p (x))
    {
      if (rclass == FP_REGS)
	return RL_REGS;

      if (mode == QImode)
	sri->icode = CODE_FOR_reloadqi_literal;
      else if (mode == HImode)
	sri->icode = CODE_FOR_reloadhi_literal;
    }

  regno = xt_true_regnum (x);
  if (ACC_REG_P (regno))
    return ((rclass == GR_REGS || rclass == RL_REGS) ? NO_REGS : RL_REGS);
  if (rclass == ACC_REG)
    return (GP_REG_P (regno) ? NO_REGS : RL_REGS);

  return NO_REGS;
}
void
order_regs_for_local_alloc (void)
{
  if (!leaf_function_p ())
    {
      static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
	REG_ALLOC_ORDER;
      static const int reg_nonleaf_alloc_order_call0[FIRST_PSEUDO_REGISTER] =
	{
	  11, 10,  9,  8,  7,  6,  5,  4,  3,  2, 12, 13, 14, 15,
	  18,
	  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
	  0,  1, 16, 17,
	  35,
	};

      memcpy (reg_alloc_order, TARGET_WINDOWED_ABI ?
	      reg_nonleaf_alloc_order : reg_nonleaf_alloc_order_call0,
	      FIRST_PSEUDO_REGISTER * sizeof (int));
    }
  else
    {
      int i, num_arg_regs;
      int nxt = 0;

      /* Use the AR registers in increasing order (skipping a0 and a1)
	 but save the incoming argument registers for a last resort.  */
      num_arg_regs = crtl->args.info.arg_words;
      if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
	num_arg_regs = MAX_ARGS_IN_REGISTERS;
      for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
	reg_alloc_order[nxt++] = i + num_arg_regs;
      for (i = 0; i < num_arg_regs; i++)
	reg_alloc_order[nxt++] = GP_ARG_FIRST + i;

      /* List the coprocessor registers in order.  */
      for (i = 0; i < BR_REG_NUM; i++)
	reg_alloc_order[nxt++] = BR_REG_FIRST + i;

      /* List the FP registers in order for now.  */
      for (i = 0; i < 16; i++)
	reg_alloc_order[nxt++] = FP_REG_FIRST + i;

      /* GCC requires that we list *all* the registers....  */
      reg_alloc_order[nxt++] = 0;	/* a0 = return address */
      reg_alloc_order[nxt++] = 1;	/* a1 = stack pointer */
      reg_alloc_order[nxt++] = 16;	/* pseudo frame pointer */
      reg_alloc_order[nxt++] = 17;	/* pseudo arg pointer */

      reg_alloc_order[nxt++] = ACC_REG_FIRST;	/* MAC16 accumulator */
    }
}
/* Some Xtensa targets support multiple bss sections.  If the section
   name ends with ".bss", add SECTION_BSS to the flags.  */

static unsigned int
xtensa_multibss_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);
  const char *suffix;

  suffix = strrchr (name, '.');
  if (suffix && strcmp (suffix, ".bss") == 0)
    {
      if (!decl || (TREE_CODE (decl) == VAR_DECL
		    && DECL_INITIAL (decl) == NULL_TREE))
	flags |= SECTION_BSS;  /* @nobits */
      else
	warning (0, "only uninitialized variables can be placed in a "
		 ".bss section");
    }

  return flags;
}
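/* Usage example (illustrative): a definition such as

     int buf[256] __attribute__ ((section (".sram.bss")));

   lands in a section whose name ends in ".bss" and therefore gets the
   SECTION_BSS (@nobits) flag, provided the variable has no
   initializer.  */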
/* The literal pool stays with the function.  */

static section *
xtensa_select_rtx_section (machine_mode mode ATTRIBUTE_UNUSED,
			   rtx x ATTRIBUTE_UNUSED,
			   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  return function_section (current_function_decl);
}
/* Worker function for TARGET_REGISTER_MOVE_COST.  */

static int
xtensa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			   reg_class_t from, reg_class_t to)
{
  if (from == to && from != BR_REGS && to != BR_REGS)
    return 2;
  else if (reg_class_subset_p (from, AR_REGS)
	   && reg_class_subset_p (to, AR_REGS))
    return 2;
  else if (reg_class_subset_p (from, AR_REGS) && to == ACC_REG)
    return 3;
  else if (from == ACC_REG && reg_class_subset_p (to, AR_REGS))
    return 3;
  else
    return 10;
}

/* Worker function for TARGET_MEMORY_MOVE_COST.  */

static int
xtensa_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t rclass ATTRIBUTE_UNUSED,
			 bool in ATTRIBUTE_UNUSED)
{
  return 4;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
xtensa_rtx_costs (rtx x, machine_mode mode, int outer_code,
		  int opno ATTRIBUTE_UNUSED,
		  int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      switch (outer_code)
	{
	case SET:
	  if (xtensa_simm12b (INTVAL (x)))
	    {
	      *total = 4;
	      return true;
	    }
	  break;
	case PLUS:
	  if (xtensa_simm8 (INTVAL (x))
	      || xtensa_simm8x256 (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case AND:
	  if (xtensa_mask_immediate (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case COMPARE:
	  if ((INTVAL (x) == 0) || xtensa_b4const (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	case ROTATE:
	case ROTATERT:
	  /* No way to tell if X is the 2nd operand so be conservative.  */
	default:
	  break;
	}
      if (xtensa_simm12b (INTVAL (x)))
	*total = 5;
      else if (TARGET_CONST16)
	*total = COSTS_N_INSNS (2);
      else
	*total = 6;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_CONST16)
	*total = COSTS_N_INSNS (2);
      else
	*total = 5;
      return true;

    case CONST_DOUBLE:
      if (TARGET_CONST16)
	*total = COSTS_N_INSNS (4);
      else
	*total = 7;
      return true;

    case MEM:
      {
	int num_words =
	  (GET_MODE_SIZE (mode) > UNITS_PER_WORD) ? 2 : 1;

	if (memory_address_p (mode, XEXP ((x), 0)))
	  *total = COSTS_N_INSNS (num_words);
	else
	  *total = COSTS_N_INSNS (2*num_words);
	return true;
      }

    case FFS:
    case CTZ:
      *total = COSTS_N_INSNS (TARGET_NSA ? 5 : 50);
      return true;

    case CLZ:
      *total = COSTS_N_INSNS (TARGET_NSA ? 1 : 50);
      return true;

    case NOT:
      *total = COSTS_N_INSNS (mode == DImode ? 3 : 2);
      return true;

    case AND:
    case IOR:
    case XOR:
      if (mode == DImode)
	*total = COSTS_N_INSNS (2);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (mode == DImode)
	*total = COSTS_N_INSNS (50);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ABS:
      if (mode == SFmode)
	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
      else if (mode == DFmode)
	*total = COSTS_N_INSNS (50);
      else
	*total = COSTS_N_INSNS (4);
      return true;

    case PLUS:
    case MINUS:
      if (mode == SFmode)
	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
      else if (mode == DFmode || mode == DImode)
	*total = COSTS_N_INSNS (50);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case NEG:
      *total = COSTS_N_INSNS (mode == DImode ? 4 : 2);
      return true;

    case MULT:
      if (mode == SFmode)
	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 4 : 50);
      else if (mode == DFmode)
	*total = COSTS_N_INSNS (50);
      else if (mode == DImode)
	*total = COSTS_N_INSNS (TARGET_MUL32_HIGH ? 10 : 50);
      else if (TARGET_MUL32)
	*total = COSTS_N_INSNS (4);
      else if (TARGET_MAC16)
	*total = COSTS_N_INSNS (16);
      else if (TARGET_MUL16)
	*total = COSTS_N_INSNS (12);
      else
	*total = COSTS_N_INSNS (50);
      return true;

    case DIV:
    case MOD:
      if (mode == SFmode)
	{
	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT_DIV ? 8 : 50);
	  return true;
	}
      else if (mode == DFmode)
	{
	  *total = COSTS_N_INSNS (50);
	  return true;
	}
      /* Fall through.  */

    case UDIV:
    case UMOD:
      if (mode == DImode)
	*total = COSTS_N_INSNS (50);
      else if (TARGET_DIV32)
	*total = COSTS_N_INSNS (32);
      else
	*total = COSTS_N_INSNS (50);
      return true;

    case SQRT:
      if (mode == SFmode)
	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT_SQRT ? 8 : 50);
      else
	*total = COSTS_N_INSNS (50);
      return true;

    case SMIN:
    case UMIN:
    case SMAX:
    case UMAX:
      *total = COSTS_N_INSNS (TARGET_MINMAX ? 1 : 50);
      return true;

    case SIGN_EXTRACT:
    case SIGN_EXTEND:
      *total = COSTS_N_INSNS (TARGET_SEXT ? 1 : 2);
      return true;

    case ZERO_EXTRACT:
    case ZERO_EXTEND:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
xtensa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
	  > 4 * UNITS_PER_WORD);
}

/* Worker function for TARGET_FUNCTION_VALUE.  */

rtx
xtensa_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
		       bool outgoing)
{
  return gen_rtx_REG ((INTEGRAL_TYPE_P (valtype)
		       && TYPE_PRECISION (valtype) < BITS_PER_WORD)
		      ? SImode : TYPE_MODE (valtype),
		      outgoing ? GP_OUTGOING_RETURN : GP_RETURN);
}

/* Worker function for TARGET_LIBCALL_VALUE.  */

static rtx
xtensa_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG ((GET_MODE_CLASS (mode) == MODE_INT
		       && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
		      ? SImode : mode, GP_RETURN);
}

/* Worker function TARGET_FUNCTION_VALUE_REGNO_P.  */

static bool
xtensa_function_value_regno_p (const unsigned int regno)
{
  return (regno == GP_RETURN);
}
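/* For example (illustrative): a function returning "short" has
   TYPE_PRECISION 16 < BITS_PER_WORD, so the value is promoted and
   returned in SImode in the GP_RETURN register (a2), while a "double"
   is returned in its own TYPE_MODE.  */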
/* The static chain is passed in memory.  Provide rtx giving 'mem'
   expressions that denote where they are stored.  */

static rtx
xtensa_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  if (TARGET_WINDOWED_ABI)
    {
      rtx base = incoming_p ? arg_pointer_rtx : stack_pointer_rtx;
      return gen_frame_mem (Pmode, plus_constant (Pmode, base,
						  -5 * UNITS_PER_WORD));
    }
  else
    return gen_rtx_REG (Pmode, A8_REG);
}
/* TRAMPOLINE_TEMPLATE: For Xtensa, the trampoline must perform an ENTRY
   instruction with a minimal stack frame in order to get some free
   registers.  Once the actual call target is known, the proper stack frame
   size is extracted from the ENTRY instruction at the target and the
   current frame is adjusted to match.  The trampoline then transfers
   control to the instruction following the ENTRY at the target.  Note:
   this assumes that the target begins with an ENTRY instruction.  */

static void
xtensa_asm_trampoline_template (FILE *stream)
{
  bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);

  fprintf (stream, "\t.begin no-transform\n");

  if (TARGET_WINDOWED_ABI)
    {
      fprintf (stream, "\tentry\tsp, %d\n", MIN_FRAME_SIZE);

      if (use_call0)
	{
	  /* Save the return address.  */
	  fprintf (stream, "\tmov\ta10, a0\n");

	  /* Use a CALL0 instruction to skip past the constants and in the
	     process get the PC into A0.  This allows PC-relative access to
	     the constants without relying on L32R.  */
	  fprintf (stream, "\tcall0\t.Lskipconsts\n");
	}
      else
	fprintf (stream, "\tj\t.Lskipconsts\n");

      fprintf (stream, "\t.align\t4\n");
      fprintf (stream, ".Lchainval:%s0\n", integer_asm_op (4, TRUE));
      fprintf (stream, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE));
      fprintf (stream, ".Lskipconsts:\n");

      /* Load the static chain and function address from the trampoline.  */
      if (use_call0)
	{
	  fprintf (stream, "\taddi\ta0, a0, 3\n");
	  fprintf (stream, "\tl32i\ta9, a0, 0\n");
	  fprintf (stream, "\tl32i\ta8, a0, 4\n");
	}
      else
	{
	  fprintf (stream, "\tl32r\ta9, .Lchainval\n");
	  fprintf (stream, "\tl32r\ta8, .Lfnaddr\n");
	}

      /* Store the static chain.  */
      fprintf (stream, "\ts32i\ta9, sp, %d\n", MIN_FRAME_SIZE - 20);

      /* Set the proper stack pointer value.  */
      fprintf (stream, "\tl32i\ta9, a8, 0\n");
      fprintf (stream, "\textui\ta9, a9, %d, 12\n",
	       TARGET_BIG_ENDIAN ? 8 : 12);
      fprintf (stream, "\tslli\ta9, a9, 3\n");
      fprintf (stream, "\taddi\ta9, a9, %d\n", -MIN_FRAME_SIZE);
      fprintf (stream, "\tsub\ta9, sp, a9\n");
      fprintf (stream, "\tmovsp\tsp, a9\n");

      /* Restore the return address.  */
      fprintf (stream, "\tmov\ta0, a10\n");

      /* Jump to the instruction following the ENTRY.  */
      fprintf (stream, "\taddi\ta8, a8, 3\n");
      fprintf (stream, "\tjx\ta8\n");

      /* Pad size to a multiple of TRAMPOLINE_ALIGNMENT.  */
      if (use_call0)
	fprintf (stream, "\t.byte\t0\n");
      else
	fprintf (stream, "\tnop\n");
    }
  else
    {
      if (use_call0)
	{
	  /* Save the return address.  */
	  fprintf (stream, "\tmov\ta10, a0\n");

	  /* Use a CALL0 instruction to skip past the constants and in the
	     process get the PC into A0.  This allows PC-relative access to
	     the constants without relying on L32R.  */
	  fprintf (stream, "\tcall0\t.Lskipconsts\n");
	}
      else
	fprintf (stream, "\tj\t.Lskipconsts\n");

      fprintf (stream, "\t.align\t4\n");
      fprintf (stream, ".Lchainval:%s0\n", integer_asm_op (4, TRUE));
      fprintf (stream, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE));
      fprintf (stream, ".Lskipconsts:\n");

      /* Load the static chain and function address from the trampoline.  */
      if (use_call0)
	{
	  fprintf (stream, "\taddi\ta0, a0, 3\n");
	  fprintf (stream, "\tl32i\ta8, a0, 0\n");
	  fprintf (stream, "\tl32i\ta9, a0, 4\n");
	  fprintf (stream, "\tmov\ta0, a10\n");
	}
      else
	{
	  fprintf (stream, "\tl32r\ta8, .Lchainval\n");
	  fprintf (stream, "\tl32r\ta9, .Lfnaddr\n");
	}
      fprintf (stream, "\tjx\ta9\n");

      /* Pad size to a multiple of TRAMPOLINE_ALIGNMENT.  */
      if (use_call0)
	fprintf (stream, "\t.byte\t0\n");
      else
	fprintf (stream, "\tnop\n");
    }
  fprintf (stream, "\t.end no-transform\n");
}
static void
xtensa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain)
{
  rtx func = XEXP (DECL_RTL (fndecl), 0);
  bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);
  int chain_off;
  int func_off;

  if (TARGET_WINDOWED_ABI)
    {
      chain_off = use_call0 ? 12 : 8;
      func_off = use_call0 ? 16 : 12;
    }
  else
    {
      chain_off = use_call0 ? 8 : 4;
      func_off = use_call0 ? 12 : 8;
    }

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  emit_move_insn (adjust_address (m_tramp, SImode, chain_off), chain);
  emit_move_insn (adjust_address (m_tramp, SImode, func_off), func);
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_sync_caches"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

static bool
xtensa_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return !xtensa_tls_referenced_p (x);
}

/* Implement TARGET_CAN_USE_DOLOOP_P.  */

static bool
xtensa_can_use_doloop_p (const widest_int &, const widest_int &,
			 unsigned int loop_depth, bool entered_at_top)
{
  /* Considering limitations in the hardware, only use doloop
     for innermost loops which must be entered from the top.  */
  if (loop_depth > 1 || !entered_at_top)
    return false;

  return true;
}
/* Return NULL if INSN is valid within a low-overhead loop.
   Otherwise return a string explaining why doloop cannot be applied.  */

static const char *
xtensa_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn) && INSN_CODE (insn) == CODE_FOR_return)
    return "Return from a call instruction in the loop.";

  return NULL;
}
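/* Illustrative: a simple counted loop such as

     for (i = 0; i < n; i++)
       a[i] += b[i];

   can become a zero-overhead hardware loop, while any innermost loop
   containing a call or a return is rejected by the checks above.  */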
/* Optimize LOOP.  */

static bool
hwloop_optimize (hwloop_info loop)
{
  int i;
  edge entry_edge;
  basic_block entry_bb;
  rtx iter_reg;
  rtx_insn *insn, *seq, *entry_after;

  if (loop->depth > 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d is not innermost\n",
		 loop->loop_no);
      return false;
    }

  if (!loop->incoming_dest)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d has more than one entry\n",
		 loop->loop_no);
      return false;
    }

  if (loop->incoming_dest != loop->head)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d is not entered from head\n",
		 loop->loop_no);
      return false;
    }

  if (loop->has_call || loop->has_asm)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d has invalid insn\n",
		 loop->loop_no);
      return false;
    }

  /* Scan all the blocks to make sure they don't use iter_reg.  */
  if (loop->iter_reg_used || loop->iter_reg_used_outside)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d uses iterator\n",
		 loop->loop_no);
      return false;
    }

  /* Check if start_label appears before doloop_end.  */
  insn = loop->start_label;
  while (insn && insn != loop->loop_end)
    insn = NEXT_INSN (insn);

  if (!insn)
    {
      if (dump_file)
	fprintf (dump_file, ";; loop %d start_label not before loop_end\n",
		 loop->loop_no);
      return false;
    }

  /* Get the loop iteration register.  */
  iter_reg = loop->iter_reg;

  gcc_assert (REG_P (iter_reg));

  entry_edge = NULL;

  FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
    if (entry_edge->flags & EDGE_FALLTHRU)
      break;

  if (entry_edge == NULL)
    return false;

  /* Place the zero_cost_loop_start instruction before the loop.  */
  entry_bb = entry_edge->src;

  start_sequence ();

  insn = emit_insn (gen_zero_cost_loop_start (loop->iter_reg,
					      loop->start_label,
					      loop->iter_reg));

  seq = get_insns ();

  if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
    {
      basic_block new_bb;
      edge e;
      edge_iterator ei;

      emit_insn_before (seq, BB_HEAD (loop->head));
      seq = emit_label_before (gen_label_rtx (), seq);
      new_bb = create_basic_block (seq, insn, entry_bb);
      FOR_EACH_EDGE (e, ei, loop->incoming)
	{
	  if (!(e->flags & EDGE_FALLTHRU))
	    redirect_edge_and_branch_force (e, new_bb);
	  else
	    redirect_edge_succ (e, new_bb);
	}

      make_edge (new_bb, loop->head, 0);
    }
  else
    {
      entry_after = BB_END (entry_bb);
      while (DEBUG_INSN_P (entry_after)
	     || (NOTE_P (entry_after)
		 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
	entry_after = PREV_INSN (entry_after);

      emit_insn_after (seq, entry_after);
    }

  end_sequence ();

  return true;
}
/* A callback for the hw-doloop pass.  Called when a loop we have discovered
   turns out not to be optimizable; we have to split the loop_end pattern
   into a subtract and a test.  */

static void
hwloop_fail (hwloop_info loop)
{
  rtx test;
  rtx_insn *insn = loop->loop_end;

  emit_insn_before (gen_addsi3 (loop->iter_reg,
				loop->iter_reg,
				constm1_rtx),
		    loop->loop_end);

  test = gen_rtx_NE (VOIDmode, loop->iter_reg, const0_rtx);
  insn = emit_jump_insn_before (gen_cbranchsi4 (test,
						loop->iter_reg, const0_rtx,
						loop->start_label),
				loop->loop_end);

  JUMP_LABEL (insn) = loop->start_label;
  LABEL_NUSES (loop->start_label)++;
  delete_insn (loop->loop_end);
}
/* A callback for the hw-doloop pass.  This function examines INSN; if
   it is a doloop_end pattern we recognize, return the reg rtx for the
   loop counter.  Otherwise, return NULL_RTX.  */

static rtx
hwloop_pattern_reg (rtx_insn *insn)
{
  rtx reg;

  if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
    return NULL_RTX;

  reg = SET_DEST (XVECEXP (PATTERN (insn), 0, 1));
  if (!REG_P (reg))
    return NULL_RTX;

  return reg;
}


static struct hw_doloop_hooks xtensa_doloop_hooks =
{
  hwloop_pattern_reg,
  hwloop_optimize,
  hwloop_fail
};

/* Run from machine_dependent_reorg, this pass looks for doloop_end insns
   and tries to rewrite the RTL of these loops so that proper Xtensa
   hardware loops are generated.  */

#if TARGET_LOOPS
static void
xtensa_reorg_loops (void)
{
  reorg_loops (false, &xtensa_doloop_hooks);
}
#else
static void
xtensa_reorg_loops (void)
{
}
#endif

/* Implement the TARGET_MACHINE_DEPENDENT_REORG pass.  */

static void
xtensa_reorg (void)
{
  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  df_analyze ();

  /* Doloop optimization.  */
  xtensa_reorg_loops ();
}
/* Update register usage after having seen the compiler flags.  */

static void
xtensa_conditional_register_usage (void)
{
  unsigned i, c_mask;

  c_mask = TARGET_WINDOWED_ABI ? (1 << 1) : (1 << 2);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Set/reset conditionally defined registers from
	 CALL_USED_REGISTERS initializer.  */
      if (call_used_regs[i] > 1)
	call_used_regs[i] = !!(call_used_regs[i] & c_mask);
    }

  /* Remove hard FP register from the preferred reload registers set.  */
  CLEAR_HARD_REG_BIT (reg_class_contents[(int)RL_REGS],
		      HARD_FRAME_POINTER_REGNUM);
}
/* Map hard register number to register class.  */

enum reg_class
xtensa_regno_to_class (int regno)
{
  static const enum reg_class regno_to_class[FIRST_PSEUDO_REGISTER] =
    {
      RL_REGS,	SP_REG,		RL_REGS,	RL_REGS,
      RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
      RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
      RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
      AR_REGS,	AR_REGS,	BR_REGS,
      FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
      FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
      FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
      FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
      ACC_REG
    };

  if (regno == HARD_FRAME_POINTER_REGNUM)
    return GR_REGS;
  else
    return regno_to_class[regno];
}


#include "gt-xtensa.h"