1 /* Subroutines used for code generation for RISC-V.
2 Copyright (C) 2011-2018 Free Software Foundation, Inc.
3 Contributed by Andrew Waterman (andrew@sifive.com).
4 Based on MIPS target for GNU compiler.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #define IN_TARGET_CODE 1
26 #include "coretypes.h"
30 #include "insn-config.h"
31 #include "insn-attr.h"
36 #include "stringpool.h"
39 #include "stor-layout.h"
48 #include "target-def.h"
49 #include "basic-block.h"
54 #include "diagnostic.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
/* NOTE(review): the body of UNSPEC_ADDRESS appears to be missing from
   this extract -- presumably (XVECEXP ((X), 0, 0)); confirm against the
   original source before compiling.  */

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
/* Classifies an address.

   A natural register + offset address.  The register satisfies
   riscv_valid_base_register_p and the offset is a const_arith_operand.

   A LO_SUM rtx.  The first operand is a valid base register and
   the second operand is a symbolic address.

   A signed 16-bit constant address.

   A constant symbolic address.  */
enum riscv_address_type
{
/* NOTE(review): the enumerators are missing from this extract --
   presumably ADDRESS_REG, ADDRESS_LO_SUM, ADDRESS_CONST_INT and
   ADDRESS_SYMBOLIC (they are referenced below); confirm and restore
   along with the closing brace.  */
/* Information about a function's frame layout.  */
struct GTY(()) riscv_frame_info
{
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  /* NOTE(review): the GPR/FPR save-mask field declarations appear to be
     missing from this extract; confirm against the original source.  */

  /* Likewise FPR X.  */

  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
  unsigned save_libcall_adjustment;

  /* Offsets of fixed-point and floating-point save areas from frame
     bottom.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* Offset of virtual frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT frame_pointer_offset;

  /* Offset of hard frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT hard_frame_pointer_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* Per-function machine-dependent state, attached to each function's
   cfun->machine.  */
struct GTY(()) machine_function
{
  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  /* NOTE(review): the varargs-size and naked-function fields appear to
     be missing from this extract.  */

  /* True if current function is a naked function.  */

  /* The current frame information, calculated by riscv_compute_frame_info.  */
  struct riscv_frame_info frame;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* Information about a single argument.  */
struct riscv_arg_info
{
  /* True if the argument is at least partially passed on the stack.  */
  /* NOTE(review): the stack_p flag declaration appears to be missing
     from this extract.  */

  /* The number of integer registers allocated to this argument.  */
  unsigned int num_gprs;

  /* The offset of the first register used, provided num_gprs is nonzero.
     If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS.  */
  unsigned int gpr_offset;

  /* The number of floating-point registers allocated to this argument.  */
  unsigned int num_fprs;

  /* The offset of the first register used, provided num_fprs is nonzero.  */
  unsigned int fpr_offset;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* Information about an address described by riscv_address_type.

   REG is the base register and OFFSET is the constant offset.

   REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
   is the type of symbol it references.

   SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info
{
  enum riscv_address_type type;
  /* NOTE(review): the rtx reg and rtx offset fields appear to be
     missing from this extract (they are referenced by
     riscv_classify_address below); confirm and restore.  */
  enum riscv_symbol_type symbol_type;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* One stage in a constant building sequence.  These sequences have
   the form:

   A = A CODE[1] VALUE[1]
   A = A CODE[2] VALUE[2]
   ...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct riscv_integer_op
{
  /* NOTE(review): the enum rtx_code code field appears to be missing
     from this extract (codes[i].code is referenced below); confirm and
     restore along with the closing brace.  */
  unsigned HOST_WIDE_INT value;
/* The largest number of operations needed to load an integer constant.
   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI.  */
#define RISCV_MAX_INTEGER_OPS 8
/* Costs of various operations on the different architectures.  */
struct riscv_tune_info
/* NOTE(review): the opening brace of this struct is missing from the
   extract.  Each [2] pair is presumably the cost for the single- and
   double-precision (or 32/64-bit) variants -- confirm.  */
  unsigned short fp_add[2];
  unsigned short fp_mul[2];
  unsigned short fp_div[2];
  unsigned short int_mul[2];
  unsigned short int_div[2];
  unsigned short issue_rate;
  unsigned short branch_cost;
  unsigned short memory_cost;
  bool slow_unaligned_access;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* Information about one CPU we know about.  */
struct riscv_cpu_info
{
  /* This CPU's canonical name.  */
  /* NOTE(review): the const char *name field appears to be missing from
     this extract (it is referenced by riscv_parse_cpu below); confirm
     and restore.  */

  /* Tuning parameters for this CPU.  */
  const struct riscv_tune_info *tune_info;
  /* NOTE(review): closing brace of this struct is missing from the
     extract.  */
/* Global variables for machine-dependent things.  */

/* Whether unaligned accesses execute very slowly.  */
bool riscv_slow_unaligned_access_p;

/* Stack alignment to assume/maintain.  */
unsigned riscv_stack_boundary;

/* Which tuning parameters to use.  Selected from riscv_cpu_info_table
   at option-processing time.  */
static const struct riscv_tune_info *tune_info;
/* Index R is the smallest register class that contains register R.
   Rows of four, in hard-register order: x0-x31 (GPRs), then f0-f31
   (FPRs), then the two frame-related fake registers.  */
const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
  GR_REGS,	GR_REGS,	SIBCALL_REGS,	SIBCALL_REGS,
  JALR_REGS,	JALR_REGS,	JALR_REGS,	JALR_REGS,
  JALR_REGS,	JALR_REGS,	JALR_REGS,	JALR_REGS,
  JALR_REGS,	JALR_REGS,	JALR_REGS,	JALR_REGS,
  JALR_REGS,	JALR_REGS,	JALR_REGS,	JALR_REGS,
  JALR_REGS,	JALR_REGS,	JALR_REGS,	JALR_REGS,
  SIBCALL_REGS,	SIBCALL_REGS,	SIBCALL_REGS,	SIBCALL_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FRAME_REGS,	FRAME_REGS,
/* NOTE(review): the closing brace of this initializer is missing from
   the extract.  */
/* Costs to use when optimizing for rocket.  */
static const struct riscv_tune_info rocket_tune_info = {
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_add */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_mul */
  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},	/* fp_div */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},	/* int_mul */
  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},	/* int_div */
  /* NOTE(review): the issue_rate, branch_cost and memory_cost
     initializers appear to be missing from this extract.  */
  true,						/* slow_unaligned_access */
  /* NOTE(review): closing brace of this initializer is missing from the
     extract.  */
/* Costs to use when optimizing for size.  */
static const struct riscv_tune_info optimize_size_tune_info = {
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_add */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_div */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_div */
  /* NOTE(review): the issue_rate, branch_cost and memory_cost
     initializers appear to be missing from this extract.  */
  false,					/* slow_unaligned_access */
  /* NOTE(review): closing brace of this initializer is missing from the
     extract.  */
/* Forward declaration: attribute handler used by riscv_attribute_table
   below.  */
static tree riscv_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
/* Defining target-specific uses of __attribute__.  */
static const struct attribute_spec riscv_attribute_table[] =
/* NOTE(review): the opening brace of this initializer is missing from
   the extract.  */
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
	       function_type_required, affects_type_identity, handler,
	       exclusions }.  */

  /* The attribute telling no prologue/epilogue.  */
  { "naked",	0,  0, true, false, false, false,
    riscv_handle_fndecl_attribute, NULL },

  /* The last attribute spec is set to be NULL.  */
  { NULL,	0,  0, false, false, false, false, NULL, NULL }
/* NOTE(review): the closing brace of this initializer is missing from
   the extract.  */
/* A table describing all the processors GCC knows about.  */
static const struct riscv_cpu_info riscv_cpu_info_table[] = {
  { "rocket", &rocket_tune_info },
  { "size", &optimize_size_tune_info },
/* NOTE(review): the closing brace of this initializer is missing from
   the extract.  */
/* Return the riscv_cpu_info entry for the given name string.  On an
   unknown name, emits an error and falls back to the first table entry
   ("rocket").  */
static const struct riscv_cpu_info *
riscv_parse_cpu (const char *cpu_string)
/* NOTE(review): the function's opening/closing braces are missing from
   this extract.  */
  for (unsigned i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
    if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
      return riscv_cpu_info_table + i;

  error ("unknown cpu %qs for -mtune", cpu_string);
  return riscv_cpu_info_table;
/* Helper function for riscv_build_integer; arguments are as for
   riscv_build_integer.

   NOTE(review): this extract is missing the return type (presumably
   static int), the braces, the initial cost assignments, the else-if
   conditions preceding the ADDI case, and the final return -- confirm
   against the original source before compiling.  */
riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
		       HOST_WIDE_INT value, machine_mode mode)
  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
  int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];

  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
      /* Simply ADDI or LUI.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;

  /* End with ADDI.  When constructing HImode constants, do not generate any
     intermediate value that is not itself a valid HImode constant.  The
     XORI case below will handle those remaining HImode constants.  */
      || value - low_part <= ((1 << (GET_MODE_BITSIZE (HImode) - 1)) - 1)))
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
      alt_codes[alt_cost-1].code = PLUS;
      alt_codes[alt_cost-1].value = low_part;
      memcpy (codes, alt_codes, sizeof (alt_codes));

  /* End with XORI: only tried when a cheaper sequence was not found and
     the low part is negative (or the mode is HImode).  */
  if (cost > 2 && (low_part < 0 || mode == HImode))
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
      alt_codes[alt_cost-1].code = XOR;
      alt_codes[alt_cost-1].value = low_part;
      memcpy (codes, alt_codes, sizeof (alt_codes));

  /* Eliminate trailing zeros and end with SLLI.  */
  if (cost > 2 && (value & 1) == 0)
      int shift = ctz_hwi (value);
      unsigned HOST_WIDE_INT x = value;
      x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);

      /* Don't eliminate the lower 12 bits if LUI might apply.  */
      if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
	shift -= IMM_BITS, x <<= IMM_BITS;

      alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
      alt_codes[alt_cost-1].code = ASHIFT;
      alt_codes[alt_cost-1].value = shift;
      memcpy (codes, alt_codes, sizeof (alt_codes));

  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.

   NOTE(review): this extract is missing the return type, the final
   machine_mode parameter, braces, the cost-comparison guards around
   each memcpy, and the return statement -- confirm against the original
   source before compiling.  */
riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
  int cost = riscv_build_integer_1 (codes, value, mode);

  /* Eliminate leading zeros and end with SRLI.  */
  if (value > 0 && cost > 2)
      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
      int alt_cost, shift = clz_hwi (value);
      HOST_WIDE_INT shifted_val;

      /* Try filling trailing bits with 1s.  */
      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      alt_codes[alt_cost-1].code = LSHIFTRT;
      alt_codes[alt_cost-1].value = shift;
      memcpy (codes, alt_codes, sizeof (alt_codes));

      /* Try filling trailing bits with 0s.  */
      shifted_val = value << shift;
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      alt_codes[alt_cost-1].code = LSHIFTRT;
      alt_codes[alt_cost-1].value = shift;
      memcpy (codes, alt_codes, sizeof (alt_codes));
/* Return the cost of constructing VAL in the event that a scratch
   register is available.  The base cost of 2 accounts for combining the
   two 32-bit halves.

   NOTE(review): the return type, braces, the declaration of cost, and
   the return statement are missing from this extract.  */
riscv_split_integer_cost (HOST_WIDE_INT val)
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];

  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
  cost += riscv_build_integer (codes, hival, VOIDmode);
/* Return the cost of constructing the integer constant VAL: the cheaper
   of a direct sequence and a split via a scratch register.

   NOTE(review): the return type and braces are missing from this
   extract.  */
riscv_integer_cost (HOST_WIDE_INT val)
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  return MIN (riscv_build_integer (codes, val, VOIDmode),
	      riscv_split_integer_cost (val));
/* Try to split a 64b integer into 32b parts, then reassemble:
   result = (hi << 32) + lo, with each half loaded independently.

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_split_integer (HOST_WIDE_INT val, machine_mode mode)
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);

  riscv_move_integer (hi, hi, hival);
  riscv_move_integer (lo, lo, loval);

  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
  hi = force_reg (mode, hi);

  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
/* Return true if X is a thread-local symbol.

   NOTE(review): the return type (presumably static bool) and braces are
   missing from this extract.  */
riscv_tls_symbol_p (const_rtx x)
  return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
/* Return true if symbol X binds locally.

   NOTE(review): the return type, braces, and the non-SYMBOL_REF return
   path are missing from this extract.  */
riscv_symbol_binds_local_p (const_rtx x)
  if (SYMBOL_REF_P (x))
    return (SYMBOL_REF_DECL (x)
	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	    : SYMBOL_REF_LOCAL_P (x));
/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X.

   NOTE(review): braces and the body of the TLS branch (presumably
   return SYMBOL_TLS) are missing from this extract.  */
static enum riscv_symbol_type
riscv_classify_symbol (const_rtx x)
  if (riscv_tls_symbol_p (x))

  if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
    return SYMBOL_GOT_DISP;

  return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
/* Classify the base of symbolic expression X.

   NOTE(review): braces and the declaration of offset are missing from
   this extract.  */
enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx x)
  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return riscv_classify_symbol (x);
/* Return true if X is a symbolic constant.  If it is, store the type of
   the symbol in *SYMBOL_TYPE.

   NOTE(review): this extract is missing the return type, braces, the
   declaration of offset, the else-return-false path, and most of the
   switch body (the non-SYMBOL_ABSOLUTE cases) -- confirm against the
   original source before compiling.  */
riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    *symbol_type = riscv_classify_symbol (x);

  if (offset == const0_rtx)

  /* Nonzero offsets are only valid for references that don't use the GOT.  */
  switch (*symbol_type)
    case SYMBOL_ABSOLUTE:
      /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
      return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
/* Returns the number of instructions necessary to reference a symbol.

   NOTE(review): braces and the switch (type) header are missing from
   this extract.  */
static int riscv_symbol_insns (enum riscv_symbol_type type)
    case SYMBOL_TLS: return 0; /* Depends on the TLS model.  */
    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference.  */
    case SYMBOL_PCREL: return 2; /* AUIPC + the reference.  */
    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference.  */
    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference.  */
    default: gcc_unreachable ();
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   NOTE(review): the return type (presumably static bool) and braces are
   missing from this extract.  */
riscv_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  return riscv_const_insns (x) > 0;
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.

   NOTE(review): this extract is missing the return type, braces, the
   base/offset declarations, and the true/false returns after each
   guard -- confirm against the original source before compiling.  */
riscv_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  enum riscv_symbol_type type;

  /* There is no assembler syntax for expressing an address-sized
     high part.  */
  if (GET_CODE (x) == HIGH)

  split_const (x, &base, &offset);
  if (riscv_symbolic_constant_p (base, &type))
      /* As an optimization, don't spill symbolic constants that are as
	 cheap to rematerialize as to access in the constant pool.  */
      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)

  /* As an optimization, avoid needlessly generate dynamic relocations.  */

  /* TLS symbols must be computed by riscv_legitimize_move.  */
  if (tls_referenced_p (x))
/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.

   NOTE(review): this extract is missing the return type, the strict_p
   parameter line, braces, the !strict_p early return inside the pseudo
   branch, and the return after the fake-register check.  */
riscv_regno_mode_ok_for_base_p (int regno,
				machine_mode mode ATTRIBUTE_UNUSED,
  if (!HARD_REGISTER_NUM_P (regno))
      regno = reg_renumber[regno];

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)

  return GP_REG_P (regno);
/* Return true if X is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.

   NOTE(review): this extract is missing the return type, braces, the
   SUBREG-stripping statement, and the REG_P (x) half of the final
   conjunction.  */
riscv_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
  if (!strict_p && GET_CODE (x) == SUBREG)

	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
   can address a value of mode MODE.

   NOTE(review): this extract is missing the return type, braces, and
   the false/true returns after each guard.  */
riscv_valid_offset_p (rtx x, machine_mode mode)
  /* Check that X is a signed 12-bit number.  */
  if (!const_arith_operand (x, Pmode))

  /* We may need to split multiword moves, so make sure that every word
     is addressable.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
/* Should a symbol of type SYMBOL_TYPE be split in two?

   NOTE(review): this extract is missing the return type, braces, and
   the returns after the first two guards (presumably true when
   SYMBOL_TLS_LE, false when !TARGET_EXPLICIT_RELOCS).  */
riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
  if (symbol_type == SYMBOL_TLS_LE)

  if (!TARGET_EXPLICIT_RELOCS)

  return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
/* Return true if a LO_SUM can address a value of mode MODE when the
   LO_SUM symbol has type SYM_TYPE.

   NOTE(review): this extract is missing the return type, braces, and
   the false/true returns after each guard.  */
riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode)
  /* Check that symbols of type SYMBOL_TYPE can be used to access values
     of mode MODE.  */
  if (riscv_symbol_insns (sym_type) == 0)

  /* Check that there is a known low-part relocation.  */
  if (!riscv_split_symbol_type (sym_type))

  /* We may need to split multiword moves, so make sure that each word
     can be accessed without inducing a carry.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && (!TARGET_STRICT_ALIGN
	  || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)))
/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.

   NOTE(review): this extract is missing the return type, braces, the
   case labels of the switch (presumably REG, PLUS, LO_SUM, CONST_INT
   and a default returning false), and the info->reg assignment in the
   REG case -- confirm against the original source before compiling.  */
riscv_classify_address (struct riscv_address_info *info, rtx x,
			machine_mode mode, bool strict_p)
  switch (GET_CODE (x))
      /* Bare register.  */
      info->type = ADDRESS_REG;
      info->offset = const0_rtx;
      return riscv_valid_base_register_p (info->reg, mode, strict_p);

      /* Register + constant offset.  */
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_offset_p (info->offset, mode));

      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The RISC-V
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of one.  */
	= riscv_classify_symbolic_expression (info->offset);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_lo_sum_p (info->symbol_type, mode));

      /* Small-integer addresses don't occur very often, but they
	 are legitimate if x0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return SMALL_OPERAND (INTVAL (x));
/* Implement TARGET_LEGITIMATE_ADDRESS_P.

   NOTE(review): the return type (presumably static bool) and braces are
   missing from this extract.  */
riscv_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
  struct riscv_address_info addr;

  return riscv_classify_address (&addr, x, mode, strict_p);
/* Return the number of instructions needed to load or store a value
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.

   NOTE(review): this extract is missing the return type, braces, the
   initialization of n (presumably to 1), the early return 0, and the
   final return n.  */
riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
  struct riscv_address_info addr;

  if (!riscv_classify_address (&addr, x, mode, false))

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  */
  if (mode != BLKmode && might_split_p)
    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (addr.type == ADDRESS_LO_SUM)
    n += riscv_symbol_insns (addr.symbol_type) - 1;
/* Return the number of instructions needed to load constant X.
   Return 0 if X isn't a valid constant.

   NOTE(review): this extract is missing the return type, braces, the
   declaration of offset, the case labels of the switch (presumably
   HIGH, CONST_INT, CONST_DOUBLE/CONST_VECTOR, CONST, SYMBOL_REF/
   LABEL_REF and a default), and several returns/guards -- confirm
   against the original source before compiling.  */
riscv_const_insns (rtx x)
  enum riscv_symbol_type symbol_type;

  switch (GET_CODE (x))
      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
	  || !riscv_split_symbol_type (symbol_type))
      /* This is simply an LUI.  */

	int cost = riscv_integer_cost (INTVAL (x));
	/* Force complicated constants to memory.  */
	return cost < 4 ? cost : 0;

      /* We can use x0 to load floating-point zero.  */
      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

      /* See if we can refer to X directly.  */
      if (riscv_symbolic_constant_p (x, &symbol_type))
	return riscv_symbol_insns (symbol_type);

      /* Otherwise try splitting the constant into a base and offset.  */
      split_const (x, &x, &offset);

	  int n = riscv_const_insns (x);

	    return n + riscv_integer_cost (INTVAL (offset));

      return riscv_symbol_insns (riscv_classify_symbol (x));
/* X is a doubleword constant that can be handled by splitting it into
   two words and loading each word separately.  Return the number of
   instructions required to do this.

   NOTE(review): the return type, braces, and the final return
   (presumably low + high) are missing from this extract.  */
riscv_split_const_insns (rtx x)
  unsigned int low, high;

  low = riscv_const_insns (riscv_subword (x, false));
  high = riscv_const_insns (riscv_subword (x, true));
  gcc_assert (low > 0 && high > 0);
/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.

   NOTE(review): this extract is missing the return type, braces, and
   the declarations of mode, might_split_p and set.  */
riscv_load_store_insns (rtx mem, rtx_insn *insn)
  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = true;
  if (GET_MODE_BITSIZE (mode) <= 32)
    might_split_p = false;
  else if (GET_MODE_BITSIZE (mode) == 64)
      set = single_set (insn);
      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
	might_split_p = false;

  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_emit_move (rtx dest, rtx src)
  return (can_create_pseudo_p ()
	  ? emit_move_insn (dest, src)
	  : emit_move_insn_1 (dest, src));
/* Emit an instruction of the form (set TARGET SRC).

   NOTE(review): the return type, braces, and the return statement
   (presumably return target) are missing from this extract.  */
riscv_emit_set (rtx target, rtx src)
  emit_insn (gen_rtx_SET (target, src));
/* Emit an instruction of the form (set DEST (CODE X Y)).

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
  return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
/* Compute (CODE X Y) and store the result in a new register
   of mode MODE.  Return that new register.

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_force_binary (machine_mode mode, enum rtx_code code, rtx x, rtx y)
  return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.

   NOTE(review): the return type, braces, the else branch structure and
   the return dest statement are missing from this extract.  */
riscv_force_temporary (rtx dest, rtx value)
  if (can_create_pseudo_p ())
    return force_reg (Pmode, value);

      riscv_emit_move (dest, value);
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_unspec_address_offset (rtx base, rtx offset,
			     enum riscv_symbol_type symbol_type)
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
			 UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.

   NOTE(review): the return type, braces and the base/offset
   declarations are missing from this extract.  */
riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
  split_const (address, &base, &offset);
  return riscv_unspec_address_offset (base, offset, symbol_type);
/* If OP is an UNSPEC address, return the address to which it refers,
   otherwise return OP itself.

   NOTE(review): the return type, braces, the base/offset declarations
   and the final return op are missing from this extract.  */
riscv_strip_unspec_address (rtx op)
  split_const (op, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for riscv_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.

   NOTE(review): the return type (presumably static rtx) and braces are
   missing from this extract.  */
riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
  return riscv_force_temporary (temp, addr);
/* Load an entry from the GOT for a TLS GD access, dispatching on
   Pmode to the DI or SI pattern.

   NOTE(review): braces and the else keyword are missing from this
   extract.  */
static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
  if (Pmode == DImode)
    return gen_got_load_tls_gddi (dest, sym);

  return gen_got_load_tls_gdsi (dest, sym);
/* Load an entry from the GOT for a TLS IE access, dispatching on
   Pmode to the DI or SI pattern.

   NOTE(review): braces and the else keyword are missing from this
   extract.  */
static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
  if (Pmode == DImode)
    return gen_got_load_tls_iedi (dest, sym);

  return gen_got_load_tls_iesi (dest, sym);
/* Add in the thread pointer for a TLS LE access.

   NOTE(review): braces and the else keyword are missing from this
   extract.  */
static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);

  if (Pmode == DImode)
    return gen_tls_add_tp_ledi (dest, base, tp, sym);

  return gen_tls_add_tp_lesi (dest, base, tp, sym);
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for riscv_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimize SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.

   NOTE(review): this extract is missing the return type, braces, the
   early return false, the low_out null check, the SYMBOL_PCREL case
   label and its local declarations (buf, label), the seqno increment,
   the else before the SImode auipc branch, the break/return true
   statements and the default case -- confirm against the original
   source before compiling.  */
riscv_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
  enum riscv_symbol_type symbol_type;

  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
      || !riscv_symbolic_constant_p (addr, &symbol_type)
      || riscv_symbol_insns (symbol_type) == 0
      || !riscv_split_symbol_type (symbol_type))

  switch (symbol_type)
    case SYMBOL_ABSOLUTE:
	rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
	high = riscv_force_temporary (temp, high);
	*low_out = gen_rtx_LO_SUM (Pmode, high, addr);

	/* PC-relative access: emit a local label via AUIPC and refer to
	   it with a LO_SUM.  */
	static unsigned seqno;

	ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
	gcc_assert ((size_t) bytes < sizeof (buf));

	label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
	SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;

	  temp = gen_reg_rtx (Pmode);

	if (Pmode == DImode)
	  emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
	  emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));

	*low_out = gen_rtx_LO_SUM (Pmode, temp, label);
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   riscv_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.

   NOTE(review): the return type, braces and the declaration of high are
   missing from this extract.  */
riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
  if (!SMALL_OPERAND (offset))
      /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
	 The addition inside the macro CONST_HIGH_PART may cause an
	 overflow, so we need to force a sign-extension check.  */
      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
      offset = CONST_LOW_PART (offset);
      high = riscv_force_temporary (temp, high);
      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));

  return plus_constant (Pmode, reg, offset);
/* The __tls_get_addr symbol, created lazily by riscv_call_tls_get_addr.  */
static GTY(()) rtx riscv_tls_symbol;
/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  RESULT is an RTX for the
   return value location.

   NOTE(review): this extract is missing the return type, braces, the
   declaration of insn, the start_sequence/end_sequence pair around the
   emitted insns, and the final return -- confirm against the original
   source before compiling.  */
riscv_call_tls_get_addr (rtx sym, rtx result)
  rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;

  if (!riscv_tls_symbol)
    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
  func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);

  emit_insn (riscv_got_load_tls_gd (a0, sym));
  insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).

   NOTE(review): this extract is missing the return type, braces, the
   tmp/dest/tp declarations, the flag_pic guard before the LE override,
   the switch (model) header, the break statements, the default case and
   the final return dest -- confirm against the original source before
   compiling.  */
riscv_legitimize_tls_address (rtx loc)
  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);

  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
    model = TLS_MODEL_LOCAL_EXEC;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* Rely on section anchors for the optimization that LDM TLS
	 provides.  The anchor's address is loaded with GD TLS.  */
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_rtx_REG (Pmode, GP_RETURN);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);

    case TLS_MODEL_INITIAL_EXEC:
      /* la.tls.ie; tp-relative add */
      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
      tmp = gen_reg_rtx (Pmode);
      emit_insn (riscv_got_load_tls_ie (tmp, loc));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp, tp));

    case TLS_MODEL_LOCAL_EXEC:
      tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
      dest = gen_reg_rtx (Pmode);
      emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
      dest = gen_rtx_LO_SUM (Pmode, dest,
			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
/* If X is not a valid address for mode MODE, force it into a register.

   NOTE(review): the return type, braces and the final return x are
   missing from this extract.  */
riscv_force_address (rtx x, machine_mode mode)
  if (!riscv_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
1217 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
1218 be legitimized in a way that the generic machinery might not expect,
1219 return a new address, otherwise return NULL. MODE is the mode of
1220 the memory being accessed. */
1223 riscv_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1228 if (riscv_tls_symbol_p (x
))
1229 return riscv_legitimize_tls_address (x
);
1231 /* See if the address can split into a high part and a LO_SUM. */
1232 if (riscv_split_symbol (NULL
, x
, mode
, &addr
))
1233 return riscv_force_address (addr
, mode
);
1235 /* Handle BASE + OFFSET using riscv_add_offset. */
1236 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1))
1237 && INTVAL (XEXP (x
, 1)) != 0)
1239 rtx base
= XEXP (x
, 0);
1240 HOST_WIDE_INT offset
= INTVAL (XEXP (x
, 1));
1242 if (!riscv_valid_base_register_p (base
, mode
, false))
1243 base
= copy_to_mode_reg (Pmode
, base
);
1244 addr
= riscv_add_offset (NULL
, base
, offset
);
1245 return riscv_force_address (addr
, mode
);
1251 /* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
1254 riscv_move_integer (rtx temp
, rtx dest
, HOST_WIDE_INT value
)
1256 struct riscv_integer_op codes
[RISCV_MAX_INTEGER_OPS
];
1261 mode
= GET_MODE (dest
);
1262 num_ops
= riscv_build_integer (codes
, value
, mode
);
1264 if (can_create_pseudo_p () && num_ops
> 2 /* not a simple constant */
1265 && num_ops
>= riscv_split_integer_cost (value
))
1266 x
= riscv_split_integer (value
, mode
);
1269 /* Apply each binary operation to X. */
1270 x
= GEN_INT (codes
[0].value
);
1272 for (i
= 1; i
< num_ops
; i
++)
1274 if (!can_create_pseudo_p ())
1275 x
= riscv_emit_set (temp
, x
);
1277 x
= force_reg (mode
, x
);
1279 x
= gen_rtx_fmt_ee (codes
[i
].code
, mode
, x
, GEN_INT (codes
[i
].value
));
1283 riscv_emit_set (dest
, x
);
1286 /* Subroutine of riscv_legitimize_move. Move constant SRC into register
1287 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1291 riscv_legitimize_const_move (machine_mode mode
, rtx dest
, rtx src
)
1295 /* Split moves of big integers into smaller pieces. */
1296 if (splittable_const_int_operand (src
, mode
))
1298 riscv_move_integer (dest
, dest
, INTVAL (src
));
1302 /* Split moves of symbolic constants into high/low pairs. */
1303 if (riscv_split_symbol (dest
, src
, MAX_MACHINE_MODE
, &src
))
1305 riscv_emit_set (dest
, src
);
1309 /* Generate the appropriate access sequences for TLS symbols. */
1310 if (riscv_tls_symbol_p (src
))
1312 riscv_emit_move (dest
, riscv_legitimize_tls_address (src
));
1316 /* If we have (const (plus symbol offset)), and that expression cannot
1317 be forced into memory, load the symbol first and add in the offset. Also
1318 prefer to do this even if the constant _can_ be forced into memory, as it
1319 usually produces better code. */
1320 split_const (src
, &base
, &offset
);
1321 if (offset
!= const0_rtx
1322 && (targetm
.cannot_force_const_mem (mode
, src
) || can_create_pseudo_p ()))
1324 base
= riscv_force_temporary (dest
, base
);
1325 riscv_emit_move (dest
, riscv_add_offset (NULL
, base
, INTVAL (offset
)));
1329 src
= force_const_mem (mode
, src
);
1331 /* When using explicit relocs, constant pool references are sometimes
1332 not legitimate addresses. */
1333 riscv_split_symbol (dest
, XEXP (src
, 0), mode
, &XEXP (src
, 0));
1334 riscv_emit_move (dest
, src
);
1337 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
1338 sequence that is valid. */
1341 riscv_legitimize_move (machine_mode mode
, rtx dest
, rtx src
)
1343 if (!register_operand (dest
, mode
) && !reg_or_0_operand (src
, mode
))
1345 riscv_emit_move (dest
, force_reg (mode
, src
));
1349 /* We need to deal with constants that would be legitimate
1350 immediate_operands but aren't legitimate move_operands. */
1351 if (CONSTANT_P (src
) && !move_operand (src
, mode
))
1353 riscv_legitimize_const_move (mode
, dest
, src
);
1354 set_unique_reg_note (get_last_insn (), REG_EQUAL
, copy_rtx (src
));
1358 /* RISC-V GCC may generate non-legitimate address due to we provide some
1359 pattern for optimize access PIC local symbol and it's make GCC generate
1360 unrecognizable instruction during optmizing. */
1362 if (MEM_P (dest
) && !riscv_legitimate_address_p (mode
, XEXP (dest
, 0),
1365 XEXP (dest
, 0) = riscv_force_address (XEXP (dest
, 0), mode
);
1368 if (MEM_P (src
) && !riscv_legitimate_address_p (mode
, XEXP (src
, 0),
1371 XEXP (src
, 0) = riscv_force_address (XEXP (src
, 0), mode
);
1377 /* Return true if there is an instruction that implements CODE and accepts
1378 X as an immediate operand. */
1381 riscv_immediate_operand_p (int code
, HOST_WIDE_INT x
)
1388 /* All shift counts are truncated to a valid constant. */
1397 /* These instructions take 12-bit signed immediates. */
1398 return SMALL_OPERAND (x
);
1401 /* We add 1 to the immediate and use SLT. */
1402 return SMALL_OPERAND (x
+ 1);
1405 /* Likewise SLTU, but reject the always-true case. */
1406 return SMALL_OPERAND (x
+ 1) && x
+ 1 != 0;
1410 /* We can emulate an immediate of 1 by using GT/GTU against x0. */
1414 /* By default assume that x0 can be used for 0. */
1419 /* Return the cost of binary operation X, given that the instruction
1420 sequence for a word-sized or smaller operation takes SIGNLE_INSNS
1421 instructions and that the sequence of a double-word operation takes
1422 DOUBLE_INSNS instructions. */
1425 riscv_binary_cost (rtx x
, int single_insns
, int double_insns
)
1427 if (GET_MODE_SIZE (GET_MODE (x
)) == UNITS_PER_WORD
* 2)
1428 return COSTS_N_INSNS (double_insns
);
1429 return COSTS_N_INSNS (single_insns
);
1432 /* Return the cost of sign- or zero-extending OP. */
1435 riscv_extend_cost (rtx op
, bool unsigned_p
)
1440 if (unsigned_p
&& GET_MODE (op
) == QImode
)
1441 /* We can use ANDI. */
1442 return COSTS_N_INSNS (1);
1444 if (!unsigned_p
&& GET_MODE (op
) == SImode
)
1445 /* We can use SEXT.W. */
1446 return COSTS_N_INSNS (1);
1448 /* We need to use a shift left and a shift right. */
1449 return COSTS_N_INSNS (2);
1452 /* Implement TARGET_RTX_COSTS. */
1454 #define SINGLE_SHIFT_COST 1
1457 riscv_rtx_costs (rtx x
, machine_mode mode
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
1458 int *total
, bool speed
)
1460 bool float_mode_p
= FLOAT_MODE_P (mode
);
1463 switch (GET_CODE (x
))
1466 if (riscv_immediate_operand_p (outer_code
, INTVAL (x
)))
1477 if ((cost
= riscv_const_insns (x
)) > 0)
1479 /* If the constant is likely to be stored in a GPR, SETs of
1480 single-insn constants are as cheap as register sets; we
1481 never want to CSE them. */
1482 if (cost
== 1 && outer_code
== SET
)
1484 /* When we load a constant more than once, it usually is better
1485 to duplicate the last operation in the sequence than to CSE
1486 the constant itself. */
1487 else if (outer_code
== SET
|| GET_MODE (x
) == VOIDmode
)
1488 *total
= COSTS_N_INSNS (1);
1490 else /* The instruction will be fetched from the constant pool. */
1491 *total
= COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE
));
1495 /* If the address is legitimate, return the number of
1496 instructions it needs. */
1497 if ((cost
= riscv_address_insns (XEXP (x
, 0), mode
, true)) > 0)
1499 *total
= COSTS_N_INSNS (cost
+ tune_info
->memory_cost
);
1502 /* Otherwise use the default handling. */
1506 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1);
1512 /* Double-word operations use two single-word operations. */
1513 *total
= riscv_binary_cost (x
, 1, 2);
1517 /* This is an SImode shift. */
1518 if (outer_code
== SET
&& (INTVAL (XEXP (x
, 2)) > 0)
1519 && (INTVAL (XEXP (x
, 1)) + INTVAL (XEXP (x
, 2)) == 32))
1521 *total
= COSTS_N_INSNS (SINGLE_SHIFT_COST
);
1529 *total
= riscv_binary_cost (x
, SINGLE_SHIFT_COST
,
1530 CONSTANT_P (XEXP (x
, 1)) ? 4 : 9);
1534 *total
= COSTS_N_INSNS (float_mode_p
? 1 : 3);
1538 *total
= set_src_cost (XEXP (x
, 0), mode
, speed
);
1542 /* This is an SImode shift. */
1543 if (outer_code
== SET
&& GET_MODE (x
) == DImode
1544 && GET_MODE (XEXP (x
, 0)) == SImode
)
1546 *total
= COSTS_N_INSNS (SINGLE_SHIFT_COST
);
1559 /* Branch comparisons have VOIDmode, so use the first operand's
1561 mode
= GET_MODE (XEXP (x
, 0));
1563 *total
= tune_info
->fp_add
[mode
== DFmode
];
1565 *total
= riscv_binary_cost (x
, 1, 3);
1570 /* (FEQ(A, A) & FEQ(B, B)) compared against 0. */
1571 mode
= GET_MODE (XEXP (x
, 0));
1572 *total
= tune_info
->fp_add
[mode
== DFmode
] + COSTS_N_INSNS (2);
1577 /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B). */
1578 mode
= GET_MODE (XEXP (x
, 0));
1579 *total
= tune_info
->fp_add
[mode
== DFmode
] + COSTS_N_INSNS (3);
1586 /* FLT or FLE, but guarded by an FFLAGS read and write. */
1587 mode
= GET_MODE (XEXP (x
, 0));
1588 *total
= tune_info
->fp_add
[mode
== DFmode
] + COSTS_N_INSNS (4);
1594 *total
= tune_info
->fp_add
[mode
== DFmode
];
1596 *total
= riscv_binary_cost (x
, 1, 4);
1601 rtx op
= XEXP (x
, 0);
1602 if (GET_CODE (op
) == FMA
&& !HONOR_SIGNED_ZEROS (mode
))
1604 *total
= (tune_info
->fp_mul
[mode
== DFmode
]
1605 + set_src_cost (XEXP (op
, 0), mode
, speed
)
1606 + set_src_cost (XEXP (op
, 1), mode
, speed
)
1607 + set_src_cost (XEXP (op
, 2), mode
, speed
));
1613 *total
= tune_info
->fp_add
[mode
== DFmode
];
1615 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 4 : 1);
1620 *total
= tune_info
->fp_mul
[mode
== DFmode
];
1621 else if (!TARGET_MUL
)
1622 /* Estimate the cost of a library call. */
1623 *total
= COSTS_N_INSNS (speed
? 32 : 6);
1624 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
1625 *total
= 3 * tune_info
->int_mul
[0] + COSTS_N_INSNS (2);
1627 *total
= COSTS_N_INSNS (1);
1629 *total
= tune_info
->int_mul
[mode
== DImode
];
1637 *total
= tune_info
->fp_div
[mode
== DFmode
];
1645 /* Estimate the cost of a library call. */
1646 *total
= COSTS_N_INSNS (speed
? 32 : 6);
1648 *total
= tune_info
->int_div
[mode
== DImode
];
1650 *total
= COSTS_N_INSNS (1);
1654 /* This is an SImode shift. */
1655 if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
)
1657 *total
= COSTS_N_INSNS (SINGLE_SHIFT_COST
);
1662 *total
= riscv_extend_cost (XEXP (x
, 0), GET_CODE (x
) == ZERO_EXTEND
);
1666 case UNSIGNED_FLOAT
:
1669 case FLOAT_TRUNCATE
:
1670 *total
= tune_info
->fp_add
[mode
== DFmode
];
1674 *total
= (tune_info
->fp_mul
[mode
== DFmode
]
1675 + set_src_cost (XEXP (x
, 0), mode
, speed
)
1676 + set_src_cost (XEXP (x
, 1), mode
, speed
)
1677 + set_src_cost (XEXP (x
, 2), mode
, speed
));
1681 if (XINT (x
, 1) == UNSPEC_AUIPC
)
1683 /* Make AUIPC cheap to avoid spilling its result to the stack. */
1694 /* Implement TARGET_ADDRESS_COST. */
1697 riscv_address_cost (rtx addr
, machine_mode mode
,
1698 addr_space_t as ATTRIBUTE_UNUSED
,
1699 bool speed ATTRIBUTE_UNUSED
)
1701 return riscv_address_insns (addr
, mode
, false);
1704 /* Return one word of double-word value OP. HIGH_P is true to select the
1705 high part or false to select the low part. */
1708 riscv_subword (rtx op
, bool high_p
)
1710 unsigned int byte
= high_p
? UNITS_PER_WORD
: 0;
1711 machine_mode mode
= GET_MODE (op
);
1713 if (mode
== VOIDmode
)
1714 mode
= TARGET_64BIT
? TImode
: DImode
;
1717 return adjust_address (op
, word_mode
, byte
);
1720 gcc_assert (!FP_REG_RTX_P (op
));
1722 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
1725 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
1728 riscv_split_64bit_move_p (rtx dest
, rtx src
)
1733 /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
1734 of zeroing an FPR with FCVT.D.W. */
1735 if (TARGET_DOUBLE_FLOAT
1736 && ((FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
1737 || (FP_REG_RTX_P (dest
) && MEM_P (src
))
1738 || (FP_REG_RTX_P (src
) && MEM_P (dest
))
1739 || (FP_REG_RTX_P (dest
) && src
== CONST0_RTX (GET_MODE (src
)))))
1745 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
1746 this function handles 64-bit moves for which riscv_split_64bit_move_p
1747 holds. For 64-bit targets, this function handles 128-bit moves. */
1750 riscv_split_doubleword_move (rtx dest
, rtx src
)
1754 /* The operation can be split into two normal moves. Decide in
1755 which order to do them. */
1756 low_dest
= riscv_subword (dest
, false);
1757 if (REG_P (low_dest
) && reg_overlap_mentioned_p (low_dest
, src
))
1759 riscv_emit_move (riscv_subword (dest
, true), riscv_subword (src
, true));
1760 riscv_emit_move (low_dest
, riscv_subword (src
, false));
1764 riscv_emit_move (low_dest
, riscv_subword (src
, false));
1765 riscv_emit_move (riscv_subword (dest
, true), riscv_subword (src
, true));
1769 /* Return the appropriate instructions to move SRC into DEST. Assume
1770 that SRC is operand 1 and DEST is operand 0. */
1773 riscv_output_move (rtx dest
, rtx src
)
1775 enum rtx_code dest_code
, src_code
;
1779 dest_code
= GET_CODE (dest
);
1780 src_code
= GET_CODE (src
);
1781 mode
= GET_MODE (dest
);
1782 dbl_p
= (GET_MODE_SIZE (mode
) == 8);
1784 if (dbl_p
&& riscv_split_64bit_move_p (dest
, src
))
1787 if (dest_code
== REG
&& GP_REG_P (REGNO (dest
)))
1789 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
1790 return dbl_p
? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
1792 if (src_code
== MEM
)
1793 switch (GET_MODE_SIZE (mode
))
1795 case 1: return "lbu\t%0,%1";
1796 case 2: return "lhu\t%0,%1";
1797 case 4: return "lw\t%0,%1";
1798 case 8: return "ld\t%0,%1";
1801 if (src_code
== CONST_INT
)
1804 if (src_code
== HIGH
)
1805 return "lui\t%0,%h1";
1807 if (symbolic_operand (src
, VOIDmode
))
1808 switch (riscv_classify_symbolic_expression (src
))
1810 case SYMBOL_GOT_DISP
: return "la\t%0,%1";
1811 case SYMBOL_ABSOLUTE
: return "lla\t%0,%1";
1812 case SYMBOL_PCREL
: return "lla\t%0,%1";
1813 default: gcc_unreachable ();
1816 if ((src_code
== REG
&& GP_REG_P (REGNO (src
)))
1817 || (src
== CONST0_RTX (mode
)))
1819 if (dest_code
== REG
)
1821 if (GP_REG_P (REGNO (dest
)))
1822 return "mv\t%0,%z1";
1824 if (FP_REG_P (REGNO (dest
)))
1827 return "fmv.s.x\t%0,%z1";
1829 return "fmv.d.x\t%0,%z1";
1830 /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
1831 gcc_assert (src
== CONST0_RTX (mode
));
1832 return "fcvt.d.w\t%0,x0";
1835 if (dest_code
== MEM
)
1836 switch (GET_MODE_SIZE (mode
))
1838 case 1: return "sb\t%z1,%0";
1839 case 2: return "sh\t%z1,%0";
1840 case 4: return "sw\t%z1,%0";
1841 case 8: return "sd\t%z1,%0";
1844 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
1846 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
1847 return dbl_p
? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
1849 if (dest_code
== MEM
)
1850 return dbl_p
? "fsd\t%1,%0" : "fsw\t%1,%0";
1852 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
1854 if (src_code
== MEM
)
1855 return dbl_p
? "fld\t%0,%1" : "flw\t%0,%1";
1861 riscv_output_return ()
1863 if (cfun
->machine
->naked_p
)
1870 /* Return true if CMP1 is a suitable second operand for integer ordering
1871 test CODE. See also the *sCC patterns in riscv.md. */
1874 riscv_int_order_operand_ok_p (enum rtx_code code
, rtx cmp1
)
1880 return reg_or_0_operand (cmp1
, VOIDmode
);
1884 return cmp1
== const1_rtx
;
1888 return arith_operand (cmp1
, VOIDmode
);
1891 return sle_operand (cmp1
, VOIDmode
);
1894 return sleu_operand (cmp1
, VOIDmode
);
1901 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
1902 integer ordering test *CODE, or if an equivalent combination can
1903 be formed by adjusting *CODE and *CMP1. When returning true, update
1904 *CODE and *CMP1 with the chosen code and operand, otherwise leave
1908 riscv_canonicalize_int_order_test (enum rtx_code
*code
, rtx
*cmp1
,
1911 HOST_WIDE_INT plus_one
;
1913 if (riscv_int_order_operand_ok_p (*code
, *cmp1
))
1916 if (CONST_INT_P (*cmp1
))
1920 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
1921 if (INTVAL (*cmp1
) < plus_one
)
1924 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
1930 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
1934 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
1945 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
1946 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
1947 is nonnull, it's OK to set TARGET to the inverse of the result and
1948 flip *INVERT_PTR instead. */
1951 riscv_emit_int_order_test (enum rtx_code code
, bool *invert_ptr
,
1952 rtx target
, rtx cmp0
, rtx cmp1
)
1956 /* First see if there is a RISCV instruction that can do this operation.
1957 If not, try doing the same for the inverse operation. If that also
1958 fails, force CMP1 into a register and try again. */
1959 mode
= GET_MODE (cmp0
);
1960 if (riscv_canonicalize_int_order_test (&code
, &cmp1
, mode
))
1961 riscv_emit_binary (code
, target
, cmp0
, cmp1
);
1964 enum rtx_code inv_code
= reverse_condition (code
);
1965 if (!riscv_canonicalize_int_order_test (&inv_code
, &cmp1
, mode
))
1967 cmp1
= force_reg (mode
, cmp1
);
1968 riscv_emit_int_order_test (code
, invert_ptr
, target
, cmp0
, cmp1
);
1970 else if (invert_ptr
== 0)
1972 rtx inv_target
= riscv_force_binary (GET_MODE (target
),
1973 inv_code
, cmp0
, cmp1
);
1974 riscv_emit_binary (XOR
, target
, inv_target
, const1_rtx
);
1978 *invert_ptr
= !*invert_ptr
;
1979 riscv_emit_binary (inv_code
, target
, cmp0
, cmp1
);
1984 /* Return a register that is zero iff CMP0 and CMP1 are equal.
1985 The register will have the same mode as CMP0. */
1988 riscv_zero_if_equal (rtx cmp0
, rtx cmp1
)
1990 if (cmp1
== const0_rtx
)
1993 return expand_binop (GET_MODE (cmp0
), sub_optab
,
1994 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
1997 /* Sign- or zero-extend OP0 and OP1 for integer comparisons. */
2000 riscv_extend_comparands (rtx_code code
, rtx
*op0
, rtx
*op1
)
2002 /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */
2003 if (GET_MODE_SIZE (word_mode
) > GET_MODE_SIZE (GET_MODE (*op0
)))
2005 /* It is more profitable to zero-extend QImode values. */
2006 if (unsigned_condition (code
) == code
&& GET_MODE (*op0
) == QImode
)
2008 *op0
= gen_rtx_ZERO_EXTEND (word_mode
, *op0
);
2009 if (CONST_INT_P (*op1
))
2010 *op1
= GEN_INT ((uint8_t) INTVAL (*op1
));
2012 *op1
= gen_rtx_ZERO_EXTEND (word_mode
, *op1
);
2016 *op0
= gen_rtx_SIGN_EXTEND (word_mode
, *op0
);
2017 if (*op1
!= const0_rtx
)
2018 *op1
= gen_rtx_SIGN_EXTEND (word_mode
, *op1
);
2023 /* Convert a comparison into something that can be used in a branch. On
2024 entry, *OP0 and *OP1 are the values being compared and *CODE is the code
2025 used to compare them. Update them to describe the final comparison. */
2028 riscv_emit_int_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
)
2030 if (splittable_const_int_operand (*op1
, VOIDmode
))
2032 HOST_WIDE_INT rhs
= INTVAL (*op1
);
2034 if (*code
== EQ
|| *code
== NE
)
2036 /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
2037 if (SMALL_OPERAND (-rhs
))
2039 *op0
= riscv_force_binary (GET_MODE (*op0
), PLUS
, *op0
,
2046 static const enum rtx_code mag_comparisons
[][2] = {
2047 {LEU
, LTU
}, {GTU
, GEU
}, {LE
, LT
}, {GT
, GE
}
2050 /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */
2051 for (size_t i
= 0; i
< ARRAY_SIZE (mag_comparisons
); i
++)
2053 HOST_WIDE_INT new_rhs
;
2054 bool increment
= *code
== mag_comparisons
[i
][0];
2055 bool decrement
= *code
== mag_comparisons
[i
][1];
2056 if (!increment
&& !decrement
)
2059 new_rhs
= rhs
+ (increment
? 1 : -1);
2060 if (riscv_integer_cost (new_rhs
) < riscv_integer_cost (rhs
)
2061 && (rhs
< 0) == (new_rhs
< 0))
2063 *op1
= GEN_INT (new_rhs
);
2064 *code
= mag_comparisons
[i
][increment
];
2071 riscv_extend_comparands (*code
, op0
, op1
);
2073 *op0
= force_reg (word_mode
, *op0
);
2074 if (*op1
!= const0_rtx
)
2075 *op1
= force_reg (word_mode
, *op1
);
2078 /* Like riscv_emit_int_compare, but for floating-point comparisons. */
2081 riscv_emit_float_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
)
2083 rtx tmp0
, tmp1
, cmp_op0
= *op0
, cmp_op1
= *op1
;
2084 enum rtx_code fp_code
= *code
;
2094 /* a == a && b == b */
2095 tmp0
= riscv_force_binary (word_mode
, EQ
, cmp_op0
, cmp_op0
);
2096 tmp1
= riscv_force_binary (word_mode
, EQ
, cmp_op1
, cmp_op1
);
2097 *op0
= riscv_force_binary (word_mode
, AND
, tmp0
, tmp1
);
2103 /* ordered(a, b) > (a == b) */
2104 *code
= fp_code
== LTGT
? GTU
: EQ
;
2105 tmp0
= riscv_force_binary (word_mode
, EQ
, cmp_op0
, cmp_op0
);
2106 tmp1
= riscv_force_binary (word_mode
, EQ
, cmp_op1
, cmp_op1
);
2107 *op0
= riscv_force_binary (word_mode
, AND
, tmp0
, tmp1
);
2108 *op1
= riscv_force_binary (word_mode
, EQ
, cmp_op0
, cmp_op1
);
2111 #define UNORDERED_COMPARISON(CODE, CMP) \
2114 *op0 = gen_reg_rtx (word_mode); \
2115 if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
2116 emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
2117 else if (GET_MODE (cmp_op0) == SFmode) \
2118 emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
2119 else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
2120 emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
2121 else if (GET_MODE (cmp_op0) == DFmode) \
2122 emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
2124 gcc_unreachable (); \
2125 *op1 = const0_rtx; \
2129 std::swap (cmp_op0
, cmp_op1
);
2132 UNORDERED_COMPARISON(UNGT
, le
)
2135 std::swap (cmp_op0
, cmp_op1
);
2138 UNORDERED_COMPARISON(UNGE
, lt
)
2139 #undef UNORDERED_COMPARISON
2151 /* We have instructions for these cases. */
2152 *op0
= riscv_force_binary (word_mode
, fp_code
, cmp_op0
, cmp_op1
);
2161 /* CODE-compare OP0 and OP1. Store the result in TARGET. */
2164 riscv_expand_int_scc (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
2166 riscv_extend_comparands (code
, &op0
, &op1
);
2167 op0
= force_reg (word_mode
, op0
);
2169 if (code
== EQ
|| code
== NE
)
2171 rtx zie
= riscv_zero_if_equal (op0
, op1
);
2172 riscv_emit_binary (code
, target
, zie
, const0_rtx
);
2175 riscv_emit_int_order_test (code
, 0, target
, op0
, op1
);
2178 /* Like riscv_expand_int_scc, but for floating-point comparisons. */
2181 riscv_expand_float_scc (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
2183 riscv_emit_float_compare (&code
, &op0
, &op1
);
2185 rtx cmp
= riscv_force_binary (word_mode
, code
, op0
, op1
);
2186 riscv_emit_set (target
, lowpart_subreg (SImode
, cmp
, word_mode
));
2189 /* Jump to LABEL if (CODE OP0 OP1) holds. */
2192 riscv_expand_conditional_branch (rtx label
, rtx_code code
, rtx op0
, rtx op1
)
2194 if (FLOAT_MODE_P (GET_MODE (op1
)))
2195 riscv_emit_float_compare (&code
, &op0
, &op1
);
2197 riscv_emit_int_compare (&code
, &op0
, &op1
);
2199 rtx condition
= gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
2200 emit_jump_insn (gen_condjump (condition
, label
));
2203 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
2204 least PARM_BOUNDARY bits of alignment, but will be given anything up
2205 to STACK_BOUNDARY bits if the type requires it. */
2208 riscv_function_arg_boundary (machine_mode mode
, const_tree type
)
2210 unsigned int alignment
;
2212 /* Use natural alignment if the type is not aggregate data. */
2213 if (type
&& !AGGREGATE_TYPE_P (type
))
2214 alignment
= TYPE_ALIGN (TYPE_MAIN_VARIANT (type
));
2216 alignment
= type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
);
2218 return MIN (STACK_BOUNDARY
, MAX (PARM_BOUNDARY
, alignment
));
2221 /* If MODE represents an argument that can be passed or returned in
2222 floating-point registers, return the number of registers, else 0. */
2225 riscv_pass_mode_in_fpr_p (machine_mode mode
)
2227 if (GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_FP_ARG
)
2229 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2232 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
2241 HOST_WIDE_INT offset
;
2242 } riscv_aggregate_field
;
2244 /* Identify subfields of aggregates that are candidates for passing in
2245 floating-point registers. */
2248 riscv_flatten_aggregate_field (const_tree type
,
2249 riscv_aggregate_field fields
[2],
2250 int n
, HOST_WIDE_INT offset
)
2252 switch (TREE_CODE (type
))
2255 /* Can't handle incomplete types nor sizes that are not fixed. */
2256 if (!COMPLETE_TYPE_P (type
)
2257 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
2258 || !tree_fits_uhwi_p (TYPE_SIZE (type
)))
2261 for (tree f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
2262 if (TREE_CODE (f
) == FIELD_DECL
)
2264 if (!TYPE_P (TREE_TYPE (f
)))
2267 HOST_WIDE_INT pos
= offset
+ int_byte_position (f
);
2268 n
= riscv_flatten_aggregate_field (TREE_TYPE (f
), fields
, n
, pos
);
2276 HOST_WIDE_INT n_elts
;
2277 riscv_aggregate_field subfields
[2];
2278 tree index
= TYPE_DOMAIN (type
);
2279 tree elt_size
= TYPE_SIZE_UNIT (TREE_TYPE (type
));
2280 int n_subfields
= riscv_flatten_aggregate_field (TREE_TYPE (type
),
2281 subfields
, 0, offset
);
2283 /* Can't handle incomplete types nor sizes that are not fixed. */
2284 if (n_subfields
<= 0
2285 || !COMPLETE_TYPE_P (type
)
2286 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
2288 || !TYPE_MAX_VALUE (index
)
2289 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
2290 || !TYPE_MIN_VALUE (index
)
2291 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
2292 || !tree_fits_uhwi_p (elt_size
))
2295 n_elts
= 1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
2296 - tree_to_uhwi (TYPE_MIN_VALUE (index
));
2297 gcc_assert (n_elts
>= 0);
2299 for (HOST_WIDE_INT i
= 0; i
< n_elts
; i
++)
2300 for (int j
= 0; j
< n_subfields
; j
++)
2305 fields
[n
] = subfields
[j
];
2306 fields
[n
++].offset
+= i
* tree_to_uhwi (elt_size
);
2314 /* Complex type need consume 2 field, so n must be 0. */
2318 HOST_WIDE_INT elt_size
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type
)));
2320 if (elt_size
<= UNITS_PER_FP_ARG
)
2322 fields
[0].type
= TREE_TYPE (type
);
2323 fields
[0].offset
= offset
;
2324 fields
[1].type
= TREE_TYPE (type
);
2325 fields
[1].offset
= offset
+ elt_size
;
2335 && ((SCALAR_FLOAT_TYPE_P (type
)
2336 && GET_MODE_SIZE (TYPE_MODE (type
)) <= UNITS_PER_FP_ARG
)
2337 || (INTEGRAL_TYPE_P (type
)
2338 && GET_MODE_SIZE (TYPE_MODE (type
)) <= UNITS_PER_WORD
)))
2340 fields
[n
].type
= type
;
2341 fields
[n
].offset
= offset
;
2349 /* Identify candidate aggregates for passing in floating-point registers.
2350 Candidates have at most two fields after flattening. */
2353 riscv_flatten_aggregate_argument (const_tree type
,
2354 riscv_aggregate_field fields
[2])
2356 if (!type
|| TREE_CODE (type
) != RECORD_TYPE
)
2359 return riscv_flatten_aggregate_field (type
, fields
, 0, 0);
2362 /* See whether TYPE is a record whose fields should be returned in one or
2363 two floating-point registers. If so, populate FIELDS accordingly. */
2366 riscv_pass_aggregate_in_fpr_pair_p (const_tree type
,
2367 riscv_aggregate_field fields
[2])
2369 int n
= riscv_flatten_aggregate_argument (type
, fields
);
2371 for (int i
= 0; i
< n
; i
++)
2372 if (!SCALAR_FLOAT_TYPE_P (fields
[i
].type
))
2375 return n
> 0 ? n
: 0;
2378 /* See whether TYPE is a record whose fields should be returned in one or
2379 floating-point register and one integer register. If so, populate
2380 FIELDS accordingly. */
2383 riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type
,
2384 riscv_aggregate_field fields
[2])
2386 unsigned num_int
= 0, num_float
= 0;
2387 int n
= riscv_flatten_aggregate_argument (type
, fields
);
2389 for (int i
= 0; i
< n
; i
++)
2391 num_float
+= SCALAR_FLOAT_TYPE_P (fields
[i
].type
);
2392 num_int
+= INTEGRAL_TYPE_P (fields
[i
].type
);
2395 return num_int
== 1 && num_float
== 1;
2398 /* Return the representation of an argument passed or returned in an FPR
2399 when the value has mode VALUE_MODE and the type has TYPE_MODE. The
2400 two modes may be different for structures like:
2402 struct __attribute__((packed)) foo { float f; }
2404 where the SFmode value "f" is passed in REGNO but the struct itself
2405 has mode BLKmode. */
2408 riscv_pass_fpr_single (machine_mode type_mode
, unsigned regno
,
2409 machine_mode value_mode
)
2411 rtx x
= gen_rtx_REG (value_mode
, regno
);
2413 if (type_mode
!= value_mode
)
2415 x
= gen_rtx_EXPR_LIST (VOIDmode
, x
, const0_rtx
);
2416 x
= gen_rtx_PARALLEL (type_mode
, gen_rtvec (1, x
));
2421 /* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
2422 MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
2423 byte offset for the first value, likewise MODE2 and OFFSET2 for the
2427 riscv_pass_fpr_pair (machine_mode mode
, unsigned regno1
,
2428 machine_mode mode1
, HOST_WIDE_INT offset1
,
2429 unsigned regno2
, machine_mode mode2
,
2430 HOST_WIDE_INT offset2
)
2432 return gen_rtx_PARALLEL
2435 gen_rtx_EXPR_LIST (VOIDmode
,
2436 gen_rtx_REG (mode1
, regno1
),
2438 gen_rtx_EXPR_LIST (VOIDmode
,
2439 gen_rtx_REG (mode2
, regno2
),
2440 GEN_INT (offset2
))));
2443 /* Fill INFO with information about a single argument, and return an
2444 RTL pattern to pass or return the argument. CUM is the cumulative
2445 state for earlier arguments. MODE is the mode of this argument and
2446 TYPE is its type (if known). NAMED is true if this is a named
2447 (fixed) argument rather than a variable one. RETURN_P is true if
2448 returning the argument, or false if passing the argument. */
2451 riscv_get_arg_info (struct riscv_arg_info
*info
, const CUMULATIVE_ARGS
*cum
,
2452 machine_mode mode
, const_tree type
, bool named
,
2455 unsigned num_bytes
, num_words
;
2456 unsigned fpr_base
= return_p
? FP_RETURN
: FP_ARG_FIRST
;
2457 unsigned gpr_base
= return_p
? GP_RETURN
: GP_ARG_FIRST
;
2458 unsigned alignment
= riscv_function_arg_boundary (mode
, type
);
2460 memset (info
, 0, sizeof (*info
));
2461 info
->gpr_offset
= cum
->num_gprs
;
2462 info
->fpr_offset
= cum
->num_fprs
;
2466 riscv_aggregate_field fields
[2];
2467 unsigned fregno
= fpr_base
+ info
->fpr_offset
;
2468 unsigned gregno
= gpr_base
+ info
->gpr_offset
;
2470 /* Pass one- or two-element floating-point aggregates in FPRs. */
2471 if ((info
->num_fprs
= riscv_pass_aggregate_in_fpr_pair_p (type
, fields
))
2472 && info
->fpr_offset
+ info
->num_fprs
<= MAX_ARGS_IN_REGISTERS
)
2473 switch (info
->num_fprs
)
2476 return riscv_pass_fpr_single (mode
, fregno
,
2477 TYPE_MODE (fields
[0].type
));
2480 return riscv_pass_fpr_pair (mode
, fregno
,
2481 TYPE_MODE (fields
[0].type
),
2484 TYPE_MODE (fields
[1].type
),
2491 /* Pass real and complex floating-point numbers in FPRs. */
2492 if ((info
->num_fprs
= riscv_pass_mode_in_fpr_p (mode
))
2493 && info
->fpr_offset
+ info
->num_fprs
<= MAX_ARGS_IN_REGISTERS
)
2494 switch (GET_MODE_CLASS (mode
))
2497 return gen_rtx_REG (mode
, fregno
);
2499 case MODE_COMPLEX_FLOAT
:
2500 return riscv_pass_fpr_pair (mode
, fregno
, GET_MODE_INNER (mode
), 0,
2501 fregno
+ 1, GET_MODE_INNER (mode
),
2502 GET_MODE_UNIT_SIZE (mode
));
2508 /* Pass structs with one float and one integer in an FPR and a GPR. */
2509 if (riscv_pass_aggregate_in_fpr_and_gpr_p (type
, fields
)
2510 && info
->gpr_offset
< MAX_ARGS_IN_REGISTERS
2511 && info
->fpr_offset
< MAX_ARGS_IN_REGISTERS
)
2516 if (!SCALAR_FLOAT_TYPE_P (fields
[0].type
))
2517 std::swap (fregno
, gregno
);
2519 return riscv_pass_fpr_pair (mode
, fregno
, TYPE_MODE (fields
[0].type
),
2521 gregno
, TYPE_MODE (fields
[1].type
),
2526 /* Work out the size of the argument. */
2527 num_bytes
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
2528 num_words
= (num_bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2530 /* Doubleword-aligned varargs start on an even register boundary. */
2531 if (!named
&& num_bytes
!= 0 && alignment
> BITS_PER_WORD
)
2532 info
->gpr_offset
+= info
->gpr_offset
& 1;
2534 /* Partition the argument between registers and stack. */
2536 info
->num_gprs
= MIN (num_words
, MAX_ARGS_IN_REGISTERS
- info
->gpr_offset
);
2537 info
->stack_p
= (num_words
- info
->num_gprs
) != 0;
2539 if (info
->num_gprs
|| return_p
)
2540 return gen_rtx_REG (mode
, gpr_base
+ info
->gpr_offset
);
2545 /* Implement TARGET_FUNCTION_ARG. */
2548 riscv_function_arg (cumulative_args_t cum_v
, machine_mode mode
,
2549 const_tree type
, bool named
)
2551 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
2552 struct riscv_arg_info info
;
2554 if (mode
== VOIDmode
)
2557 return riscv_get_arg_info (&info
, cum
, mode
, type
, named
, false);
2560 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
2563 riscv_function_arg_advance (cumulative_args_t cum_v
, machine_mode mode
,
2564 const_tree type
, bool named
)
2566 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
2567 struct riscv_arg_info info
;
2569 riscv_get_arg_info (&info
, cum
, mode
, type
, named
, false);
2571 /* Advance the register count. This has the effect of setting
2572 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
2573 argument required us to skip the final GPR and pass the whole
2574 argument on the stack. */
2575 cum
->num_fprs
= info
.fpr_offset
+ info
.num_fprs
;
2576 cum
->num_gprs
= info
.gpr_offset
+ info
.num_gprs
;
2579 /* Implement TARGET_ARG_PARTIAL_BYTES. */
2582 riscv_arg_partial_bytes (cumulative_args_t cum
,
2583 machine_mode mode
, tree type
, bool named
)
2585 struct riscv_arg_info arg
;
2587 riscv_get_arg_info (&arg
, get_cumulative_args (cum
), mode
, type
, named
, false);
2588 return arg
.stack_p
? arg
.num_gprs
* UNITS_PER_WORD
: 0;
2591 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
2592 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
2593 VALTYPE is null and MODE is the mode of the return value. */
2596 riscv_function_value (const_tree type
, const_tree func
, machine_mode mode
)
2598 struct riscv_arg_info info
;
2599 CUMULATIVE_ARGS args
;
2603 int unsigned_p
= TYPE_UNSIGNED (type
);
2605 mode
= TYPE_MODE (type
);
2607 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes,
2608 return values, promote the mode here too. */
2609 mode
= promote_function_mode (type
, mode
, &unsigned_p
, func
, 1);
2612 memset (&args
, 0, sizeof args
);
2613 return riscv_get_arg_info (&info
, &args
, mode
, type
, true, true);
2616 /* Implement TARGET_PASS_BY_REFERENCE. */
2619 riscv_pass_by_reference (cumulative_args_t cum_v
, machine_mode mode
,
2620 const_tree type
, bool named
)
2622 HOST_WIDE_INT size
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
2623 struct riscv_arg_info info
;
2624 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
2626 /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
2627 never pass variadic arguments in floating-point registers, so we can
2628 avoid the call to riscv_get_arg_info in this case. */
2631 /* Don't pass by reference if we can use a floating-point register. */
2632 riscv_get_arg_info (&info
, cum
, mode
, type
, named
, false);
2637 /* Pass by reference if the data do not fit in two integer registers. */
2638 return !IN_RANGE (size
, 0, 2 * UNITS_PER_WORD
);
2641 /* Implement TARGET_RETURN_IN_MEMORY. */
2644 riscv_return_in_memory (const_tree type
, const_tree fndecl ATTRIBUTE_UNUSED
)
2646 CUMULATIVE_ARGS args
;
2647 cumulative_args_t cum
= pack_cumulative_args (&args
);
2649 /* The rules for returning in memory are the same as for passing the
2650 first named argument by reference. */
2651 memset (&args
, 0, sizeof args
);
2652 return riscv_pass_by_reference (cum
, TYPE_MODE (type
), type
, true);
2655 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
2658 riscv_setup_incoming_varargs (cumulative_args_t cum
, machine_mode mode
,
2659 tree type
, int *pretend_size ATTRIBUTE_UNUSED
,
2662 CUMULATIVE_ARGS local_cum
;
2665 /* The caller has advanced CUM up to, but not beyond, the last named
2666 argument. Advance a local copy of CUM past the last "real" named
2667 argument, to find out how many registers are left over. */
2668 local_cum
= *get_cumulative_args (cum
);
2669 riscv_function_arg_advance (pack_cumulative_args (&local_cum
), mode
, type
, 1);
2671 /* Found out how many registers we need to save. */
2672 gp_saved
= MAX_ARGS_IN_REGISTERS
- local_cum
.num_gprs
;
2674 if (!no_rtl
&& gp_saved
> 0)
2676 rtx ptr
= plus_constant (Pmode
, virtual_incoming_args_rtx
,
2677 REG_PARM_STACK_SPACE (cfun
->decl
)
2678 - gp_saved
* UNITS_PER_WORD
);
2679 rtx mem
= gen_frame_mem (BLKmode
, ptr
);
2680 set_mem_alias_set (mem
, get_varargs_alias_set ());
2682 move_block_from_reg (local_cum
.num_gprs
+ GP_ARG_FIRST
,
2685 if (REG_PARM_STACK_SPACE (cfun
->decl
) == 0)
2686 cfun
->machine
->varargs_size
= gp_saved
* UNITS_PER_WORD
;
2689 /* Handle an attribute requiring a FUNCTION_DECL;
2690 arguments as in struct attribute_spec.handler. */
2692 riscv_handle_fndecl_attribute (tree
*node
, tree name
,
2693 tree args ATTRIBUTE_UNUSED
,
2694 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
2696 if (TREE_CODE (*node
) != FUNCTION_DECL
)
2698 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
2700 *no_add_attrs
= true;
2706 /* Return true if func is a naked function. */
2708 riscv_naked_function_p (tree func
)
2710 tree func_decl
= func
;
2711 if (func
== NULL_TREE
)
2712 func_decl
= current_function_decl
;
2713 return NULL_TREE
!= lookup_attribute ("naked", DECL_ATTRIBUTES (func_decl
));
2716 /* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. */
2718 riscv_allocate_stack_slots_for_args ()
2720 /* Naked functions should not allocate stack slots for arguments. */
2721 return !riscv_naked_function_p (current_function_decl
);
2724 /* Implement TARGET_WARN_FUNC_RETURN. */
2726 riscv_warn_func_return (tree decl
)
2728 /* Naked functions are implemented entirely in assembly, including the
2729 return sequence, so suppress warnings about this. */
2730 return !riscv_naked_function_p (decl
);
2733 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
2736 riscv_va_start (tree valist
, rtx nextarg
)
2738 nextarg
= plus_constant (Pmode
, nextarg
, -cfun
->machine
->varargs_size
);
2739 std_expand_builtin_va_start (valist
, nextarg
);
2742 /* Make ADDR suitable for use as a call or sibcall target. */
2745 riscv_legitimize_call_address (rtx addr
)
2747 if (!call_insn_operand (addr
, VOIDmode
))
2749 rtx reg
= RISCV_PROLOGUE_TEMP (Pmode
);
2750 riscv_emit_move (reg
, addr
);
2756 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2757 Assume that the areas do not overlap. */
2760 riscv_block_move_straight (rtx dest
, rtx src
, HOST_WIDE_INT length
)
2762 HOST_WIDE_INT offset
, delta
;
2763 unsigned HOST_WIDE_INT bits
;
2765 enum machine_mode mode
;
2768 bits
= MAX (BITS_PER_UNIT
,
2769 MIN (BITS_PER_WORD
, MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
))));
2771 mode
= mode_for_size (bits
, MODE_INT
, 0).require ();
2772 delta
= bits
/ BITS_PER_UNIT
;
2774 /* Allocate a buffer for the temporary registers. */
2775 regs
= XALLOCAVEC (rtx
, length
/ delta
);
2777 /* Load as many BITS-sized chunks as possible. Use a normal load if
2778 the source has enough alignment, otherwise use left/right pairs. */
2779 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
2781 regs
[i
] = gen_reg_rtx (mode
);
2782 riscv_emit_move (regs
[i
], adjust_address (src
, mode
, offset
));
2785 /* Copy the chunks to the destination. */
2786 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
2787 riscv_emit_move (adjust_address (dest
, mode
, offset
), regs
[i
]);
2789 /* Mop up any left-over bytes. */
2790 if (offset
< length
)
2792 src
= adjust_address (src
, BLKmode
, offset
);
2793 dest
= adjust_address (dest
, BLKmode
, offset
);
2794 move_by_pieces (dest
, src
, length
- offset
,
2795 MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), 0);
2799 /* Helper function for doing a loop-based block operation on memory
2800 reference MEM. Each iteration of the loop will operate on LENGTH
2803 Create a new base register for use within the loop and point it to
2804 the start of MEM. Create a new memory reference that uses this
2805 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2808 riscv_adjust_block_mem (rtx mem
, HOST_WIDE_INT length
,
2809 rtx
*loop_reg
, rtx
*loop_mem
)
2811 *loop_reg
= copy_addr_to_reg (XEXP (mem
, 0));
2813 /* Although the new mem does not refer to a known location,
2814 it does keep up to LENGTH bytes of alignment. */
2815 *loop_mem
= change_address (mem
, BLKmode
, *loop_reg
);
2816 set_mem_align (*loop_mem
, MIN (MEM_ALIGN (mem
), length
* BITS_PER_UNIT
));
2819 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
2820 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
2821 the memory regions do not overlap. */
2824 riscv_block_move_loop (rtx dest
, rtx src
, HOST_WIDE_INT length
,
2825 HOST_WIDE_INT bytes_per_iter
)
2827 rtx label
, src_reg
, dest_reg
, final_src
, test
;
2828 HOST_WIDE_INT leftover
;
2830 leftover
= length
% bytes_per_iter
;
2833 /* Create registers and memory references for use within the loop. */
2834 riscv_adjust_block_mem (src
, bytes_per_iter
, &src_reg
, &src
);
2835 riscv_adjust_block_mem (dest
, bytes_per_iter
, &dest_reg
, &dest
);
2837 /* Calculate the value that SRC_REG should have after the last iteration
2839 final_src
= expand_simple_binop (Pmode
, PLUS
, src_reg
, GEN_INT (length
),
2842 /* Emit the start of the loop. */
2843 label
= gen_label_rtx ();
2846 /* Emit the loop body. */
2847 riscv_block_move_straight (dest
, src
, bytes_per_iter
);
2849 /* Move on to the next block. */
2850 riscv_emit_move (src_reg
, plus_constant (Pmode
, src_reg
, bytes_per_iter
));
2851 riscv_emit_move (dest_reg
, plus_constant (Pmode
, dest_reg
, bytes_per_iter
));
2853 /* Emit the loop condition. */
2854 test
= gen_rtx_NE (VOIDmode
, src_reg
, final_src
);
2855 if (Pmode
== DImode
)
2856 emit_jump_insn (gen_cbranchdi4 (test
, src_reg
, final_src
, label
));
2858 emit_jump_insn (gen_cbranchsi4 (test
, src_reg
, final_src
, label
));
2860 /* Mop up any left-over bytes. */
2862 riscv_block_move_straight (dest
, src
, leftover
);
2864 emit_insn(gen_nop ());
2867 /* Expand a movmemsi instruction, which copies LENGTH bytes from
2868 memory reference SRC to memory reference DEST. */
2871 riscv_expand_block_move (rtx dest
, rtx src
, rtx length
)
2873 if (CONST_INT_P (length
))
2875 HOST_WIDE_INT factor
, align
;
2877 align
= MIN (MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), BITS_PER_WORD
);
2878 factor
= BITS_PER_WORD
/ align
;
2880 if (optimize_function_for_size_p (cfun
)
2881 && INTVAL (length
) * factor
* UNITS_PER_WORD
> MOVE_RATIO (false))
2884 if (INTVAL (length
) <= RISCV_MAX_MOVE_BYTES_STRAIGHT
/ factor
)
2886 riscv_block_move_straight (dest
, src
, INTVAL (length
));
2889 else if (optimize
&& align
>= BITS_PER_WORD
)
2891 unsigned min_iter_words
2892 = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER
/ UNITS_PER_WORD
;
2893 unsigned iter_words
= min_iter_words
;
2894 HOST_WIDE_INT bytes
= INTVAL (length
), words
= bytes
/ UNITS_PER_WORD
;
2896 /* Lengthen the loop body if it shortens the tail. */
2897 for (unsigned i
= min_iter_words
; i
< min_iter_words
* 2 - 1; i
++)
2899 unsigned cur_cost
= iter_words
+ words
% iter_words
;
2900 unsigned new_cost
= i
+ words
% i
;
2901 if (new_cost
<= cur_cost
)
2905 riscv_block_move_loop (dest
, src
, bytes
, iter_words
* UNITS_PER_WORD
);
2912 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
2913 in context CONTEXT. HI_RELOC indicates a high-part reloc. */
2916 riscv_print_operand_reloc (FILE *file
, rtx op
, bool hi_reloc
)
2920 switch (riscv_classify_symbolic_expression (op
))
2922 case SYMBOL_ABSOLUTE
:
2923 reloc
= hi_reloc
? "%hi" : "%lo";
2927 reloc
= hi_reloc
? "%pcrel_hi" : "%pcrel_lo";
2931 reloc
= hi_reloc
? "%tprel_hi" : "%tprel_lo";
2938 fprintf (file
, "%s(", reloc
);
2939 output_addr_const (file
, riscv_strip_unspec_address (op
));
2943 /* Return true if the .AQ suffix should be added to an AMO to implement the
2944 acquire portion of memory model MODEL. */
2947 riscv_memmodel_needs_amo_acquire (enum memmodel model
)
2951 case MEMMODEL_ACQ_REL
:
2952 case MEMMODEL_SEQ_CST
:
2953 case MEMMODEL_SYNC_SEQ_CST
:
2954 case MEMMODEL_ACQUIRE
:
2955 case MEMMODEL_CONSUME
:
2956 case MEMMODEL_SYNC_ACQUIRE
:
2959 case MEMMODEL_RELEASE
:
2960 case MEMMODEL_SYNC_RELEASE
:
2961 case MEMMODEL_RELAXED
:
2969 /* Return true if a FENCE should be emitted to before a memory access to
2970 implement the release portion of memory model MODEL. */
2973 riscv_memmodel_needs_release_fence (enum memmodel model
)
2977 case MEMMODEL_ACQ_REL
:
2978 case MEMMODEL_SEQ_CST
:
2979 case MEMMODEL_SYNC_SEQ_CST
:
2980 case MEMMODEL_RELEASE
:
2981 case MEMMODEL_SYNC_RELEASE
:
2984 case MEMMODEL_ACQUIRE
:
2985 case MEMMODEL_CONSUME
:
2986 case MEMMODEL_SYNC_ACQUIRE
:
2987 case MEMMODEL_RELAXED
:
2995 /* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
2997 'h' Print the high-part relocation associated with OP, after stripping
2999 'R' Print the low-part relocation associated with OP.
3000 'C' Print the integer branch condition for comparison OP.
3001 'A' Print the atomic operation suffix for memory model OP.
3002 'F' Print a FENCE if the memory model requires a release.
3003 'z' Print x0 if OP is zero, otherwise print OP normally.
3004 'i' Print i if the operand is not a register. */
3007 riscv_print_operand (FILE *file
, rtx op
, int letter
)
3009 machine_mode mode
= GET_MODE (op
);
3010 enum rtx_code code
= GET_CODE (op
);
3017 riscv_print_operand_reloc (file
, op
, true);
3021 riscv_print_operand_reloc (file
, op
, false);
3025 /* The RTL names match the instruction names. */
3026 fputs (GET_RTX_NAME (code
), file
);
3030 if (riscv_memmodel_needs_amo_acquire ((enum memmodel
) INTVAL (op
)))
3031 fputs (".aq", file
);
3035 if (riscv_memmodel_needs_release_fence ((enum memmodel
) INTVAL (op
)))
3036 fputs ("fence iorw,ow; ", file
);
3048 if (letter
&& letter
!= 'z')
3049 output_operand_lossage ("invalid use of '%%%c'", letter
);
3050 fprintf (file
, "%s", reg_names
[REGNO (op
)]);
3054 if (letter
&& letter
!= 'z')
3055 output_operand_lossage ("invalid use of '%%%c'", letter
);
3057 output_address (mode
, XEXP (op
, 0));
3061 if (letter
== 'z' && op
== CONST0_RTX (GET_MODE (op
)))
3062 fputs (reg_names
[GP_REG_FIRST
], file
);
3063 else if (letter
&& letter
!= 'z')
3064 output_operand_lossage ("invalid use of '%%%c'", letter
);
3066 output_addr_const (file
, riscv_strip_unspec_address (op
));
3072 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
3075 riscv_print_operand_address (FILE *file
, machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
3077 struct riscv_address_info addr
;
3079 if (riscv_classify_address (&addr
, x
, word_mode
, true))
3083 riscv_print_operand (file
, addr
.offset
, 0);
3084 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
3087 case ADDRESS_LO_SUM
:
3088 riscv_print_operand_reloc (file
, addr
.offset
, false);
3089 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
3092 case ADDRESS_CONST_INT
:
3093 output_addr_const (file
, x
);
3094 fprintf (file
, "(%s)", reg_names
[GP_REG_FIRST
]);
3097 case ADDRESS_SYMBOLIC
:
3098 output_addr_const (file
, riscv_strip_unspec_address (x
));
3105 riscv_size_ok_for_small_data_p (int size
)
3107 return g_switch_value
&& IN_RANGE (size
, 1, g_switch_value
);
3110 /* Return true if EXP should be placed in the small data section. */
3113 riscv_in_small_data_p (const_tree x
)
3115 if (TREE_CODE (x
) == STRING_CST
|| TREE_CODE (x
) == FUNCTION_DECL
)
3118 if (TREE_CODE (x
) == VAR_DECL
&& DECL_SECTION_NAME (x
))
3120 const char *sec
= DECL_SECTION_NAME (x
);
3121 return strcmp (sec
, ".sdata") == 0 || strcmp (sec
, ".sbss") == 0;
3124 return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x
)));
3127 /* Switch to the appropriate section for output of DECL. */
3130 riscv_select_section (tree decl
, int reloc
,
3131 unsigned HOST_WIDE_INT align
)
3133 switch (categorize_decl_for_section (decl
, reloc
))
3135 case SECCAT_SRODATA
:
3136 return get_named_section (decl
, ".srodata", reloc
);
3139 return default_elf_select_section (decl
, reloc
, align
);
3143 /* Return a section for X, handling small data. */
3146 riscv_elf_select_rtx_section (machine_mode mode
, rtx x
,
3147 unsigned HOST_WIDE_INT align
)
3149 section
*s
= default_elf_select_rtx_section (mode
, x
, align
);
3151 if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode
)))
3153 if (strncmp (s
->named
.name
, ".rodata.cst", strlen (".rodata.cst")) == 0)
3155 /* Rename .rodata.cst* to .srodata.cst*. */
3156 char *name
= (char *) alloca (strlen (s
->named
.name
) + 2);
3157 sprintf (name
, ".s%s", s
->named
.name
+ 1);
3158 return get_section (name
, s
->named
.common
.flags
, NULL
);
3161 if (s
== data_section
)
3162 return sdata_section
;
3168 /* Make the last instruction frame-related and note that it performs
3169 the operation described by FRAME_PATTERN. */
3172 riscv_set_frame_expr (rtx frame_pattern
)
3176 insn
= get_last_insn ();
3177 RTX_FRAME_RELATED_P (insn
) = 1;
3178 REG_NOTES (insn
) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
3183 /* Return a frame-related rtx that stores REG at MEM.
3184 REG must be a single register. */
3187 riscv_frame_set (rtx mem
, rtx reg
)
3189 rtx set
= gen_rtx_SET (mem
, reg
);
3190 RTX_FRAME_RELATED_P (set
) = 1;
3194 /* Return true if the current function must save register REGNO. */
3197 riscv_save_reg_p (unsigned int regno
)
3199 bool call_saved
= !global_regs
[regno
] && !call_used_regs
[regno
];
3200 bool might_clobber
= crtl
->saves_all_registers
3201 || df_regs_ever_live_p (regno
);
3203 if (call_saved
&& might_clobber
)
3206 if (regno
== HARD_FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
3209 if (regno
== RETURN_ADDR_REGNUM
&& crtl
->calls_eh_return
)
3215 /* Determine whether to call GPR save/restore routines. */
3217 riscv_use_save_libcall (const struct riscv_frame_info
*frame
)
3219 if (!TARGET_SAVE_RESTORE
|| crtl
->calls_eh_return
|| frame_pointer_needed
)
3222 return frame
->save_libcall_adjustment
!= 0;
3225 /* Determine which GPR save/restore routine to call. */
3228 riscv_save_libcall_count (unsigned mask
)
3230 for (unsigned n
= GP_REG_LAST
; n
> GP_REG_FIRST
; n
--)
3231 if (BITSET_P (mask
, n
))
3232 return CALLEE_SAVED_REG_NUMBER (n
) + 1;
3236 /* Populate the current function's riscv_frame_info structure.
3238 RISC-V stack frames grown downward. High addresses are at the top.
3240 +-------------------------------+
3242 | incoming stack arguments |
3244 +-------------------------------+ <-- incoming stack pointer
3246 | callee-allocated save area |
3247 | for arguments that are |
3248 | split between registers and |
3251 +-------------------------------+ <-- arg_pointer_rtx
3253 | callee-allocated save area |
3254 | for register varargs |
3256 +-------------------------------+ <-- hard_frame_pointer_rtx;
3257 | | stack_pointer_rtx + gp_sp_offset
3258 | GPR save area | + UNITS_PER_WORD
3260 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
3261 | | + UNITS_PER_HWVALUE
3264 +-------------------------------+ <-- frame_pointer_rtx (virtual)
3268 P +-------------------------------+
3270 | outgoing stack arguments |
3272 +-------------------------------+ <-- stack_pointer_rtx
3274 Dynamic stack allocations such as alloca insert data at point P.
3275 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
3276 hard_frame_pointer_rtx unchanged. */
3279 riscv_compute_frame_info (void)
3281 struct riscv_frame_info
*frame
;
3282 HOST_WIDE_INT offset
;
3283 unsigned int regno
, i
, num_x_saved
= 0, num_f_saved
= 0;
3285 frame
= &cfun
->machine
->frame
;
3286 memset (frame
, 0, sizeof (*frame
));
3288 if (!cfun
->machine
->naked_p
)
3290 /* Find out which GPRs we need to save. */
3291 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
3292 if (riscv_save_reg_p (regno
))
3293 frame
->mask
|= 1 << (regno
- GP_REG_FIRST
), num_x_saved
++;
3295 /* If this function calls eh_return, we must also save and restore the
3296 EH data registers. */
3297 if (crtl
->calls_eh_return
)
3298 for (i
= 0; (regno
= EH_RETURN_DATA_REGNO (i
)) != INVALID_REGNUM
; i
++)
3299 frame
->mask
|= 1 << (regno
- GP_REG_FIRST
), num_x_saved
++;
3301 /* Find out which FPRs we need to save. This loop must iterate over
3302 the same space as its companion in riscv_for_each_saved_reg. */
3303 if (TARGET_HARD_FLOAT
)
3304 for (regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
3305 if (riscv_save_reg_p (regno
))
3306 frame
->fmask
|= 1 << (regno
- FP_REG_FIRST
), num_f_saved
++;
3309 /* At the bottom of the frame are any outgoing stack arguments. */
3310 offset
= crtl
->outgoing_args_size
;
3311 /* Next are local stack variables. */
3312 offset
+= RISCV_STACK_ALIGN (get_frame_size ());
3313 /* The virtual frame pointer points above the local variables. */
3314 frame
->frame_pointer_offset
= offset
;
3315 /* Next are the callee-saved FPRs. */
3317 offset
+= RISCV_STACK_ALIGN (num_f_saved
* UNITS_PER_FP_REG
);
3318 frame
->fp_sp_offset
= offset
- UNITS_PER_FP_REG
;
3319 /* Next are the callee-saved GPRs. */
3322 unsigned x_save_size
= RISCV_STACK_ALIGN (num_x_saved
* UNITS_PER_WORD
);
3323 unsigned num_save_restore
= 1 + riscv_save_libcall_count (frame
->mask
);
3325 /* Only use save/restore routines if they don't alter the stack size. */
3326 if (RISCV_STACK_ALIGN (num_save_restore
* UNITS_PER_WORD
) == x_save_size
)
3327 frame
->save_libcall_adjustment
= x_save_size
;
3329 offset
+= x_save_size
;
3331 frame
->gp_sp_offset
= offset
- UNITS_PER_WORD
;
3332 /* The hard frame pointer points above the callee-saved GPRs. */
3333 frame
->hard_frame_pointer_offset
= offset
;
3334 /* Above the hard frame pointer is the callee-allocated varags save area. */
3335 offset
+= RISCV_STACK_ALIGN (cfun
->machine
->varargs_size
);
3336 frame
->arg_pointer_offset
= offset
;
3337 /* Next is the callee-allocated area for pretend stack arguments. */
3338 offset
+= crtl
->args
.pretend_args_size
;
3339 frame
->total_size
= offset
;
3340 /* Next points the incoming stack pointer and any incoming arguments. */
3342 /* Only use save/restore routines when the GPRs are atop the frame. */
3343 if (frame
->hard_frame_pointer_offset
!= frame
->total_size
)
3344 frame
->save_libcall_adjustment
= 0;
3347 /* Make sure that we're not trying to eliminate to the wrong hard frame
3351 riscv_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
3353 return (to
== HARD_FRAME_POINTER_REGNUM
|| to
== STACK_POINTER_REGNUM
);
3356 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
3357 or argument pointer. TO is either the stack pointer or hard frame
3361 riscv_initial_elimination_offset (int from
, int to
)
3363 HOST_WIDE_INT src
, dest
;
3365 riscv_compute_frame_info ();
3367 if (to
== HARD_FRAME_POINTER_REGNUM
)
3368 dest
= cfun
->machine
->frame
.hard_frame_pointer_offset
;
3369 else if (to
== STACK_POINTER_REGNUM
)
3370 dest
= 0; /* The stack pointer is the base of all offsets, hence 0. */
3374 if (from
== FRAME_POINTER_REGNUM
)
3375 src
= cfun
->machine
->frame
.frame_pointer_offset
;
3376 else if (from
== ARG_POINTER_REGNUM
)
3377 src
= cfun
->machine
->frame
.arg_pointer_offset
;
3384 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
3388 riscv_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
3393 return get_hard_reg_initial_val (Pmode
, RETURN_ADDR_REGNUM
);
3396 /* Emit code to change the current function's return address to
3397 ADDRESS. SCRATCH is available as a scratch register, if needed.
3398 ADDRESS and SCRATCH are both word-mode GPRs. */
3401 riscv_set_return_address (rtx address
, rtx scratch
)
3405 gcc_assert (BITSET_P (cfun
->machine
->frame
.mask
, RETURN_ADDR_REGNUM
));
3406 slot_address
= riscv_add_offset (scratch
, stack_pointer_rtx
,
3407 cfun
->machine
->frame
.gp_sp_offset
);
3408 riscv_emit_move (gen_frame_mem (GET_MODE (address
), slot_address
), address
);
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*riscv_save_restore_fn) (rtx, rtx);
3415 /* Use FN to save or restore register REGNO. MODE is the register's
3416 mode and OFFSET is the offset of its save slot from the current
3420 riscv_save_restore_reg (machine_mode mode
, int regno
,
3421 HOST_WIDE_INT offset
, riscv_save_restore_fn fn
)
3425 mem
= gen_frame_mem (mode
, plus_constant (Pmode
, stack_pointer_rtx
, offset
));
3426 fn (gen_rtx_REG (mode
, regno
), mem
);
3429 /* Call FN for each register that is saved by the current function.
3430 SP_OFFSET is the offset of the current stack pointer from the start
3434 riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset
, riscv_save_restore_fn fn
)
3436 HOST_WIDE_INT offset
;
3438 /* Save the link register and s-registers. */
3439 offset
= cfun
->machine
->frame
.gp_sp_offset
- sp_offset
;
3440 for (int regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
3441 if (BITSET_P (cfun
->machine
->frame
.mask
, regno
- GP_REG_FIRST
))
3443 riscv_save_restore_reg (word_mode
, regno
, offset
, fn
);
3444 offset
-= UNITS_PER_WORD
;
3447 /* This loop must iterate over the same space as its companion in
3448 riscv_compute_frame_info. */
3449 offset
= cfun
->machine
->frame
.fp_sp_offset
- sp_offset
;
3450 for (int regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
3451 if (BITSET_P (cfun
->machine
->frame
.fmask
, regno
- FP_REG_FIRST
))
3453 machine_mode mode
= TARGET_DOUBLE_FLOAT
? DFmode
: SFmode
;
3455 riscv_save_restore_reg (mode
, regno
, offset
, fn
);
3456 offset
-= GET_MODE_SIZE (mode
);
3460 /* Save register REG to MEM. Make the instruction frame-related. */
3463 riscv_save_reg (rtx reg
, rtx mem
)
3465 riscv_emit_move (mem
, reg
);
3466 riscv_set_frame_expr (riscv_frame_set (mem
, reg
));
3469 /* Restore register REG from MEM. */
3472 riscv_restore_reg (rtx reg
, rtx mem
)
3474 rtx insn
= riscv_emit_move (reg
, mem
);
3475 rtx dwarf
= NULL_RTX
;
3476 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
3477 REG_NOTES (insn
) = dwarf
;
3479 RTX_FRAME_RELATED_P (insn
) = 1;
3482 /* Return the code to invoke the GPR save routine. */
3485 riscv_output_gpr_save (unsigned mask
)
3488 unsigned n
= riscv_save_libcall_count (mask
);
3490 ssize_t bytes
= snprintf (s
, sizeof (s
), "call\tt0,__riscv_save_%u", n
);
3491 gcc_assert ((size_t) bytes
< sizeof (s
));
3496 /* For stack frames that can't be allocated with a single ADDI instruction,
3497 compute the best value to initially allocate. It must at a minimum
3498 allocate enough space to spill the callee-saved registers. */
3500 static HOST_WIDE_INT
3501 riscv_first_stack_step (struct riscv_frame_info
*frame
)
3503 HOST_WIDE_INT min_first_step
= frame
->total_size
- frame
->fp_sp_offset
;
3504 HOST_WIDE_INT max_first_step
= IMM_REACH
/ 2 - STACK_BOUNDARY
/ 8;
3506 if (SMALL_OPERAND (frame
->total_size
))
3507 return frame
->total_size
;
3509 /* As an optimization, use the least-significant bits of the total frame
3510 size, so that the second adjustment step is just LUI + ADD. */
3511 if (!SMALL_OPERAND (frame
->total_size
- max_first_step
)
3512 && frame
->total_size
% IMM_REACH
< IMM_REACH
/ 2
3513 && frame
->total_size
% IMM_REACH
>= min_first_step
)
3514 return frame
->total_size
% IMM_REACH
;
3516 gcc_assert (min_first_step
<= max_first_step
);
3517 return max_first_step
;
3521 riscv_adjust_libcall_cfi_prologue ()
3523 rtx dwarf
= NULL_RTX
;
3524 rtx adjust_sp_rtx
, reg
, mem
, insn
;
3525 int saved_size
= cfun
->machine
->frame
.save_libcall_adjustment
;
3528 for (int regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
3529 if (BITSET_P (cfun
->machine
->frame
.mask
, regno
- GP_REG_FIRST
))
3531 /* The save order is ra, s0, s1, s2 to s11. */
3532 if (regno
== RETURN_ADDR_REGNUM
)
3533 offset
= saved_size
- UNITS_PER_WORD
;
3534 else if (regno
== S0_REGNUM
)
3535 offset
= saved_size
- UNITS_PER_WORD
* 2;
3536 else if (regno
== S1_REGNUM
)
3537 offset
= saved_size
- UNITS_PER_WORD
* 3;
3539 offset
= saved_size
- ((regno
- S2_REGNUM
+ 4) * UNITS_PER_WORD
);
3541 reg
= gen_rtx_REG (SImode
, regno
);
3542 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
3546 insn
= gen_rtx_SET (mem
, reg
);
3547 dwarf
= alloc_reg_note (REG_CFA_OFFSET
, insn
, dwarf
);
3550 /* Debug info for adjust sp. */
3551 adjust_sp_rtx
= gen_add3_insn (stack_pointer_rtx
,
3552 stack_pointer_rtx
, GEN_INT (-saved_size
));
3553 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, adjust_sp_rtx
,
3559 riscv_emit_stack_tie (void)
3561 if (Pmode
== SImode
)
3562 emit_insn (gen_stack_tiesi (stack_pointer_rtx
, hard_frame_pointer_rtx
));
3564 emit_insn (gen_stack_tiedi (stack_pointer_rtx
, hard_frame_pointer_rtx
));
3567 /* Expand the "prologue" pattern. */
3570 riscv_expand_prologue (void)
3572 struct riscv_frame_info
*frame
= &cfun
->machine
->frame
;
3573 HOST_WIDE_INT size
= frame
->total_size
;
3574 unsigned mask
= frame
->mask
;
3577 if (cfun
->machine
->naked_p
)
3579 if (flag_stack_usage_info
)
3580 current_function_static_stack_size
= 0;
3585 if (flag_stack_usage_info
)
3586 current_function_static_stack_size
= size
;
3588 /* When optimizing for size, call a subroutine to save the registers. */
3589 if (riscv_use_save_libcall (frame
))
3591 rtx dwarf
= NULL_RTX
;
3592 dwarf
= riscv_adjust_libcall_cfi_prologue ();
3594 frame
->mask
= 0; /* Temporarily fib that we need not save GPRs. */
3595 size
-= frame
->save_libcall_adjustment
;
3596 insn
= emit_insn (gen_gpr_save (GEN_INT (mask
)));
3598 RTX_FRAME_RELATED_P (insn
) = 1;
3599 REG_NOTES (insn
) = dwarf
;
3602 /* Save the registers. */
3603 if ((frame
->mask
| frame
->fmask
) != 0)
3605 HOST_WIDE_INT step1
= MIN (size
, riscv_first_stack_step (frame
));
3607 insn
= gen_add3_insn (stack_pointer_rtx
,
3610 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
3612 riscv_for_each_saved_reg (size
, riscv_save_reg
);
3615 frame
->mask
= mask
; /* Undo the above fib. */
3617 /* Set up the frame pointer, if we're using one. */
3618 if (frame_pointer_needed
)
3620 insn
= gen_add3_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
,
3621 GEN_INT (frame
->hard_frame_pointer_offset
- size
));
3622 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
3624 riscv_emit_stack_tie ();
3627 /* Allocate the rest of the frame. */
3630 if (SMALL_OPERAND (-size
))
3632 insn
= gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
,
3634 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
3638 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode
), GEN_INT (-size
));
3639 emit_insn (gen_add3_insn (stack_pointer_rtx
,
3641 RISCV_PROLOGUE_TEMP (Pmode
)));
3643 /* Describe the effect of the previous instructions. */
3644 insn
= plus_constant (Pmode
, stack_pointer_rtx
, -size
);
3645 insn
= gen_rtx_SET (stack_pointer_rtx
, insn
);
3646 riscv_set_frame_expr (insn
);
3652 riscv_adjust_libcall_cfi_epilogue ()
3654 rtx dwarf
= NULL_RTX
;
3655 rtx adjust_sp_rtx
, reg
;
3656 int saved_size
= cfun
->machine
->frame
.save_libcall_adjustment
;
3658 /* Debug info for adjust sp. */
3659 adjust_sp_rtx
= gen_add3_insn (stack_pointer_rtx
,
3660 stack_pointer_rtx
, GEN_INT (saved_size
));
3661 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, adjust_sp_rtx
,
3664 for (int regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
3665 if (BITSET_P (cfun
->machine
->frame
.mask
, regno
- GP_REG_FIRST
))
3667 reg
= gen_rtx_REG (SImode
, regno
);
3668 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
3674 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
3678 riscv_expand_epilogue (bool sibcall_p
)
3680 /* Split the frame into two. STEP1 is the amount of stack we should
3681 deallocate before restoring the registers. STEP2 is the amount we
3682 should deallocate afterwards.
3684 Start off by assuming that no registers need to be restored. */
3685 struct riscv_frame_info
*frame
= &cfun
->machine
->frame
;
3686 unsigned mask
= frame
->mask
;
3687 HOST_WIDE_INT step1
= frame
->total_size
;
3688 HOST_WIDE_INT step2
= 0;
3689 bool use_restore_libcall
= !sibcall_p
&& riscv_use_save_libcall (frame
);
3690 rtx ra
= gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
);
3693 /* We need to add memory barrier to prevent read from deallocated stack. */
3694 bool need_barrier_p
= (get_frame_size ()
3695 + cfun
->machine
->frame
.arg_pointer_offset
) != 0;
3697 if (cfun
->machine
->naked_p
)
3699 gcc_assert (!sibcall_p
);
3701 emit_jump_insn (gen_return ());
3706 if (!sibcall_p
&& riscv_can_use_return_insn ())
3708 emit_jump_insn (gen_return ());
3712 /* Move past any dynamic stack allocations. */
3713 if (cfun
->calls_alloca
)
3715 /* Emit a barrier to prevent loads from a deallocated stack. */
3716 riscv_emit_stack_tie ();
3717 need_barrier_p
= false;
3719 rtx adjust
= GEN_INT (-frame
->hard_frame_pointer_offset
);
3720 if (!SMALL_OPERAND (INTVAL (adjust
)))
3722 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode
), adjust
);
3723 adjust
= RISCV_PROLOGUE_TEMP (Pmode
);
3727 gen_add3_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
,
3730 rtx dwarf
= NULL_RTX
;
3731 rtx cfa_adjust_value
= gen_rtx_PLUS (
3732 Pmode
, hard_frame_pointer_rtx
,
3733 GEN_INT (-frame
->hard_frame_pointer_offset
));
3734 rtx cfa_adjust_rtx
= gen_rtx_SET (stack_pointer_rtx
, cfa_adjust_value
);
3735 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, cfa_adjust_rtx
, dwarf
);
3736 RTX_FRAME_RELATED_P (insn
) = 1;
3738 REG_NOTES (insn
) = dwarf
;
3741 /* If we need to restore registers, deallocate as much stack as
3742 possible in the second step without going out of range. */
3743 if ((frame
->mask
| frame
->fmask
) != 0)
3745 step2
= riscv_first_stack_step (frame
);
3749 /* Set TARGET to BASE + STEP1. */
3752 /* Emit a barrier to prevent loads from a deallocated stack. */
3753 riscv_emit_stack_tie ();
3754 need_barrier_p
= false;
3756 /* Get an rtx for STEP1 that we can add to BASE. */
3757 rtx adjust
= GEN_INT (step1
);
3758 if (!SMALL_OPERAND (step1
))
3760 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode
), adjust
);
3761 adjust
= RISCV_PROLOGUE_TEMP (Pmode
);
3765 gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
, adjust
));
3767 rtx dwarf
= NULL_RTX
;
3768 rtx cfa_adjust_rtx
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
3771 dwarf
= alloc_reg_note (REG_CFA_DEF_CFA
, cfa_adjust_rtx
, dwarf
);
3772 RTX_FRAME_RELATED_P (insn
) = 1;
3774 REG_NOTES (insn
) = dwarf
;
3777 if (use_restore_libcall
)
3778 frame
->mask
= 0; /* Temporarily fib that we need not save GPRs. */
3780 /* Restore the registers. */
3781 riscv_for_each_saved_reg (frame
->total_size
- step2
, riscv_restore_reg
);
3783 if (use_restore_libcall
)
3785 frame
->mask
= mask
; /* Undo the above fib. */
3786 gcc_assert (step2
>= frame
->save_libcall_adjustment
);
3787 step2
-= frame
->save_libcall_adjustment
;
3791 riscv_emit_stack_tie ();
3793 /* Deallocate the final bit of the frame. */
3796 insn
= emit_insn (gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
,
3799 rtx dwarf
= NULL_RTX
;
3800 rtx cfa_adjust_rtx
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
3802 dwarf
= alloc_reg_note (REG_CFA_DEF_CFA
, cfa_adjust_rtx
, dwarf
);
3803 RTX_FRAME_RELATED_P (insn
) = 1;
3805 REG_NOTES (insn
) = dwarf
;
3808 if (use_restore_libcall
)
3810 rtx dwarf
= riscv_adjust_libcall_cfi_epilogue ();
3811 insn
= emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask
))));
3812 RTX_FRAME_RELATED_P (insn
) = 1;
3813 REG_NOTES (insn
) = dwarf
;
3815 emit_jump_insn (gen_gpr_restore_return (ra
));
3819 /* Add in the __builtin_eh_return stack adjustment. */
3820 if (crtl
->calls_eh_return
)
3821 emit_insn (gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
,
3822 EH_RETURN_STACKADJ_RTX
));
3825 emit_jump_insn (gen_simple_return_internal (ra
));
3828 /* Return nonzero if this function is known to have a null epilogue.
3829 This allows the optimizer to omit jumps to jumps if no stack
3833 riscv_can_use_return_insn (void)
3835 return reload_completed
&& cfun
->machine
->frame
.total_size
== 0;
3838 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
3840 When floating-point registers are wider than integer ones, moves between
3841 them must go through memory. */
3844 riscv_secondary_memory_needed (machine_mode mode
, reg_class_t class1
,
3847 return (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3848 && (class1
== FP_REGS
) != (class2
== FP_REGS
));
3851 /* Implement TARGET_REGISTER_MOVE_COST. */
3854 riscv_register_move_cost (machine_mode mode
,
3855 reg_class_t from
, reg_class_t to
)
3857 return riscv_secondary_memory_needed (mode
, from
, to
) ? 8 : 2;
3860 /* Implement TARGET_HARD_REGNO_NREGS. */
3863 riscv_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
3865 if (FP_REG_P (regno
))
3866 return (GET_MODE_SIZE (mode
) + UNITS_PER_FP_REG
- 1) / UNITS_PER_FP_REG
;
3868 /* All other registers are word-sized. */
3869 return (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
3872 /* Implement TARGET_HARD_REGNO_MODE_OK. */
3875 riscv_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
3877 unsigned int nregs
= riscv_hard_regno_nregs (regno
, mode
);
3879 if (GP_REG_P (regno
))
3881 if (!GP_REG_P (regno
+ nregs
- 1))
3884 else if (FP_REG_P (regno
))
3886 if (!FP_REG_P (regno
+ nregs
- 1))
3889 if (GET_MODE_CLASS (mode
) != MODE_FLOAT
3890 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
)
3893 /* Only use callee-saved registers if a potential callee is guaranteed
3894 to spill the requisite width. */
3895 if (GET_MODE_UNIT_SIZE (mode
) > UNITS_PER_FP_REG
3896 || (!call_used_regs
[regno
]
3897 && GET_MODE_UNIT_SIZE (mode
) > UNITS_PER_FP_ARG
))
3903 /* Require same callee-savedness for all registers. */
3904 for (unsigned i
= 1; i
< nregs
; i
++)
3905 if (call_used_regs
[regno
] != call_used_regs
[regno
+ i
])
3911 /* Implement TARGET_MODES_TIEABLE_P.
3913 Don't allow floating-point modes to be tied, since type punning of
3914 single-precision and double-precision is implementation defined. */
3917 riscv_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
3919 return (mode1
== mode2
3920 || !(GET_MODE_CLASS (mode1
) == MODE_FLOAT
3921 && GET_MODE_CLASS (mode2
) == MODE_FLOAT
));
3924 /* Implement CLASS_MAX_NREGS. */
3926 static unsigned char
3927 riscv_class_max_nregs (reg_class_t rclass
, machine_mode mode
)
3929 if (reg_class_subset_p (FP_REGS
, rclass
))
3930 return riscv_hard_regno_nregs (FP_REG_FIRST
, mode
);
3932 if (reg_class_subset_p (GR_REGS
, rclass
))
3933 return riscv_hard_regno_nregs (GP_REG_FIRST
, mode
);
3938 /* Implement TARGET_MEMORY_MOVE_COST. */
3941 riscv_memory_move_cost (machine_mode mode
, reg_class_t rclass
, bool in
)
3943 return (tune_info
->memory_cost
3944 + memory_move_secondary_cost (mode
, rclass
, in
));
3947 /* Return the number of instructions that can be issued per cycle. */
3950 riscv_issue_rate (void)
3952 return tune_info
->issue_rate
;
3955 /* Implement TARGET_ASM_FILE_START. */
3958 riscv_file_start (void)
3960 default_file_start ();
3962 /* Instruct GAS to generate position-[in]dependent code. */
3963 fprintf (asm_out_file
, "\t.option %spic\n", (flag_pic
? "" : "no"));
3966 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
3967 in order to avoid duplicating too much logic from elsewhere. */
3970 riscv_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
3971 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
3974 rtx this_rtx
, temp1
, temp2
, fnaddr
;
3977 /* Pretend to be a post-reload pass while generating rtl. */
3978 reload_completed
= 1;
3980 /* Mark the end of the (empty) prologue. */
3981 emit_note (NOTE_INSN_PROLOGUE_END
);
3983 /* Determine if we can use a sibcall to call FUNCTION directly. */
3984 fnaddr
= gen_rtx_MEM (FUNCTION_MODE
, XEXP (DECL_RTL (function
), 0));
3986 /* We need two temporary registers in some cases. */
3987 temp1
= gen_rtx_REG (Pmode
, RISCV_PROLOGUE_TEMP_REGNUM
);
3988 temp2
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
3990 /* Find out which register contains the "this" pointer. */
3991 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
3992 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
3994 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
3996 /* Add DELTA to THIS_RTX. */
3999 rtx offset
= GEN_INT (delta
);
4000 if (!SMALL_OPERAND (delta
))
4002 riscv_emit_move (temp1
, offset
);
4005 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, offset
));
4008 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
4009 if (vcall_offset
!= 0)
4013 /* Set TEMP1 to *THIS_RTX. */
4014 riscv_emit_move (temp1
, gen_rtx_MEM (Pmode
, this_rtx
));
4016 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
4017 addr
= riscv_add_offset (temp2
, temp1
, vcall_offset
);
4019 /* Load the offset and add it to THIS_RTX. */
4020 riscv_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
4021 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, temp1
));
4024 /* Jump to the target function. */
4025 insn
= emit_call_insn (gen_sibcall (fnaddr
, const0_rtx
, NULL
, const0_rtx
));
4026 SIBLING_CALL_P (insn
) = 1;
4028 /* Run just enough of rest_of_compilation. This sequence was
4029 "borrowed" from alpha.c. */
4030 insn
= get_insns ();
4031 split_all_insns_noflow ();
4032 shorten_branches (insn
);
4033 final_start_function (insn
, file
, 1);
4034 final (insn
, file
, 1);
4035 final_end_function ();
4037 /* Clean up the vars set above. Note that final_end_function resets
4038 the global pointer for us. */
4039 reload_completed
= 0;
4042 /* Allocate a chunk of memory for per-function machine-dependent data. */
4044 static struct machine_function
*
4045 riscv_init_machine_status (void)
4047 return ggc_cleared_alloc
<machine_function
> ();
4050 /* Implement TARGET_OPTION_OVERRIDE. */
4053 riscv_option_override (void)
4055 const struct riscv_cpu_info
*cpu
;
4057 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4058 SUBTARGET_OVERRIDE_OPTIONS
;
4061 flag_pcc_struct_return
= 0;
4066 /* The presence of the M extension implies that division instructions
4067 are present, so include them unless explicitly disabled. */
4068 if (TARGET_MUL
&& (target_flags_explicit
& MASK_DIV
) == 0)
4069 target_flags
|= MASK_DIV
;
4070 else if (!TARGET_MUL
&& TARGET_DIV
)
4071 error ("-mdiv requires -march to subsume the %<M%> extension");
4073 /* Likewise floating-point division and square root. */
4074 if (TARGET_HARD_FLOAT
&& (target_flags_explicit
& MASK_FDIV
) == 0)
4075 target_flags
|= MASK_FDIV
;
4077 /* Handle -mtune. */
4078 cpu
= riscv_parse_cpu (riscv_tune_string
? riscv_tune_string
:
4079 RISCV_TUNE_STRING_DEFAULT
);
4080 tune_info
= optimize_size
? &optimize_size_tune_info
: cpu
->tune_info
;
4082 /* Use -mtune's setting for slow_unaligned_access, even when optimizing
4083 for size. For architectures that trap and emulate unaligned accesses,
4084 the performance cost is too great, even for -Os. Similarly, if
4085 -m[no-]strict-align is left unspecified, heed -mtune's advice. */
4086 riscv_slow_unaligned_access_p
= (cpu
->tune_info
->slow_unaligned_access
4087 || TARGET_STRICT_ALIGN
);
4088 if ((target_flags_explicit
& MASK_STRICT_ALIGN
) == 0
4089 && cpu
->tune_info
->slow_unaligned_access
)
4090 target_flags
|= MASK_STRICT_ALIGN
;
4092 /* If the user hasn't specified a branch cost, use the processor's
4094 if (riscv_branch_cost
== 0)
4095 riscv_branch_cost
= tune_info
->branch_cost
;
4097 /* Function to allocate machine-dependent function status. */
4098 init_machine_status
= &riscv_init_machine_status
;
4101 riscv_cmodel
= CM_PIC
;
4103 /* We get better code with explicit relocs for CM_MEDLOW, but
4104 worse code for the others (for now). Pick the best default. */
4105 if ((target_flags_explicit
& MASK_EXPLICIT_RELOCS
) == 0)
4106 if (riscv_cmodel
== CM_MEDLOW
)
4107 target_flags
|= MASK_EXPLICIT_RELOCS
;
4109 /* Require that the ISA supports the requested floating-point ABI. */
4110 if (UNITS_PER_FP_ARG
> (TARGET_HARD_FLOAT
? UNITS_PER_FP_REG
: 0))
4111 error ("requested ABI requires -march to subsume the %qc extension",
4112 UNITS_PER_FP_ARG
> 8 ? 'Q' : (UNITS_PER_FP_ARG
> 4 ? 'D' : 'F'));
4114 /* We do not yet support ILP32 on RV64. */
4115 if (BITS_PER_WORD
!= POINTER_SIZE
)
4116 error ("ABI requires -march=rv%d", POINTER_SIZE
);
4118 /* Validate -mpreferred-stack-boundary= value. */
4119 riscv_stack_boundary
= ABI_STACK_BOUNDARY
;
4120 if (riscv_preferred_stack_boundary_arg
)
4122 int min
= ctz_hwi (MIN_STACK_BOUNDARY
/ 8);
4125 if (!IN_RANGE (riscv_preferred_stack_boundary_arg
, min
, max
))
4126 error ("-mpreferred-stack-boundary=%d must be between %d and %d",
4127 riscv_preferred_stack_boundary_arg
, min
, max
);
4129 riscv_stack_boundary
= 8 << riscv_preferred_stack_boundary_arg
;
4133 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
4136 riscv_conditional_register_usage (void)
4138 if (!TARGET_HARD_FLOAT
)
4140 for (int regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
4141 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
4144 /* In the soft-float ABI, there are no callee-saved FP registers. */
4145 if (UNITS_PER_FP_ARG
== 0)
4147 for (int regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
4148 call_used_regs
[regno
] = 1;
4152 /* Return a register priority for hard reg REGNO. */
4155 riscv_register_priority (int regno
)
4157 /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
4158 if (TARGET_RVC
&& (IN_RANGE (regno
, GP_REG_FIRST
+ 8, GP_REG_FIRST
+ 15)
4159 || IN_RANGE (regno
, FP_REG_FIRST
+ 8, FP_REG_FIRST
+ 15)))
4165 /* Implement TARGET_TRAMPOLINE_INIT. */
4168 riscv_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
4170 rtx addr
, end_addr
, mem
;
4171 uint32_t trampoline
[4];
4173 HOST_WIDE_INT static_chain_offset
, target_function_offset
;
4175 /* Work out the offsets of the pointers from the start of the
4177 gcc_assert (ARRAY_SIZE (trampoline
) * 4 == TRAMPOLINE_CODE_SIZE
);
4179 /* Get pointers to the beginning and end of the code block. */
4180 addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
4181 end_addr
= riscv_force_binary (Pmode
, PLUS
, addr
,
4182 GEN_INT (TRAMPOLINE_CODE_SIZE
));
4185 if (Pmode
== SImode
)
4187 chain_value
= force_reg (Pmode
, chain_value
);
4189 rtx target_function
= force_reg (Pmode
, XEXP (DECL_RTL (fndecl
), 0));
4190 /* lui t2, hi(chain)
4192 addi t2, t2, lo(chain)
4195 unsigned HOST_WIDE_INT lui_hi_chain_code
, lui_hi_func_code
;
4196 unsigned HOST_WIDE_INT lo_chain_code
, lo_func_code
;
4198 rtx uimm_mask
= force_reg (SImode
, gen_int_mode (-IMM_REACH
, SImode
));
4201 rtx imm12_mask
= gen_reg_rtx (SImode
);
4202 emit_insn (gen_one_cmplsi2 (imm12_mask
, uimm_mask
));
4204 rtx fixup_value
= force_reg (SImode
, gen_int_mode (IMM_REACH
/2, SImode
));
4206 /* Gen lui t2, hi(chain). */
4207 rtx hi_chain
= riscv_force_binary (SImode
, PLUS
, chain_value
,
4209 hi_chain
= riscv_force_binary (SImode
, AND
, hi_chain
,
4211 lui_hi_chain_code
= OPCODE_LUI
| (STATIC_CHAIN_REGNUM
<< SHIFT_RD
);
4212 rtx lui_hi_chain
= riscv_force_binary (SImode
, IOR
, hi_chain
,
4213 gen_int_mode (lui_hi_chain_code
, SImode
));
4215 mem
= adjust_address (m_tramp
, SImode
, 0);
4216 riscv_emit_move (mem
, lui_hi_chain
);
4218 /* Gen lui t1, hi(func). */
4219 rtx hi_func
= riscv_force_binary (SImode
, PLUS
, target_function
,
4221 hi_func
= riscv_force_binary (SImode
, AND
, hi_func
,
4223 lui_hi_func_code
= OPCODE_LUI
| (RISCV_PROLOGUE_TEMP_REGNUM
<< SHIFT_RD
);
4224 rtx lui_hi_func
= riscv_force_binary (SImode
, IOR
, hi_func
,
4225 gen_int_mode (lui_hi_func_code
, SImode
));
4227 mem
= adjust_address (m_tramp
, SImode
, 1 * GET_MODE_SIZE (SImode
));
4228 riscv_emit_move (mem
, lui_hi_func
);
4230 /* Gen addi t2, t2, lo(chain). */
4231 rtx lo_chain
= riscv_force_binary (SImode
, AND
, chain_value
,
4233 lo_chain
= riscv_force_binary (SImode
, ASHIFT
, lo_chain
, GEN_INT (20));
4235 lo_chain_code
= OPCODE_ADDI
4236 | (STATIC_CHAIN_REGNUM
<< SHIFT_RD
)
4237 | (STATIC_CHAIN_REGNUM
<< SHIFT_RS1
);
4239 rtx addi_lo_chain
= riscv_force_binary (SImode
, IOR
, lo_chain
,
4240 force_reg (SImode
, GEN_INT (lo_chain_code
)));
4242 mem
= adjust_address (m_tramp
, SImode
, 2 * GET_MODE_SIZE (SImode
));
4243 riscv_emit_move (mem
, addi_lo_chain
);
4245 /* Gen jr r1, lo(func). */
4246 rtx lo_func
= riscv_force_binary (SImode
, AND
, target_function
,
4248 lo_func
= riscv_force_binary (SImode
, ASHIFT
, lo_func
, GEN_INT (20));
4250 lo_func_code
= OPCODE_JALR
| (RISCV_PROLOGUE_TEMP_REGNUM
<< SHIFT_RS1
);
4252 rtx jr_lo_func
= riscv_force_binary (SImode
, IOR
, lo_func
,
4253 force_reg (SImode
, GEN_INT (lo_func_code
)));
4255 mem
= adjust_address (m_tramp
, SImode
, 3 * GET_MODE_SIZE (SImode
));
4256 riscv_emit_move (mem
, jr_lo_func
);
4260 static_chain_offset
= TRAMPOLINE_CODE_SIZE
;
4261 target_function_offset
= static_chain_offset
+ GET_MODE_SIZE (ptr_mode
);
4264 l[wd] t1, target_function_offset(t2)
4265 l[wd] t2, static_chain_offset(t2)
4268 trampoline
[0] = OPCODE_AUIPC
| (STATIC_CHAIN_REGNUM
<< SHIFT_RD
);
4269 trampoline
[1] = (Pmode
== DImode
? OPCODE_LD
: OPCODE_LW
)
4270 | (RISCV_PROLOGUE_TEMP_REGNUM
<< SHIFT_RD
)
4271 | (STATIC_CHAIN_REGNUM
<< SHIFT_RS1
)
4272 | (target_function_offset
<< SHIFT_IMM
);
4273 trampoline
[2] = (Pmode
== DImode
? OPCODE_LD
: OPCODE_LW
)
4274 | (STATIC_CHAIN_REGNUM
<< SHIFT_RD
)
4275 | (STATIC_CHAIN_REGNUM
<< SHIFT_RS1
)
4276 | (static_chain_offset
<< SHIFT_IMM
);
4277 trampoline
[3] = OPCODE_JALR
| (RISCV_PROLOGUE_TEMP_REGNUM
<< SHIFT_RS1
);
4279 /* Copy the trampoline code. */
4280 for (i
= 0; i
< ARRAY_SIZE (trampoline
); i
++)
4282 mem
= adjust_address (m_tramp
, SImode
, i
* GET_MODE_SIZE (SImode
));
4283 riscv_emit_move (mem
, gen_int_mode (trampoline
[i
], SImode
));
4286 /* Set up the static chain pointer field. */
4287 mem
= adjust_address (m_tramp
, ptr_mode
, static_chain_offset
);
4288 riscv_emit_move (mem
, chain_value
);
4290 /* Set up the target function field. */
4291 mem
= adjust_address (m_tramp
, ptr_mode
, target_function_offset
);
4292 riscv_emit_move (mem
, XEXP (DECL_RTL (fndecl
), 0));
4295 /* Flush the code part of the trampoline. */
4296 emit_insn (gen_add3_insn (end_addr
, addr
, GEN_INT (TRAMPOLINE_SIZE
)));
4297 emit_insn (gen_clear_cache (addr
, end_addr
));
4300 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4303 riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED
,
4304 tree exp ATTRIBUTE_UNUSED
)
4306 /* Don't use sibcalls when use save-restore routine. */
4307 if (TARGET_SAVE_RESTORE
)
4310 /* Don't use sibcall for naked function. */
4311 if (cfun
->machine
->naked_p
)
4317 /* Implement `TARGET_SET_CURRENT_FUNCTION'. */
4318 /* Sanity cheching for above function attributes. */
4320 riscv_set_current_function (tree decl
)
4322 if (decl
== NULL_TREE
4323 || current_function_decl
== NULL_TREE
4324 || current_function_decl
== error_mark_node
4328 cfun
->machine
->naked_p
= riscv_naked_function_p (decl
);
4331 /* Implement TARGET_CANNOT_COPY_INSN_P. */
4334 riscv_cannot_copy_insn_p (rtx_insn
*insn
)
4336 return recog_memoized (insn
) >= 0 && get_attr_cannot_copy (insn
);
4339 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. */
4342 riscv_slow_unaligned_access (machine_mode
, unsigned int)
4344 return riscv_slow_unaligned_access_p
;
4347 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
4350 riscv_can_change_mode_class (machine_mode
, machine_mode
, reg_class_t rclass
)
4352 return !reg_classes_intersect_p (FP_REGS
, rclass
);
4356 /* Implement TARGET_CONSTANT_ALIGNMENT. */
4358 static HOST_WIDE_INT
4359 riscv_constant_alignment (const_tree exp
, HOST_WIDE_INT align
)
4361 if (TREE_CODE (exp
) == STRING_CST
|| TREE_CODE (exp
) == CONSTRUCTOR
)
4362 return MAX (align
, BITS_PER_WORD
);
4366 /* Initialize the GCC target structure. */
4367 #undef TARGET_ASM_ALIGNED_HI_OP
4368 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
4369 #undef TARGET_ASM_ALIGNED_SI_OP
4370 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
4371 #undef TARGET_ASM_ALIGNED_DI_OP
4372 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
4374 #undef TARGET_OPTION_OVERRIDE
4375 #define TARGET_OPTION_OVERRIDE riscv_option_override
4377 #undef TARGET_LEGITIMIZE_ADDRESS
4378 #define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
4380 #undef TARGET_SCHED_ISSUE_RATE
4381 #define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
4383 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
4384 #define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
4386 #undef TARGET_SET_CURRENT_FUNCTION
4387 #define TARGET_SET_CURRENT_FUNCTION riscv_set_current_function
4389 #undef TARGET_REGISTER_MOVE_COST
4390 #define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
4391 #undef TARGET_MEMORY_MOVE_COST
4392 #define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
4393 #undef TARGET_RTX_COSTS
4394 #define TARGET_RTX_COSTS riscv_rtx_costs
4395 #undef TARGET_ADDRESS_COST
4396 #define TARGET_ADDRESS_COST riscv_address_cost
4398 #undef TARGET_ASM_FILE_START
4399 #define TARGET_ASM_FILE_START riscv_file_start
4400 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
4401 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
4403 #undef TARGET_EXPAND_BUILTIN_VA_START
4404 #define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
4406 #undef TARGET_PROMOTE_FUNCTION_MODE
4407 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
4409 #undef TARGET_RETURN_IN_MEMORY
4410 #define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
4412 #undef TARGET_ASM_OUTPUT_MI_THUNK
4413 #define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
4414 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
4415 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
4417 #undef TARGET_PRINT_OPERAND
4418 #define TARGET_PRINT_OPERAND riscv_print_operand
4419 #undef TARGET_PRINT_OPERAND_ADDRESS
4420 #define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
4422 #undef TARGET_SETUP_INCOMING_VARARGS
4423 #define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
4424 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
4425 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS riscv_allocate_stack_slots_for_args
4426 #undef TARGET_STRICT_ARGUMENT_NAMING
4427 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
4428 #undef TARGET_MUST_PASS_IN_STACK
4429 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
4430 #undef TARGET_PASS_BY_REFERENCE
4431 #define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
4432 #undef TARGET_ARG_PARTIAL_BYTES
4433 #define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
4434 #undef TARGET_FUNCTION_ARG
4435 #define TARGET_FUNCTION_ARG riscv_function_arg
4436 #undef TARGET_FUNCTION_ARG_ADVANCE
4437 #define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
4438 #undef TARGET_FUNCTION_ARG_BOUNDARY
4439 #define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
4441 /* The generic ELF target does not always have TLS support. */
4443 #undef TARGET_HAVE_TLS
4444 #define TARGET_HAVE_TLS true
4447 #undef TARGET_CANNOT_FORCE_CONST_MEM
4448 #define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
4450 #undef TARGET_LEGITIMATE_CONSTANT_P
4451 #define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
4453 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
4454 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
4456 #undef TARGET_LEGITIMATE_ADDRESS_P
4457 #define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
4459 #undef TARGET_CAN_ELIMINATE
4460 #define TARGET_CAN_ELIMINATE riscv_can_eliminate
4462 #undef TARGET_CONDITIONAL_REGISTER_USAGE
4463 #define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
4465 #undef TARGET_CLASS_MAX_NREGS
4466 #define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
4468 #undef TARGET_TRAMPOLINE_INIT
4469 #define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
4471 #undef TARGET_IN_SMALL_DATA_P
4472 #define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
4474 #undef TARGET_HAVE_SRODATA_SECTION
4475 #define TARGET_HAVE_SRODATA_SECTION true
4477 #undef TARGET_ASM_SELECT_SECTION
4478 #define TARGET_ASM_SELECT_SECTION riscv_select_section
4480 #undef TARGET_ASM_SELECT_RTX_SECTION
4481 #define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
4483 #undef TARGET_MIN_ANCHOR_OFFSET
4484 #define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
4486 #undef TARGET_MAX_ANCHOR_OFFSET
4487 #define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
4489 #undef TARGET_REGISTER_PRIORITY
4490 #define TARGET_REGISTER_PRIORITY riscv_register_priority
4492 #undef TARGET_CANNOT_COPY_INSN_P
4493 #define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
4495 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
4496 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv
4498 #undef TARGET_INIT_BUILTINS
4499 #define TARGET_INIT_BUILTINS riscv_init_builtins
4501 #undef TARGET_BUILTIN_DECL
4502 #define TARGET_BUILTIN_DECL riscv_builtin_decl
4504 #undef TARGET_EXPAND_BUILTIN
4505 #define TARGET_EXPAND_BUILTIN riscv_expand_builtin
4507 #undef TARGET_HARD_REGNO_NREGS
4508 #define TARGET_HARD_REGNO_NREGS riscv_hard_regno_nregs
4509 #undef TARGET_HARD_REGNO_MODE_OK
4510 #define TARGET_HARD_REGNO_MODE_OK riscv_hard_regno_mode_ok
4512 #undef TARGET_MODES_TIEABLE_P
4513 #define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p
4515 #undef TARGET_SLOW_UNALIGNED_ACCESS
4516 #define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access
4518 #undef TARGET_SECONDARY_MEMORY_NEEDED
4519 #define TARGET_SECONDARY_MEMORY_NEEDED riscv_secondary_memory_needed
4521 #undef TARGET_CAN_CHANGE_MODE_CLASS
4522 #define TARGET_CAN_CHANGE_MODE_CLASS riscv_can_change_mode_class
4524 #undef TARGET_CONSTANT_ALIGNMENT
4525 #define TARGET_CONSTANT_ALIGNMENT riscv_constant_alignment
4527 #undef TARGET_ATTRIBUTE_TABLE
4528 #define TARGET_ATTRIBUTE_TABLE riscv_attribute_table
4530 #undef TARGET_WARN_FUNC_RETURN
4531 #define TARGET_WARN_FUNC_RETURN riscv_warn_func_return
4533 struct gcc_target targetm
= TARGET_INITIALIZER
;
4535 #include "gt-riscv.h"