/* Subroutines used for code generation for RISC-V.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "insn-config.h"
29 #include "insn-attr.h"
30 #include "recog.h"
31 #include "output.h"
32 #include "alias.h"
33 #include "tree.h"
34 #include "stringpool.h"
35 #include "attribs.h"
36 #include "varasm.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "function.h"
40 #include "explow.h"
41 #include "memmodel.h"
42 #include "emit-rtl.h"
43 #include "reload.h"
44 #include "tm_p.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "basic-block.h"
48 #include "expr.h"
49 #include "optabs.h"
50 #include "bitmap.h"
51 #include "df.h"
52 #include "diagnostic.h"
53 #include "builtins.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 12-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
/* Information about a function's frame layout.  */
struct GTY(())  riscv_frame_info {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
  unsigned save_libcall_adjustment;

  /* Offsets of fixed-point and floating-point save areas from frame
     bottom.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* Offset of virtual frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT frame_pointer_offset;

  /* Offset of hard frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT hard_frame_pointer_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;
};
struct GTY(())  machine_function {
  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* Memoized return value of leaf_function_p.  <0 if false, >0 if true.  */
  int is_leaf;

  /* The current frame information, calculated by riscv_compute_frame_info.  */
  struct riscv_frame_info frame;
};
/* Information about a single argument.  */
struct riscv_arg_info {
  /* True if the argument is at least partially passed on the stack.  */
  bool stack_p;

  /* The number of integer registers allocated to this argument.  */
  unsigned int num_gprs;

  /* The offset of the first register used, provided num_gprs is nonzero.
     If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS.  */
  unsigned int gpr_offset;

  /* The number of floating-point registers allocated to this argument.  */
  unsigned int num_fprs;

  /* The offset of the first register used, provided num_fprs is nonzero.  */
  unsigned int fpr_offset;
};
/* Information about an address described by riscv_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info {
  enum riscv_address_type type;
  rtx reg;
  rtx offset;
  enum riscv_symbol_type symbol_type;
};
/* One stage in a constant building sequence.  These sequences have
   the form:

	A = VALUE[0]
	A = A CODE[1] VALUE[1]
	A = A CODE[2] VALUE[2]
	...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct riscv_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI.  */
#define RISCV_MAX_INTEGER_OPS 8
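
/* As an illustration of that worst case (an editor's sketch, not a trace
   of any particular constant): a fully general 64-bit constant is
   materialized by building the top bits with LUI/ADDI and then
   repeatedly shifting the accumulator left and adding further chunks:

	lui	a0,<top 20 bits>
	addi	a0,a0,<chunk>
	slli	a0,a0,<n>
	addi	a0,a0,<chunk>
	slli	a0,a0,<n>
	addi	a0,a0,<chunk>
	slli	a0,a0,<n>
	addi	a0,a0,<low chunk>

   riscv_build_integer below searches for cheaper sequences (ending in
   ADDI, XORI, SLLI or SRLI) before falling back to this shape.  */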
/* Costs of various operations on the different architectures.  */

struct riscv_tune_info
{
  unsigned short fp_add[2];
  unsigned short fp_mul[2];
  unsigned short fp_div[2];
  unsigned short int_mul[2];
  unsigned short int_div[2];
  unsigned short issue_rate;
  unsigned short branch_cost;
  unsigned short memory_cost;
  bool slow_unaligned_access;
};

/* Information about one CPU we know about.  */
struct riscv_cpu_info {
  /* This CPU's canonical name.  */
  const char *name;

  /* Tuning parameters for this CPU.  */
  const struct riscv_tune_info *tune_info;
};
/* Global variables for machine-dependent things.  */

/* Whether unaligned accesses execute very slowly.  */
static bool riscv_slow_unaligned_access_p;

/* Which tuning parameters to use.  */
static const struct riscv_tune_info *tune_info;

/* Index R is the smallest register class that contains register R.  */
const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  GR_REGS,      GR_REGS,      GR_REGS,      GR_REGS,
  GR_REGS,      GR_REGS,      SIBCALL_REGS, SIBCALL_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FRAME_REGS,   FRAME_REGS,
};
/* Costs to use when optimizing for rocket.  */
static const struct riscv_tune_info rocket_tune_info = {
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_add */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_mul */
  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},	/* fp_div */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},	/* int_mul */
  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},	/* int_div */
  1,						/* issue_rate */
  3,						/* branch_cost */
  5,						/* memory_cost */
  true,						/* slow_unaligned_access */
};

/* Costs to use when optimizing for size.  */
static const struct riscv_tune_info optimize_size_tune_info = {
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_add */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_div */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_div */
  1,						/* issue_rate */
  1,						/* branch_cost */
  2,						/* memory_cost */
  false,					/* slow_unaligned_access */
};

/* A table describing all the processors GCC knows about.  */
static const struct riscv_cpu_info riscv_cpu_info_table[] = {
  { "rocket", &rocket_tune_info },
  { "size", &optimize_size_tune_info },
};
/* Return the riscv_cpu_info entry for the given name string.  */

static const struct riscv_cpu_info *
riscv_parse_cpu (const char *cpu_string)
{
  for (unsigned i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
    if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
      return riscv_cpu_info_table + i;

  error ("unknown cpu %qs for -mtune", cpu_string);
  return riscv_cpu_info_table;
}
/* Helper function for riscv_build_integer; arguments are as for
   riscv_build_integer.  */

static int
riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
		       HOST_WIDE_INT value, machine_mode mode)
{
  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
  int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];

  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
    {
      /* Simply ADDI or LUI.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }

  /* End with ADDI.  When constructing HImode constants, do not generate any
     intermediate value that is not itself a valid HImode constant.  The
     XORI case below will handle those remaining HImode constants.  */
  if (low_part != 0
      && (mode != HImode
	  || value - low_part <= ((1 << (GET_MODE_BITSIZE (HImode) - 1)) - 1)))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = PLUS;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* End with XORI.  */
  if (cost > 2 && (low_part < 0 || mode == HImode))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = XOR;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* Eliminate trailing zeros and end with SLLI.  */
  if (cost > 2 && (value & 1) == 0)
    {
      int shift = ctz_hwi (value);
      unsigned HOST_WIDE_INT x = value;
      x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);

      /* Don't eliminate the lower 12 bits if LUI might apply.  */
      if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
	shift -= IMM_BITS, x <<= IMM_BITS;

      alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = ASHIFT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
  return cost;
}
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static int
riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
		     machine_mode mode)
{
  int cost = riscv_build_integer_1 (codes, value, mode);

  /* Eliminate leading zeros and end with SRLI.  */
  if (value > 0 && cost > 2)
    {
      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
      int alt_cost, shift = clz_hwi (value);
      HOST_WIDE_INT shifted_val;

      /* Try filling trailing bits with 1s.  */
      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}

      /* Try filling trailing bits with 0s.  */
      shifted_val = value << shift;
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  return cost;
}
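
/* For example (an illustrative trace): on RV64 the constant
   0x00000000ffffffff has 32 leading zeros.  Shifting it left 32 and
   filling the trailing bits with 1s yields all-ones, i.e. -1, which is
   a single ADDI, so the whole constant costs two operations:

	addi	a0,zero,-1	# a0 = 0xffffffffffffffff
	srli	a0,a0,32	# a0 = 0x00000000ffffffff

   where a LUI/ADDI-style decomposition would need more.  */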
/* Return the cost of constructing VAL in the event that a scratch
   register is available.  */

static int
riscv_split_integer_cost (HOST_WIDE_INT val)
{
  int cost;
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];

  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
  if (loval != hival)
    cost += riscv_build_integer (codes, hival, VOIDmode);

  return cost;
}

/* Return the cost of constructing the integer constant VAL.  */

static int
riscv_integer_cost (HOST_WIDE_INT val)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  return MIN (riscv_build_integer (codes, val, VOIDmode),
	      riscv_split_integer_cost (val));
}
/* Try to split a 64-bit integer into 32-bit parts, then reassemble.  */

static rtx
riscv_split_integer (HOST_WIDE_INT val, machine_mode mode)
{
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);

  riscv_move_integer (hi, hi, hival);
  riscv_move_integer (lo, lo, loval);

  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
  hi = force_reg (mode, hi);

  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
}
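
/* A sketch of the RV64 code this produces when both 32-bit halves are
   cheap to build (illustrative; register choices are arbitrary):

	lui/addi ...		# build the high 32 bits in one pseudo
	lui/addi ...		# build the low 32 bits in another
	slli	t0,t0,32	# move the high part into position
	add	a0,t0,t1	# combine the two halves

   This is worthwhile when the full 64-bit pattern would otherwise need
   a long shift-and-add chain.  */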
/* Return true if X is a thread-local symbol.  */

static bool
riscv_tls_symbol_p (const_rtx x)
{
  return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if symbol X binds locally.  */

static bool
riscv_symbol_binds_local_p (const_rtx x)
{
  if (SYMBOL_REF_P (x))
    return (SYMBOL_REF_DECL (x)
	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	    : SYMBOL_REF_LOCAL_P (x));
  else
    return false;
}
/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X.  */

static enum riscv_symbol_type
riscv_classify_symbol (const_rtx x)
{
  if (riscv_tls_symbol_p (x))
    return SYMBOL_TLS;

  if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
    return SYMBOL_GOT_DISP;

  return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
}

/* Classify the base of symbolic expression X.  */

enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx x)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return riscv_classify_symbol (x);
}
/* Return true if X is a symbolic constant.  If it is, store the type of
   the symbol in *SYMBOL_TYPE.  */

bool
riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    *symbol_type = riscv_classify_symbol (x);
  else
    return false;

  if (offset == const0_rtx)
    return true;

  /* Nonzero offsets are only valid for references that don't use the GOT.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_PCREL:
    case SYMBOL_TLS_LE:
      /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
      return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);

    default:
      return false;
    }
}
/* Returns the number of instructions necessary to reference a symbol.  */

static int riscv_symbol_insns (enum riscv_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_TLS: return 0; /* Depends on the TLS model.  */
    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference.  */
    case SYMBOL_PCREL: return 2; /* AUIPC + the reference.  */
    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference.  */
    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference.  */
    default: gcc_unreachable ();
    }
}
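
/* To make those counts concrete, the typical sequences are
   (illustrative; using the usual assembler relocation operators):

     SYMBOL_ABSOLUTE	lui   a0,%hi(sym)        + lw a0,%lo(sym)(a0)
     SYMBOL_PCREL	auipc a0,%pcrel_hi(sym)  + lw a0,%pcrel_lo(.L)(a0)
     SYMBOL_TLS_LE	lui   a0,%tprel_hi(sym)
			add   a0,a0,tp,%tprel_add(sym)
			lw    a0,%tprel_lo(sym)(a0)
     SYMBOL_GOT_DISP	AUIPC + load of the GOT entry, then the
			reference itself.  */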
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

static bool
riscv_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return riscv_const_insns (x) > 0;
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
riscv_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum riscv_symbol_type type;
  rtx base, offset;

  /* There is no assembler syntax for expressing an address-sized
     high part.  */
  if (GET_CODE (x) == HIGH)
    return true;

  split_const (x, &base, &offset);
  if (riscv_symbolic_constant_p (base, &type))
    {
      /* As an optimization, don't spill symbolic constants that are as
	 cheap to rematerialize as to access in the constant pool.  */
      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
	return true;

      /* As an optimization, avoid needlessly generating dynamic
	 relocations.  */
      if (flag_pic)
	return true;
    }

  /* TLS symbols must be computed by riscv_legitimize_move.  */
  if (tls_referenced_p (x))
    return true;

  return false;
}
/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

int
riscv_regno_mode_ok_for_base_p (int regno,
				machine_mode mode ATTRIBUTE_UNUSED,
				bool strict_p)
{
  if (!HARD_REGISTER_NUM_P (regno))
    {
      if (!strict_p)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  return GP_REG_P (regno);
}

/* Return true if X is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

static bool
riscv_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
{
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
}
/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
   can address a value of mode MODE.  */

static bool
riscv_valid_offset_p (rtx x, machine_mode mode)
{
  /* Check that X is a signed 12-bit number.  */
  if (!const_arith_operand (x, Pmode))
    return false;

  /* We may need to split multiword moves, so make sure that every word
     is accessible.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
    return false;

  return true;
}
/* Should a symbol of type SYMBOL_TYPE be split in two?  */

bool
riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
{
  if (symbol_type == SYMBOL_TLS_LE)
    return true;

  if (!TARGET_EXPLICIT_RELOCS)
    return false;

  return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
}
/* Return true if a LO_SUM can address a value of mode MODE when the
   LO_SUM symbol has type SYM_TYPE.  */

static bool
riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode)
{
  /* Check that symbols of type SYMBOL_TYPE can be used to access values
     of mode MODE.  */
  if (riscv_symbol_insns (sym_type) == 0)
    return false;

  /* Check that there is a known low-part relocation.  */
  if (!riscv_split_symbol_type (sym_type))
    return false;

  /* We may need to split multiword moves, so make sure that each word
     can be accessed without inducing a carry.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && (!TARGET_STRICT_ALIGN
	  || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)))
    return false;

  return true;
}
/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
riscv_classify_address (struct riscv_address_info *info, rtx x,
			machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return riscv_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_offset_p (info->offset, mode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The RISC-V
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of _gp.  */
      info->symbol_type
	= riscv_classify_symbolic_expression (info->offset);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_lo_sum_p (info->symbol_type, mode));

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if x0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return SMALL_OPERAND (INTVAL (x));

    default:
      return false;
    }
}
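
/* Concretely, the accepted address forms look like this (illustrative
   examples, not an exhaustive list):

     (reg a0)				ADDRESS_REG, offset 0
     (plus (reg a0) (const_int -4))	ADDRESS_REG, 12-bit offset
     (lo_sum (reg a0) (symbol_ref x))	ADDRESS_LO_SUM, as in
					lw a1,%lo(x)(a0)
     (const_int 64)			ADDRESS_CONST_INT, i.e. lw a1,64(x0)

   Anything else, including auto-increment and reg+reg indexed forms,
   is rejected.  */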
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */

static bool
riscv_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
{
  struct riscv_address_info addr;

  return riscv_classify_address (&addr, x, mode, strict_p);
}
/* Return the number of instructions needed to load or store a value
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.  */

int
riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
{
  struct riscv_address_info addr;
  int n = 1;

  if (!riscv_classify_address (&addr, x, mode, false))
    return 0;

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  */
  if (mode != BLKmode && might_split_p)
    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (addr.type == ADDRESS_LO_SUM)
    n += riscv_symbol_insns (addr.symbol_type) - 1;

  return n;
}
/* Return the number of instructions needed to load constant X.
   Return 0 if X isn't a valid constant.  */

int
riscv_const_insns (rtx x)
{
  enum riscv_symbol_type symbol_type;
  rtx offset;

  switch (GET_CODE (x))
    {
    case HIGH:
      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
	  || !riscv_split_symbol_type (symbol_type))
	return 0;

      /* This is simply an LUI.  */
      return 1;

    case CONST_INT:
      {
	int cost = riscv_integer_cost (INTVAL (x));
	/* Force complicated constants to memory.  */
	return cost < 4 ? cost : 0;
      }

    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* We can use x0 to load floating-point zero.  */
      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

    case CONST:
      /* See if we can refer to X directly.  */
      if (riscv_symbolic_constant_p (x, &symbol_type))
	return riscv_symbol_insns (symbol_type);

      /* Otherwise try splitting the constant into a base and offset.  */
      split_const (x, &x, &offset);
      if (offset != 0)
	{
	  int n = riscv_const_insns (x);
	  if (n != 0)
	    return n + riscv_integer_cost (INTVAL (offset));
	}
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return riscv_symbol_insns (riscv_classify_symbol (x));

    default:
      return 0;
    }
}
/* X is a doubleword constant that can be handled by splitting it into
   two words and loading each word separately.  Return the number of
   instructions required to do this.  */

int
riscv_split_const_insns (rtx x)
{
  unsigned int low, high;

  low = riscv_const_insns (riscv_subword (x, false));
  high = riscv_const_insns (riscv_subword (x, true));
  gcc_assert (low > 0 && high > 0);
  return low + high;
}
/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.  */

int
riscv_load_store_insns (rtx mem, rtx_insn *insn)
{
  machine_mode mode;
  bool might_split_p;
  rtx set;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = true;
  if (GET_MODE_BITSIZE (mode) <= 32)
    might_split_p = false;
  else if (GET_MODE_BITSIZE (mode) == 64)
    {
      set = single_set (insn);
      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
	might_split_p = false;
    }

  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
}
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */

rtx
riscv_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
	  ? emit_move_insn (dest, src)
	  : emit_move_insn_1 (dest, src));
}
/* Emit an instruction of the form (set TARGET SRC).  */

static rtx
riscv_emit_set (rtx target, rtx src)
{
  emit_insn (gen_rtx_SET (target, src));
  return target;
}

/* Emit an instruction of the form (set DEST (CODE X Y)).  */

static rtx
riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
{
  return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
}

/* Compute (CODE X Y) and store the result in a new register
   of mode MODE.  Return that new register.  */

static rtx
riscv_force_binary (machine_mode mode, enum rtx_code code, rtx x, rtx y)
{
  return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
}

/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.  */

static rtx
riscv_force_temporary (rtx dest, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (Pmode, value);
  else
    {
      riscv_emit_move (dest, value);
      return dest;
    }
}
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.  */

static rtx
riscv_unspec_address_offset (rtx base, rtx offset,
			     enum riscv_symbol_type symbol_type)
{
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
			 UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}

/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

static rtx
riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
{
  rtx base, offset;

  split_const (address, &base, &offset);
  return riscv_unspec_address_offset (base, offset, symbol_type);
}
/* If OP is an UNSPEC address, return the address to which it refers,
   otherwise return OP itself.  */

static rtx
riscv_strip_unspec_address (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
  return op;
}

/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for riscv_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
{
  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
  return riscv_force_temporary (temp, addr);
}
/* Load an entry from the GOT for a TLS GD access.  */

static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_gddi (dest, sym);
  else
    return gen_got_load_tls_gdsi (dest, sym);
}

/* Load an entry from the GOT for a TLS IE access.  */

static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_iedi (dest, sym);
  else
    return gen_got_load_tls_iesi (dest, sym);
}

/* Add in the thread pointer for a TLS LE access.  */

static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
{
  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
  if (Pmode == DImode)
    return gen_tls_add_tp_ledi (dest, base, tp, sym);
  else
    return gen_tls_add_tp_lesi (dest, base, tp, sym);
}
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for riscv_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimate SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
riscv_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
{
  enum riscv_symbol_type symbol_type;

  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
      || !riscv_symbolic_constant_p (addr, &symbol_type)
      || riscv_symbol_insns (symbol_type) == 0
      || !riscv_split_symbol_type (symbol_type))
    return false;

  if (low_out)
    switch (symbol_type)
      {
      case SYMBOL_ABSOLUTE:
	{
	  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
	  high = riscv_force_temporary (temp, high);
	  *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
	}
	break;

      case SYMBOL_PCREL:
	{
	  static unsigned seqno;
	  char buf[32];
	  rtx label;

	  ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
	  gcc_assert ((size_t) bytes < sizeof (buf));

	  label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
	  SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;

	  if (temp == NULL)
	    temp = gen_reg_rtx (Pmode);

	  if (Pmode == DImode)
	    emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
	  else
	    emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));

	  *low_out = gen_rtx_LO_SUM (Pmode, temp, label);

	  seqno++;
	}
	break;

      default:
	gcc_unreachable ();
      }

  return true;
}
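
/* For instance (illustrative), a SYMBOL_ABSOLUTE load of `x' splits into

	lui	a5,%hi(x)
	lw	a0,%lo(x)(a5)

   while a SYMBOL_PCREL load uses an AUIPC whose local label lets the
   low part refer back to it:

   .LA0:	auipc	a5,%pcrel_hi(x)
		lw	a0,%pcrel_lo(.LA0)(a5)  */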
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   riscv_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
	 The addition inside the macro CONST_HIGH_PART may cause an
	 overflow, so we need to force a sign-extension check.  */
      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
      offset = CONST_LOW_PART (offset);
      high = riscv_force_temporary (temp, high);
      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (Pmode, reg, offset);
}
/* The __tls_get_addr symbol.  */
static GTY(()) rtx riscv_tls_symbol;

/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  RESULT is an RTX for the
   return value location.  */

static rtx_insn *
riscv_call_tls_get_addr (rtx sym, rtx result)
{
  rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;
  rtx_insn *insn;

  if (!riscv_tls_symbol)
    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
  func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);

  start_sequence ();

  emit_insn (riscv_got_load_tls_gd (a0, sym));
  insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
riscv_legitimize_tls_address (rtx loc)
{
  rtx dest, tp, tmp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);

  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
  if (!flag_pic)
    model = TLS_MODEL_LOCAL_EXEC;

  switch (model)
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      /* Rely on section anchors for the optimization that LDM TLS
	 provides.  The anchor's address is loaded with GD TLS.  */
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_rtx_REG (Pmode, GP_RETURN);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);
      break;

    case TLS_MODEL_INITIAL_EXEC:
      /* la.tls.ie; tp-relative add.  */
      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
      tmp = gen_reg_rtx (Pmode);
      emit_insn (riscv_got_load_tls_ie (tmp, loc));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
      dest = gen_reg_rtx (Pmode);
      emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
      dest = gen_rtx_LO_SUM (Pmode, dest,
			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
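
/* The local-exec case above corresponds to a sequence of this shape
   (illustrative):

	lui	a5,%tprel_hi(sym)
	add	a5,a5,tp,%tprel_add(sym)
	lw	a0,%tprel_lo(sym)(a5)

   where the final LO_SUM lets the %tprel_lo relocation be folded into
   the memory access itself.  */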
/* If X is not a valid address for mode MODE, force it into a register.  */

static rtx
riscv_force_address (rtx x, machine_mode mode)
{
  if (!riscv_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
  return x;
}

/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  machine_mode mode)
{
  rtx addr;

  if (riscv_tls_symbol_p (x))
    return riscv_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (riscv_split_symbol (NULL, x, mode, &addr))
    return riscv_force_address (addr, mode);

  /* Handle BASE + OFFSET using riscv_add_offset.  */
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) != 0)
    {
      rtx base = XEXP (x, 0);
      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));

      if (!riscv_valid_base_register_p (base, mode, false))
	base = copy_to_mode_reg (Pmode, base);
      addr = riscv_add_offset (NULL, base, offset);
      return riscv_force_address (addr, mode);
    }

  return x;
}
/* Load VALUE into DEST.  TEMP is as for riscv_force_temporary.  */

void
riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  machine_mode mode;
  int i, num_ops;
  rtx x;

  mode = GET_MODE (dest);
  num_ops = riscv_build_integer (codes, value, mode);

  if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
      && num_ops >= riscv_split_integer_cost (value))
    x = riscv_split_integer (value, mode);
  else
    {
      /* Apply each binary operation to X.  */
      x = GEN_INT (codes[0].value);

      for (i = 1; i < num_ops; i++)
	{
	  if (!can_create_pseudo_p ())
	    x = riscv_emit_set (temp, x);
	  else
	    x = force_reg (mode, x);

	  x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
	}
    }

  riscv_emit_set (dest, x);
}
/* Subroutine of riscv_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
riscv_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  */
  if (splittable_const_int_operand (src, mode))
    {
      riscv_move_integer (dest, dest, INTVAL (src));
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
    {
      riscv_emit_set (dest, src);
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (riscv_tls_symbol_p (src))
    {
      riscv_emit_move (dest, riscv_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.  Also
     prefer to do this even if the constant _can_ be forced into memory, as it
     usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
    {
      base = riscv_force_temporary (dest, base);
      riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
  riscv_emit_move (dest, src);
}
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  */

bool
riscv_legitimize_move (machine_mode mode, rtx dest, rtx src)
{
  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
    {
      riscv_emit_move (dest, force_reg (mode, src));
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      riscv_legitimize_const_move (mode, dest, src);
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }

  /* The patterns we provide for optimizing access to PIC local symbols
     can leave GCC with a non-legitimate address, which it may then use
     in an unrecognizable instruction during optimization.  Force such
     addresses into registers here.  */

  if (MEM_P (dest) && !riscv_legitimate_address_p (mode, XEXP (dest, 0),
						   reload_completed))
    {
      XEXP (dest, 0) = riscv_force_address (XEXP (dest, 0), mode);
    }

  if (MEM_P (src) && !riscv_legitimate_address_p (mode, XEXP (src, 0),
						  reload_completed))
    {
      XEXP (src, 0) = riscv_force_address (XEXP (src, 0), mode);
    }

  return false;
}
/* Return true if there is an instruction that implements CODE and accepts
   X as an immediate operand.  */

static int
riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case AND:
    case IOR:
    case XOR:
    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 12-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case GE:
    case GEU:
      /* We can emulate an immediate of 1 by using GT/GTU against x0.  */
      return x == 1;

    default:
      /* By default assume that x0 can be used for 0.  */
      return x == 0;
    }
}
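
/* For example (illustrative): `a <= 100' becomes `a < 101', a single
   slti t0,a0,101, which is why LE is accepted whenever X + 1 fits in
   12 bits; and `a >= 1' is just `a > 0', i.e. sgt t0,a0,zero (an slt
   with the operands swapped), which is why GE/GEU accept only 1.  */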
/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation takes SINGLE_INSNS
   instructions and that the sequence of a double-word operation takes
   DOUBLE_INSNS instructions.  */

static int
riscv_binary_cost (rtx x, int single_insns, int double_insns)
{
  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    return COSTS_N_INSNS (double_insns);
  return COSTS_N_INSNS (single_insns);
}
/* Return the cost of sign- or zero-extending OP.  */

static int
riscv_extend_cost (rtx op, bool unsigned_p)
{
  if (MEM_P (op))
    return 0;

  if (unsigned_p && GET_MODE (op) == QImode)
    /* We can use ANDI.  */
    return COSTS_N_INSNS (1);

  if (!unsigned_p && GET_MODE (op) == SImode)
    /* We can use SEXT.W.  */
    return COSTS_N_INSNS (1);

  /* We need to use a shift left and a shift right.  */
  return COSTS_N_INSNS (2);
}
/* Implement TARGET_RTX_COSTS.  */

static bool
riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int *total, bool speed)
{
  bool float_mode_p = FLOAT_MODE_P (mode);
  int cost;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
	{
	  *total = 0;
	  return true;
	}
      /* Fall through.  */

    case SYMBOL_REF:
    case LABEL_REF:
    case CONST_DOUBLE:
    case CONST:
      if ((cost = riscv_const_insns (x)) > 0)
	{
	  /* If the constant is likely to be stored in a GPR, SETs of
	     single-insn constants are as cheap as register sets; we
	     never want to CSE them.  */
	  if (cost == 1 && outer_code == SET)
	    *total = 0;
	  /* When we load a constant more than once, it usually is better
	     to duplicate the last operation in the sequence than to CSE
	     the constant itself.  */
	  else if (outer_code == SET || GET_MODE (x) == VOIDmode)
	    *total = COSTS_N_INSNS (1);
	}
      else /* The instruction will be fetched from the constant pool.  */
	*total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
      return true;

    case MEM:
      /* If the address is legitimate, return the number of
	 instructions it needs.  */
      if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
	{
	  *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
	  return true;
	}
      /* Otherwise use the default handling.  */
      return false;

    case NOT:
      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
      return false;

    case AND:
    case IOR:
    case XOR:
      /* Double-word operations use two single-word operations.  */
      *total = riscv_binary_cost (x, 1, 2);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
      return false;

    case ABS:
      *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
      return false;

    case LO_SUM:
      *total = set_src_cost (XEXP (x, 0), mode, speed);
      return true;

    case LT:
    case LTU:
    case LE:
    case LEU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case EQ:
    case NE:
      /* Branch comparisons have VOIDmode, so use the first operand's
	 mode instead.  */
      mode = GET_MODE (XEXP (x, 0));
      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 3);
      return false;

    case UNORDERED:
    case ORDERED:
      /* (FEQ(A, A) & FEQ(B, B)) compared against 0.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
      return false;

    case UNEQ:
    case LTGT:
      /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B).  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (3);
      return false;

    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      /* FLT or FLE, but guarded by an FFLAGS read and write.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (4);
      return false;

    case MINUS:
    case PLUS:
      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 4);
      return false;

    case NEG:
      {
	rtx op = XEXP (x, 0);
	if (GET_CODE (op) == FMA && !HONOR_SIGNED_ZEROS (mode))
	  {
	    *total = (tune_info->fp_mul[mode == DFmode]
		      + set_src_cost (XEXP (op, 0), mode, speed)
		      + set_src_cost (XEXP (op, 1), mode, speed)
		      + set_src_cost (XEXP (op, 2), mode, speed));
	    return true;
	  }
      }

      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
      return false;

    case MULT:
      if (float_mode_p)
	*total = tune_info->fp_mul[mode == DFmode];
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	*total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
      else if (!speed)
	*total = COSTS_N_INSNS (1);
      else
	*total = tune_info->int_mul[mode == DImode];
      return false;

    case DIV:
    case SQRT:
    case MOD:
      if (float_mode_p)
	{
	  *total = tune_info->fp_div[mode == DFmode];
	  return false;
	}
      /* Fall through.  */

    case UDIV:
    case UMOD:
      if (speed)
	*total = tune_info->int_div[mode == DImode];
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      *total = riscv_extend_cost (XEXP (x, 0), GET_CODE (x) == ZERO_EXTEND);
      return false;

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = tune_info->fp_add[mode == DFmode];
      return false;

    case FMA:
      *total = (tune_info->fp_mul[mode == DFmode]
		+ set_src_cost (XEXP (x, 0), mode, speed)
		+ set_src_cost (XEXP (x, 1), mode, speed)
		+ set_src_cost (XEXP (x, 2), mode, speed));
      return true;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_AUIPC)
	{
	  /* Make AUIPC cheap to avoid spilling its result to the stack.  */
	  *total = 1;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
1639 static int
1640 riscv_address_cost (rtx addr, machine_mode mode,
1641 addr_space_t as ATTRIBUTE_UNUSED,
1642 bool speed ATTRIBUTE_UNUSED)
1644 return riscv_address_insns (addr, mode, false);
/* Return one word of double-word value OP.  HIGH_P is true to select the
   high part or false to select the low part.  */

rtx
riscv_subword (rtx op, bool high_p)
{
  unsigned int byte = high_p ? UNITS_PER_WORD : 0;
  machine_mode mode = GET_MODE (op);

  if (mode == VOIDmode)
    mode = TARGET_64BIT ? TImode : DImode;

  if (MEM_P (op))
    return adjust_address (op, word_mode, byte);

  if (REG_P (op))
    gcc_assert (!FP_REG_RTX_P (op));

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
/* Return true if a 64-bit move from SRC to DEST should be split into two.  */

bool
riscv_split_64bit_move_p (rtx dest, rtx src)
{
  if (TARGET_64BIT)
    return false;

  /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
     of zeroing an FPR with FCVT.D.W.  */
  if (TARGET_DOUBLE_FLOAT
      && ((FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
	  || (FP_REG_RTX_P (dest) && MEM_P (src))
	  || (FP_REG_RTX_P (src) && MEM_P (dest))
	  || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))))
    return false;

  return true;
}
/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
   this function handles 64-bit moves for which riscv_split_64bit_move_p
   holds.  For 64-bit targets, this function handles 128-bit moves.  */

void
riscv_split_doubleword_move (rtx dest, rtx src)
{
  rtx low_dest;

  /* The operation can be split into two normal moves.  Decide in
     which order to do them.  */
  low_dest = riscv_subword (dest, false);
  if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
    {
      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
      riscv_emit_move (low_dest, riscv_subword (src, false));
    }
  else
    {
      riscv_emit_move (low_dest, riscv_subword (src, false));
      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
    }
}
/* Return the appropriate instructions to move SRC into DEST.  Assume
   that SRC is operand 1 and DEST is operand 0.  */

const char *
riscv_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  machine_mode mode;
  bool dbl_p;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  if (dbl_p && riscv_split_64bit_move_p (dest, src))
    return "#";

  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG && FP_REG_P (REGNO (src)))
	return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";

      if (src_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "lbu\t%0,%1";
	  case 2: return "lhu\t%0,%1";
	  case 4: return "lw\t%0,%1";
	  case 8: return "ld\t%0,%1";
	  }

      if (src_code == CONST_INT)
	return "li\t%0,%1";

      if (src_code == HIGH)
	return "lui\t%0,%h1";

      if (symbolic_operand (src, VOIDmode))
	switch (riscv_classify_symbolic_expression (src))
	  {
	  case SYMBOL_GOT_DISP: return "la\t%0,%1";
	  case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
	  case SYMBOL_PCREL: return "lla\t%0,%1";
	  default: gcc_unreachable ();
	  }
    }
  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
	{
	  if (GP_REG_P (REGNO (dest)))
	    return "mv\t%0,%z1";

	  if (FP_REG_P (REGNO (dest)))
	    {
	      if (!dbl_p)
		return "fmv.s.x\t%0,%z1";
	      if (TARGET_64BIT)
		return "fmv.d.x\t%0,%z1";
	      /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w.  */
	      gcc_assert (src == CONST0_RTX (mode));
	      return "fcvt.d.w\t%0,x0";
	    }
	}
      if (dest_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "sb\t%z1,%0";
	  case 2: return "sh\t%z1,%0";
	  case 4: return "sw\t%z1,%0";
	  case 8: return "sd\t%z1,%0";
	  }
    }
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
	return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";

      if (dest_code == MEM)
	return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
    }
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
	return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
    }
  gcc_unreachable ();
}
/* Return true if CMP1 is a suitable second operand for integer ordering
   test CODE.  See also the *sCC patterns in riscv.md.  */

static bool
riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
{
  switch (code)
    {
    case GT:
    case GTU:
      return reg_or_0_operand (cmp1, VOIDmode);

    case GE:
    case GEU:
      return cmp1 == const1_rtx;

    case LT:
    case LTU:
      return arith_operand (cmp1, VOIDmode);

    case LE:
      return sle_operand (cmp1, VOIDmode);

    case LEU:
      return sleu_operand (cmp1, VOIDmode);

    default:
      gcc_unreachable ();
    }
}
/* Return true if *CMP1 (of mode MODE) is a valid second operand for
   integer ordering test *CODE, or if an equivalent combination can
   be formed by adjusting *CODE and *CMP1.  When returning true, update
   *CODE and *CMP1 with the chosen code and operand, otherwise leave
   them alone.  */

static bool
riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
				   machine_mode mode)
{
  HOST_WIDE_INT plus_one;

  if (riscv_int_order_operand_ok_p (*code, *cmp1))
    return true;

  if (CONST_INT_P (*cmp1))
    switch (*code)
      {
      case LE:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (INTVAL (*cmp1) < plus_one)
	  {
	    *code = LT;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      case LEU:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (plus_one != 0)
	  {
	    *code = LTU;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      default:
	break;
      }
  return false;
}
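
/* For example (illustrative): the test `x <= 7' has no direct SLE
   instruction, but it canonicalizes to `x < 8' and a single SLT.
   An unsigned `x <= UINT_MAX' cannot be rewritten this way because
   adding one wraps to zero, so it is left for the caller to handle by
   inverting the test.  */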
/* Compare CMP0 and CMP1 using ordering test CODE and store the result
   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
   is nonnull, it's OK to set TARGET to the inverse of the result and
   flip *INVERT_PTR instead.  */

static void
riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
			   rtx target, rtx cmp0, rtx cmp1)
{
  machine_mode mode;

  /* First see if there is a RISCV instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
    riscv_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
	{
	  cmp1 = force_reg (mode, cmp1);
	  riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
	}
      else if (invert_ptr == 0)
	{
	  rtx inv_target = riscv_force_binary (GET_MODE (target),
					       inv_code, cmp0, cmp1);
	  riscv_emit_binary (XOR, target, inv_target, const1_rtx);
	}
      else
	{
	  *invert_ptr = !*invert_ptr;
	  riscv_emit_binary (inv_code, target, cmp0, cmp1);
	}
    }
}
/* Return a register that is zero iff CMP0 and CMP1 are equal.
   The register will have the same mode as CMP0.  */

static rtx
riscv_zero_if_equal (rtx cmp0, rtx cmp1)
{
  if (cmp1 == const0_rtx)
    return cmp0;

  return expand_binop (GET_MODE (cmp0), sub_optab,
		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
}
/* Sign- or zero-extend OP0 and OP1 for integer comparisons.  */

static void
riscv_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
{
  /* Comparisons consider all XLEN bits, so extend sub-XLEN values.  */
  if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
    {
      /* It is more profitable to zero-extend QImode values.  */
      if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
	{
	  *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
	  if (CONST_INT_P (*op1))
	    *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
	  else
	    *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
	}
      else
	{
	  *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
	  if (*op1 != const0_rtx)
	    *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
	}
    }
}
/* Convert a comparison into something that can be used in a branch.  On
   entry, *OP0 and *OP1 are the values being compared and *CODE is the code
   used to compare them.  Update them to describe the final comparison.  */

static void
riscv_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
{
  if (splittable_const_int_operand (*op1, VOIDmode))
    {
      HOST_WIDE_INT rhs = INTVAL (*op1);

      if (*code == EQ || *code == NE)
	{
	  /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0.  */
	  if (SMALL_OPERAND (-rhs))
	    {
	      *op0 = riscv_force_binary (GET_MODE (*op0), PLUS, *op0,
					 GEN_INT (-rhs));
	      *op1 = const0_rtx;
	    }
	}
      else
	{
	  static const enum rtx_code mag_comparisons[][2] = {
	    {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
	  };

	  /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000).  */
	  for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
	    {
	      HOST_WIDE_INT new_rhs;
	      bool increment = *code == mag_comparisons[i][0];
	      bool decrement = *code == mag_comparisons[i][1];
	      if (!increment && !decrement)
		continue;

	      new_rhs = rhs + (increment ? 1 : -1);
	      if (riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs)
		  && (rhs < 0) == (new_rhs < 0))
		{
		  *op1 = GEN_INT (new_rhs);
		  *code = mag_comparisons[i][increment];
		}
	      break;
	    }
	}
    }

  riscv_extend_comparands (*code, op0, op1);

  *op0 = force_reg (word_mode, *op0);
  if (*op1 != const0_rtx)
    *op1 = force_reg (word_mode, *op1);
}
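
/* Two worked examples (illustrative): `x == 2048' cannot use a 12-bit
   immediate directly, but -2048 fits, so it becomes

	addi	t0,a0,-2048
	beq	t0,zero,label

   and `x <=u 0xFFF' becomes `x <u 0x1000', whose right-hand side is a
   single LUI rather than a LUI/ADDI pair.  */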
2011 /* Like riscv_emit_int_compare, but for floating-point comparisons. */
2013 static void
2014 riscv_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2016 rtx tmp0, tmp1, cmp_op0 = *op0, cmp_op1 = *op1;
2017 enum rtx_code fp_code = *code;
2018 *code = NE;
2020 switch (fp_code)
2022 case UNORDERED:
2023 *code = EQ;
2024 /* Fall through. */
2026 case ORDERED:
2027 /* a == a && b == b */
2028 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2029 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2030 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2031 *op1 = const0_rtx;
2032 break;
2034 case UNEQ:
2035 case LTGT:
2036 /* ordered(a, b) > (a == b) */
2037 *code = fp_code == LTGT ? GTU : EQ;
2038 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2039 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2040 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2041 *op1 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op1);
2042 break;
2044 #define UNORDERED_COMPARISON(CODE, CMP) \
2045 case CODE: \
2046 *code = EQ; \
2047 *op0 = gen_reg_rtx (word_mode); \
2048 if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
2049 emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
2050 else if (GET_MODE (cmp_op0) == SFmode) \
2051 emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
2052 else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
2053 emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
2054 else if (GET_MODE (cmp_op0) == DFmode) \
2055 emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
2056 else \
2057 gcc_unreachable (); \
2058 *op1 = const0_rtx; \
2059 break;
2061 case UNLT:
2062 std::swap (cmp_op0, cmp_op1);
2063 gcc_fallthrough ();
2065 UNORDERED_COMPARISON(UNGT, le)
2067 case UNLE:
2068 std::swap (cmp_op0, cmp_op1);
2069 gcc_fallthrough ();
2071 UNORDERED_COMPARISON(UNGE, lt)
2072 #undef UNORDERED_COMPARISON
2074 case NE:
2075 fp_code = EQ;
2076 *code = EQ;
2077 /* Fall through. */
2079 case EQ:
2080 case LE:
2081 case LT:
2082 case GE:
2083 case GT:
2084 /* We have instructions for these cases. */
2085 *op0 = riscv_force_binary (word_mode, fp_code, cmp_op0, cmp_op1);
2086 *op1 = const0_rtx;
2087 break;
2089 default:
2090 gcc_unreachable ();
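   /* Illustrative lowering (assuming DFmode operands): ORDERED becomes

        feq.d  t0, a, a
        feq.d  t1, b, b
        and    t0, t0, t1

      leaving *OP0 = t0, *OP1 = 0 and *CODE = NE (EQ for UNORDERED), so
      callers only ever test an integer register against zero.  */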
2094 /* CODE-compare OP0 and OP1. Store the result in TARGET. */
2096 void
2097 riscv_expand_int_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2099 riscv_extend_comparands (code, &op0, &op1);
2100 op0 = force_reg (word_mode, op0);
2102 if (code == EQ || code == NE)
2104 rtx zie = riscv_zero_if_equal (op0, op1);
2105 riscv_emit_binary (code, target, zie, const0_rtx);
2107 else
2108 riscv_emit_int_order_test (code, 0, target, op0, op1);
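   /* For example (illustrative), "target = (a == b)" expands to

        sub   t0, a, b
        seqz  target, t0

      while order tests such as a < b map directly onto SLT/SLTU via
      riscv_emit_int_order_test.  */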
2111 /* Like riscv_expand_int_scc, but for floating-point comparisons. */
2113 void
2114 riscv_expand_float_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2116 riscv_emit_float_compare (&code, &op0, &op1);
2118 rtx cmp = riscv_force_binary (word_mode, code, op0, op1);
2119 riscv_emit_set (target, lowpart_subreg (SImode, cmp, word_mode));
2122 /* Jump to LABEL if (CODE OP0 OP1) holds. */
2124 void
2125 riscv_expand_conditional_branch (rtx label, rtx_code code, rtx op0, rtx op1)
2127 if (FLOAT_MODE_P (GET_MODE (op1)))
2128 riscv_emit_float_compare (&code, &op0, &op1);
2129 else
2130 riscv_emit_int_compare (&code, &op0, &op1);
2132 rtx condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2133 emit_jump_insn (gen_condjump (condition, label));
2136 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
2137 least PARM_BOUNDARY bits of alignment, but will be given anything up
2138 to STACK_BOUNDARY bits if the type requires it. */
2140 static unsigned int
2141 riscv_function_arg_boundary (machine_mode mode, const_tree type)
2143 unsigned int alignment;
2145 /* Use the natural alignment if the type is not an aggregate. */
2146 if (type && !AGGREGATE_TYPE_P (type))
2147 alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
2148 else
2149 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2151 return MIN (STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment));
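   /* For example (illustrative, RV64): a 32-bit int is promoted to
      PARM_BOUNDARY (64 bits), while a struct declared with
      __attribute__ ((aligned (32))) (256-bit alignment) is capped at
      STACK_BOUNDARY (128 bits).  */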
2154 /* If MODE represents an argument that can be passed or returned in
2155 floating-point registers, return the number of registers, else 0. */
2157 static unsigned
2158 riscv_pass_mode_in_fpr_p (machine_mode mode)
2160 if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG)
2162 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2163 return 1;
2165 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
2166 return 2;
2169 return 0;
2172 typedef struct {
2173 const_tree type;
2174 HOST_WIDE_INT offset;
2175 } riscv_aggregate_field;
2177 /* Identify subfields of aggregates that are candidates for passing in
2178 floating-point registers. */
2180 static int
2181 riscv_flatten_aggregate_field (const_tree type,
2182 riscv_aggregate_field fields[2],
2183 int n, HOST_WIDE_INT offset)
2185 switch (TREE_CODE (type))
2187 case RECORD_TYPE:
2188 /* Can't handle incomplete types nor sizes that are not fixed. */
2189 if (!COMPLETE_TYPE_P (type)
2190 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2191 || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2192 return -1;
2194 for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
2195 if (TREE_CODE (f) == FIELD_DECL)
2197 if (!TYPE_P (TREE_TYPE (f)))
2198 return -1;
2200 HOST_WIDE_INT pos = offset + int_byte_position (f);
2201 n = riscv_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos);
2202 if (n < 0)
2203 return -1;
2205 return n;
2207 case ARRAY_TYPE:
2209 HOST_WIDE_INT n_elts;
2210 riscv_aggregate_field subfields[2];
2211 tree index = TYPE_DOMAIN (type);
2212 tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
2213 int n_subfields = riscv_flatten_aggregate_field (TREE_TYPE (type),
2214 subfields, 0, offset);
2216 /* Can't handle incomplete types nor sizes that are not fixed. */
2217 if (n_subfields <= 0
2218 || !COMPLETE_TYPE_P (type)
2219 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2220 || !index
2221 || !TYPE_MAX_VALUE (index)
2222 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
2223 || !TYPE_MIN_VALUE (index)
2224 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
2225 || !tree_fits_uhwi_p (elt_size))
2226 return -1;
2228 n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
2229 - tree_to_uhwi (TYPE_MIN_VALUE (index));
2230 gcc_assert (n_elts >= 0);
2232 for (HOST_WIDE_INT i = 0; i < n_elts; i++)
2233 for (int j = 0; j < n_subfields; j++)
2235 if (n >= 2)
2236 return -1;
2238 fields[n] = subfields[j];
2239 fields[n++].offset += i * tree_to_uhwi (elt_size);
2242 return n;
2245 case COMPLEX_TYPE:
2247 /* A complex type needs to consume two fields, so N must be 0. */
2248 if (n != 0)
2249 return -1;
2251 HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
2253 if (elt_size <= UNITS_PER_FP_ARG)
2255 fields[0].type = TREE_TYPE (type);
2256 fields[0].offset = offset;
2257 fields[1].type = TREE_TYPE (type);
2258 fields[1].offset = offset + elt_size;
2260 return 2;
2263 return -1;
2266 default:
2267 if (n < 2
2268 && ((SCALAR_FLOAT_TYPE_P (type)
2269 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
2270 || (INTEGRAL_TYPE_P (type)
2271 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)))
2273 fields[n].type = type;
2274 fields[n].offset = offset;
2275 return n + 1;
2277 else
2278 return -1;
2282 /* Identify candidate aggregates for passing in floating-point registers.
2283 Candidates have at most two fields after flattening. */
2285 static int
2286 riscv_flatten_aggregate_argument (const_tree type,
2287 riscv_aggregate_field fields[2])
2289 if (!type || TREE_CODE (type) != RECORD_TYPE)
2290 return -1;
2292 return riscv_flatten_aggregate_field (type, fields, 0, 0);
2295 /* See whether TYPE is a record whose fields should be returned in one or
2296 two floating-point registers. If so, populate FIELDS accordingly. */
2298 static unsigned
2299 riscv_pass_aggregate_in_fpr_pair_p (const_tree type,
2300 riscv_aggregate_field fields[2])
2302 int n = riscv_flatten_aggregate_argument (type, fields);
2304 for (int i = 0; i < n; i++)
2305 if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
2306 return 0;
2308 return n > 0 ? n : 0;
2311 /* See whether TYPE is a record whose fields should be returned in one
2312 floating-point register and one integer register. If so, populate
2313 FIELDS accordingly. */
2315 static bool
2316 riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
2317 riscv_aggregate_field fields[2])
2319 unsigned num_int = 0, num_float = 0;
2320 int n = riscv_flatten_aggregate_argument (type, fields);
2322 for (int i = 0; i < n; i++)
2324 num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
2325 num_int += INTEGRAL_TYPE_P (fields[i].type);
2328 return num_int == 1 && num_float == 1;
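   /* Illustrative examples of the flattening rules (assuming a
      hard-float ABI with 8-byte FPRs):

        struct { float f; double d; }  -> two FPRs
        struct { float f; int i; }     -> one FPR plus one GPR
        struct { float f[3]; }         -> three fields, so the ordinary
                                          GPR/stack rules apply.  */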
2331 /* Return the representation of an argument passed or returned in an FPR
2332 when the value has mode VALUE_MODE and the type has TYPE_MODE. The
2333 two modes may be different for structures like:
2335 struct __attribute__((packed)) foo { float f; }
2337 where the SFmode value "f" is passed in REGNO but the struct itself
2338 has mode BLKmode. */
2340 static rtx
2341 riscv_pass_fpr_single (machine_mode type_mode, unsigned regno,
2342 machine_mode value_mode)
2344 rtx x = gen_rtx_REG (value_mode, regno);
2346 if (type_mode != value_mode)
2348 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
2349 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
2351 return x;
2354 /* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
2355 MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
2356 byte offset for the first value, likewise MODE2 and OFFSET2 for the
2357 second value. */
2359 static rtx
2360 riscv_pass_fpr_pair (machine_mode mode, unsigned regno1,
2361 machine_mode mode1, HOST_WIDE_INT offset1,
2362 unsigned regno2, machine_mode mode2,
2363 HOST_WIDE_INT offset2)
2365 return gen_rtx_PARALLEL
2366 (mode,
2367 gen_rtvec (2,
2368 gen_rtx_EXPR_LIST (VOIDmode,
2369 gen_rtx_REG (mode1, regno1),
2370 GEN_INT (offset1)),
2371 gen_rtx_EXPR_LIST (VOIDmode,
2372 gen_rtx_REG (mode2, regno2),
2373 GEN_INT (offset2))));
2376 /* Fill INFO with information about a single argument, and return an
2377 RTL pattern to pass or return the argument. CUM is the cumulative
2378 state for earlier arguments. MODE is the mode of this argument and
2379 TYPE is its type (if known). NAMED is true if this is a named
2380 (fixed) argument rather than a variable one. RETURN_P is true if
2381 returning the argument, or false if passing the argument. */
2383 static rtx
2384 riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
2385 machine_mode mode, const_tree type, bool named,
2386 bool return_p)
2388 unsigned num_bytes, num_words;
2389 unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
2390 unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
2391 unsigned alignment = riscv_function_arg_boundary (mode, type);
2393 memset (info, 0, sizeof (*info));
2394 info->gpr_offset = cum->num_gprs;
2395 info->fpr_offset = cum->num_fprs;
2397 if (named)
2399 riscv_aggregate_field fields[2];
2400 unsigned fregno = fpr_base + info->fpr_offset;
2401 unsigned gregno = gpr_base + info->gpr_offset;
2403 /* Pass one- or two-element floating-point aggregates in FPRs. */
2404 if ((info->num_fprs = riscv_pass_aggregate_in_fpr_pair_p (type, fields))
2405 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2406 switch (info->num_fprs)
2408 case 1:
2409 return riscv_pass_fpr_single (mode, fregno,
2410 TYPE_MODE (fields[0].type));
2412 case 2:
2413 return riscv_pass_fpr_pair (mode, fregno,
2414 TYPE_MODE (fields[0].type),
2415 fields[0].offset,
2416 fregno + 1,
2417 TYPE_MODE (fields[1].type),
2418 fields[1].offset);
2420 default:
2421 gcc_unreachable ();
2424 /* Pass real and complex floating-point numbers in FPRs. */
2425 if ((info->num_fprs = riscv_pass_mode_in_fpr_p (mode))
2426 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2427 switch (GET_MODE_CLASS (mode))
2429 case MODE_FLOAT:
2430 return gen_rtx_REG (mode, fregno);
2432 case MODE_COMPLEX_FLOAT:
2433 return riscv_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0,
2434 fregno + 1, GET_MODE_INNER (mode),
2435 GET_MODE_UNIT_SIZE (mode));
2437 default:
2438 gcc_unreachable ();
2441 /* Pass structs with one float and one integer in an FPR and a GPR. */
2442 if (riscv_pass_aggregate_in_fpr_and_gpr_p (type, fields)
2443 && info->gpr_offset < MAX_ARGS_IN_REGISTERS
2444 && info->fpr_offset < MAX_ARGS_IN_REGISTERS)
2446 info->num_gprs = 1;
2447 info->num_fprs = 1;
2449 if (!SCALAR_FLOAT_TYPE_P (fields[0].type))
2450 std::swap (fregno, gregno);
2452 return riscv_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type),
2453 fields[0].offset,
2454 gregno, TYPE_MODE (fields[1].type),
2455 fields[1].offset);
2459 /* Work out the size of the argument. */
2460 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2461 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2463 /* Doubleword-aligned varargs start on an even register boundary. */
2464 if (!named && num_bytes != 0 && alignment > BITS_PER_WORD)
2465 info->gpr_offset += info->gpr_offset & 1;
2467 /* Partition the argument between registers and stack. */
2468 info->num_fprs = 0;
2469 info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset);
2470 info->stack_p = (num_words - info->num_gprs) != 0;
2472 if (info->num_gprs || return_p)
2473 return gen_rtx_REG (mode, gpr_base + info->gpr_offset);
2475 return NULL_RTX;
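   /* Example of the varargs alignment rule above (illustrative, RV32):
      for f (int x, ...) called with a double as the first anonymous
      argument, X is passed in a0, and the doubleword-aligned double
      skips a1 so that it lands in the even-numbered pair a2/a3.  */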
2478 /* Implement TARGET_FUNCTION_ARG. */
2480 static rtx
2481 riscv_function_arg (cumulative_args_t cum_v, machine_mode mode,
2482 const_tree type, bool named)
2484 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2485 struct riscv_arg_info info;
2487 if (mode == VOIDmode)
2488 return NULL;
2490 return riscv_get_arg_info (&info, cum, mode, type, named, false);
2493 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
2495 static void
2496 riscv_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2497 const_tree type, bool named)
2499 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2500 struct riscv_arg_info info;
2502 riscv_get_arg_info (&info, cum, mode, type, named, false);
2504 /* Advance the register count. This has the effect of setting
2505 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
2506 argument required us to skip the final GPR and pass the whole
2507 argument on the stack. */
2508 cum->num_fprs = info.fpr_offset + info.num_fprs;
2509 cum->num_gprs = info.gpr_offset + info.num_gprs;
2512 /* Implement TARGET_ARG_PARTIAL_BYTES. */
2514 static int
2515 riscv_arg_partial_bytes (cumulative_args_t cum,
2516 machine_mode mode, tree type, bool named)
2518 struct riscv_arg_info arg;
2520 riscv_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
2521 return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
2524 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
2525 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
2526 VALTYPE is null and MODE is the mode of the return value. */
2528 rtx
2529 riscv_function_value (const_tree type, const_tree func, machine_mode mode)
2531 struct riscv_arg_info info;
2532 CUMULATIVE_ARGS args;
2534 if (type)
2536 int unsigned_p = TYPE_UNSIGNED (type);
2538 mode = TYPE_MODE (type);
2540 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
2541 return values, promote the mode here too. */
2542 mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
2545 memset (&args, 0, sizeof args);
2546 return riscv_get_arg_info (&info, &args, mode, type, true, true);
2549 /* Implement TARGET_PASS_BY_REFERENCE. */
2551 static bool
2552 riscv_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
2553 const_tree type, bool named)
2555 HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2556 struct riscv_arg_info info;
2557 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2559 /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
2560 never pass variadic arguments in floating-point registers, so we can
2561 avoid the call to riscv_get_arg_info in this case. */
2562 if (cum != NULL)
2564 /* Don't pass by reference if we can use a floating-point register. */
2565 riscv_get_arg_info (&info, cum, mode, type, named, false);
2566 if (info.num_fprs)
2567 return false;
2570 /* Pass by reference if the data do not fit in two integer registers. */
2571 return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
2574 /* Implement TARGET_RETURN_IN_MEMORY. */
2576 static bool
2577 riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
2579 CUMULATIVE_ARGS args;
2580 cumulative_args_t cum = pack_cumulative_args (&args);
2582 /* The rules for returning in memory are the same as for passing the
2583 first named argument by reference. */
2584 memset (&args, 0, sizeof args);
2585 return riscv_pass_by_reference (cum, TYPE_MODE (type), type, true);
2588 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
2590 static void
2591 riscv_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
2592 tree type, int *pretend_size ATTRIBUTE_UNUSED,
2593 int no_rtl)
2595 CUMULATIVE_ARGS local_cum;
2596 int gp_saved;
2598 /* The caller has advanced CUM up to, but not beyond, the last named
2599 argument. Advance a local copy of CUM past the last "real" named
2600 argument, to find out how many registers are left over. */
2601 local_cum = *get_cumulative_args (cum);
2602 riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
2604 /* Find out how many registers we need to save. */
2605 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
2607 if (!no_rtl && gp_saved > 0)
2609 rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
2610 REG_PARM_STACK_SPACE (cfun->decl)
2611 - gp_saved * UNITS_PER_WORD);
2612 rtx mem = gen_frame_mem (BLKmode, ptr);
2613 set_mem_alias_set (mem, get_varargs_alias_set ());
2615 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
2616 mem, gp_saved);
2618 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
2619 cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
2622 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
2624 static void
2625 riscv_va_start (tree valist, rtx nextarg)
2627 nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
2628 std_expand_builtin_va_start (valist, nextarg);
2631 /* Make ADDR suitable for use as a call or sibcall target. */
2633 rtx
2634 riscv_legitimize_call_address (rtx addr)
2636 if (!call_insn_operand (addr, VOIDmode))
2638 rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
2639 riscv_emit_move (reg, addr);
2640 return reg;
2642 return addr;
2645 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
2646 in context CONTEXT. HI_RELOC indicates a high-part reloc. */
2648 static void
2649 riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
2651 const char *reloc;
2653 switch (riscv_classify_symbolic_expression (op))
2655 case SYMBOL_ABSOLUTE:
2656 reloc = hi_reloc ? "%hi" : "%lo";
2657 break;
2659 case SYMBOL_PCREL:
2660 reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
2661 break;
2663 case SYMBOL_TLS_LE:
2664 reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
2665 break;
2667 default:
2668 gcc_unreachable ();
2671 fprintf (file, "%s(", reloc);
2672 output_addr_const (file, riscv_strip_unspec_address (op));
2673 fputc (')', file);
2676 /* Return true if the .AQ suffix should be added to an AMO to implement the
2677 acquire portion of memory model MODEL. */
2679 static bool
2680 riscv_memmodel_needs_amo_acquire (enum memmodel model)
2682 switch (model)
2684 case MEMMODEL_ACQ_REL:
2685 case MEMMODEL_SEQ_CST:
2686 case MEMMODEL_SYNC_SEQ_CST:
2687 case MEMMODEL_ACQUIRE:
2688 case MEMMODEL_CONSUME:
2689 case MEMMODEL_SYNC_ACQUIRE:
2690 return true;
2692 case MEMMODEL_RELEASE:
2693 case MEMMODEL_SYNC_RELEASE:
2694 case MEMMODEL_RELAXED:
2695 return false;
2697 default:
2698 gcc_unreachable ();
2702 /* Return true if a FENCE should be emitted before a memory access to
2703 implement the release portion of memory model MODEL. */
2705 static bool
2706 riscv_memmodel_needs_release_fence (enum memmodel model)
2708 switch (model)
2710 case MEMMODEL_ACQ_REL:
2711 case MEMMODEL_SEQ_CST:
2712 case MEMMODEL_SYNC_SEQ_CST:
2713 case MEMMODEL_RELEASE:
2714 case MEMMODEL_SYNC_RELEASE:
2715 return true;
2717 case MEMMODEL_ACQUIRE:
2718 case MEMMODEL_CONSUME:
2719 case MEMMODEL_SYNC_ACQUIRE:
2720 case MEMMODEL_RELAXED:
2721 return false;
2723 default:
2724 gcc_unreachable ();
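   /* Combining the two predicates (illustrative): a SEQ_CST atomic add
      needs both the release fence and the acquire suffix, printing as

        fence iorw,ow; amoadd.w.aq a0, a1, (a2)

      whereas a RELAXED operation gets neither.  */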
2728 /* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
2730 'h' Print the high-part relocation associated with OP, after stripping
2731 any outermost HIGH.
2732 'R' Print the low-part relocation associated with OP.
2733 'C' Print the integer branch condition for comparison OP.
2734 'A' Print the atomic operation suffix for memory model OP.
2735 'F' Print a FENCE if the memory model requires a release.
2736 'z' Print x0 if OP is zero, otherwise print OP normally. */
2738 static void
2739 riscv_print_operand (FILE *file, rtx op, int letter)
2741 machine_mode mode = GET_MODE (op);
2742 enum rtx_code code = GET_CODE (op);
2744 switch (letter)
2746 case 'h':
2747 if (code == HIGH)
2748 op = XEXP (op, 0);
2749 riscv_print_operand_reloc (file, op, true);
2750 break;
2752 case 'R':
2753 riscv_print_operand_reloc (file, op, false);
2754 break;
2756 case 'C':
2757 /* The RTL names match the instruction names. */
2758 fputs (GET_RTX_NAME (code), file);
2759 break;
2761 case 'A':
2762 if (riscv_memmodel_needs_amo_acquire ((enum memmodel) INTVAL (op)))
2763 fputs (".aq", file);
2764 break;
2766 case 'F':
2767 if (riscv_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
2768 fputs ("fence iorw,ow; ", file);
2769 break;
2771 default:
2772 switch (code)
2774 case REG:
2775 if (letter && letter != 'z')
2776 output_operand_lossage ("invalid use of '%%%c'", letter);
2777 fprintf (file, "%s", reg_names[REGNO (op)]);
2778 break;
2780 case MEM:
2781 if (letter && letter != 'z')
2782 output_operand_lossage ("invalid use of '%%%c'", letter);
2783 else
2784 output_address (mode, XEXP (op, 0));
2785 break;
2787 default:
2788 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
2789 fputs (reg_names[GP_REG_FIRST], file);
2790 else if (letter && letter != 'z')
2791 output_operand_lossage ("invalid use of '%%%c'", letter);
2792 else
2793 output_addr_const (file, riscv_strip_unspec_address (op));
2794 break;
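   /* For instance (illustrative), the template fragment "%z1" prints
      "zero" when operand 1 is (const_int 0) and the register name
      otherwise, so a single pattern can use x0 as its zero source.  */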
2799 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
2801 static void
2802 riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2804 struct riscv_address_info addr;
2806 if (riscv_classify_address (&addr, x, word_mode, true))
2807 switch (addr.type)
2809 case ADDRESS_REG:
2810 riscv_print_operand (file, addr.offset, 0);
2811 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
2812 return;
2814 case ADDRESS_LO_SUM:
2815 riscv_print_operand_reloc (file, addr.offset, false);
2816 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
2817 return;
2819 case ADDRESS_CONST_INT:
2820 output_addr_const (file, x);
2821 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
2822 return;
2824 case ADDRESS_SYMBOLIC:
2825 output_addr_const (file, riscv_strip_unspec_address (x));
2826 return;
2828 gcc_unreachable ();
2831 static bool
2832 riscv_size_ok_for_small_data_p (int size)
2834 return g_switch_value && IN_RANGE (size, 1, g_switch_value);
2837 /* Return true if EXP should be placed in the small data section. */
2839 static bool
2840 riscv_in_small_data_p (const_tree x)
2842 if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
2843 return false;
2845 if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
2847 const char *sec = DECL_SECTION_NAME (x);
2848 return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
2851 return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
2854 /* Return a section for X, handling small data. */
2856 static section *
2857 riscv_elf_select_rtx_section (machine_mode mode, rtx x,
2858 unsigned HOST_WIDE_INT align)
2860 section *s = default_elf_select_rtx_section (mode, x, align);
2862 if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
2864 if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
2866 /* Rename .rodata.cst* to .srodata.cst*. */
2867 char *name = (char *) alloca (strlen (s->named.name) + 2);
2868 sprintf (name, ".s%s", s->named.name + 1);
2869 return get_section (name, s->named.common.flags, NULL);
2872 if (s == data_section)
2873 return sdata_section;
2876 return s;
2879 /* Make the last instruction frame-related and note that it performs
2880 the operation described by FRAME_PATTERN. */
2882 static void
2883 riscv_set_frame_expr (rtx frame_pattern)
2885 rtx insn;
2887 insn = get_last_insn ();
2888 RTX_FRAME_RELATED_P (insn) = 1;
2889 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2890 frame_pattern,
2891 REG_NOTES (insn));
2894 /* Return a frame-related rtx that stores REG at MEM.
2895 REG must be a single register. */
2897 static rtx
2898 riscv_frame_set (rtx mem, rtx reg)
2900 rtx set = gen_rtx_SET (mem, reg);
2901 RTX_FRAME_RELATED_P (set) = 1;
2902 return set;
2905 /* Return true if the current function must save register REGNO. */
2907 static bool
2908 riscv_save_reg_p (unsigned int regno)
2910 bool call_saved = !global_regs[regno] && !call_used_regs[regno];
2911 bool might_clobber = crtl->saves_all_registers
2912 || df_regs_ever_live_p (regno);
2914 if (call_saved && might_clobber)
2915 return true;
2917 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
2918 return true;
2920 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
2921 return true;
2923 return false;
2926 /* Determine whether to call GPR save/restore routines. */
2927 static bool
2928 riscv_use_save_libcall (const struct riscv_frame_info *frame)
2930 if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
2931 return false;
2933 return frame->save_libcall_adjustment != 0;
2936 /* Determine which GPR save/restore routine to call. */
2938 static unsigned
2939 riscv_save_libcall_count (unsigned mask)
2941 for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
2942 if (BITSET_P (mask, n))
2943 return CALLEE_SAVED_REG_NUMBER (n) + 1;
2944 abort ();
2947 /* Populate the current function's riscv_frame_info structure.
2949 RISC-V stack frames grow downward. High addresses are at the top.
2951 +-------------------------------+
2953 | incoming stack arguments |
2955 +-------------------------------+ <-- incoming stack pointer
2957 | callee-allocated save area |
2958 | for arguments that are |
2959 | split between registers and |
2960 | the stack |
2962 +-------------------------------+ <-- arg_pointer_rtx
2964 | callee-allocated save area |
2965 | for register varargs |
2967 +-------------------------------+ <-- hard_frame_pointer_rtx;
2968 | | stack_pointer_rtx + gp_sp_offset
2969 | GPR save area | + UNITS_PER_WORD
2971 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
2972 | | + UNITS_PER_HWVALUE
2973 | FPR save area |
2975 +-------------------------------+ <-- frame_pointer_rtx (virtual)
2977 | local variables |
2979 P +-------------------------------+
2981 | outgoing stack arguments |
2983 +-------------------------------+ <-- stack_pointer_rtx
2985 Dynamic stack allocations such as alloca insert data at point P.
2986 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
2987 hard_frame_pointer_rtx unchanged. */
2989 static void
2990 riscv_compute_frame_info (void)
2992 struct riscv_frame_info *frame;
2993 HOST_WIDE_INT offset;
2994 unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
2996 frame = &cfun->machine->frame;
2997 memset (frame, 0, sizeof (*frame));
2999 /* Find out which GPRs we need to save. */
3000 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3001 if (riscv_save_reg_p (regno))
3002 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3004 /* If this function calls eh_return, we must also save and restore the
3005 EH data registers. */
3006 if (crtl->calls_eh_return)
3007 for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
3008 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3010 /* Find out which FPRs we need to save. This loop must iterate over
3011 the same space as its companion in riscv_for_each_saved_reg. */
3012 if (TARGET_HARD_FLOAT)
3013 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3014 if (riscv_save_reg_p (regno))
3015 frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
3017 /* At the bottom of the frame are any outgoing stack arguments. */
3018 offset = crtl->outgoing_args_size;
3019 /* Next are local stack variables. */
3020 offset += RISCV_STACK_ALIGN (get_frame_size ());
3021 /* The virtual frame pointer points above the local variables. */
3022 frame->frame_pointer_offset = offset;
3023 /* Next are the callee-saved FPRs. */
3024 if (frame->fmask)
3025 offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
3026 frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
3027 /* Next are the callee-saved GPRs. */
3028 if (frame->mask)
3030 unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
3031 unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
3033 /* Only use save/restore routines if they don't alter the stack size. */
3034 if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
3035 frame->save_libcall_adjustment = x_save_size;
3037 offset += x_save_size;
3039 frame->gp_sp_offset = offset - UNITS_PER_WORD;
3040 /* The hard frame pointer points above the callee-saved GPRs. */
3041 frame->hard_frame_pointer_offset = offset;
3042 /* Above the hard frame pointer is the callee-allocated varargs save area. */
3043 offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
3044 frame->arg_pointer_offset = offset;
3045 /* Next is the callee-allocated area for pretend stack arguments. */
3046 offset += crtl->args.pretend_args_size;
3047 frame->total_size = offset;
3048 /* Above that are the incoming stack pointer and any incoming arguments. */
3050 /* Only use save/restore routines when the GPRs are atop the frame. */
3051 if (frame->hard_frame_pointer_offset != frame->total_size)
3052 frame->save_libcall_adjustment = 0;
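   /* Worked example (illustrative, RV32, saving ra and s0, 16 bytes of
      locals, no outgoing args or varargs): frame_pointer_offset is 16;
      the two GPR slots round up to a 16-byte save area, making
      total_size and hard_frame_pointer_offset 32, gp_sp_offset 28
      (ra at sp+28, s0 at sp+24) and arg_pointer_offset 32.  */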
3055 /* Make sure that we're not trying to eliminate to the wrong hard frame
3056 pointer. */
3058 static bool
3059 riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
3061 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
3064 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
3065 or argument pointer. TO is either the stack pointer or hard frame
3066 pointer. */
3068 HOST_WIDE_INT
3069 riscv_initial_elimination_offset (int from, int to)
3071 HOST_WIDE_INT src, dest;
3073 riscv_compute_frame_info ();
3075 if (to == HARD_FRAME_POINTER_REGNUM)
3076 dest = cfun->machine->frame.hard_frame_pointer_offset;
3077 else if (to == STACK_POINTER_REGNUM)
3078 dest = 0; /* The stack pointer is the base of all offsets, hence 0. */
3079 else
3080 gcc_unreachable ();
3082 if (from == FRAME_POINTER_REGNUM)
3083 src = cfun->machine->frame.frame_pointer_offset;
3084 else if (from == ARG_POINTER_REGNUM)
3085 src = cfun->machine->frame.arg_pointer_offset;
3086 else
3087 gcc_unreachable ();
3089 return src - dest;
3092 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
3093 previous frame. */
3095 rtx
3096 riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
3098 if (count != 0)
3099 return const0_rtx;
3101 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
3104 /* Emit code to change the current function's return address to
3105 ADDRESS. SCRATCH is available as a scratch register, if needed.
3106 ADDRESS and SCRATCH are both word-mode GPRs. */
3108 void
3109 riscv_set_return_address (rtx address, rtx scratch)
3111 rtx slot_address;
3113 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
3114 slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
3115 cfun->machine->frame.gp_sp_offset);
3116 riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
3119 /* A function to save or store a register. The first argument is the
3120 register and the second is the stack slot. */
3121 typedef void (*riscv_save_restore_fn) (rtx, rtx);
3123 /* Use FN to save or restore register REGNO. MODE is the register's
3124 mode and OFFSET is the offset of its save slot from the current
3125 stack pointer. */
3127 static void
3128 riscv_save_restore_reg (machine_mode mode, int regno,
3129 HOST_WIDE_INT offset, riscv_save_restore_fn fn)
3131 rtx mem;
3133 mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
3134 fn (gen_rtx_REG (mode, regno), mem);
3137 /* Call FN for each register that is saved by the current function.
3138 SP_OFFSET is the offset of the current stack pointer from the start
3139 of the frame. */
3141 static void
3142 riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
3144 HOST_WIDE_INT offset;
3146 /* Save the link register and s-registers. */
3147 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
3148 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
3149 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3151 riscv_save_restore_reg (word_mode, regno, offset, fn);
3152 offset -= UNITS_PER_WORD;
3155 /* This loop must iterate over the same space as its companion in
3156 riscv_compute_frame_info. */
3157 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
3158 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3159 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
3161 machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
3163 riscv_save_restore_reg (mode, regno, offset, fn);
3164 offset -= GET_MODE_SIZE (mode);
3168 /* Save register REG to MEM. Make the instruction frame-related. */
3170 static void
3171 riscv_save_reg (rtx reg, rtx mem)
3173 riscv_emit_move (mem, reg);
3174 riscv_set_frame_expr (riscv_frame_set (mem, reg));
3177 /* Restore register REG from MEM. */
3179 static void
3180 riscv_restore_reg (rtx reg, rtx mem)
3182 rtx insn = riscv_emit_move (reg, mem);
3183 rtx dwarf = NULL_RTX;
3184 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3185 REG_NOTES (insn) = dwarf;
3187 RTX_FRAME_RELATED_P (insn) = 1;
3190 /* Return the code to invoke the GPR save routine. */
3192 const char *
3193 riscv_output_gpr_save (unsigned mask)
3195 static char s[32];
3196 unsigned n = riscv_save_libcall_count (mask);
3198 ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__riscv_save_%u", n);
3199 gcc_assert ((size_t) bytes < sizeof (s));
3201 return s;
3204 /* For stack frames that can't be allocated with a single ADDI instruction,
3205 compute the best value to initially allocate. It must at a minimum
3206 allocate enough space to spill the callee-saved registers. */
3208 static HOST_WIDE_INT
3209 riscv_first_stack_step (struct riscv_frame_info *frame)
3211 HOST_WIDE_INT min_first_step = frame->total_size - frame->fp_sp_offset;
3212 HOST_WIDE_INT max_first_step = IMM_REACH / 2 - STACK_BOUNDARY / 8;
3214 if (SMALL_OPERAND (frame->total_size))
3215 return frame->total_size;
3217 /* As an optimization, use the least-significant bits of the total frame
3218 size, so that the second adjustment step is just LUI + ADD. */
3219 if (!SMALL_OPERAND (frame->total_size - max_first_step)
3220 && frame->total_size % IMM_REACH < IMM_REACH / 2
3221 && frame->total_size % IMM_REACH >= min_first_step)
3222 return frame->total_size % IMM_REACH;
3224 gcc_assert (min_first_step <= max_first_step);
3225 return max_first_step;
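   /* Worked example (illustrative): for total_size == 0x12340, the
      first step is 0x340, leaving 0x12000 for the second adjustment,
      which needs only LUI+ADD of the prologue temporary.  Using the
      maximum first step of 2032 would instead leave 0x11B50, requiring
      LUI+ADDI+ADD.  This assumes the callee-save area fits within the
      0x340-byte first step.  */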
3228 static rtx
3229 riscv_adjust_libcall_cfi_prologue ()
3231 rtx dwarf = NULL_RTX;
3232 rtx adjust_sp_rtx, reg, mem, insn;
3233 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3234 int offset;
3236 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
3237 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3239 /* The save order is ra, s0, s1, s2 to s11. */
3240 if (regno == RETURN_ADDR_REGNUM)
3241 offset = saved_size - UNITS_PER_WORD;
3242 else if (regno == S0_REGNUM)
3243 offset = saved_size - UNITS_PER_WORD * 2;
3244 else if (regno == S1_REGNUM)
3245 offset = saved_size - UNITS_PER_WORD * 3;
3246 else
3247 offset = saved_size - ((regno - S2_REGNUM + 4) * UNITS_PER_WORD);
3249 reg = gen_rtx_REG (SImode, regno);
3250 mem = gen_frame_mem (SImode, plus_constant (Pmode,
3251 stack_pointer_rtx,
3252 offset));
3254 insn = gen_rtx_SET (mem, reg);
3255 dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
3258 /* Debug info for adjust sp. */
3259 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3260 stack_pointer_rtx, GEN_INT (-saved_size));
3261 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3262 dwarf);
3263 return dwarf;
3266 static void
3267 riscv_emit_stack_tie (void)
3269 if (Pmode == SImode)
3270 emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx));
3271 else
3272 emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx));
3275 /* Expand the "prologue" pattern. */
3277 void
3278 riscv_expand_prologue (void)
3280 struct riscv_frame_info *frame = &cfun->machine->frame;
3281 HOST_WIDE_INT size = frame->total_size;
3282 unsigned mask = frame->mask;
3283 rtx insn;
3285 if (flag_stack_usage_info)
3286 current_function_static_stack_size = size;
3288 /* When optimizing for size, call a subroutine to save the registers. */
3289 if (riscv_use_save_libcall (frame))
3291 rtx dwarf = NULL_RTX;
3292 dwarf = riscv_adjust_libcall_cfi_prologue ();
3294 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3295 size -= frame->save_libcall_adjustment;
3296 insn = emit_insn (gen_gpr_save (GEN_INT (mask)));
3298 RTX_FRAME_RELATED_P (insn) = 1;
3299 REG_NOTES (insn) = dwarf;
3302 /* Save the registers. */
3303 if ((frame->mask | frame->fmask) != 0)
3305 HOST_WIDE_INT step1 = MIN (size, riscv_first_stack_step (frame));
3307 insn = gen_add3_insn (stack_pointer_rtx,
3308 stack_pointer_rtx,
3309 GEN_INT (-step1));
3310 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3311 size -= step1;
3312 riscv_for_each_saved_reg (size, riscv_save_reg);
3315 frame->mask = mask; /* Undo the above fib. */
3317 /* Set up the frame pointer, if we're using one. */
3318 if (frame_pointer_needed)
3320 insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
3321 GEN_INT (frame->hard_frame_pointer_offset - size));
3322 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3324 riscv_emit_stack_tie ();
3327 /* Allocate the rest of the frame. */
3328 if (size > 0)
3330 if (SMALL_OPERAND (-size))
3332 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3333 GEN_INT (-size));
3334 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3336 else
3338 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
3339 emit_insn (gen_add3_insn (stack_pointer_rtx,
3340 stack_pointer_rtx,
3341 RISCV_PROLOGUE_TEMP (Pmode)));
3343 /* Describe the effect of the previous instructions. */
3344 insn = plus_constant (Pmode, stack_pointer_rtx, -size);
3345 insn = gen_rtx_SET (stack_pointer_rtx, insn);
3346 riscv_set_frame_expr (insn);
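   /* Illustrative output for a small RV32 frame (ra and s0 saved, frame
      pointer in use, matching the worked example above):

        addi  sp, sp, -32
        sw    ra, 28(sp)
        sw    s0, 24(sp)
        addi  s0, sp, 32

      Frames too large for one ADDI are split via riscv_first_stack_step.  */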
3351 static rtx
3352 riscv_adjust_libcall_cfi_epilogue ()
3354 rtx dwarf = NULL_RTX;
3355 rtx adjust_sp_rtx, reg;
3356 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3358 /* Debug info for adjust sp. */
3359 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3360 stack_pointer_rtx, GEN_INT (saved_size));
3361 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3362 dwarf);
3364 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
3365 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3367 reg = gen_rtx_REG (SImode, regno);
3368 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3371 return dwarf;
3374 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
3375 says which. */
3377 void
3378 riscv_expand_epilogue (bool sibcall_p)
3380 /* Split the frame into two. STEP1 is the amount of stack we should
3381 deallocate before restoring the registers. STEP2 is the amount we
3382 should deallocate afterwards.
3384 Start off by assuming that no registers need to be restored. */
3385 struct riscv_frame_info *frame = &cfun->machine->frame;
3386 unsigned mask = frame->mask;
3387 HOST_WIDE_INT step1 = frame->total_size;
3388 HOST_WIDE_INT step2 = 0;
3389 bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
3390 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
3391 rtx insn;
3393 /* We need a memory barrier to prevent reads from the deallocated stack. */
3394 bool need_barrier_p = (get_frame_size ()
3395 + cfun->machine->frame.arg_pointer_offset) != 0;
3397 if (!sibcall_p && riscv_can_use_return_insn ())
3399 emit_jump_insn (gen_return ());
3400 return;
3403 /* Move past any dynamic stack allocations. */
3404 if (cfun->calls_alloca)
3406 /* Emit a barrier to prevent loads from a deallocated stack. */
3407 riscv_emit_stack_tie ();
3408 need_barrier_p = false;
3410 rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
3411 if (!SMALL_OPERAND (INTVAL (adjust)))
3413 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3414 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3417 insn = emit_insn (
3418 gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
3419 adjust));
3421 rtx dwarf = NULL_RTX;
3422 rtx cfa_adjust_value = gen_rtx_PLUS (
3423 Pmode, hard_frame_pointer_rtx,
3424 GEN_INT (-frame->hard_frame_pointer_offset));
3425 rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value);
3426 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf);
3427 RTX_FRAME_RELATED_P (insn) = 1;
3429 REG_NOTES (insn) = dwarf;
3432 /* If we need to restore registers, deallocate as much stack as
3433 possible in the second step without going out of range. */
3434 if ((frame->mask | frame->fmask) != 0)
3436 step2 = riscv_first_stack_step (frame);
3437 step1 -= step2;
3440 /* Set TARGET to BASE + STEP1. */
3441 if (step1 > 0)
3443 /* Emit a barrier to prevent loads from a deallocated stack. */
3444 riscv_emit_stack_tie ();
3445 need_barrier_p = false;
3447 /* Get an rtx for STEP1 that we can add to BASE. */
3448 rtx adjust = GEN_INT (step1);
3449 if (!SMALL_OPERAND (step1))
3451 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3452 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3455 insn = emit_insn (
3456 gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
3458 rtx dwarf = NULL_RTX;
3459 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3460 GEN_INT (step2));
3462 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3463 RTX_FRAME_RELATED_P (insn) = 1;
3465 REG_NOTES (insn) = dwarf;
3468 if (use_restore_libcall)
3469 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3471 /* Restore the registers. */
3472 riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
3474 if (use_restore_libcall)
3476 frame->mask = mask; /* Undo the above fib. */
3477 gcc_assert (step2 >= frame->save_libcall_adjustment);
3478 step2 -= frame->save_libcall_adjustment;
3481 if (need_barrier_p)
3482 riscv_emit_stack_tie ();
3484 /* Deallocate the final bit of the frame. */
3485 if (step2 > 0)
3487 insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3488 GEN_INT (step2)));
3490 rtx dwarf = NULL_RTX;
3491 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3492 const0_rtx);
3493 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3494 RTX_FRAME_RELATED_P (insn) = 1;
3496 REG_NOTES (insn) = dwarf;
3499 if (use_restore_libcall)
3501 rtx dwarf = riscv_adjust_libcall_cfi_epilogue ();
3502 insn = emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
3503 RTX_FRAME_RELATED_P (insn) = 1;
3504 REG_NOTES (insn) = dwarf;
3506 emit_jump_insn (gen_gpr_restore_return (ra));
3507 return;
3510 /* Add in the __builtin_eh_return stack adjustment. */
3511 if (crtl->calls_eh_return)
3512 emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3513 EH_RETURN_STACKADJ_RTX));
3515 if (!sibcall_p)
3516 emit_jump_insn (gen_simple_return_internal (ra));
3519 /* Return nonzero if this function is known to have a null epilogue.
3520 This allows the optimizer to omit jumps to jumps if no stack
3521 was created. */
3523 bool
3524 riscv_can_use_return_insn (void)
3526 return reload_completed && cfun->machine->frame.total_size == 0;
3529 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
3531 When floating-point registers are wider than integer ones, moves between
3532 them must go through memory. */
3534 static bool
3535 riscv_secondary_memory_needed (machine_mode mode, reg_class_t class1,
3536 reg_class_t class2)
3538 return (GET_MODE_SIZE (mode) > UNITS_PER_WORD
3539 && (class1 == FP_REGS) != (class2 == FP_REGS));
3542 /* Implement TARGET_REGISTER_MOVE_COST. */
3544 static int
3545 riscv_register_move_cost (machine_mode mode,
3546 reg_class_t from, reg_class_t to)
3548 return riscv_secondary_memory_needed (mode, from, to) ? 8 : 2;
3551 /* Implement TARGET_HARD_REGNO_NREGS. */
3553 static unsigned int
3554 riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
3556 if (FP_REG_P (regno))
3557 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
3559 /* All other registers are word-sized. */
3560 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
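   /* For example (illustrative, RV32 with the D extension): DFmode
      occupies a single 8-byte FPR but two 4-byte GPRs.  */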
3563 /* Implement TARGET_HARD_REGNO_MODE_OK. */
3565 static bool
3566 riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3568 unsigned int nregs = riscv_hard_regno_nregs (regno, mode);
3570 if (GP_REG_P (regno))
3572 if (!GP_REG_P (regno + nregs - 1))
3573 return false;
3575 else if (FP_REG_P (regno))
3577 if (!FP_REG_P (regno + nregs - 1))
3578 return false;
3580 if (GET_MODE_CLASS (mode) != MODE_FLOAT
3581 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
3582 return false;
3584 /* Only use callee-saved registers if a potential callee is guaranteed
3585 to spill the requisite width. */
3586 if (GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_REG
3587 || (!call_used_regs[regno]
3588 && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
3589 return false;
3591 else
3592 return false;
3594 /* Require same callee-savedness for all registers. */
3595 for (unsigned i = 1; i < nregs; i++)
3596 if (call_used_regs[regno] != call_used_regs[regno + i])
3597 return false;
3599 return true;
3602 /* Implement TARGET_MODES_TIEABLE_P.
3604 Don't allow floating-point modes to be tied, since type punning of
3605 single-precision and double-precision is implementation defined. */
3607 static bool
3608 riscv_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3610 return (mode1 == mode2
3611 || !(GET_MODE_CLASS (mode1) == MODE_FLOAT
3612 && GET_MODE_CLASS (mode2) == MODE_FLOAT));
3615 /* Implement CLASS_MAX_NREGS. */
3617 static unsigned char
3618 riscv_class_max_nregs (reg_class_t rclass, machine_mode mode)
3620 if (reg_class_subset_p (FP_REGS, rclass))
3621 return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
3623 if (reg_class_subset_p (GR_REGS, rclass))
3624 return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
3626 return 0;
3629 /* Implement TARGET_MEMORY_MOVE_COST. */
3631 static int
3632 riscv_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
3634 return (tune_info->memory_cost
3635 + memory_move_secondary_cost (mode, rclass, in));
3638 /* Return the number of instructions that can be issued per cycle. */
3640 static int
3641 riscv_issue_rate (void)
3643 return tune_info->issue_rate;
3646 /* Implement TARGET_ASM_FILE_START. */
3648 static void
3649 riscv_file_start (void)
3651 default_file_start ();
3653 /* Instruct GAS to generate position-[in]dependent code. */
3654 fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
3657 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
3658 in order to avoid duplicating too much logic from elsewhere. */
3660 static void
3661 riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
3662 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
3663 tree function)
3665 rtx this_rtx, temp1, temp2, fnaddr;
3666 rtx_insn *insn;
3668 /* Pretend to be a post-reload pass while generating rtl. */
3669 reload_completed = 1;
3671 /* Mark the end of the (empty) prologue. */
3672 emit_note (NOTE_INSN_PROLOGUE_END);
3674 /* Determine if we can use a sibcall to call FUNCTION directly. */
3675 fnaddr = gen_rtx_MEM (FUNCTION_MODE, XEXP (DECL_RTL (function), 0));
3677 /* We need two temporary registers in some cases. */
3678 temp1 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
3679 temp2 = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
3681 /* Find out which register contains the "this" pointer. */
3682 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
3683 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
3684 else
3685 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
3687 /* Add DELTA to THIS_RTX. */
3688 if (delta != 0)
3690 rtx offset = GEN_INT (delta);
3691 if (!SMALL_OPERAND (delta))
3693 riscv_emit_move (temp1, offset);
3694 offset = temp1;
3696 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
3699 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
3700 if (vcall_offset != 0)
3702 rtx addr;
3704 /* Set TEMP1 to *THIS_RTX. */
3705 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
3707 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
3708 addr = riscv_add_offset (temp2, temp1, vcall_offset);
3710 /* Load the offset and add it to THIS_RTX. */
3711 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
3712 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
3715 /* Jump to the target function. */
3716 insn = emit_call_insn (gen_sibcall (fnaddr, const0_rtx, NULL, const0_rtx));
3717 SIBLING_CALL_P (insn) = 1;
3719 /* Run just enough of rest_of_compilation. This sequence was
3720 "borrowed" from alpha.c. */
3721 insn = get_insns ();
3722 split_all_insns_noflow ();
3723 shorten_branches (insn);
3724 final_start_function (insn, file, 1);
3725 final (insn, file, 1);
3726 final_end_function ();
3728 /* Clean up the vars set above. Note that final_end_function resets
3729 the global pointer for us. */
3730 reload_completed = 0;
3733 /* Allocate a chunk of memory for per-function machine-dependent data. */
3735 static struct machine_function *
3736 riscv_init_machine_status (void)
3738 return ggc_cleared_alloc<machine_function> ();
3741 /* Implement TARGET_OPTION_OVERRIDE. */
3743 static void
3744 riscv_option_override (void)
3746 const struct riscv_cpu_info *cpu;
3748 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3749 SUBTARGET_OVERRIDE_OPTIONS;
3750 #endif
3752 flag_pcc_struct_return = 0;
3754 if (flag_pic)
3755 g_switch_value = 0;
3757 /* The presence of the M extension implies that division instructions
3758 are present, so include them unless explicitly disabled. */
3759 if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
3760 target_flags |= MASK_DIV;
3761 else if (!TARGET_MUL && TARGET_DIV)
3762 error ("-mdiv requires -march to subsume the %<M%> extension");
3764 /* Likewise floating-point division and square root. */
3765 if (TARGET_HARD_FLOAT && (target_flags_explicit & MASK_FDIV) == 0)
3766 target_flags |= MASK_FDIV;
3768 /* Handle -mtune. */
3769 cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
3770 RISCV_TUNE_STRING_DEFAULT);
3771 tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
3773 /* Use -mtune's setting for slow_unaligned_access, even when optimizing
3774 for size. For architectures that trap and emulate unaligned accesses,
3775 the performance cost is too great, even for -Os. */
3776 riscv_slow_unaligned_access_p = (cpu->tune_info->slow_unaligned_access
3777 || TARGET_STRICT_ALIGN);
3779 /* If the user hasn't specified a branch cost, use the processor's
3780 default. */
3781 if (riscv_branch_cost == 0)
3782 riscv_branch_cost = tune_info->branch_cost;
3784 /* Function to allocate machine-dependent function status. */
3785 init_machine_status = &riscv_init_machine_status;
3787 if (flag_pic)
3788 riscv_cmodel = CM_PIC;
3790 /* We get better code with explicit relocs for CM_MEDLOW, but
3791 worse code for the others (for now). Pick the best default. */
3792 if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
3793 if (riscv_cmodel == CM_MEDLOW)
3794 target_flags |= MASK_EXPLICIT_RELOCS;
3796 /* Require that the ISA supports the requested floating-point ABI. */
3797 if (UNITS_PER_FP_ARG > (TARGET_HARD_FLOAT ? UNITS_PER_FP_REG : 0))
3798 error ("requested ABI requires -march to subsume the %qc extension",
3799 UNITS_PER_FP_ARG > 8 ? 'Q' : (UNITS_PER_FP_ARG > 4 ? 'D' : 'F'));
3801 /* We do not yet support ILP32 on RV64. */
3802 if (BITS_PER_WORD != POINTER_SIZE)
3803 error ("ABI requires -march=rv%d", POINTER_SIZE);
3806 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
3808 static void
3809 riscv_conditional_register_usage (void)
3811 if (!TARGET_HARD_FLOAT)
3813 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3814 fixed_regs[regno] = call_used_regs[regno] = 1;
3818 /* Return a register priority for hard reg REGNO. */
3820 static int
3821 riscv_register_priority (int regno)
3823 /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
3824 if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
3825 || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
3826 return 1;
3828 return 0;
3831 /* Implement TARGET_TRAMPOLINE_INIT. */
3833 static void
3834 riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3836 rtx addr, end_addr, mem;
3837 uint32_t trampoline[4];
3838 unsigned int i;
3839 HOST_WIDE_INT static_chain_offset, target_function_offset;
3841 /* Work out the offsets of the pointers from the start of the
3842 trampoline code. */
3843 gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
3845 /* Get pointers to the beginning and end of the code block. */
3846 addr = force_reg (Pmode, XEXP (m_tramp, 0));
3847 end_addr = riscv_force_binary (Pmode, PLUS, addr,
3848 GEN_INT (TRAMPOLINE_CODE_SIZE));
3851 if (Pmode == SImode)
3853 chain_value = force_reg (Pmode, chain_value);
3855 rtx target_function = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
3856 /* lui t2, hi(chain)
3857 lui t1, hi(func)
3858 addi t2, t2, lo(chain)
3859 jr t1, lo(func)
3861 unsigned HOST_WIDE_INT lui_hi_chain_code, lui_hi_func_code;
3862 unsigned HOST_WIDE_INT lo_chain_code, lo_func_code;
3864 rtx uimm_mask = force_reg (SImode, gen_int_mode (-IMM_REACH, SImode));
3866 /* 0xfff. */
3867 rtx imm12_mask = gen_reg_rtx (SImode);
3868 emit_insn (gen_one_cmplsi2 (imm12_mask, uimm_mask));
3870 rtx fixup_value = force_reg (SImode, gen_int_mode (IMM_REACH/2, SImode));
3872 /* Gen lui t2, hi(chain). */
3873 rtx hi_chain = riscv_force_binary (SImode, PLUS, chain_value,
3874 fixup_value);
3875 hi_chain = riscv_force_binary (SImode, AND, hi_chain,
3876 uimm_mask);
3877 lui_hi_chain_code = OPCODE_LUI | (STATIC_CHAIN_REGNUM << SHIFT_RD);
3878 rtx lui_hi_chain = riscv_force_binary (SImode, IOR, hi_chain,
3879 gen_int_mode (lui_hi_chain_code, SImode));
3881 mem = adjust_address (m_tramp, SImode, 0);
3882 riscv_emit_move (mem, lui_hi_chain);
3884 /* Gen lui t1, hi(func). */
3885 rtx hi_func = riscv_force_binary (SImode, PLUS, target_function,
3886 fixup_value);
3887 hi_func = riscv_force_binary (SImode, AND, hi_func,
3888 uimm_mask);
3889 lui_hi_func_code = OPCODE_LUI | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD);
3890 rtx lui_hi_func = riscv_force_binary (SImode, IOR, hi_func,
3891 gen_int_mode (lui_hi_func_code, SImode));
3893 mem = adjust_address (m_tramp, SImode, 1 * GET_MODE_SIZE (SImode));
3894 riscv_emit_move (mem, lui_hi_func);
3896 /* Gen addi t2, t2, lo(chain). */
3897 rtx lo_chain = riscv_force_binary (SImode, AND, chain_value,
3898 imm12_mask);
3899 lo_chain = riscv_force_binary (SImode, ASHIFT, lo_chain, GEN_INT (20));
3901 lo_chain_code = OPCODE_ADDI
3902 | (STATIC_CHAIN_REGNUM << SHIFT_RD)
3903 | (STATIC_CHAIN_REGNUM << SHIFT_RS1);
3905 rtx addi_lo_chain = riscv_force_binary (SImode, IOR, lo_chain,
3906 force_reg (SImode, GEN_INT (lo_chain_code)));
3908 mem = adjust_address (m_tramp, SImode, 2 * GET_MODE_SIZE (SImode));
3909 riscv_emit_move (mem, addi_lo_chain);
3911 /* Gen jr r1, lo(func). */
3912 rtx lo_func = riscv_force_binary (SImode, AND, target_function,
3913 imm12_mask);
3914 lo_func = riscv_force_binary (SImode, ASHIFT, lo_func, GEN_INT (20));
3916 lo_func_code = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
3918 rtx jr_lo_func = riscv_force_binary (SImode, IOR, lo_func,
3919 force_reg (SImode, GEN_INT (lo_func_code)));
3921 mem = adjust_address (m_tramp, SImode, 3 * GET_MODE_SIZE (SImode));
3922 riscv_emit_move (mem, jr_lo_func);
3924 else
      static_chain_offset = TRAMPOLINE_CODE_SIZE;
      target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);

      /* auipc   t2, 0
         l[wd]   t1, target_function_offset(t2)
         l[wd]   t2, static_chain_offset(t2)
         jr      t1
      */
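
      /* Resulting layout, assuming the LP64 case (GET_MODE_SIZE (ptr_mode)
         == 8, so static_chain_offset == 16, target_function_offset == 24):

           bytes  0..15   the four instructions above
           bytes 16..23   static chain value
           bytes 24..31   target function address

         The auipc captures the trampoline's own address in t2, letting
         the two loads reach both pointers with 12-bit displacements.  */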
      trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
      trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
                      | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
                      | (target_function_offset << SHIFT_IMM);
      trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RD)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
                      | (static_chain_offset << SHIFT_IMM);
      trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
      /* Copy the trampoline code.  */
      for (i = 0; i < ARRAY_SIZE (trampoline); i++)
        {
          mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
          riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
        }
      /* Set up the static chain pointer field.  */
      mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
      riscv_emit_move (mem, chain_value);

      /* Set up the target function field.  */
      mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
      riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
    }

  /* Flush the code part of the trampoline.  */
  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
  emit_insn (gen_clear_cache (addr, end_addr));
}
/* Return leaf_function_p () and memoize the result.  */

static bool
riscv_leaf_function_p (void)
{
  /* is_leaf is a tri-state cache: 0 means not yet computed,
     1 means leaf, -1 means non-leaf.  */
  if (cfun->machine->is_leaf == 0)
    cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;

  return cfun->machine->is_leaf > 0;
}
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */

static bool
riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
                               tree exp ATTRIBUTE_UNUSED)
{
  /* When optimizing for size, don't use sibcalls in non-leaf routines.  */
  if (TARGET_SAVE_RESTORE)
    return riscv_leaf_function_p ();

  return true;
}
/* Implement TARGET_CANNOT_COPY_INSN_P.  */

static bool
riscv_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
}

/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  */

static bool
riscv_slow_unaligned_access (machine_mode, unsigned int)
{
  return riscv_slow_unaligned_access_p;
}

/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
riscv_can_change_mode_class (machine_mode, machine_mode, reg_class_t rclass)
{
  return !reg_classes_intersect_p (FP_REGS, rclass);
}

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
riscv_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
    return MAX (align, BITS_PER_WORD);
  return align;
}
/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE riscv_option_override

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS riscv_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST riscv_address_cost

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START riscv_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND riscv_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG riscv_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary

/* The generic ELF target does not always have TLS support.  */
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE riscv_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section

#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)

#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)

#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY riscv_register_priority

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS riscv_init_builtins

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL riscv_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN riscv_expand_builtin

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS riscv_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK riscv_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p

#undef TARGET_SLOW_UNALIGNED_ACCESS
#define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access

#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED riscv_secondary_memory_needed

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS riscv_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT riscv_constant_alignment

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-riscv.h"