RISC-V: Fix -msave-restore bug with sibcalls.
[official-gcc.git] / gcc / config / riscv / riscv.c
/* Subroutines used for code generation for RISC-V.
   Copyright (C) 2011-2018 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "alias.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
#include "expr.h"
#include "optabs.h"
#include "bitmap.h"
#include "df.h"
#include "diagnostic.h"
#include "builtins.h"
#include "predict.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 12-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
/* Information about a function's frame layout.  */
struct GTY(()) riscv_frame_info {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
  unsigned save_libcall_adjustment;

  /* Offsets of fixed-point and floating-point save areas from frame bottom.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* Offset of virtual frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT frame_pointer_offset;

  /* Offset of hard frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT hard_frame_pointer_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;
};
struct GTY(()) machine_function {
  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by riscv_compute_frame_info.  */
  struct riscv_frame_info frame;
};
/* Information about a single argument.  */
struct riscv_arg_info {
  /* True if the argument is at least partially passed on the stack.  */
  bool stack_p;

  /* The number of integer registers allocated to this argument.  */
  unsigned int num_gprs;

  /* The offset of the first register used, provided num_gprs is nonzero.
     If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS.  */
  unsigned int gpr_offset;

  /* The number of floating-point registers allocated to this argument.  */
  unsigned int num_fprs;

  /* The offset of the first register used, provided num_fprs is nonzero.  */
  unsigned int fpr_offset;
};
/* Information about an address described by riscv_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info {
  enum riscv_address_type type;
  rtx reg;
  rtx offset;
  enum riscv_symbol_type symbol_type;
};
/* One stage in a constant building sequence.  These sequences have
   the form:

	A = VALUE[0]
	A = A CODE[1] VALUE[1]
	A = A CODE[2] VALUE[2]
	...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct riscv_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};
/* The largest number of operations needed to load an integer constant.
   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI.  */
#define RISCV_MAX_INTEGER_OPS 8
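/* A sketch of why eight operations suffice on RV64: LUI and one ADDI can
   place the top 32 bits, and each subsequent SLLI/ADDI pair shifts the
   partial result left and adds the next chunk of low-order bits, so three
   more pairs cover the remaining bits of an arbitrary 64-bit constant.  */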
/* Costs of various operations on the different architectures.  */

struct riscv_tune_info
{
  unsigned short fp_add[2];
  unsigned short fp_mul[2];
  unsigned short fp_div[2];
  unsigned short int_mul[2];
  unsigned short int_div[2];
  unsigned short issue_rate;
  unsigned short branch_cost;
  unsigned short memory_cost;
  bool slow_unaligned_access;
};
/* Information about one CPU we know about.  */
struct riscv_cpu_info {
  /* This CPU's canonical name.  */
  const char *name;

  /* Tuning parameters for this CPU.  */
  const struct riscv_tune_info *tune_info;
};
/* Global variables for machine-dependent things.  */

/* Whether unaligned accesses execute very slowly.  */
bool riscv_slow_unaligned_access_p;

/* Which tuning parameters to use.  */
static const struct riscv_tune_info *tune_info;
/* Index R is the smallest register class that contains register R.  */
const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  GR_REGS,      GR_REGS,      GR_REGS,      GR_REGS,
  GR_REGS,      GR_REGS,      SIBCALL_REGS, SIBCALL_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  JALR_REGS,    JALR_REGS,    JALR_REGS,    JALR_REGS,
  SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FRAME_REGS,   FRAME_REGS,
};
/* Costs to use when optimizing for rocket.  */
static const struct riscv_tune_info rocket_tune_info = {
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_add */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_mul */
  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},	/* fp_div */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},	/* int_mul */
  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},	/* int_div */
  1,						/* issue_rate */
  3,						/* branch_cost */
  5,						/* memory_cost */
  true,						/* slow_unaligned_access */
};
/* Costs to use when optimizing for size.  */
static const struct riscv_tune_info optimize_size_tune_info = {
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_add */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_div */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_div */
  1,						/* issue_rate */
  1,						/* branch_cost */
  2,						/* memory_cost */
  false,					/* slow_unaligned_access */
};
/* A table describing all the processors GCC knows about.  */
static const struct riscv_cpu_info riscv_cpu_info_table[] = {
  { "rocket", &rocket_tune_info },
  { "size", &optimize_size_tune_info },
};
/* Return the riscv_cpu_info entry for the given name string.  */

static const struct riscv_cpu_info *
riscv_parse_cpu (const char *cpu_string)
{
  for (unsigned i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
    if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
      return riscv_cpu_info_table + i;

  error ("unknown cpu %qs for -mtune", cpu_string);
  return riscv_cpu_info_table;
}
/* Helper function for riscv_build_integer; arguments are as for
   riscv_build_integer.  */

static int
riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
		       HOST_WIDE_INT value, machine_mode mode)
{
  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
  int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];

  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
    {
      /* Simply ADDI or LUI.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }

  /* End with ADDI.  When constructing HImode constants, do not generate any
     intermediate value that is not itself a valid HImode constant.  The
     XORI case below will handle those remaining HImode constants.  */
  if (low_part != 0
      && (mode != HImode
	  || value - low_part <= ((1 << (GET_MODE_BITSIZE (HImode) - 1)) - 1)))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = PLUS;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* End with XORI.  */
  if (cost > 2 && (low_part < 0 || mode == HImode))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = XOR;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* Eliminate trailing zeros and end with SLLI.  */
  if (cost > 2 && (value & 1) == 0)
    {
      int shift = ctz_hwi (value);
      unsigned HOST_WIDE_INT x = value;
      x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);

      /* Don't eliminate the lower 12 bits if LUI might apply.  */
      if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
	shift -= IMM_BITS, x <<= IMM_BITS;

      alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = ASHIFT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
  return cost;
}
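/* Worked example: on RV64, 0x1234567800000000 costs three operations.
   The SLLI step strips the 35 trailing zeros, the recursion builds the
   remaining value 0x2468acf as LUI 0x2469 + ADDI -0x531 (the low 12 bits
   0xacf sign-extend to a negative ADDI immediate, so the LUI constant is
   rounded up to compensate), and a final SLLI 35 restores the zeros.  */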
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static int
riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
		     machine_mode mode)
{
  int cost = riscv_build_integer_1 (codes, value, mode);

  /* Eliminate leading zeros and end with SRLI.  */
  if (value > 0 && cost > 2)
    {
      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
      int alt_cost, shift = clz_hwi (value);
      HOST_WIDE_INT shifted_val;

      /* Try filling trailing bits with 1s.  */
      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}

      /* Try filling trailing bits with 0s.  */
      shifted_val = value << shift;
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  return cost;
}
/* Return the cost of constructing VAL in the event that a scratch
   register is available.  */

static int
riscv_split_integer_cost (HOST_WIDE_INT val)
{
  int cost;
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];

  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
  if (loval != hival)
    cost += riscv_build_integer (codes, hival, VOIDmode);

  return cost;
}
/* Return the cost of constructing the integer constant VAL.  */

static int
riscv_integer_cost (HOST_WIDE_INT val)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  return MIN (riscv_build_integer (codes, val, VOIDmode),
	      riscv_split_integer_cost (val));
}
/* Try to split a 64b integer into 32b parts, then reassemble.  */

static rtx
riscv_split_integer (HOST_WIDE_INT val, machine_mode mode)
{
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);

  riscv_move_integer (hi, hi, hival);
  riscv_move_integer (lo, lo, loval);

  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
  hi = force_reg (mode, hi);

  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
}
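/* In outline, the emitted sequence is: build the two sign-extended 32-bit
   halves in scratch registers, SLLI the high half by 32, then ADD the
   halves.  Because the low half is sign-extended, the high half as
   computed above already absorbs the borrow when bit 31 of VAL is set,
   so the final ADD produces exactly VAL.  */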
/* Return true if X is a thread-local symbol.  */

static bool
riscv_tls_symbol_p (const_rtx x)
{
  return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
}
/* Return true if symbol X binds locally.  */

static bool
riscv_symbol_binds_local_p (const_rtx x)
{
  if (SYMBOL_REF_P (x))
    return (SYMBOL_REF_DECL (x)
	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	    : SYMBOL_REF_LOCAL_P (x));
  else
    return false;
}
/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X.  */

static enum riscv_symbol_type
riscv_classify_symbol (const_rtx x)
{
  if (riscv_tls_symbol_p (x))
    return SYMBOL_TLS;

  if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
    return SYMBOL_GOT_DISP;

  return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
}
/* Classify the base of symbolic expression X.  */

enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx x)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return riscv_classify_symbol (x);
}
/* Return true if X is a symbolic constant.  If it is, store the type of
   the symbol in *SYMBOL_TYPE.  */

bool
riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    *symbol_type = riscv_classify_symbol (x);
  else
    return false;

  if (offset == const0_rtx)
    return true;

  /* Nonzero offsets are only valid for references that don't use the GOT.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_PCREL:
    case SYMBOL_TLS_LE:
      /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
      return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);

    default:
      return false;
    }
}
/* Returns the number of instructions necessary to reference a symbol.  */

static int riscv_symbol_insns (enum riscv_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_TLS: return 0; /* Depends on the TLS model.  */
    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference.  */
    case SYMBOL_PCREL: return 2; /* AUIPC + the reference.  */
    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference.  */
    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference.  */
    default: gcc_unreachable ();
    }
}
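/* Concretely, a SYMBOL_ABSOLUTE load expands to something like
	lui	a0, %hi(sym)
	lw	a0, %lo(sym)(a0)
   while SYMBOL_PCREL instead pairs an AUIPC carrying %pcrel_hi with a
   %pcrel_lo reference, which is why both count as two instructions.  */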
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

static bool
riscv_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return riscv_const_insns (x) > 0;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
riscv_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum riscv_symbol_type type;
  rtx base, offset;

  /* There is no assembler syntax for expressing an address-sized
     high part.  */
  if (GET_CODE (x) == HIGH)
    return true;

  split_const (x, &base, &offset);
  if (riscv_symbolic_constant_p (base, &type))
    {
      /* As an optimization, don't spill symbolic constants that are as
	 cheap to rematerialize as to access in the constant pool.  */
      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
	return true;

      /* As an optimization, avoid needlessly generating dynamic
	 relocations.  */
      if (flag_pic)
	return true;
    }

  /* TLS symbols must be computed by riscv_legitimize_move.  */
  if (tls_referenced_p (x))
    return true;

  return false;
}
/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

int
riscv_regno_mode_ok_for_base_p (int regno,
				machine_mode mode ATTRIBUTE_UNUSED,
				bool strict_p)
{
  if (!HARD_REGISTER_NUM_P (regno))
    {
      if (!strict_p)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  return GP_REG_P (regno);
}
/* Return true if X is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

static bool
riscv_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
{
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
}
/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
   can address a value of mode MODE.  */

static bool
riscv_valid_offset_p (rtx x, machine_mode mode)
{
  /* Check that X is a signed 12-bit number.  */
  if (!const_arith_operand (x, Pmode))
    return false;

  /* We may need to split multiword moves, so make sure that every word
     is accessible.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
    return false;

  return true;
}
/* Should a symbol of type SYMBOL_TYPE be split in two?  */

bool
riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
{
  if (symbol_type == SYMBOL_TLS_LE)
    return true;

  if (!TARGET_EXPLICIT_RELOCS)
    return false;

  return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
}
/* Return true if a LO_SUM can address a value of mode MODE when the
   LO_SUM symbol has type SYM_TYPE.  */

static bool
riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode)
{
  /* Check that symbols of type SYMBOL_TYPE can be used to access values
     of mode MODE.  */
  if (riscv_symbol_insns (sym_type) == 0)
    return false;

  /* Check that there is a known low-part relocation.  */
  if (!riscv_split_symbol_type (sym_type))
    return false;

  /* We may need to split multiword moves, so make sure that each word
     can be accessed without inducing a carry.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && (!TARGET_STRICT_ALIGN
	  || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)))
    return false;

  return true;
}
/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
riscv_classify_address (struct riscv_address_info *info, rtx x,
			machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return riscv_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_offset_p (info->offset, mode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The RISC-V
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of _gp.  */
      info->symbol_type
	= riscv_classify_symbolic_expression (info->offset);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_lo_sum_p (info->symbol_type, mode));

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if x0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return SMALL_OPERAND (INTVAL (x));

    default:
      return false;
    }
}
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */

static bool
riscv_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
{
  struct riscv_address_info addr;

  return riscv_classify_address (&addr, x, mode, strict_p);
}
/* Return the number of instructions needed to load or store a value
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.  */

int
riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
{
  struct riscv_address_info addr;
  int n = 1;

  if (!riscv_classify_address (&addr, x, mode, false))
    return 0;

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  */
  if (mode != BLKmode && might_split_p)
    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (addr.type == ADDRESS_LO_SUM)
    n += riscv_symbol_insns (addr.symbol_type) - 1;

  return n;
}
/* Return the number of instructions needed to load constant X.
   Return 0 if X isn't a valid constant.  */

int
riscv_const_insns (rtx x)
{
  enum riscv_symbol_type symbol_type;
  rtx offset;

  switch (GET_CODE (x))
    {
    case HIGH:
      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
	  || !riscv_split_symbol_type (symbol_type))
	return 0;

      /* This is simply an LUI.  */
      return 1;

    case CONST_INT:
      {
	int cost = riscv_integer_cost (INTVAL (x));
	/* Force complicated constants to memory.  */
	return cost < 4 ? cost : 0;
      }

    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* We can use x0 to load floating-point zero.  */
      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

    case CONST:
      /* See if we can refer to X directly.  */
      if (riscv_symbolic_constant_p (x, &symbol_type))
	return riscv_symbol_insns (symbol_type);

      /* Otherwise try splitting the constant into a base and offset.  */
      split_const (x, &x, &offset);
      if (offset != 0)
	{
	  int n = riscv_const_insns (x);
	  if (n != 0)
	    return n + riscv_integer_cost (INTVAL (offset));
	}
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return riscv_symbol_insns (riscv_classify_symbol (x));

    default:
      return 0;
    }
}
/* X is a doubleword constant that can be handled by splitting it into
   two words and loading each word separately.  Return the number of
   instructions required to do this.  */

int
riscv_split_const_insns (rtx x)
{
  unsigned int low, high;

  low = riscv_const_insns (riscv_subword (x, false));
  high = riscv_const_insns (riscv_subword (x, true));
  gcc_assert (low > 0 && high > 0);
  return low + high;
}
/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.  */

int
riscv_load_store_insns (rtx mem, rtx_insn *insn)
{
  machine_mode mode;
  bool might_split_p;
  rtx set;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = true;
  if (GET_MODE_BITSIZE (mode) <= 32)
    might_split_p = false;
  else if (GET_MODE_BITSIZE (mode) == 64)
    {
      set = single_set (insn);
      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
	might_split_p = false;
    }

  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
}
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */

rtx
riscv_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
	  ? emit_move_insn (dest, src)
	  : emit_move_insn_1 (dest, src));
}
/* Emit an instruction of the form (set TARGET SRC).  */

static rtx
riscv_emit_set (rtx target, rtx src)
{
  emit_insn (gen_rtx_SET (target, src));
  return target;
}
/* Emit an instruction of the form (set DEST (CODE X Y)).  */

static rtx
riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
{
  return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
}
/* Compute (CODE X Y) and store the result in a new register
   of mode MODE.  Return that new register.  */

static rtx
riscv_force_binary (machine_mode mode, enum rtx_code code, rtx x, rtx y)
{
  return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
}
/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.  */

static rtx
riscv_force_temporary (rtx dest, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (Pmode, value);
  else
    {
      riscv_emit_move (dest, value);
      return dest;
    }
}
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.  */

static rtx
riscv_unspec_address_offset (rtx base, rtx offset,
			     enum riscv_symbol_type symbol_type)
{
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
			 UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}
/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

rtx
riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
{
  rtx base, offset;

  split_const (address, &base, &offset);
  return riscv_unspec_address_offset (base, offset, symbol_type);
}
/* If OP is an UNSPEC address, return the address to which it refers,
   otherwise return OP itself.  */

static rtx
riscv_strip_unspec_address (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
  return op;
}
/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for riscv_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
{
  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
  return riscv_force_temporary (temp, addr);
}
/* Load an entry from the GOT for a TLS GD access.  */

static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_gddi (dest, sym);
  else
    return gen_got_load_tls_gdsi (dest, sym);
}

/* Load an entry from the GOT for a TLS IE access.  */

static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_iedi (dest, sym);
  else
    return gen_got_load_tls_iesi (dest, sym);
}

/* Add in the thread pointer for a TLS LE access.  */

static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
{
  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
  if (Pmode == DImode)
    return gen_tls_add_tp_ledi (dest, base, tp, sym);
  else
    return gen_tls_add_tp_lesi (dest, base, tp, sym);
}
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for riscv_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimate SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
riscv_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
{
  enum riscv_symbol_type symbol_type;

  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
      || !riscv_symbolic_constant_p (addr, &symbol_type)
      || riscv_symbol_insns (symbol_type) == 0
      || !riscv_split_symbol_type (symbol_type))
    return false;

  if (low_out)
    switch (symbol_type)
      {
      case SYMBOL_ABSOLUTE:
	{
	  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
	  high = riscv_force_temporary (temp, high);
	  *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
	}
	break;

      case SYMBOL_PCREL:
	{
	  static unsigned seqno;
	  char buf[32];
	  rtx label;

	  ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
	  gcc_assert ((size_t) bytes < sizeof (buf));

	  label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
	  SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;

	  if (temp == NULL)
	    temp = gen_reg_rtx (Pmode);

	  if (Pmode == DImode)
	    emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
	  else
	    emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));

	  *low_out = gen_rtx_LO_SUM (Pmode, temp, label);

	  seqno++;
	}
	break;

      default:
	gcc_unreachable ();
      }

  return true;
}
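/* The SYMBOL_PCREL case above corresponds to the asm pair
	.LA0:	auipc	t0, %pcrel_hi(sym)
		addi	t0, t0, %pcrel_lo(.LA0)
   where the generated .LA label ties the %pcrel_lo relocation back to
   the AUIPC that produced the high part.  */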
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   riscv_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
	 The addition inside the macro CONST_HIGH_PART may cause an
	 overflow, so we need to force a sign-extension check.  */
      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
      offset = CONST_LOW_PART (offset);
      high = riscv_force_temporary (temp, high);
      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (Pmode, reg, offset);
}
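/* For example, REG + 0x12345 splits into HIGH = 0x12000 and
   OFFSET = 0x345: CONST_HIGH_PART rounds to the nearest multiple of
   0x1000 ((0x12345 + 0x800) & ~0xfff), so the remaining low part is
   always a valid signed 12-bit immediate.  The result is LUI + ADD plus
   a 12-bit displacement on the final memory access.  */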
/* The __tls_get_addr symbol.  */
static GTY(()) rtx riscv_tls_symbol;

/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing.  RESULT is an RTX for the
   return value location.  */
static rtx_insn *
riscv_call_tls_get_addr (rtx sym, rtx result)
{
  rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;
  rtx_insn *insn;

  if (!riscv_tls_symbol)
    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
  func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);

  start_sequence ();

  emit_insn (riscv_got_load_tls_gd (a0, sym));
  insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
riscv_legitimize_tls_address (rtx loc)
{
  rtx dest, tp, tmp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);

  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
  if (!flag_pic)
    model = TLS_MODEL_LOCAL_EXEC;

  switch (model)
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      /* Rely on section anchors for the optimization that LDM TLS
	 provides.  The anchor's address is loaded with GD TLS.  */
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_rtx_REG (Pmode, GP_RETURN);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);
      break;

    case TLS_MODEL_INITIAL_EXEC:
      /* la.tls.ie; tp-relative add.  */
      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
      tmp = gen_reg_rtx (Pmode);
      emit_insn (riscv_got_load_tls_ie (tmp, loc));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
      dest = gen_reg_rtx (Pmode);
      emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
      dest = gen_rtx_LO_SUM (Pmode, dest,
			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
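/* For reference, the local-exec case corresponds to a sequence of
   roughly this shape:
	lui	t0, %tprel_hi(x)
	add	t0, t0, tp, %tprel_add(x)
	lw	a0, %tprel_lo(x)(t0)
   matching the three instructions riscv_symbol_insns counts for
   SYMBOL_TLS_LE.  */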
/* If X is not a valid address for mode MODE, force it into a register.  */

static rtx
riscv_force_address (rtx x, machine_mode mode)
{
  if (!riscv_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
  return x;
}
/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  machine_mode mode)
{
  rtx addr;

  if (riscv_tls_symbol_p (x))
    return riscv_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (riscv_split_symbol (NULL, x, mode, &addr))
    return riscv_force_address (addr, mode);

  /* Handle BASE + OFFSET using riscv_add_offset.  */
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) != 0)
    {
      rtx base = XEXP (x, 0);
      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));

      if (!riscv_valid_base_register_p (base, mode, false))
	base = copy_to_mode_reg (Pmode, base);
      addr = riscv_add_offset (NULL, base, offset);
      return riscv_force_address (addr, mode);
    }

  return x;
}
/* Load VALUE into DEST.  TEMP is as for riscv_force_temporary.  */

void
riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  machine_mode mode;
  int i, num_ops;
  rtx x;

  mode = GET_MODE (dest);
  num_ops = riscv_build_integer (codes, value, mode);

  if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
      && num_ops >= riscv_split_integer_cost (value))
    x = riscv_split_integer (value, mode);
  else
    {
      /* Apply each binary operation to X.  */
      x = GEN_INT (codes[0].value);

      for (i = 1; i < num_ops; i++)
	{
	  if (!can_create_pseudo_p ())
	    x = riscv_emit_set (temp, x);
	  else
	    x = force_reg (mode, x);

	  x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
	}
    }

  riscv_emit_set (dest, x);
}
/* Subroutine of riscv_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
riscv_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  */
  if (splittable_const_int_operand (src, mode))
    {
      riscv_move_integer (dest, dest, INTVAL (src));
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
    {
      riscv_emit_set (dest, src);
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (riscv_tls_symbol_p (src))
    {
      riscv_emit_move (dest, riscv_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.  Also
     prefer to do this even if the constant _can_ be forced into memory, as it
     usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
    {
      base = riscv_force_temporary (dest, base);
      riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
  riscv_emit_move (dest, src);
}
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  */

bool
riscv_legitimize_move (machine_mode mode, rtx dest, rtx src)
{
  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
    {
      riscv_emit_move (dest, force_reg (mode, src));
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      riscv_legitimize_const_move (mode, dest, src);
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }

  /* The patterns we provide for optimizing access to PIC local symbols
     can lead GCC to create addresses that are not legitimate, which in
     turn produces unrecognizable instructions later during optimization;
     force such addresses into registers here.  */
  if (MEM_P (dest) && !riscv_legitimate_address_p (mode, XEXP (dest, 0),
						   reload_completed))
    {
      XEXP (dest, 0) = riscv_force_address (XEXP (dest, 0), mode);
    }
  if (MEM_P (src) && !riscv_legitimate_address_p (mode, XEXP (src, 0),
						  reload_completed))
    {
      XEXP (src, 0) = riscv_force_address (XEXP (src, 0), mode);
    }
  return false;
}
/* Return true if there is an instruction that implements CODE and accepts
   X as an immediate operand.  */

static int
riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case AND:
    case IOR:
    case XOR:
    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 12-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case GE:
    case GEU:
      /* We can emulate an immediate of 1 by using GT/GTU against x0.  */
      return x == 1;

    default:
      /* By default assume that x0 can be used for 0.  */
      return x == 0;
    }
}
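/* For example, (x <= 5) is accepted because it can become
   "slti t0, x, 6", and (x >= 1) is accepted because it is (x > 0),
   i.e. an SLT with x0 as the first comparison operand.  */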
/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation takes SINGLE_INSNS
   instructions and that the sequence of a double-word operation takes
   DOUBLE_INSNS instructions.  */

static int
riscv_binary_cost (rtx x, int single_insns, int double_insns)
{
  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    return COSTS_N_INSNS (double_insns);
  return COSTS_N_INSNS (single_insns);
}
/* Return the cost of sign- or zero-extending OP.  */

static int
riscv_extend_cost (rtx op, bool unsigned_p)
{
  if (MEM_P (op))
    return 0;

  if (unsigned_p && GET_MODE (op) == QImode)
    /* We can use ANDI.  */
    return COSTS_N_INSNS (1);

  if (!unsigned_p && GET_MODE (op) == SImode)
    /* We can use SEXT.W.  */
    return COSTS_N_INSNS (1);

  /* We need to use a shift left and a shift right.  */
  return COSTS_N_INSNS (2);
}
/* Implement TARGET_RTX_COSTS.  */

#define SINGLE_SHIFT_COST 1

static bool
riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int *total, bool speed)
{
  bool float_mode_p = FLOAT_MODE_P (mode);
  int cost;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
	{
	  *total = 0;
	  return true;
	}
      /* Fall through.  */

    case SYMBOL_REF:
    case LABEL_REF:
    case CONST_DOUBLE:
    case CONST:
      if ((cost = riscv_const_insns (x)) > 0)
	{
	  /* If the constant is likely to be stored in a GPR, SETs of
	     single-insn constants are as cheap as register sets; we
	     never want to CSE them.  */
	  if (cost == 1 && outer_code == SET)
	    *total = 0;
	  /* When we load a constant more than once, it usually is better
	     to duplicate the last operation in the sequence than to CSE
	     the constant itself.  */
	  else if (outer_code == SET || GET_MODE (x) == VOIDmode)
	    *total = COSTS_N_INSNS (1);
	}
      else /* The instruction will be fetched from the constant pool.  */
	*total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
      return true;

    case MEM:
      /* If the address is legitimate, return the number of
	 instructions it needs.  */
      if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
	{
	  *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
	  return true;
	}
      /* Otherwise use the default handling.  */
      return false;

    case NOT:
      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
      return false;

    case AND:
    case IOR:
    case XOR:
      /* Double-word operations use two single-word operations.  */
      *total = riscv_binary_cost (x, 1, 2);
      return false;

    case ZERO_EXTRACT:
      /* This is an SImode shift.  */
      if (outer_code == SET && (INTVAL (XEXP (x, 2)) > 0)
	  && (INTVAL (XEXP (x, 1)) + INTVAL (XEXP (x, 2)) == 32))
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = riscv_binary_cost (x, SINGLE_SHIFT_COST,
				  CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
      return false;

    case ABS:
      *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
      return false;

    case LO_SUM:
      *total = set_src_cost (XEXP (x, 0), mode, speed);
      return true;

    case LT:
      /* This is an SImode shift.  */
      if (outer_code == SET && GET_MODE (x) == DImode
	  && GET_MODE (XEXP (x, 0)) == SImode)
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      /* Fall through.  */
    case LTU:
    case LE:
    case LEU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case EQ:
    case NE:
      /* Branch comparisons have VOIDmode, so use the first operand's
	 mode instead.  */
      mode = GET_MODE (XEXP (x, 0));
      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 3);
      return false;

    case UNORDERED:
    case ORDERED:
      /* (FEQ(A, A) & FEQ(B, B)) compared against 0.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
      return false;

    case UNEQ:
    case LTGT:
      /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B).  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (3);
      return false;

    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      /* FLT or FLE, but guarded by an FFLAGS read and write.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (4);
      return false;

    case MINUS:
    case PLUS:
      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 4);
      return false;

    case NEG:
      {
	rtx op = XEXP (x, 0);
	if (GET_CODE (op) == FMA && !HONOR_SIGNED_ZEROS (mode))
	  {
	    *total = (tune_info->fp_mul[mode == DFmode]
		      + set_src_cost (XEXP (op, 0), mode, speed)
		      + set_src_cost (XEXP (op, 1), mode, speed)
		      + set_src_cost (XEXP (op, 2), mode, speed));
	    return true;
	  }
      }

      if (float_mode_p)
	*total = tune_info->fp_add[mode == DFmode];
      else
	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
      return false;

    case MULT:
      if (float_mode_p)
	*total = tune_info->fp_mul[mode == DFmode];
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	*total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
      else if (!speed)
	*total = COSTS_N_INSNS (1);
      else
	*total = tune_info->int_mul[mode == DImode];
      return false;

    case DIV:
    case SQRT:
    case MOD:
      if (float_mode_p)
	{
	  *total = tune_info->fp_div[mode == DFmode];
	  return false;
	}
      /* Fall through.  */

    case UDIV:
    case UMOD:
      if (speed)
	*total = tune_info->int_div[mode == DImode];
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case ZERO_EXTEND:
      /* This is an SImode shift.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT)
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      /* Fall through.  */
    case SIGN_EXTEND:
      *total = riscv_extend_cost (XEXP (x, 0), GET_CODE (x) == ZERO_EXTEND);
      return false;

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = tune_info->fp_add[mode == DFmode];
      return false;

    case FMA:
      *total = (tune_info->fp_mul[mode == DFmode]
		+ set_src_cost (XEXP (x, 0), mode, speed)
		+ set_src_cost (XEXP (x, 1), mode, speed)
		+ set_src_cost (XEXP (x, 2), mode, speed));
      return true;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_AUIPC)
	{
	  /* Make AUIPC cheap to avoid spilling its result to the stack.  */
	  *total = 1;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
1667 static int
1668 riscv_address_cost (rtx addr, machine_mode mode,
1669 addr_space_t as ATTRIBUTE_UNUSED,
1670 bool speed ATTRIBUTE_UNUSED)
1672 return riscv_address_insns (addr, mode, false);
/* Return one word of double-word value OP.  HIGH_P is true to select the
   high part or false to select the low part.  */

rtx
riscv_subword (rtx op, bool high_p)
{
  unsigned int byte = high_p ? UNITS_PER_WORD : 0;
  machine_mode mode = GET_MODE (op);

  if (mode == VOIDmode)
    mode = TARGET_64BIT ? TImode : DImode;

  if (MEM_P (op))
    return adjust_address (op, word_mode, byte);

  if (REG_P (op))
    gcc_assert (!FP_REG_RTX_P (op));

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
/* Return true if a 64-bit move from SRC to DEST should be split into two.  */

bool
riscv_split_64bit_move_p (rtx dest, rtx src)
{
  if (TARGET_64BIT)
    return false;

  /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
     of zeroing an FPR with FCVT.D.W.  */
  if (TARGET_DOUBLE_FLOAT
      && ((FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
	  || (FP_REG_RTX_P (dest) && MEM_P (src))
	  || (FP_REG_RTX_P (src) && MEM_P (dest))
	  || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))))
    return false;

  return true;
}
/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
   this function handles 64-bit moves for which riscv_split_64bit_move_p
   holds.  For 64-bit targets, this function handles 128-bit moves.  */

void
riscv_split_doubleword_move (rtx dest, rtx src)
{
  rtx low_dest;

  /* The operation can be split into two normal moves.  Decide in
     which order to do them.  */
  low_dest = riscv_subword (dest, false);
  if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
    {
      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
      riscv_emit_move (low_dest, riscv_subword (src, false));
    }
  else
    {
      riscv_emit_move (low_dest, riscv_subword (src, false));
      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
    }
}
/* Return the appropriate instructions to move SRC into DEST.  Assume
   that SRC is operand 1 and DEST is operand 0.  */

const char *
riscv_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  machine_mode mode;
  bool dbl_p;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  if (dbl_p && riscv_split_64bit_move_p (dest, src))
    return "#";

  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG && FP_REG_P (REGNO (src)))
	return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";

      if (src_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "lbu\t%0,%1";
	  case 2: return "lhu\t%0,%1";
	  case 4: return "lw\t%0,%1";
	  case 8: return "ld\t%0,%1";
	  }

      if (src_code == CONST_INT)
	return "li\t%0,%1";

      if (src_code == HIGH)
	return "lui\t%0,%h1";

      if (symbolic_operand (src, VOIDmode))
	switch (riscv_classify_symbolic_expression (src))
	  {
	  case SYMBOL_GOT_DISP: return "la\t%0,%1";
	  case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
	  case SYMBOL_PCREL: return "lla\t%0,%1";
	  default: gcc_unreachable ();
	  }
    }
  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
	{
	  if (GP_REG_P (REGNO (dest)))
	    return "mv\t%0,%z1";

	  if (FP_REG_P (REGNO (dest)))
	    {
	      if (!dbl_p)
		return "fmv.s.x\t%0,%z1";
	      if (TARGET_64BIT)
		return "fmv.d.x\t%0,%z1";
	      /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w.  */
	      gcc_assert (src == CONST0_RTX (mode));
	      return "fcvt.d.w\t%0,x0";
	    }
	}
      if (dest_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "sb\t%z1,%0";
	  case 2: return "sh\t%z1,%0";
	  case 4: return "sw\t%z1,%0";
	  case 8: return "sd\t%z1,%0";
	  }
    }
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
	return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";

      if (dest_code == MEM)
	return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
    }
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
	return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
    }
  gcc_unreachable ();
}
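/* A note on the templates above: the %z operand modifier prints x0 when
   the operand is the zero constant, which is how a store of zero becomes
   e.g. "sw x0,0(a0)" without needing a scratch register.  */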
/* Return true if CMP1 is a suitable second operand for integer ordering
   test CODE.  See also the *sCC patterns in riscv.md.  */

static bool
riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
{
  switch (code)
    {
    case GT:
    case GTU:
      return reg_or_0_operand (cmp1, VOIDmode);

    case GE:
    case GEU:
      return cmp1 == const1_rtx;

    case LT:
    case LTU:
      return arith_operand (cmp1, VOIDmode);

    case LE:
      return sle_operand (cmp1, VOIDmode);

    case LEU:
      return sleu_operand (cmp1, VOIDmode);

    default:
      gcc_unreachable ();
    }
}
/* Return true if *CMP1 (of mode MODE) is a valid second operand for
   integer ordering test *CODE, or if an equivalent combination can
   be formed by adjusting *CODE and *CMP1.  When returning true, update
   *CODE and *CMP1 with the chosen code and operand, otherwise leave
   them alone.  */

static bool
riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
				   machine_mode mode)
{
  HOST_WIDE_INT plus_one;

  if (riscv_int_order_operand_ok_p (*code, *cmp1))
    return true;

  if (CONST_INT_P (*cmp1))
    switch (*code)
      {
      case LE:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (INTVAL (*cmp1) < plus_one)
	  {
	    *code = LT;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      case LEU:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (plus_one != 0)
	  {
	    *code = LTU;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      default:
	break;
      }
  return false;
}
/* Compare CMP0 and CMP1 using ordering test CODE and store the result
   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
   is nonnull, it's OK to set TARGET to the inverse of the result and
   flip *INVERT_PTR instead.  */

static void
riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
			   rtx target, rtx cmp0, rtx cmp1)
{
  machine_mode mode;

  /* First see if there is a RISC-V instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
    riscv_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
	{
	  cmp1 = force_reg (mode, cmp1);
	  riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
	}
      else if (invert_ptr == 0)
	{
	  rtx inv_target = riscv_force_binary (GET_MODE (target),
					       inv_code, cmp0, cmp1);
	  riscv_emit_binary (XOR, target, inv_target, const1_rtx);
	}
      else
	{
	  *invert_ptr = !*invert_ptr;
	  riscv_emit_binary (inv_code, target, cmp0, cmp1);
	}
    }
}
/* Return a register that is zero iff CMP0 and CMP1 are equal.
   The register will have the same mode as CMP0.  */

static rtx
riscv_zero_if_equal (rtx cmp0, rtx cmp1)
{
  if (cmp1 == const0_rtx)
    return cmp0;

  return expand_binop (GET_MODE (cmp0), sub_optab,
		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
}
/* Sign- or zero-extend OP0 and OP1 for integer comparisons.  */

static void
riscv_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
{
  /* Comparisons consider all XLEN bits, so extend sub-XLEN values.  */
  if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
    {
      /* It is more profitable to zero-extend QImode values.  */
      if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
	{
	  *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
	  if (CONST_INT_P (*op1))
	    *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
	  else
	    *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
	}
      else
	{
	  *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
	  if (*op1 != const0_rtx)
	    *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
	}
    }
}
1984 /* Convert a comparison into something that can be used in a branch. On
1985 entry, *OP0 and *OP1 are the values being compared and *CODE is the code
1986 used to compare them. Update them to describe the final comparison. */
1988 static void
1989 riscv_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
1991 if (splittable_const_int_operand (*op1, VOIDmode))
1993 HOST_WIDE_INT rhs = INTVAL (*op1);
1995 if (*code == EQ || *code == NE)
1997 /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
1998 if (SMALL_OPERAND (-rhs))
2000 *op0 = riscv_force_binary (GET_MODE (*op0), PLUS, *op0,
2001 GEN_INT (-rhs));
2002 *op1 = const0_rtx;
2005 else
2007 static const enum rtx_code mag_comparisons[][2] = {
2008 {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
2011 /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */
2012 for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
2014 HOST_WIDE_INT new_rhs;
2015 bool increment = *code == mag_comparisons[i][0];
2016 bool decrement = *code == mag_comparisons[i][1];
2017 if (!increment && !decrement)
2018 continue;
2020 new_rhs = rhs + (increment ? 1 : -1);
2021 if (riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs)
2022 && (rhs < 0) == (new_rhs < 0))
2024 *op1 = GEN_INT (new_rhs);
2025 *code = mag_comparisons[i][increment];
2027 break;
2032 riscv_extend_comparands (*code, op0, op1);
2034 *op0 = force_reg (word_mode, *op0);
2035 if (*op1 != const0_rtx)
2036 *op1 = force_reg (word_mode, *op1);
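/* Worked example (a sketch): "x == 2048" cannot use ADDI directly
   because 2048 is not a SMALL_OPERAND, but -2048 is, so the code above
   rewrites it as "(x + -2048) == 0", roughly:
     addi  t0, x, -2048
     beq   t0, zero, .L1
   Similarly "x <= 0xFFF" becomes "x < 0x1000", since 0x1000 can be
   materialized with a single LUI while 0xFFF needs two instructions.  */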
2039 /* Like riscv_emit_int_compare, but for floating-point comparisons. */
2041 static void
2042 riscv_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2044 rtx tmp0, tmp1, cmp_op0 = *op0, cmp_op1 = *op1;
2045 enum rtx_code fp_code = *code;
2046 *code = NE;
2048 switch (fp_code)
2050 case UNORDERED:
2051 *code = EQ;
2052 /* Fall through. */
2054 case ORDERED:
2055 /* a == a && b == b */
2056 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2057 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2058 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2059 *op1 = const0_rtx;
2060 break;
2062 case UNEQ:
2063 case LTGT:
2064 /* ordered(a, b) > (a == b) */
2065 *code = fp_code == LTGT ? GTU : EQ;
2066 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2067 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2068 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2069 *op1 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op1);
2070 break;
2072 #define UNORDERED_COMPARISON(CODE, CMP) \
2073 case CODE: \
2074 *code = EQ; \
2075 *op0 = gen_reg_rtx (word_mode); \
2076 if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
2077 emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
2078 else if (GET_MODE (cmp_op0) == SFmode) \
2079 emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
2080 else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
2081 emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
2082 else if (GET_MODE (cmp_op0) == DFmode) \
2083 emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
2084 else \
2085 gcc_unreachable (); \
2086 *op1 = const0_rtx; \
2087 break;
2089 case UNLT:
2090 std::swap (cmp_op0, cmp_op1);
2091 gcc_fallthrough ();
2093 UNORDERED_COMPARISON(UNGT, le)
2095 case UNLE:
2096 std::swap (cmp_op0, cmp_op1);
2097 gcc_fallthrough ();
2099 UNORDERED_COMPARISON(UNGE, lt)
2100 #undef UNORDERED_COMPARISON
2102 case NE:
2103 fp_code = EQ;
2104 *code = EQ;
2105 /* Fall through. */
2107 case EQ:
2108 case LE:
2109 case LT:
2110 case GE:
2111 case GT:
2112 /* We have instructions for these cases. */
2113 *op0 = riscv_force_binary (word_mode, fp_code, cmp_op0, cmp_op1);
2114 *op1 = const0_rtx;
2115 break;
2117 default:
2118 gcc_unreachable ();
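/* Example (a sketch): ORDERED (a, b) on DFmode operands becomes roughly
     feq.d t0, a, a     # t0 = (a == a), i.e. a is not a NaN
     feq.d t1, b, b
     and   t0, t0, t1
   followed by an integer test of t0 against zero; FEQ is the quiet
   comparison, so NaN inputs simply yield 0 without trapping.  */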
2122 /* CODE-compare OP0 and OP1. Store the result in TARGET. */
2124 void
2125 riscv_expand_int_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2127 riscv_extend_comparands (code, &op0, &op1);
2128 op0 = force_reg (word_mode, op0);
2130 if (code == EQ || code == NE)
2132 rtx zie = riscv_zero_if_equal (op0, op1);
2133 riscv_emit_binary (code, target, zie, const0_rtx);
2135 else
2136 riscv_emit_int_order_test (code, 0, target, op0, op1);
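/* Example (illustrative only): "target = (x == y)" expands to roughly
     sub   t0, x, y
     sltiu target, t0, 1      # seqz
   while "target = (x != y)" uses "sltu target, zero, t0" (snez).  */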
2139 /* Like riscv_expand_int_scc, but for floating-point comparisons. */
2141 void
2142 riscv_expand_float_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2144 riscv_emit_float_compare (&code, &op0, &op1);
2146 rtx cmp = riscv_force_binary (word_mode, code, op0, op1);
2147 riscv_emit_set (target, lowpart_subreg (SImode, cmp, word_mode));
2150 /* Jump to LABEL if (CODE OP0 OP1) holds. */
2152 void
2153 riscv_expand_conditional_branch (rtx label, rtx_code code, rtx op0, rtx op1)
2155 if (FLOAT_MODE_P (GET_MODE (op1)))
2156 riscv_emit_float_compare (&code, &op0, &op1);
2157 else
2158 riscv_emit_int_compare (&code, &op0, &op1);
2160 rtx condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2161 emit_jump_insn (gen_condjump (condition, label));
2164 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
2165 least PARM_BOUNDARY bits of alignment, but will be given anything up
2166 to STACK_BOUNDARY bits if the type requires it. */
2168 static unsigned int
2169 riscv_function_arg_boundary (machine_mode mode, const_tree type)
2171 unsigned int alignment;
2173 /* Use natural alignment if the type is not an aggregate. */
2174 if (type && !AGGREGATE_TYPE_P (type))
2175 alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
2176 else
2177 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2179 return MIN (STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment));
2182 /* If MODE represents an argument that can be passed or returned in
2183 floating-point registers, return the number of registers, else 0. */
2185 static unsigned
2186 riscv_pass_mode_in_fpr_p (machine_mode mode)
2188 if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG)
2190 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2191 return 1;
2193 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
2194 return 2;
2197 return 0;
2200 typedef struct {
2201 const_tree type;
2202 HOST_WIDE_INT offset;
2203 } riscv_aggregate_field;
2205 /* Identify subfields of aggregates that are candidates for passing in
2206 floating-point registers. */
2208 static int
2209 riscv_flatten_aggregate_field (const_tree type,
2210 riscv_aggregate_field fields[2],
2211 int n, HOST_WIDE_INT offset)
2213 switch (TREE_CODE (type))
2215 case RECORD_TYPE:
2216 /* Can't handle incomplete types nor sizes that are not fixed. */
2217 if (!COMPLETE_TYPE_P (type)
2218 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2219 || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2220 return -1;
2222 for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
2223 if (TREE_CODE (f) == FIELD_DECL)
2225 if (!TYPE_P (TREE_TYPE (f)))
2226 return -1;
2228 HOST_WIDE_INT pos = offset + int_byte_position (f);
2229 n = riscv_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos);
2230 if (n < 0)
2231 return -1;
2233 return n;
2235 case ARRAY_TYPE:
2237 HOST_WIDE_INT n_elts;
2238 riscv_aggregate_field subfields[2];
2239 tree index = TYPE_DOMAIN (type);
2240 tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
2241 int n_subfields = riscv_flatten_aggregate_field (TREE_TYPE (type),
2242 subfields, 0, offset);
2244 /* Can't handle incomplete types nor sizes that are not fixed. */
2245 if (n_subfields <= 0
2246 || !COMPLETE_TYPE_P (type)
2247 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2248 || !index
2249 || !TYPE_MAX_VALUE (index)
2250 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
2251 || !TYPE_MIN_VALUE (index)
2252 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
2253 || !tree_fits_uhwi_p (elt_size))
2254 return -1;
2256 n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
2257 - tree_to_uhwi (TYPE_MIN_VALUE (index));
2258 gcc_assert (n_elts >= 0);
2260 for (HOST_WIDE_INT i = 0; i < n_elts; i++)
2261 for (int j = 0; j < n_subfields; j++)
2263 if (n >= 2)
2264 return -1;
2266 fields[n] = subfields[j];
2267 fields[n++].offset += i * tree_to_uhwi (elt_size);
2270 return n;
2273 case COMPLEX_TYPE:
2275 /* A complex type needs to consume two fields, so n must be 0. */
2276 if (n != 0)
2277 return -1;
2279 HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
2281 if (elt_size <= UNITS_PER_FP_ARG)
2283 fields[0].type = TREE_TYPE (type);
2284 fields[0].offset = offset;
2285 fields[1].type = TREE_TYPE (type);
2286 fields[1].offset = offset + elt_size;
2288 return 2;
2291 return -1;
2294 default:
2295 if (n < 2
2296 && ((SCALAR_FLOAT_TYPE_P (type)
2297 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
2298 || (INTEGRAL_TYPE_P (type)
2299 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)))
2301 fields[n].type = type;
2302 fields[n].offset = offset;
2303 return n + 1;
2305 else
2306 return -1;
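/* Flattening examples (sketches, assuming the D extension so that
   UNITS_PER_FP_ARG is 8):
     struct s1 { float x; double y; };    -> two fields, offsets 0 and 8
     struct s2 { float x[2]; };           -> two SFmode fields, offsets 0 and 4
     struct s3 { float x[3]; };           -> rejected, more than two fields
     struct s4 { _Complex double c; };    -> two DFmode fields, offsets 0 and 8.  */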
2310 /* Identify candidate aggregates for passing in floating-point registers.
2311 Candidates have at most two fields after flattening. */
2313 static int
2314 riscv_flatten_aggregate_argument (const_tree type,
2315 riscv_aggregate_field fields[2])
2317 if (!type || TREE_CODE (type) != RECORD_TYPE)
2318 return -1;
2320 return riscv_flatten_aggregate_field (type, fields, 0, 0);
2323 /* See whether TYPE is a record whose fields should be returned in one or
2324 two floating-point registers. If so, populate FIELDS accordingly. */
2326 static unsigned
2327 riscv_pass_aggregate_in_fpr_pair_p (const_tree type,
2328 riscv_aggregate_field fields[2])
2330 int n = riscv_flatten_aggregate_argument (type, fields);
2332 for (int i = 0; i < n; i++)
2333 if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
2334 return 0;
2336 return n > 0 ? n : 0;
2339 /* See whether TYPE is a record whose fields should be returned in one
2340 floating-point register and one integer register. If so, populate
2341 FIELDS accordingly. */
2343 static bool
2344 riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
2345 riscv_aggregate_field fields[2])
2347 unsigned num_int = 0, num_float = 0;
2348 int n = riscv_flatten_aggregate_argument (type, fields);
2350 for (int i = 0; i < n; i++)
2352 num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
2353 num_int += INTEGRAL_TYPE_P (fields[i].type);
2356 return num_int == 1 && num_float == 1;
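/* Example (illustrative): struct { float f; int i; } has one scalar
   float field and one integral field, so it qualifies for the FPR+GPR
   convention; struct { float f; float g; } does not (two floats, which
   the FPR-pair path handles instead), nor does struct { int i; int j; }
   (no float at all).  */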
2359 /* Return the representation of an argument passed or returned in an FPR
2360 when the value has mode VALUE_MODE and the type has TYPE_MODE. The
2361 two modes may be different for structures like:
2363 struct __attribute__((packed)) foo { float f; }
2365 where the SFmode value "f" is passed in REGNO but the struct itself
2366 has mode BLKmode. */
2368 static rtx
2369 riscv_pass_fpr_single (machine_mode type_mode, unsigned regno,
2370 machine_mode value_mode)
2372 rtx x = gen_rtx_REG (value_mode, regno);
2374 if (type_mode != value_mode)
2376 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
2377 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
2379 return x;
2382 /* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
2383 MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
2384 byte offset for the first value, likewise MODE2 and OFFSET2 for the
2385 second value. */
2387 static rtx
2388 riscv_pass_fpr_pair (machine_mode mode, unsigned regno1,
2389 machine_mode mode1, HOST_WIDE_INT offset1,
2390 unsigned regno2, machine_mode mode2,
2391 HOST_WIDE_INT offset2)
2393 return gen_rtx_PARALLEL
2394 (mode,
2395 gen_rtvec (2,
2396 gen_rtx_EXPR_LIST (VOIDmode,
2397 gen_rtx_REG (mode1, regno1),
2398 GEN_INT (offset1)),
2399 gen_rtx_EXPR_LIST (VOIDmode,
2400 gen_rtx_REG (mode2, regno2),
2401 GEN_INT (offset2))));
2404 /* Fill INFO with information about a single argument, and return an
2405 RTL pattern to pass or return the argument. CUM is the cumulative
2406 state for earlier arguments. MODE is the mode of this argument and
2407 TYPE is its type (if known). NAMED is true if this is a named
2408 (fixed) argument rather than a variable one. RETURN_P is true if
2409 returning the argument, or false if passing the argument. */
2411 static rtx
2412 riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
2413 machine_mode mode, const_tree type, bool named,
2414 bool return_p)
2416 unsigned num_bytes, num_words;
2417 unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
2418 unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
2419 unsigned alignment = riscv_function_arg_boundary (mode, type);
2421 memset (info, 0, sizeof (*info));
2422 info->gpr_offset = cum->num_gprs;
2423 info->fpr_offset = cum->num_fprs;
2425 if (named)
2427 riscv_aggregate_field fields[2];
2428 unsigned fregno = fpr_base + info->fpr_offset;
2429 unsigned gregno = gpr_base + info->gpr_offset;
2431 /* Pass one- or two-element floating-point aggregates in FPRs. */
2432 if ((info->num_fprs = riscv_pass_aggregate_in_fpr_pair_p (type, fields))
2433 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2434 switch (info->num_fprs)
2436 case 1:
2437 return riscv_pass_fpr_single (mode, fregno,
2438 TYPE_MODE (fields[0].type));
2440 case 2:
2441 return riscv_pass_fpr_pair (mode, fregno,
2442 TYPE_MODE (fields[0].type),
2443 fields[0].offset,
2444 fregno + 1,
2445 TYPE_MODE (fields[1].type),
2446 fields[1].offset);
2448 default:
2449 gcc_unreachable ();
2452 /* Pass real and complex floating-point numbers in FPRs. */
2453 if ((info->num_fprs = riscv_pass_mode_in_fpr_p (mode))
2454 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2455 switch (GET_MODE_CLASS (mode))
2457 case MODE_FLOAT:
2458 return gen_rtx_REG (mode, fregno);
2460 case MODE_COMPLEX_FLOAT:
2461 return riscv_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0,
2462 fregno + 1, GET_MODE_INNER (mode),
2463 GET_MODE_UNIT_SIZE (mode));
2465 default:
2466 gcc_unreachable ();
2469 /* Pass structs with one float and one integer in an FPR and a GPR. */
2470 if (riscv_pass_aggregate_in_fpr_and_gpr_p (type, fields)
2471 && info->gpr_offset < MAX_ARGS_IN_REGISTERS
2472 && info->fpr_offset < MAX_ARGS_IN_REGISTERS)
2474 info->num_gprs = 1;
2475 info->num_fprs = 1;
2477 if (!SCALAR_FLOAT_TYPE_P (fields[0].type))
2478 std::swap (fregno, gregno);
2480 return riscv_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type),
2481 fields[0].offset,
2482 gregno, TYPE_MODE (fields[1].type),
2483 fields[1].offset);
2487 /* Work out the size of the argument. */
2488 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2489 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2491 /* Doubleword-aligned varargs start on an even register boundary. */
2492 if (!named && num_bytes != 0 && alignment > BITS_PER_WORD)
2493 info->gpr_offset += info->gpr_offset & 1;
2495 /* Partition the argument between registers and stack. */
2496 info->num_fprs = 0;
2497 info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset);
2498 info->stack_p = (num_words - info->num_gprs) != 0;
2500 if (info->num_gprs || return_p)
2501 return gen_rtx_REG (mode, gpr_base + info->gpr_offset);
2503 return NULL_RTX;
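/* Examples of the resulting conventions on RV64 with the D extension
   (a sketch, assuming argument registers are still available):
     double                        -> fa0
     struct { float f; }           -> fa0
     struct { double d; int i; }   -> fa0 + a0
     struct { long a, b, c; }      -> larger than two GPRs, so passed
                                      by reference (see below).  */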
2506 /* Implement TARGET_FUNCTION_ARG. */
2508 static rtx
2509 riscv_function_arg (cumulative_args_t cum_v, machine_mode mode,
2510 const_tree type, bool named)
2512 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2513 struct riscv_arg_info info;
2515 if (mode == VOIDmode)
2516 return NULL;
2518 return riscv_get_arg_info (&info, cum, mode, type, named, false);
2521 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
2523 static void
2524 riscv_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2525 const_tree type, bool named)
2527 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2528 struct riscv_arg_info info;
2530 riscv_get_arg_info (&info, cum, mode, type, named, false);
2532 /* Advance the register count. This has the effect of setting
2533 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
2534 argument required us to skip the final GPR and pass the whole
2535 argument on the stack. */
2536 cum->num_fprs = info.fpr_offset + info.num_fprs;
2537 cum->num_gprs = info.gpr_offset + info.num_gprs;
2540 /* Implement TARGET_ARG_PARTIAL_BYTES. */
2542 static int
2543 riscv_arg_partial_bytes (cumulative_args_t cum,
2544 machine_mode mode, tree type, bool named)
2546 struct riscv_arg_info arg;
2548 riscv_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
2549 return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
2552 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
2553 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
2554 VALTYPE is null and MODE is the mode of the return value. */
2557 riscv_function_value (const_tree type, const_tree func, machine_mode mode)
2559 struct riscv_arg_info info;
2560 CUMULATIVE_ARGS args;
2562 if (type)
2564 int unsigned_p = TYPE_UNSIGNED (type);
2566 mode = TYPE_MODE (type);
2568 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
2569 return values, promote the mode here too. */
2570 mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
2573 memset (&args, 0, sizeof args);
2574 return riscv_get_arg_info (&info, &args, mode, type, true, true);
2577 /* Implement TARGET_PASS_BY_REFERENCE. */
2579 static bool
2580 riscv_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
2581 const_tree type, bool named)
2583 HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2584 struct riscv_arg_info info;
2585 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2587 /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
2588 never pass variadic arguments in floating-point registers, so we can
2589 avoid the call to riscv_get_arg_info in this case. */
2590 if (cum != NULL)
2592 /* Don't pass by reference if we can use a floating-point register. */
2593 riscv_get_arg_info (&info, cum, mode, type, named, false);
2594 if (info.num_fprs)
2595 return false;
2598 /* Pass by reference if the data do not fit in two integer registers. */
2599 return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
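/* Example: on RV64 (UNITS_PER_WORD == 8) a 16-byte struct is still
   passed by value in up to two GPRs, while a 17-byte struct is passed
   by reference.  Variadic arguments are never passed in FPRs, so a
   variadic double falls through to the size test above and travels in
   GPRs.  */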
2602 /* Implement TARGET_RETURN_IN_MEMORY. */
2604 static bool
2605 riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
2607 CUMULATIVE_ARGS args;
2608 cumulative_args_t cum = pack_cumulative_args (&args);
2610 /* The rules for returning in memory are the same as for passing the
2611 first named argument by reference. */
2612 memset (&args, 0, sizeof args);
2613 return riscv_pass_by_reference (cum, TYPE_MODE (type), type, true);
2616 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
2618 static void
2619 riscv_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
2620 tree type, int *pretend_size ATTRIBUTE_UNUSED,
2621 int no_rtl)
2623 CUMULATIVE_ARGS local_cum;
2624 int gp_saved;
2626 /* The caller has advanced CUM up to, but not beyond, the last named
2627 argument. Advance a local copy of CUM past the last "real" named
2628 argument, to find out how many registers are left over. */
2629 local_cum = *get_cumulative_args (cum);
2630 riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
2632 /* Find out how many registers we need to save. */
2633 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
2635 if (!no_rtl && gp_saved > 0)
2637 rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
2638 REG_PARM_STACK_SPACE (cfun->decl)
2639 - gp_saved * UNITS_PER_WORD);
2640 rtx mem = gen_frame_mem (BLKmode, ptr);
2641 set_mem_alias_set (mem, get_varargs_alias_set ());
2643 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
2644 mem, gp_saved);
2646 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
2647 cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
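/* Example (a sketch): for "int f (int fmt, ...)" on RV64, the named
   argument consumes a0, so the seven remaining argument registers
   a1-a7 are dumped just below the incoming stack pointer.  va_arg then
   sees one contiguous run of register and stack arguments.  */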
2650 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
2652 static void
2653 riscv_va_start (tree valist, rtx nextarg)
2655 nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
2656 std_expand_builtin_va_start (valist, nextarg);
2659 /* Make ADDR suitable for use as a call or sibcall target. */
2662 riscv_legitimize_call_address (rtx addr)
2664 if (!call_insn_operand (addr, VOIDmode))
2666 rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
2667 riscv_emit_move (reg, addr);
2668 return reg;
2670 return addr;
2673 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2674 Assume that the areas do not overlap. */
2676 static void
2677 riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2679 HOST_WIDE_INT offset, delta;
2680 unsigned HOST_WIDE_INT bits;
2681 int i;
2682 machine_mode mode;
2683 rtx *regs;
2685 bits = MAX (BITS_PER_UNIT,
2686 MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
2688 mode = mode_for_size (bits, MODE_INT, 0).require ();
2689 delta = bits / BITS_PER_UNIT;
2691 /* Allocate a buffer for the temporary registers. */
2692 regs = XALLOCAVEC (rtx, length / delta);
2694 /* Load as many BITS-sized chunks as possible, where BITS was chosen
2695 above to match the alignment of both the source and destination. */
2696 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2698 regs[i] = gen_reg_rtx (mode);
2699 riscv_emit_move (regs[i], adjust_address (src, mode, offset));
2702 /* Copy the chunks to the destination. */
2703 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2704 riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
2706 /* Mop up any left-over bytes. */
2707 if (offset < length)
2709 src = adjust_address (src, BLKmode, offset);
2710 dest = adjust_address (dest, BLKmode, offset);
2711 move_by_pieces (dest, src, length - offset,
2712 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
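/* Example: a 16-byte copy between word-aligned buffers on RV64 becomes
   two LD/SD pairs, roughly
     ld t0, 0(src)
     ld t1, 8(src)
     sd t0, 0(dest)
     sd t1, 8(dest)
   with any sub-word tail handled by move_by_pieces.  */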
2716 /* Helper function for doing a loop-based block operation on memory
2717 reference MEM. Each iteration of the loop will operate on LENGTH
2718 bytes of MEM.
2720 Create a new base register for use within the loop and point it to
2721 the start of MEM. Create a new memory reference that uses this
2722 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2724 static void
2725 riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2726 rtx *loop_reg, rtx *loop_mem)
2728 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2730 /* Although the new mem does not refer to a known location,
2731 it does keep up to LENGTH bytes of alignment. */
2732 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2733 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2736 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
2737 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
2738 the memory regions do not overlap. */
2740 static void
2741 riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
2742 HOST_WIDE_INT bytes_per_iter)
2744 rtx label, src_reg, dest_reg, final_src, test;
2745 HOST_WIDE_INT leftover;
2747 leftover = length % bytes_per_iter;
2748 length -= leftover;
2750 /* Create registers and memory references for use within the loop. */
2751 riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
2752 riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
2754 /* Calculate the value that SRC_REG should have after the last iteration
2755 of the loop. */
2756 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2757 0, 0, OPTAB_WIDEN);
2759 /* Emit the start of the loop. */
2760 label = gen_label_rtx ();
2761 emit_label (label);
2763 /* Emit the loop body. */
2764 riscv_block_move_straight (dest, src, bytes_per_iter);
2766 /* Move on to the next block. */
2767 riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
2768 riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
2770 /* Emit the loop condition. */
2771 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
2772 if (Pmode == DImode)
2773 emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
2774 else
2775 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
2777 /* Mop up any left-over bytes. */
2778 if (leftover)
2779 riscv_block_move_straight (dest, src, leftover);
2780 else
2781 emit_insn (gen_nop ());
2784 /* Expand a movmemsi instruction, which copies LENGTH bytes from
2785 memory reference SRC to memory reference DEST. */
2787 bool
2788 riscv_expand_block_move (rtx dest, rtx src, rtx length)
2790 if (CONST_INT_P (length))
2792 HOST_WIDE_INT factor, align;
2794 align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
2795 factor = BITS_PER_WORD / align;
2797 if (optimize_function_for_size_p (cfun)
2798 && INTVAL (length) * factor * UNITS_PER_WORD > MOVE_RATIO (false))
2799 return false;
2801 if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
2803 riscv_block_move_straight (dest, src, INTVAL (length));
2804 return true;
2806 else if (optimize && align >= BITS_PER_WORD)
2808 unsigned min_iter_words
2809 = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
2810 unsigned iter_words = min_iter_words;
2811 HOST_WIDE_INT bytes = INTVAL (length), words = bytes / UNITS_PER_WORD;
2813 /* Lengthen the loop body if it shortens the tail. */
2814 for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
2816 unsigned cur_cost = iter_words + words % iter_words;
2817 unsigned new_cost = i + words % i;
2818 if (new_cost <= cur_cost)
2819 iter_words = i;
2822 riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
2823 return true;
2826 return false;
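/* Decision sketch (illustrative): a short constant-length copy is fully
   unrolled by riscv_block_move_straight; a longer word-aligned copy
   gets a loop moving several words per iteration, with the iteration
   size tuned above to shrink the leftover tail; anything else returns
   false and falls back to the generic expansion, typically a call to
   memcpy.  */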
2829 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
2830 in context CONTEXT. HI_RELOC indicates a high-part reloc. */
2832 static void
2833 riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
2835 const char *reloc;
2837 switch (riscv_classify_symbolic_expression (op))
2839 case SYMBOL_ABSOLUTE:
2840 reloc = hi_reloc ? "%hi" : "%lo";
2841 break;
2843 case SYMBOL_PCREL:
2844 reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
2845 break;
2847 case SYMBOL_TLS_LE:
2848 reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
2849 break;
2851 default:
2852 gcc_unreachable ();
2855 fprintf (file, "%s(", reloc);
2856 output_addr_const (file, riscv_strip_unspec_address (op));
2857 fputc (')', file);
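/* Example: for an absolute symbol "sym", the usual addressing sequence
     lui   a0, %hi(sym)
     addi  a0, a0, %lo(sym)
   gets its %hi(...)/%lo(...) wrappers from this function; PC-relative
   and TLS local-exec symbols use the %pcrel_* and %tprel_* relocations
   instead.  */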
2860 /* Return true if the .AQ suffix should be added to an AMO to implement the
2861 acquire portion of memory model MODEL. */
2863 static bool
2864 riscv_memmodel_needs_amo_acquire (enum memmodel model)
2866 switch (model)
2868 case MEMMODEL_ACQ_REL:
2869 case MEMMODEL_SEQ_CST:
2870 case MEMMODEL_SYNC_SEQ_CST:
2871 case MEMMODEL_ACQUIRE:
2872 case MEMMODEL_CONSUME:
2873 case MEMMODEL_SYNC_ACQUIRE:
2874 return true;
2876 case MEMMODEL_RELEASE:
2877 case MEMMODEL_SYNC_RELEASE:
2878 case MEMMODEL_RELAXED:
2879 return false;
2881 default:
2882 gcc_unreachable ();
2886 /* Return true if a FENCE should be emitted before a memory access to
2887 implement the release portion of memory model MODEL. */
2889 static bool
2890 riscv_memmodel_needs_release_fence (enum memmodel model)
2892 switch (model)
2894 case MEMMODEL_ACQ_REL:
2895 case MEMMODEL_SEQ_CST:
2896 case MEMMODEL_SYNC_SEQ_CST:
2897 case MEMMODEL_RELEASE:
2898 case MEMMODEL_SYNC_RELEASE:
2899 return true;
2901 case MEMMODEL_ACQUIRE:
2902 case MEMMODEL_CONSUME:
2903 case MEMMODEL_SYNC_ACQUIRE:
2904 case MEMMODEL_RELAXED:
2905 return false;
2907 default:
2908 gcc_unreachable ();
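/* Example (a sketch; the exact insn patterns live in riscv.md): a
   __ATOMIC_SEQ_CST store expands to roughly
     fence iorw,ow
     sw    a1, 0(a0)
   where the fence string comes from the 'F' operand code below, while
   a relaxed store emits the plain SW alone.  */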
2912 /* Implement TARGET_PRINT_OPERAND. The RISC-V-specific operand codes are:
2914 'h' Print the high-part relocation associated with OP, after stripping
2915 any outermost HIGH.
2916 'R' Print the low-part relocation associated with OP.
2917 'C' Print the integer branch condition for comparison OP.
2918 'A' Print the atomic operation suffix for memory model OP.
2919 'F' Print a FENCE if the memory model requires a release.
2920 'z' Print x0 if OP is zero, otherwise print OP normally.
2921 'i' Print i if the operand is not a register. */
2923 static void
2924 riscv_print_operand (FILE *file, rtx op, int letter)
2926 machine_mode mode = GET_MODE (op);
2927 enum rtx_code code = GET_CODE (op);
2929 switch (letter)
2931 case 'h':
2932 if (code == HIGH)
2933 op = XEXP (op, 0);
2934 riscv_print_operand_reloc (file, op, true);
2935 break;
2937 case 'R':
2938 riscv_print_operand_reloc (file, op, false);
2939 break;
2941 case 'C':
2942 /* The RTL names match the instruction names. */
2943 fputs (GET_RTX_NAME (code), file);
2944 break;
2946 case 'A':
2947 if (riscv_memmodel_needs_amo_acquire ((enum memmodel) INTVAL (op)))
2948 fputs (".aq", file);
2949 break;
2951 case 'F':
2952 if (riscv_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
2953 fputs ("fence iorw,ow; ", file);
2954 break;
2956 case 'i':
2957 if (code != REG)
2958 fputs ("i", file);
2959 break;
2961 default:
2962 switch (code)
2964 case REG:
2965 if (letter && letter != 'z')
2966 output_operand_lossage ("invalid use of '%%%c'", letter);
2967 fprintf (file, "%s", reg_names[REGNO (op)]);
2968 break;
2970 case MEM:
2971 if (letter && letter != 'z')
2972 output_operand_lossage ("invalid use of '%%%c'", letter);
2973 else
2974 output_address (mode, XEXP (op, 0));
2975 break;
2977 default:
2978 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
2979 fputs (reg_names[GP_REG_FIRST], file);
2980 else if (letter && letter != 'z')
2981 output_operand_lossage ("invalid use of '%%%c'", letter);
2982 else
2983 output_addr_const (file, riscv_strip_unspec_address (op));
2984 break;
2989 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
2991 static void
2992 riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2994 struct riscv_address_info addr;
2996 if (riscv_classify_address (&addr, x, word_mode, true))
2997 switch (addr.type)
2999 case ADDRESS_REG:
3000 riscv_print_operand (file, addr.offset, 0);
3001 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3002 return;
3004 case ADDRESS_LO_SUM:
3005 riscv_print_operand_reloc (file, addr.offset, false);
3006 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3007 return;
3009 case ADDRESS_CONST_INT:
3010 output_addr_const (file, x);
3011 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
3012 return;
3014 case ADDRESS_SYMBOLIC:
3015 output_addr_const (file, riscv_strip_unspec_address (x));
3016 return;
3018 gcc_unreachable ();
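/* Return true if an object of SIZE bytes would fit under the small-data
   limit (g_switch_value).  */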
3021 static bool
3022 riscv_size_ok_for_small_data_p (int size)
3024 return g_switch_value && IN_RANGE (size, 1, g_switch_value);
3027 /* Return true if EXP should be placed in the small data section. */
3029 static bool
3030 riscv_in_small_data_p (const_tree x)
3032 if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
3033 return false;
3035 if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
3037 const char *sec = DECL_SECTION_NAME (x);
3038 return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
3041 return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
3044 /* Switch to the appropriate section for output of DECL. */
3046 static section *
3047 riscv_select_section (tree decl, int reloc,
3048 unsigned HOST_WIDE_INT align)
3050 switch (categorize_decl_for_section (decl, reloc))
3052 case SECCAT_SRODATA:
3053 return get_named_section (decl, ".srodata", reloc);
3055 default:
3056 return default_elf_select_section (decl, reloc, align);
3060 /* Return a section for X, handling small data. */
3062 static section *
3063 riscv_elf_select_rtx_section (machine_mode mode, rtx x,
3064 unsigned HOST_WIDE_INT align)
3066 section *s = default_elf_select_rtx_section (mode, x, align);
3068 if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
3070 if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
3072 /* Rename .rodata.cst* to .srodata.cst*. */
3073 char *name = (char *) alloca (strlen (s->named.name) + 2);
3074 sprintf (name, ".s%s", s->named.name + 1);
3075 return get_section (name, s->named.common.flags, NULL);
3078 if (s == data_section)
3079 return sdata_section;
3082 return s;
3085 /* Make the last instruction frame-related and note that it performs
3086 the operation described by FRAME_PATTERN. */
3088 static void
3089 riscv_set_frame_expr (rtx frame_pattern)
3091 rtx insn;
3093 insn = get_last_insn ();
3094 RTX_FRAME_RELATED_P (insn) = 1;
3095 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3096 frame_pattern,
3097 REG_NOTES (insn));
3100 /* Return a frame-related rtx that stores REG at MEM.
3101 REG must be a single register. */
3103 static rtx
3104 riscv_frame_set (rtx mem, rtx reg)
3106 rtx set = gen_rtx_SET (mem, reg);
3107 RTX_FRAME_RELATED_P (set) = 1;
3108 return set;
3111 /* Return true if the current function must save register REGNO. */
3113 static bool
3114 riscv_save_reg_p (unsigned int regno)
3116 bool call_saved = !global_regs[regno] && !call_used_regs[regno];
3117 bool might_clobber = crtl->saves_all_registers
3118 || df_regs_ever_live_p (regno);
3120 if (call_saved && might_clobber)
3121 return true;
3123 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3124 return true;
3126 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
3127 return true;
3129 return false;
3132 /* Determine whether to call GPR save/restore routines. */
3133 static bool
3134 riscv_use_save_libcall (const struct riscv_frame_info *frame)
3136 if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
3137 return false;
3139 return frame->save_libcall_adjustment != 0;
3142 /* Determine which GPR save/restore routine to call. */
3144 static unsigned
3145 riscv_save_libcall_count (unsigned mask)
3147 for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
3148 if (BITSET_P (mask, n))
3149 return CALLEE_SAVED_REG_NUMBER (n) + 1;
3150 abort ();
3153 /* Populate the current function's riscv_frame_info structure.
3155 RISC-V stack frames grow downward. High addresses are at the top.
3157 +-------------------------------+
3159 | incoming stack arguments |
3161 +-------------------------------+ <-- incoming stack pointer
3163 | callee-allocated save area |
3164 | for arguments that are |
3165 | split between registers and |
3166 | the stack |
3168 +-------------------------------+ <-- arg_pointer_rtx
3170 | callee-allocated save area |
3171 | for register varargs |
3173 +-------------------------------+ <-- hard_frame_pointer_rtx;
3174 | | stack_pointer_rtx + gp_sp_offset
3175 | GPR save area | + UNITS_PER_WORD
3177 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
3178 | | + UNITS_PER_HWVALUE
3179 | FPR save area |
3181 +-------------------------------+ <-- frame_pointer_rtx (virtual)
3183 | local variables |
3185 P +-------------------------------+
3187 | outgoing stack arguments |
3189 +-------------------------------+ <-- stack_pointer_rtx
3191 Dynamic stack allocations such as alloca insert data at point P.
3192 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
3193 hard_frame_pointer_rtx unchanged. */
3195 static void
3196 riscv_compute_frame_info (void)
3198 struct riscv_frame_info *frame;
3199 HOST_WIDE_INT offset;
3200 unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
3202 frame = &cfun->machine->frame;
3203 memset (frame, 0, sizeof (*frame));
3205 /* Find out which GPRs we need to save. */
3206 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3207 if (riscv_save_reg_p (regno))
3208 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3210 /* If this function calls eh_return, we must also save and restore the
3211 EH data registers. */
3212 if (crtl->calls_eh_return)
3213 for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
3214 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3216 /* Find out which FPRs we need to save. This loop must iterate over
3217 the same space as its companion in riscv_for_each_saved_reg. */
3218 if (TARGET_HARD_FLOAT)
3219 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3220 if (riscv_save_reg_p (regno))
3221 frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
3223 /* At the bottom of the frame are any outgoing stack arguments. */
3224 offset = crtl->outgoing_args_size;
3225 /* Next are local stack variables. */
3226 offset += RISCV_STACK_ALIGN (get_frame_size ());
3227 /* The virtual frame pointer points above the local variables. */
3228 frame->frame_pointer_offset = offset;
3229 /* Next are the callee-saved FPRs. */
3230 if (frame->fmask)
3231 offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
3232 frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
3233 /* Next are the callee-saved GPRs. */
3234 if (frame->mask)
3236 unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
3237 unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
3239 /* Only use save/restore routines if they don't alter the stack size. */
3240 if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
3241 frame->save_libcall_adjustment = x_save_size;
3243 offset += x_save_size;
3245 frame->gp_sp_offset = offset - UNITS_PER_WORD;
3246 /* The hard frame pointer points above the callee-saved GPRs. */
3247 frame->hard_frame_pointer_offset = offset;
3248 /* Above the hard frame pointer is the callee-allocated varargs save area. */
3249 offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
3250 frame->arg_pointer_offset = offset;
3251 /* Next is the callee-allocated area for pretend stack arguments. */
3252 offset += crtl->args.pretend_args_size;
3253 frame->total_size = offset;
3254 /* Above that are the incoming stack pointer and any incoming arguments. */
3256 /* Only use save/restore routines when the GPRs are atop the frame. */
3257 if (frame->hard_frame_pointer_offset != frame->total_size)
3258 frame->save_libcall_adjustment = 0;
3261 /* Make sure that we're not trying to eliminate to the wrong hard frame
3262 pointer. */
3264 static bool
3265 riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
3267 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
3270 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
3271 or argument pointer. TO is either the stack pointer or hard frame
3272 pointer. */
3274 HOST_WIDE_INT
3275 riscv_initial_elimination_offset (int from, int to)
3277 HOST_WIDE_INT src, dest;
3279 riscv_compute_frame_info ();
3281 if (to == HARD_FRAME_POINTER_REGNUM)
3282 dest = cfun->machine->frame.hard_frame_pointer_offset;
3283 else if (to == STACK_POINTER_REGNUM)
3284 dest = 0; /* The stack pointer is the base of all offsets, hence 0. */
3285 else
3286 gcc_unreachable ();
3288 if (from == FRAME_POINTER_REGNUM)
3289 src = cfun->machine->frame.frame_pointer_offset;
3290 else if (from == ARG_POINTER_REGNUM)
3291 src = cfun->machine->frame.arg_pointer_offset;
3292 else
3293 gcc_unreachable ();
3295 return src - dest;
3298 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
3299 previous frame. */
3302 riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
3304 if (count != 0)
3305 return const0_rtx;
3307 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
3310 /* Emit code to change the current function's return address to
3311 ADDRESS. SCRATCH is available as a scratch register, if needed.
3312 ADDRESS and SCRATCH are both word-mode GPRs. */
3314 void
3315 riscv_set_return_address (rtx address, rtx scratch)
3317 rtx slot_address;
3319 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
3320 slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
3321 cfun->machine->frame.gp_sp_offset);
3322 riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
3325 /* A function to save or store a register. The first argument is the
3326 register and the second is the stack slot. */
3327 typedef void (*riscv_save_restore_fn) (rtx, rtx);
3329 /* Use FN to save or restore register REGNO. MODE is the register's
3330 mode and OFFSET is the offset of its save slot from the current
3331 stack pointer. */
3333 static void
3334 riscv_save_restore_reg (machine_mode mode, int regno,
3335 HOST_WIDE_INT offset, riscv_save_restore_fn fn)
3337 rtx mem;
3339 mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
3340 fn (gen_rtx_REG (mode, regno), mem);
3343 /* Call FN for each register that is saved by the current function.
3344 SP_OFFSET is the offset of the current stack pointer from the start
3345 of the frame. */
3347 static void
3348 riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
3350 HOST_WIDE_INT offset;
3352 /* Save the link register and s-registers. */
3353 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
3354 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3355 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3357 riscv_save_restore_reg (word_mode, regno, offset, fn);
3358 offset -= UNITS_PER_WORD;
3361 /* This loop must iterate over the same space as its companion in
3362 riscv_compute_frame_info. */
3363 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
3364 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3365 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
3367 machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
3369 riscv_save_restore_reg (mode, regno, offset, fn);
3370 offset -= GET_MODE_SIZE (mode);
3374 /* Save register REG to MEM. Make the instruction frame-related. */
3376 static void
3377 riscv_save_reg (rtx reg, rtx mem)
3379 riscv_emit_move (mem, reg);
3380 riscv_set_frame_expr (riscv_frame_set (mem, reg));
3383 /* Restore register REG from MEM. */
3385 static void
3386 riscv_restore_reg (rtx reg, rtx mem)
3388 rtx insn = riscv_emit_move (reg, mem);
3389 rtx dwarf = NULL_RTX;
3390 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3391 REG_NOTES (insn) = dwarf;
3393 RTX_FRAME_RELATED_P (insn) = 1;
3396 /* Return the code to invoke the GPR save routine. */
3398 const char *
3399 riscv_output_gpr_save (unsigned mask)
3401 static char s[32];
3402 unsigned n = riscv_save_libcall_count (mask);
3404 ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__riscv_save_%u", n);
3405 gcc_assert ((size_t) bytes < sizeof (s));
3407 return s;
3410 /* For stack frames that can't be allocated with a single ADDI instruction,
3411 compute the best value to initially allocate. It must at a minimum
3412 allocate enough space to spill the callee-saved registers. */
3414 static HOST_WIDE_INT
3415 riscv_first_stack_step (struct riscv_frame_info *frame)
3417 HOST_WIDE_INT min_first_step = frame->total_size - frame->fp_sp_offset;
3418 HOST_WIDE_INT max_first_step = IMM_REACH / 2 - STACK_BOUNDARY / 8;
3420 if (SMALL_OPERAND (frame->total_size))
3421 return frame->total_size;
3423 /* As an optimization, use the least-significant bits of the total frame
3424 size, so that the second adjustment step is just LUI + ADD. */
3425 if (!SMALL_OPERAND (frame->total_size - max_first_step)
3426 && frame->total_size % IMM_REACH < IMM_REACH / 2
3427 && frame->total_size % IMM_REACH >= min_first_step)
3428 return frame->total_size % IMM_REACH;
3430 gcc_assert (min_first_step <= max_first_step);
3431 return max_first_step;
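/* Worked example (a sketch, assuming the register save area fits in the
   first step): for a 4112-byte frame, 4112 is not a SMALL_OPERAND, but
   4112 % IMM_REACH == 16 is, so the first ADDI drops the stack by 16
   and the remaining 4096 is loaded with a single LUI and added in the
   second step.  */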
3434 static rtx
3435 riscv_adjust_libcall_cfi_prologue ()
3437 rtx dwarf = NULL_RTX;
3438 rtx adjust_sp_rtx, reg, mem, insn;
3439 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3440 int offset;
3442 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3443 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3445 /* The save order is ra, s0, s1, s2 to s11. */
3446 if (regno == RETURN_ADDR_REGNUM)
3447 offset = saved_size - UNITS_PER_WORD;
3448 else if (regno == S0_REGNUM)
3449 offset = saved_size - UNITS_PER_WORD * 2;
3450 else if (regno == S1_REGNUM)
3451 offset = saved_size - UNITS_PER_WORD * 3;
3452 else
3453 offset = saved_size - ((regno - S2_REGNUM + 4) * UNITS_PER_WORD);
3455 reg = gen_rtx_REG (SImode, regno);
3456 mem = gen_frame_mem (SImode, plus_constant (Pmode,
3457 stack_pointer_rtx,
3458 offset));
3460 insn = gen_rtx_SET (mem, reg);
3461 dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
3464 /* Debug info for adjust sp. */
3465 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3466 stack_pointer_rtx, GEN_INT (-saved_size));
3467 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3468 dwarf);
3469 return dwarf;
3472 static void
3473 riscv_emit_stack_tie (void)
3475 if (Pmode == SImode)
3476 emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx));
3477 else
3478 emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx));
3481 /* Expand the "prologue" pattern. */
3483 void
3484 riscv_expand_prologue (void)
3486 struct riscv_frame_info *frame = &cfun->machine->frame;
3487 HOST_WIDE_INT size = frame->total_size;
3488 unsigned mask = frame->mask;
3489 rtx insn;
3491 if (flag_stack_usage_info)
3492 current_function_static_stack_size = size;
3494 /* When optimizing for size, call a subroutine to save the registers. */
3495 if (riscv_use_save_libcall (frame))
3497 rtx dwarf = NULL_RTX;
3498 dwarf = riscv_adjust_libcall_cfi_prologue ();
3500 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3501 size -= frame->save_libcall_adjustment;
3502 insn = emit_insn (gen_gpr_save (GEN_INT (mask)));
3504 RTX_FRAME_RELATED_P (insn) = 1;
3505 REG_NOTES (insn) = dwarf;
3508 /* Save the registers. */
3509 if ((frame->mask | frame->fmask) != 0)
3511 HOST_WIDE_INT step1 = MIN (size, riscv_first_stack_step (frame));
3513 insn = gen_add3_insn (stack_pointer_rtx,
3514 stack_pointer_rtx,
3515 GEN_INT (-step1));
3516 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3517 size -= step1;
3518 riscv_for_each_saved_reg (size, riscv_save_reg);
3521 frame->mask = mask; /* Undo the above fib. */
3523 /* Set up the frame pointer, if we're using one. */
3524 if (frame_pointer_needed)
3526 insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
3527 GEN_INT (frame->hard_frame_pointer_offset - size));
3528 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3530 riscv_emit_stack_tie ();
3533 /* Allocate the rest of the frame. */
3534 if (size > 0)
3536 if (SMALL_OPERAND (-size))
3538 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3539 GEN_INT (-size));
3540 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3542 else
3544 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
3545 emit_insn (gen_add3_insn (stack_pointer_rtx,
3546 stack_pointer_rtx,
3547 RISCV_PROLOGUE_TEMP (Pmode)));
3549 /* Describe the effect of the previous instructions. */
3550 insn = plus_constant (Pmode, stack_pointer_rtx, -size);
3551 insn = gen_rtx_SET (stack_pointer_rtx, insn);
3552 riscv_set_frame_expr (insn);
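/* Example prologue for a function saving ra and s0 in a 16-byte frame
   (illustrative only):
     addi  sp, sp, -16
     sd    ra, 8(sp)
     sd    s0, 0(sp)
   With -msave-restore the stores are instead performed by
   "call t0, __riscv_save_N", and the matching epilogue tail-calls
   __riscv_restore_N.  */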
3557 static rtx
3558 riscv_adjust_libcall_cfi_epilogue ()
3560 rtx dwarf = NULL_RTX;
3561 rtx adjust_sp_rtx, reg;
3562 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3564 /* Debug info for adjust sp. */
3565 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3566 stack_pointer_rtx, GEN_INT (saved_size));
3567 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3568 dwarf);
3570 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3571 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3573 reg = gen_rtx_REG (SImode, regno);
3574 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3577 return dwarf;
3580 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
3581 says which. */
3583 void
3584 riscv_expand_epilogue (bool sibcall_p)
3586 /* Split the frame into two. STEP1 is the amount of stack we should
3587 deallocate before restoring the registers. STEP2 is the amount we
3588 should deallocate afterwards.
3590 Start off by assuming that no registers need to be restored. */
3591 struct riscv_frame_info *frame = &cfun->machine->frame;
3592 unsigned mask = frame->mask;
3593 HOST_WIDE_INT step1 = frame->total_size;
3594 HOST_WIDE_INT step2 = 0;
3595 bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
3596 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
3597 rtx insn;
3599 /* We need a memory barrier to prevent reads from the deallocated stack. */
3600 bool need_barrier_p = (get_frame_size ()
3601 + cfun->machine->frame.arg_pointer_offset) != 0;
3603 if (!sibcall_p && riscv_can_use_return_insn ())
3605 emit_jump_insn (gen_return ());
3606 return;
3609 /* Move past any dynamic stack allocations. */
3610 if (cfun->calls_alloca)
3612 /* Emit a barrier to prevent loads from a deallocated stack. */
3613 riscv_emit_stack_tie ();
3614 need_barrier_p = false;
3616 rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
3617 if (!SMALL_OPERAND (INTVAL (adjust)))
3619 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3620 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3623 insn = emit_insn (
3624 gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
3625 adjust));
3627 rtx dwarf = NULL_RTX;
3628 rtx cfa_adjust_value = gen_rtx_PLUS (
3629 Pmode, hard_frame_pointer_rtx,
3630 GEN_INT (-frame->hard_frame_pointer_offset));
3631 rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value);
3632 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf);
3633 RTX_FRAME_RELATED_P (insn) = 1;
3635 REG_NOTES (insn) = dwarf;
3638 /* If we need to restore registers, deallocate as much stack as
3639 possible in the second step without going out of range. */
3640 if ((frame->mask | frame->fmask) != 0)
3642 step2 = riscv_first_stack_step (frame);
3643 step1 -= step2;
3646 /* Set TARGET to BASE + STEP1. */
3647 if (step1 > 0)
3649 /* Emit a barrier to prevent loads from a deallocated stack. */
3650 riscv_emit_stack_tie ();
3651 need_barrier_p = false;
3653 /* Get an rtx for STEP1 that we can add to BASE. */
3654 rtx adjust = GEN_INT (step1);
3655 if (!SMALL_OPERAND (step1))
3657 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3658 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3661 insn = emit_insn (
3662 gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
3664 rtx dwarf = NULL_RTX;
3665 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3666 GEN_INT (step2));
3668 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3669 RTX_FRAME_RELATED_P (insn) = 1;
3671 REG_NOTES (insn) = dwarf;
3674 if (use_restore_libcall)
3675 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3677 /* Restore the registers. */
3678 riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
3680 if (use_restore_libcall)
3682 frame->mask = mask; /* Undo the above fib. */
3683 gcc_assert (step2 >= frame->save_libcall_adjustment);
3684 step2 -= frame->save_libcall_adjustment;
3687 if (need_barrier_p)
3688 riscv_emit_stack_tie ();
3690 /* Deallocate the final bit of the frame. */
3691 if (step2 > 0)
3693 insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3694 GEN_INT (step2)));
3696 rtx dwarf = NULL_RTX;
3697 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3698 const0_rtx);
3699 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3700 RTX_FRAME_RELATED_P (insn) = 1;
3702 REG_NOTES (insn) = dwarf;
3705 if (use_restore_libcall)
3707 rtx dwarf = riscv_adjust_libcall_cfi_epilogue ();
3708 insn = emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
3709 RTX_FRAME_RELATED_P (insn) = 1;
3710 REG_NOTES (insn) = dwarf;
3712 emit_jump_insn (gen_gpr_restore_return (ra));
3713 return;
3716 /* Add in the __builtin_eh_return stack adjustment. */
3717 if (crtl->calls_eh_return)
3718 emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3719 EH_RETURN_STACKADJ_RTX));
3721 if (!sibcall_p)
3722 emit_jump_insn (gen_simple_return_internal (ra));
3725 /* Return nonzero if this function is known to have a null epilogue.
3726 This allows the optimizer to omit jumps to jumps if no stack
3727 was created. */
3729 bool
3730 riscv_can_use_return_insn (void)
3732 return reload_completed && cfun->machine->frame.total_size == 0;
3735 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
3737 When floating-point registers are wider than integer ones, moves between
3738 them must go through memory. */
3740 static bool
3741 riscv_secondary_memory_needed (machine_mode mode, reg_class_t class1,
3742 reg_class_t class2)
3744 return (GET_MODE_SIZE (mode) > UNITS_PER_WORD
3745 && (class1 == FP_REGS) != (class2 == FP_REGS));
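/* Example: on RV32 with the D extension, a DFmode value (8 bytes) is
   wider than a GPR (4 bytes), so a GPR<->FPR move of DFmode must be
   staged through a stack slot; on RV64 the same move is a single
   FMV.D.X or FMV.X.D.  */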
3748 /* Implement TARGET_REGISTER_MOVE_COST. */
3750 static int
3751 riscv_register_move_cost (machine_mode mode,
3752 reg_class_t from, reg_class_t to)
3754 return riscv_secondary_memory_needed (mode, from, to) ? 8 : 2;
3757 /* Implement TARGET_HARD_REGNO_NREGS. */
3759 static unsigned int
3760 riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
3762 if (FP_REG_P (regno))
3763 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
3765 /* All other registers are word-sized. */
3766 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3769 /* Implement TARGET_HARD_REGNO_MODE_OK. */
3771 static bool
3772 riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3774 unsigned int nregs = riscv_hard_regno_nregs (regno, mode);
3776 if (GP_REG_P (regno))
3778 if (!GP_REG_P (regno + nregs - 1))
3779 return false;
3781 else if (FP_REG_P (regno))
3783 if (!FP_REG_P (regno + nregs - 1))
3784 return false;
3786 if (GET_MODE_CLASS (mode) != MODE_FLOAT
3787 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
3788 return false;
3790 /* Only use callee-saved registers if a potential callee is guaranteed
3791 to spill the requisite width. */
3792 if (GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_REG
3793 || (!call_used_regs[regno]
3794 && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
3795 return false;
3797 else
3798 return false;
3800 /* Require same callee-savedness for all registers. */
3801 for (unsigned i = 1; i < nregs; i++)
3802 if (call_used_regs[regno] != call_used_regs[regno + i])
3803 return false;
3805 return true;
3808 /* Implement TARGET_MODES_TIEABLE_P.
3810 Don't allow floating-point modes to be tied, since type punning of
3811 single-precision and double-precision is implementation defined. */
3813 static bool
3814 riscv_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3816 return (mode1 == mode2
3817 || !(GET_MODE_CLASS (mode1) == MODE_FLOAT
3818 && GET_MODE_CLASS (mode2) == MODE_FLOAT));
3821 /* Implement CLASS_MAX_NREGS. */
3823 static unsigned char
3824 riscv_class_max_nregs (reg_class_t rclass, machine_mode mode)
3826 if (reg_class_subset_p (FP_REGS, rclass))
3827 return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
3829 if (reg_class_subset_p (GR_REGS, rclass))
3830 return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
3832 return 0;
3835 /* Implement TARGET_MEMORY_MOVE_COST. */
3837 static int
3838 riscv_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
3840 return (tune_info->memory_cost
3841 + memory_move_secondary_cost (mode, rclass, in));
3844 /* Return the number of instructions that can be issued per cycle. */
3846 static int
3847 riscv_issue_rate (void)
3849 return tune_info->issue_rate;
3852 /* Implement TARGET_ASM_FILE_START. */
3854 static void
3855 riscv_file_start (void)
3857 default_file_start ();
3859 /* Instruct GAS to generate position-[in]dependent code. */
3860 fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
3863 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
3864 in order to avoid duplicating too much logic from elsewhere. */
3866 static void
3867 riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
3868 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
3869 tree function)
3871 rtx this_rtx, temp1, temp2, fnaddr;
3872 rtx_insn *insn;
3874 /* Pretend to be a post-reload pass while generating rtl. */
3875 reload_completed = 1;
3877 /* Mark the end of the (empty) prologue. */
3878 emit_note (NOTE_INSN_PROLOGUE_END);
3880 /* Determine if we can use a sibcall to call FUNCTION directly. */
3881 fnaddr = gen_rtx_MEM (FUNCTION_MODE, XEXP (DECL_RTL (function), 0));
3883 /* We need two temporary registers in some cases. */
3884 temp1 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
3885 temp2 = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
3887 /* Find out which register contains the "this" pointer. */
3888 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
3889 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
3890 else
3891 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
3893 /* Add DELTA to THIS_RTX. */
3894 if (delta != 0)
3896 rtx offset = GEN_INT (delta);
3897 if (!SMALL_OPERAND (delta))
3899 riscv_emit_move (temp1, offset);
3900 offset = temp1;
3902 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
3905 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
3906 if (vcall_offset != 0)
3908 rtx addr;
3910 /* Set TEMP1 to *THIS_RTX. */
3911 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
3913 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
3914 addr = riscv_add_offset (temp2, temp1, vcall_offset);
3916 /* Load the offset and add it to THIS_RTX. */
3917 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
3918 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
3921 /* Jump to the target function. */
3922 insn = emit_call_insn (gen_sibcall (fnaddr, const0_rtx, NULL, const0_rtx));
3923 SIBLING_CALL_P (insn) = 1;
3925 /* Run just enough of rest_of_compilation. This sequence was
3926 "borrowed" from alpha.c. */
3927 insn = get_insns ();
3928 split_all_insns_noflow ();
3929 shorten_branches (insn);
3930 final_start_function (insn, file, 1);
3931 final (insn, file, 1);
3932 final_end_function ();
3934 /* Clean up the vars set above. Note that final_end_function resets
3935 the global pointer for us. */
3936 reload_completed = 0;
/* Allocate a chunk of memory for per-function machine-dependent data.  */

static struct machine_function *
riscv_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
riscv_option_override (void)
{
  const struct riscv_cpu_info *cpu;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  flag_pcc_struct_return = 0;

  if (flag_pic)
    g_switch_value = 0;

  /* The presence of the M extension implies that division instructions
     are present, so include them unless explicitly disabled.  */
  if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
    target_flags |= MASK_DIV;
  else if (!TARGET_MUL && TARGET_DIV)
    error ("-mdiv requires -march to subsume the %<M%> extension");
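  /* E.g. -march=rv32ima turns on -mdiv by default, while -mdiv combined
     with an -march lacking the M extension is rejected above.  */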
  /* Likewise floating-point division and square root.  */
  if (TARGET_HARD_FLOAT && (target_flags_explicit & MASK_FDIV) == 0)
    target_flags |= MASK_FDIV;

  /* Handle -mtune.  */
  cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
                         RISCV_TUNE_STRING_DEFAULT);
  tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;

  /* Use -mtune's setting for slow_unaligned_access, even when optimizing
     for size.  For architectures that trap and emulate unaligned accesses,
     the performance cost is too great, even for -Os.  Similarly, if
     -m[no-]strict-align is left unspecified, heed -mtune's advice.  */
  riscv_slow_unaligned_access_p = (cpu->tune_info->slow_unaligned_access
                                   || TARGET_STRICT_ALIGN);
  if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0
      && cpu->tune_info->slow_unaligned_access)
    target_flags |= MASK_STRICT_ALIGN;

  /* If the user hasn't specified a branch cost, use the processor's
     default.  */
  if (riscv_branch_cost == 0)
    riscv_branch_cost = tune_info->branch_cost;

  /* Function to allocate machine-dependent function status.  */
  init_machine_status = &riscv_init_machine_status;

  if (flag_pic)
    riscv_cmodel = CM_PIC;

  /* We get better code with explicit relocs for CM_MEDLOW, but
     worse code for the others (for now).  Pick the best default.  */
  if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
    if (riscv_cmodel == CM_MEDLOW)
      target_flags |= MASK_EXPLICIT_RELOCS;

  /* Require that the ISA supports the requested floating-point ABI.  */
  if (UNITS_PER_FP_ARG > (TARGET_HARD_FLOAT ? UNITS_PER_FP_REG : 0))
    error ("requested ABI requires -march to subsume the %qc extension",
           UNITS_PER_FP_ARG > 8 ? 'Q' : (UNITS_PER_FP_ARG > 4 ? 'D' : 'F'));

  /* We do not yet support ILP32 on RV64.  */
  if (BITS_PER_WORD != POINTER_SIZE)
    error ("ABI requires -march=rv%d", POINTER_SIZE);
}
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */

static void
riscv_conditional_register_usage (void)
{
  if (!TARGET_HARD_FLOAT)
    {
      for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
        fixed_regs[regno] = call_used_regs[regno] = 1;
    }
}
4028 /* Return a register priority for hard reg REGNO. */
4030 static int
4031 riscv_register_priority (int regno)
4033 /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
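  /* Most RVC instruction formats have only 3-bit register fields, which
     can name just x8-x15 (or f8-f15 for FP loads/stores), so biasing the
     allocator toward that window gives the assembler more opportunities
     to pick 16-bit encodings.  */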
  if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
                     || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
    return 1;

  return 0;
}
/* Implement TARGET_TRAMPOLINE_INIT.  */

static void
riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx addr, end_addr, mem;
  uint32_t trampoline[4];
  unsigned int i;
  HOST_WIDE_INT static_chain_offset, target_function_offset;

  /* Work out the offsets of the pointers from the start of the
     trampoline code.  */
  gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);

  /* Get pointers to the beginning and end of the code block.  */
  addr = force_reg (Pmode, XEXP (m_tramp, 0));
  end_addr = riscv_force_binary (Pmode, PLUS, addr,
                                 GEN_INT (TRAMPOLINE_CODE_SIZE));

  if (Pmode == SImode)
    {
      chain_value = force_reg (Pmode, chain_value);

      rtx target_function = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
      /* lui     t2, hi(chain)
         lui     t1, hi(func)
         addi    t2, t2, lo(chain)
         jr      t1, lo(func)
      */
      unsigned HOST_WIDE_INT lui_hi_chain_code, lui_hi_func_code;
      unsigned HOST_WIDE_INT lo_chain_code, lo_func_code;

      rtx uimm_mask = force_reg (SImode, gen_int_mode (-IMM_REACH, SImode));

      /* 0xfff.  */
      rtx imm12_mask = gen_reg_rtx (SImode);
      emit_insn (gen_one_cmplsi2 (imm12_mask, uimm_mask));

      rtx fixup_value = force_reg (SImode, gen_int_mode (IMM_REACH/2, SImode));
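      /* FIXUP_VALUE is 0x800.  The low 12 bits later become a
         sign-extended I-type immediate, so the LUI operand must be
         (x + 0x800) & 0xfffff000 -- i.e. %hi(x) << 12 -- for
         hi + sign_extend(lo) to reconstruct x.  Since a LUI immediate
         occupies instruction bits 31:12, the masked value can then be
         IORed directly into the opcode below.  */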
      /* Gen lui t2, hi(chain).  */
      rtx hi_chain = riscv_force_binary (SImode, PLUS, chain_value,
                                         fixup_value);
      hi_chain = riscv_force_binary (SImode, AND, hi_chain,
                                     uimm_mask);
      lui_hi_chain_code = OPCODE_LUI | (STATIC_CHAIN_REGNUM << SHIFT_RD);
      rtx lui_hi_chain = riscv_force_binary (SImode, IOR, hi_chain,
                                             gen_int_mode (lui_hi_chain_code, SImode));

      mem = adjust_address (m_tramp, SImode, 0);
      riscv_emit_move (mem, lui_hi_chain);

      /* Gen lui t1, hi(func).  */
      rtx hi_func = riscv_force_binary (SImode, PLUS, target_function,
                                        fixup_value);
      hi_func = riscv_force_binary (SImode, AND, hi_func,
                                    uimm_mask);
      lui_hi_func_code = OPCODE_LUI | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD);
      rtx lui_hi_func = riscv_force_binary (SImode, IOR, hi_func,
                                            gen_int_mode (lui_hi_func_code, SImode));

      mem = adjust_address (m_tramp, SImode, 1 * GET_MODE_SIZE (SImode));
      riscv_emit_move (mem, lui_hi_func);

      /* Gen addi t2, t2, lo(chain).  */
      rtx lo_chain = riscv_force_binary (SImode, AND, chain_value,
                                         imm12_mask);
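      /* Shift the 12-bit immediate into bits 31:20, the I-type
         immediate field of the ADDI encoding.  */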
      lo_chain = riscv_force_binary (SImode, ASHIFT, lo_chain, GEN_INT (20));

      lo_chain_code = OPCODE_ADDI
                      | (STATIC_CHAIN_REGNUM << SHIFT_RD)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RS1);

      rtx addi_lo_chain = riscv_force_binary (SImode, IOR, lo_chain,
                                              force_reg (SImode, GEN_INT (lo_chain_code)));

      mem = adjust_address (m_tramp, SImode, 2 * GET_MODE_SIZE (SImode));
      riscv_emit_move (mem, addi_lo_chain);

      /* Gen jr t1, lo(func).  */
      rtx lo_func = riscv_force_binary (SImode, AND, target_function,
                                        imm12_mask);
      lo_func = riscv_force_binary (SImode, ASHIFT, lo_func, GEN_INT (20));

      lo_func_code = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);

      rtx jr_lo_func = riscv_force_binary (SImode, IOR, lo_func,
                                           force_reg (SImode, GEN_INT (lo_func_code)));

      mem = adjust_address (m_tramp, SImode, 3 * GET_MODE_SIZE (SImode));
      riscv_emit_move (mem, jr_lo_func);
    }
  else
    {
      static_chain_offset = TRAMPOLINE_CODE_SIZE;
      target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);

      /* auipc   t2, 0
         l[wd]   t1, target_function_offset(t2)
         l[wd]   t2, static_chain_offset(t2)
         jr      t1
      */
      trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
      trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
                      | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
                      | (target_function_offset << SHIFT_IMM);
      trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RD)
                      | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
                      | (static_chain_offset << SHIFT_IMM);
      trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
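      /* The code block is 16 bytes (four 32-bit insns, per the assert
         above), so both offsets easily fit the 12-bit load immediates
         used here.  */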
      /* Copy the trampoline code.  */
      for (i = 0; i < ARRAY_SIZE (trampoline); i++)
        {
          mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
          riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
        }

      /* Set up the static chain pointer field.  */
      mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
      riscv_emit_move (mem, chain_value);

      /* Set up the target function field.  */
      mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
      riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
    }

  /* Flush the code part of the trampoline.  */
  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
  emit_insn (gen_clear_cache (addr, end_addr));
}
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */

static bool
riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
                               tree exp ATTRIBUTE_UNUSED)
{
  /* Don't use sibcalls when the save/restore routines are in use.  */
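  /* (A sketch of the failure mode being avoided, as I read it: with
     -msave-restore the epilogue jumps to a libgcc __riscv_restore_*
     routine whose final "ret" returns directly to our caller, so a
     sibcall's tail jump could never be reached.)  */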
  if (TARGET_SAVE_RESTORE)
    return false;

  return true;
}
/* Implement TARGET_CANNOT_COPY_INSN_P.  */

static bool
riscv_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
}

/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  */

static bool
riscv_slow_unaligned_access (machine_mode, unsigned int)
{
  return riscv_slow_unaligned_access_p;
}

/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
riscv_can_change_mode_class (machine_mode, machine_mode, reg_class_t rclass)
{
  return !reg_classes_intersect_p (FP_REGS, rclass);
}
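/* (My reading of the FP_REGS restriction above: narrower FP values are
   kept NaN-boxed in the upper bits of a wider FP register, so a subreg
   reinterpretation of an FP register in a different mode would not
   match the value's in-memory representation.)  */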
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
riscv_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
    return MAX (align, BITS_PER_WORD);
  return align;
}
/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE riscv_option_override

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS riscv_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST riscv_address_cost

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START riscv_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND riscv_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG riscv_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary

/* The generic ELF target does not always have TLS support.  */
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE riscv_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p

#undef TARGET_HAVE_SRODATA_SECTION
#define TARGET_HAVE_SRODATA_SECTION true

#undef TARGET_ASM_SELECT_SECTION
#define TARGET_ASM_SELECT_SECTION riscv_select_section

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section

#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)

#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
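/* IMM_REACH is the 4 KiB span of a signed 12-bit immediate, so the two
   definitions above keep section-anchor offsets within [-2048, 2047]
   and every anchored access reachable with a single load/store
   offset.  */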
#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY riscv_register_priority

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS riscv_init_builtins

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL riscv_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN riscv_expand_builtin

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS riscv_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK riscv_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p

#undef TARGET_SLOW_UNALIGNED_ACCESS
#define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access

#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED riscv_secondary_memory_needed

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS riscv_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT riscv_constant_alignment

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-riscv.h"