1 /* Subroutines used for code generation for RISC-V.
2 Copyright (C) 2023-2024 Free Software Foundation, Inc.
3 Contributed by Christoph Müllner (christoph.muellner@vrull.eu).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #define IN_TARGET_CODE 1
25 #include "coretypes.h"
37 #include "riscv-protos.h"
39 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
40 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
/* NOTE(review): this extraction dropped original lines (the `static void'
   return type, braces and the "otherwise" else branch around orig lines
   49-56).  Restore them from the upstream file before building.  */
43 split_plus (rtx x
, rtx
*base_ptr
, HOST_WIDE_INT
*offset_ptr
)
/* Decompose (plus BASE (const_int OFF)) into its two terms.  */
45 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
47 *base_ptr
= XEXP (x
, 0);
48 *offset_ptr
= INTVAL (XEXP (x
, 1));
57 /* Output a mempair instruction with the provided OPERANDS.
58 LOAD_P is true if we have a pair of loads (stores otherwise).
59 MODE is the access mode (DI or SI).
60 CODE is the extension code (UNKNOWN, SIGN_EXTEND or ZERO_EXTEND).
61 This instruction does not handle invalid inputs gracefully,
62 but is full of assertions to ensure that only valid instructions
/* NOTE(review): original lines are missing from this extraction (the
   `const char *format' declaration, the `if (load_p)' / `else' scaffolding
   around the mnemonic selection, the mode sub-branches, braces and the
   final return).  The fragments below are the surviving statements.  */
66 th_mempair_output_move (rtx operands
[4], bool load_p
,
67 machine_mode mode
, RTX_CODE code
)
69 rtx reg1
, reg2
, mem1
, mem2
, base1
, base2
;
70 HOST_WIDE_INT offset1
, offset2
;
71 rtx output_operands
[5];
/* Only SI/DI pairs exist — see the mnemonic table below.  */
74 gcc_assert (mode
== SImode
|| mode
== DImode
);
76 /* Paired 64-bit access instructions have a fixed shift amount of 4.
77 Paired 32-bit access instructions have a fixed shift amount of 3. */
78 unsigned shamt
= (mode
== DImode
) ? 4 : 3;
/* Load case (presumably inside `if (load_p)' — confirm against upstream):
   operands[0]/[2] are the destination registers, [1]/[3] the mems.  */
82 reg1
= copy_rtx (operands
[0]);
83 reg2
= copy_rtx (operands
[2]);
84 mem1
= copy_rtx (operands
[1]);
85 mem2
= copy_rtx (operands
[3]);
88 if (code
== ZERO_EXTEND
)
89 format
= "th.lwud\t%0, %1, (%2), %3, %4";
90 else //SIGN_EXTEND or UNKNOWN
91 format
= "th.lwd\t%0, %1, (%2), %3, %4";
93 format
= "th.ldd\t%0, %1, (%2), %3, %4";
/* Store case: register/memory operand roles are swapped.  */
97 reg1
= copy_rtx (operands
[1]);
98 reg2
= copy_rtx (operands
[3]);
99 mem1
= copy_rtx (operands
[0]);
100 mem2
= copy_rtx (operands
[2]);
103 format
= "th.swd\t%z0, %z1, (%2), %3, %4";
105 format
= "th.sdd\t%z0, %z1, (%2), %3, %4";
/* Both mems must share one base register and be exactly adjacent.  */
108 split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
109 split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
110 gcc_assert (rtx_equal_p (base1
, base2
));
111 auto size1
= MEM_SIZE (mem1
);
112 auto size2
= MEM_SIZE (mem2
);
113 gcc_assert (known_eq (size1
, size2
));
114 gcc_assert (known_eq (offset1
+ size1
, offset2
));
/* The encoded immediate is the first offset scaled down by SHAMT.  */
116 HOST_WIDE_INT imm2
= offset1
>> shamt
;
118 /* Make sure all mempair instruction constraints are met. */
119 gcc_assert (imm2
>= 0 && imm2
< 4);
120 gcc_assert ((imm2
<< shamt
) == offset1
);
121 gcc_assert (REG_P (reg1
));
122 gcc_assert (REG_P (reg2
));
123 gcc_assert (REG_P (base1
));
126 gcc_assert (REGNO (reg1
) != REGNO (reg2
));
127 gcc_assert (REGNO (reg1
) != REGNO (base1
));
128 gcc_assert (REGNO (reg2
) != REGNO (base1
));
131 /* Output the mempair instruction. */
132 output_operands
[0] = copy_rtx (reg1
);
133 output_operands
[1] = copy_rtx (reg2
);
134 output_operands
[2] = copy_rtx (base1
);
135 output_operands
[3] = gen_rtx_CONST_INT (mode
, imm2
);
136 output_operands
[4] = gen_rtx_CONST_INT (mode
, shamt
);
137 output_asm_insn (format
, output_operands
);
142 /* Analyse if a pair of loads/stores MEM1 and MEM2 with given MODE
143 are consecutive so they can be merged into a mempair instruction.
144 REVERSED will be set to true, if a reversal of the accesses is
145 required (false otherwise). Returns true if the accesses can be
146 merged (even if reversing is necessary) and false if not. */
/* NOTE(review): the extraction dropped the return type, the trailing
   `bool *reversed' parameter, braces, the bodies of the two known_eq
   branches (orig lines 166-180, presumably setting *REVERSED and
   returning) and the final return — confirm against upstream.  */
149 th_mempair_check_consecutive_mems (machine_mode mode
, rtx
*mem1
, rtx
*mem2
,
152 rtx base1
, base2
, offset1
, offset2
;
153 extract_base_offset_in_addr (*mem1
, &base1
, &offset1
);
154 extract_base_offset_in_addr (*mem2
, &base2
, &offset2
);
156 /* Make sure both mems are in base+offset form. */
157 if (!base1
|| !base2
)
160 /* If both mems use the same base register, just check the offsets. */
161 if (rtx_equal_p (base1
, base2
))
163 auto size
= GET_MODE_SIZE (mode
);
/* Ascending order: MEM1 directly below MEM2.  */
165 if (known_eq (UINTVAL (offset1
) + size
, UINTVAL (offset2
)))
/* Descending order: the pair must be reversed.  */
171 if (known_eq (UINTVAL (offset2
) + size
, UINTVAL (offset1
)))
183 /* Check if the given MEM can be used to define the address of a mempair
/* NOTE(review): the remainder of this header comment (orig lines 184-186),
   the return type, the `rtx base;' declaration used below, braces and all
   `return' statements are missing from this extraction.  */
187 th_mempair_operand_p (rtx mem
, machine_mode mode
)
189 if (!MEM_SIZE_KNOWN_P (mem
))
192 /* Only DI or SI mempair instructions exist. */
193 gcc_assert (mode
== SImode
|| mode
== DImode
);
/* The access size must match the mode size exactly.  */
194 auto mem_sz
= MEM_SIZE (mem
);
195 auto mode_sz
= GET_MODE_SIZE (mode
);
196 if (!known_eq (mem_sz
, mode_sz
))
199 /* Paired 64-bit access instructions have a fixed shift amount of 4.
200 Paired 32-bit access instructions have a fixed shift amount of 3. */
201 machine_mode mem_mode
= GET_MODE (mem
);
202 unsigned shamt
= (mem_mode
== DImode
) ? 4 : 3;
/* The offset must be a non-negative 2-bit immediate after scaling.  */
205 HOST_WIDE_INT offset
;
206 split_plus (XEXP (mem
, 0), &base
, &offset
);
207 HOST_WIDE_INT imm2
= offset
>> shamt
;
209 if (imm2
< 0 || imm2
>= 4)
212 if ((imm2
<< shamt
) != offset
)
/* Check whether a load-pair into REG1/REG2 from MEM would clobber a
   register that the address of MEM still needs (identical destination
   registers, or a destination overlapping MEM's address).
   NOTE(review): the return type, braces and `return' statements are
   missing from this extraction — presumably it returns true on overlap;
   confirm against upstream.  */
219 th_mempair_load_overlap_p (rtx reg1
, rtx reg2
, rtx mem
)
221 if (REGNO (reg1
) == REGNO (reg2
))
224 if (reg_overlap_mentioned_p (reg1
, mem
))
/* `base' is declared in dropped lines (orig 225-235) — see upstream.  */
228 HOST_WIDE_INT offset
;
229 split_plus (XEXP (mem
, 0), &base
, &offset
);
236 if (REGNO (base
) == REGNO (reg1
)
237 || REGNO (base
) == REGNO (reg2
))
244 /* Given OPERANDS of consecutive load/store, check if we can merge
245 them into load-pair or store-pair instructions.
246 LOAD is true if they are load instructions.
247 MODE is the mode of memory operation. */
/* NOTE(review): the return type, the operand unpacking into
   mem_1/mem_2/reg_1/reg_2, braces and all `return' statements are
   missing from this extraction — confirm against upstream.  */
250 th_mempair_operands_p (rtx operands
[4], bool load_p
,
253 rtx mem_1
, mem_2
, reg_1
, reg_2
;
/* Load-specific checks: distinct destinations not overlapping either
   address (presumably guarded by `if (load_p)' in dropped lines).  */
261 if (!REG_P (reg_1
) || !REG_P (reg_2
))
263 if (th_mempair_load_overlap_p (reg_1
, reg_2
, mem_1
))
265 if (th_mempair_load_overlap_p (reg_1
, reg_2
, mem_2
))
276 /* Check if the registers are GP registers. */
277 if (!REG_P (reg_1
) || !GP_REG_P (REGNO (reg_1
))
278 || !REG_P (reg_2
) || !GP_REG_P (REGNO (reg_2
)))
281 /* The mems cannot be volatile. */
282 if (!MEM_P (mem_1
) || !MEM_P (mem_2
))
284 if (MEM_VOLATILE_P (mem_1
) || MEM_VOLATILE_P (mem_2
))
287 /* If we have slow unaligned access, we only accept aligned memory. */
288 if (riscv_slow_unaligned_access_p
289 && known_lt (MEM_ALIGN (mem_1
), GET_MODE_SIZE (mode
) * BITS_PER_UNIT
))
292 /* Check if the addresses are in the form of [base+offset]. */
293 bool reversed
= false;
294 if (!th_mempair_check_consecutive_mems (mode
, &mem_1
, &mem_2
, &reversed
))
297 /* The first memory accesses must be a mempair operand. */
298 if ((!reversed
&& !th_mempair_operand_p (mem_1
, mode
))
299 || (reversed
&& !th_mempair_operand_p (mem_2
, mode
)))
302 /* The operands must be of the same size. */
303 gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1
)),
304 GET_MODE_SIZE (GET_MODE (mem_2
))));
309 /* Given OPERANDS of consecutive load/store that can be merged,
310 swap them if they are not in ascending order. */
/* NOTE(review): the return type, braces, the remaining arguments of the
   th_mempair_check_consecutive_mems call (first mem pointer and the
   &reversed out-parameter) and the guard that only swaps when reversal
   is needed are missing from this extraction — confirm upstream.  */
313 th_mempair_order_operands (rtx operands
[4], bool load_p
, machine_mode mode
)
/* Mems sit at index 1/3 for loads and 0/2 for stores.  */
315 int mem_op
= load_p
? 1 : 0;
316 bool reversed
= false;
317 if (!th_mempair_check_consecutive_mems (mode
,
319 operands
+ mem_op
+ 2,
325 /* Irrespective of whether this is a load or a store,
326 we do the same swap. */
327 std::swap (operands
[0], operands
[2]);
328 std::swap (operands
[1], operands
[3]);
332 /* Similar to riscv_save_reg, but saves two registers to memory
333 and marks the resulting instruction as frame-related.
   Attaches a two-SET REG_FRAME_RELATED_EXPR note so the CFI machinery
   sees both stores individually.
   NOTE(review): return type and braces are missing from this
   extraction.  */
336 th_mempair_save_regs (rtx operands
[4])
338 rtx set1
= gen_rtx_SET (operands
[0], operands
[1]);
339 rtx set2
= gen_rtx_SET (operands
[2], operands
[3]);
340 rtx dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (2));
341 rtx insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set1
, set2
)));
342 RTX_FRAME_RELATED_P (insn
) = 1;
/* Record both component stores in the unwind info.  */
344 XVECEXP (dwarf
, 0, 0) = copy_rtx (set1
);
345 XVECEXP (dwarf
, 0, 1) = copy_rtx (set2
);
346 RTX_FRAME_RELATED_P (XVECEXP (dwarf
, 0, 0)) = 1;
347 RTX_FRAME_RELATED_P (XVECEXP (dwarf
, 0, 1)) = 1;
348 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, dwarf
);
351 /* Similar to riscv_restore_reg, but restores two registers from memory
352 and marks the instruction frame-related.
   Both destination registers get a REG_CFA_RESTORE note.
   NOTE(review): return type and braces are missing from this
   extraction.  */
355 th_mempair_restore_regs (rtx operands
[4])
357 rtx set1
= gen_rtx_SET (operands
[0], operands
[1]);
358 rtx set2
= gen_rtx_SET (operands
[2], operands
[3]);
359 rtx insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set1
, set2
)));
360 RTX_FRAME_RELATED_P (insn
) = 1;
361 add_reg_note (insn
, REG_CFA_RESTORE
, operands
[0]);
362 add_reg_note (insn
, REG_CFA_RESTORE
, operands
[2]);
365 /* Prepare the OPERANDS array to emit a mempair instruction using the
366 provided information. No checks are performed, the resulting array
367 should be validated using th_mempair_operands_p().
   REGNO/OFFSET and REGNO2/OFFSET2 describe the two stack slots relative
   to the stack pointer.
   NOTE(review): return type and braces are missing from this
   extraction.  */
370 th_mempair_prepare_save_restore_operands (rtx operands
[4],
371 bool load_p
, machine_mode mode
,
372 int regno
, HOST_WIDE_INT offset
,
373 int regno2
, HOST_WIDE_INT offset2
)
/* For loads the registers come first; for stores the mems do.  */
375 int reg_op
= load_p
? 0 : 1;
376 int mem_op
= load_p
? 1 : 0;
378 rtx mem1
= plus_constant (mode
, stack_pointer_rtx
, offset
);
379 mem1
= gen_frame_mem (mode
, mem1
);
380 rtx mem2
= plus_constant (mode
, stack_pointer_rtx
, offset2
);
381 mem2
= gen_frame_mem (mode
, mem2
);
383 operands
[reg_op
] = gen_rtx_REG (mode
, regno
);
384 operands
[mem_op
] = mem1
;
385 operands
[2 + reg_op
] = gen_rtx_REG (mode
, regno2
);
386 operands
[2 + mem_op
] = mem2
;
389 /* Emit a mempair instruction to save/restore two registers to/from stack. */
/* NOTE(review): return type, the trailing `machine_mode mode' parameter,
   braces and the `if (load_p)' / `else' around the two emit calls are
   missing from this extraction — confirm against upstream.  */
392 th_mempair_save_restore_regs (rtx operands
[4], bool load_p
,
395 gcc_assert (th_mempair_operands_p (operands
, load_p
, mode
));
/* Ensure ascending slot order before emitting.  */
397 th_mempair_order_operands (operands
, load_p
, mode
);
400 th_mempair_restore_regs (operands
);
402 th_mempair_save_regs (operands
);
405 /* Return true if X can be represented as signed immediate of NBITS bits.
406 The immediate is assumed to be shifted by LSHAMT bits left.
   NOTE(review): return type, braces and the early `return false'
   statements after the two guards are missing from this extraction.  */
409 valid_signed_immediate (rtx x
, unsigned nbits
, unsigned lshamt
)
411 if (GET_CODE (x
) != CONST_INT
)
414 HOST_WIDE_INT v
= INTVAL (x
);
416 HOST_WIDE_INT vunshifted
= v
>> lshamt
;
418 /* Make sure we did not shift out any bits. */
419 if (vunshifted
<< lshamt
!= v
)
/* Signed range check: VUNSHIFTED must fit in [-2^(nbits-1), 2^(nbits-1)).  */
422 unsigned HOST_WIDE_INT imm_reach
= 1LL << nbits
;
423 return ((unsigned HOST_WIDE_INT
) vunshifted
+ imm_reach
/2 < imm_reach
);
426 /* Return the address RTX of a move to/from memory
/* NOTE(review): the rest of this comment and the declaration of `mem'
   (orig lines 427-438, presumably selecting SRC for loads and DEST for
   stores based on LOAD) are missing from this extraction — confirm
   against upstream.  */
430 th_get_move_mem_addr (rtx dest
, rtx src
, bool load
)
439 gcc_assert (GET_CODE (mem
) == MEM
);
440 return XEXP (mem
, 0);
443 /* Return true if X is a valid address for T-Head's memory addressing modes
444 with pre/post modification for machine mode MODE.
445 If it is, fill in INFO appropriately (if non-NULL).
446 If STRICT_P is true then REG_OK_STRICT is in effect. */
/* NOTE(review): the return type, braces, early `return false' statements,
   the `nbits' definition used in the loop, the NULL-check around the INFO
   stores and the final returns are missing from this extraction.  */
449 th_memidx_classify_address_modify (struct riscv_address_info
*info
, rtx x
,
450 machine_mode mode
, bool strict_p
)
452 if (!TARGET_XTHEADMEMIDX
)
455 if (!TARGET_64BIT
&& mode
== DImode
)
458 if (!(INTEGRAL_MODE_P (mode
) && GET_MODE_SIZE (mode
).to_constant () <= 8))
461 if (GET_CODE (x
) != POST_MODIFY
462 && GET_CODE (x
) != PRE_MODIFY
)
/* X is (pre|post_modify REG (plus REG (const_int OFF))).  */
465 rtx reg
= XEXP (x
, 0);
466 rtx exp
= XEXP (x
, 1);
467 rtx expreg
= XEXP (exp
, 0);
468 rtx expoff
= XEXP (exp
, 1);
470 if (GET_CODE (exp
) != PLUS
471 || !rtx_equal_p (expreg
, reg
)
472 || !CONST_INT_P (expoff
)
473 || !riscv_valid_base_register_p (reg
, mode
, strict_p
))
476 /* The offset is calculated as (sign_extend(imm5) << imm2) */
477 const int shamt_bits
= 2;
478 for (int shamt
= 0; shamt
< (1 << shamt_bits
); shamt
++)
481 if (valid_signed_immediate (expoff
, nbits
, shamt
))
485 info
->type
= ADDRESS_REG_WB
;
487 info
->offset
= expoff
;
497 /* Return TRUE if X is a MEM with a legitimate modify address. */
/* NOTE(review): return type, braces and the MEM_P guard/XEXP unpacking
   implied by the comment below are missing from this extraction.  */
500 th_memidx_legitimate_modify_p (rtx x
)
505 /* Get the mode from the MEM and unpack it. */
506 machine_mode mode
= GET_MODE (x
);
/* Non-strict before reload, strict afterwards.  */
509 return th_memidx_classify_address_modify (NULL
, x
, mode
, reload_completed
);
512 /* Return TRUE if X is a MEM with a legitimate modify address
513 and the address is POST_MODIFY (if POST is true) or a PRE_MODIFY
/* NOTE(review): the end of this comment, the return type, braces, the
   MEM unpacking of X and the `if (post)' branch between the two returns
   are missing from this extraction — confirm against upstream.  */
517 th_memidx_legitimate_modify_p (rtx x
, bool post
)
519 if (!th_memidx_legitimate_modify_p (x
))
522 /* Unpack the MEM and check the code. */
525 return GET_CODE (x
) == POST_MODIFY
;
527 return GET_CODE (x
) == PRE_MODIFY
;
530 /* Provide a buffer for a th.lXia/th.lXib/th.sXia/th.sXib instruction
531 for the given MODE. If LOAD is true, a load instruction will be
532 provided (otherwise, a store instruction). If X is not suitable
/* NOTE(review): the end of this comment, the return type, the static
   `format' buffer, rows of the mnemonic table (e.g. the DI entries and
   part of the load column), braces and the final `return format;' are
   missing from this extraction — confirm against upstream.  */
536 th_memidx_output_modify (rtx dest
, rtx src
, machine_mode mode
, bool load
)
539 rtx output_operands
[2];
540 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
543 if (!th_memidx_classify_address_modify (NULL
, x
, mode
, reload_completed
))
/* Table row index: log2 of the access size (0=byte .. 3=dword).  */
546 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ());
547 bool post
= GET_CODE (x
) == POST_MODIFY
;
549 const char *const insn
[][4] = {
551 "th.sbi%s\t%%z1,%%0",
552 "th.shi%s\t%%z1,%%0",
553 "th.swi%s\t%%z1,%%0",
557 "th.lbui%s\t%%0,%%1",
558 "th.lhui%s\t%%0,%%1",
/* "a" = post-increment (ia), "b" = pre-increment (ib).  */
564 snprintf (format
, sizeof (format
), insn
[load
][index
], post
? "a" : "b");
565 output_operands
[0] = dest
;
566 output_operands
[1] = src
;
567 output_asm_insn (format
, output_operands
);
/* Return true if MODE is an integer mode handled by the XTheadMemIdx
   instructions (QI/HI/SI always, DI only on rv64).
   NOTE(review): return type, braces and the `return true/false'
   statements are missing from this extraction.  */
572 is_memidx_mode (machine_mode mode
)
574 if (mode
== QImode
|| mode
== HImode
|| mode
== SImode
)
577 if (mode
== DImode
&& TARGET_64BIT
)
/* Return true if MODE is a float mode handled by the XTheadFMemIdx
   instructions (SF with hard float, DF with double float).
   NOTE(review): return type, braces and the `return true/false'
   statements are missing from this extraction.  */
584 is_fmemidx_mode (machine_mode mode
)
586 if (mode
== SFmode
&& TARGET_HARD_FLOAT
)
589 if (mode
== DFmode
&& TARGET_DOUBLE_FLOAT
)
595 /* Return true if X is a valid address for T-Head's memory addressing modes
596 with scaled register offsets for machine mode MODE.
597 If it is, fill in INFO appropriately (if non-NULL).
598 If STRICT_P is true then REG_OK_STRICT is in effect. */
/* NOTE(review): the return type, braces, `shift' declaration, several
   guard/return lines (including the plain (reg:X) case's condition, the
   final REG_P check paired with the regno test below, and the NULL-check
   around the INFO stores) are missing from this extraction.  */
601 th_memidx_classify_address_index (struct riscv_address_info
*info
, rtx x
,
602 machine_mode mode
, bool strict_p
)
604 /* Ensure that the mode is supported. */
605 if (!(TARGET_XTHEADMEMIDX
&& is_memidx_mode (mode
))
606 && !(TARGET_XTHEADMEMIDX
607 && TARGET_XTHEADFMEMIDX
&& is_fmemidx_mode (mode
)))
610 if (GET_CODE (x
) != PLUS
)
613 rtx reg
= XEXP (x
, 0);
614 enum riscv_address_type type
;
615 rtx offset
= XEXP (x
, 1);
618 if (!riscv_valid_base_register_p (reg
, mode
, strict_p
))
/* Plain (reg:X) index — condition head lost in extraction.  */
623 && GET_MODE (offset
) == Xmode
)
625 type
= ADDRESS_REG_REG
;
629 /* (zero_extend:DI (reg:SI)) */
630 else if (GET_CODE (offset
) == ZERO_EXTEND
631 && GET_MODE (offset
) == DImode
632 && GET_MODE (XEXP (offset
, 0)) == SImode
)
634 type
= ADDRESS_REG_UREG
;
636 offset
= XEXP (offset
, 0);
638 /* (ashift:X (reg:X) (const_int shift)) */
639 else if (GET_CODE (offset
) == ASHIFT
640 && GET_MODE (offset
) == Xmode
641 && REG_P (XEXP (offset
, 0))
642 && GET_MODE (XEXP (offset
, 0)) == Xmode
643 && CONST_INT_P (XEXP (offset
, 1))
644 && IN_RANGE (INTVAL (XEXP (offset
, 1)), 0, 3))
646 type
= ADDRESS_REG_REG
;
647 shift
= INTVAL (XEXP (offset
, 1));
648 offset
= XEXP (offset
, 0);
650 /* (ashift:DI (zero_extend:DI (reg:SI)) (const_int shift)) */
651 else if (GET_CODE (offset
) == ASHIFT
652 && GET_MODE (offset
) == DImode
653 && GET_CODE (XEXP (offset
, 0)) == ZERO_EXTEND
654 && GET_MODE (XEXP (offset
, 0)) == DImode
655 && GET_MODE (XEXP (XEXP (offset
, 0), 0)) == SImode
656 && CONST_INT_P (XEXP (offset
, 1))
657 && IN_RANGE(INTVAL (XEXP (offset
, 1)), 0, 3))
659 type
= ADDRESS_REG_UREG
;
660 shift
= INTVAL (XEXP (offset
, 1));
661 offset
= XEXP (XEXP (offset
, 0), 0);
/* Before reload a SUBREG may still wrap the index register.  */
666 if (!strict_p
&& GET_CODE (offset
) == SUBREG
)
667 offset
= SUBREG_REG (offset
);
670 || !riscv_regno_mode_ok_for_base_p (REGNO (offset
), mode
, strict_p
))
677 info
->offset
= offset
;
683 /* Return TRUE if X is a MEM with a legitimate indexed address. */
/* NOTE(review): return type, braces and the MEM_P guard/XEXP unpacking
   implied by the comment below are missing from this extraction.  */
686 th_memidx_legitimate_index_p (rtx x
)
691 /* Get the mode from the MEM and unpack it. */
692 machine_mode mode
= GET_MODE (x
);
/* Non-strict before reload, strict afterwards.  */
695 return th_memidx_classify_address_index (NULL
, x
, mode
, reload_completed
);
698 /* Return TRUE if X is a MEM with a legitimate indexed address
699 and the offset register is zero-extended (if UINDEX is true)
700 or sign-extended (otherwise). */
/* NOTE(review): return type, braces, the MEM unpacking of X, the early
   `return false' and the `if (uindex)' selecting between the two returns
   are missing from this extraction — confirm against upstream.  */
703 th_memidx_legitimate_index_p (rtx x
, bool uindex
)
708 /* Get the mode from the MEM and unpack it. */
709 machine_mode mode
= GET_MODE (x
);
712 struct riscv_address_info info
;
713 if (!th_memidx_classify_address_index (&info
, x
, mode
, reload_completed
))
717 return info
.type
== ADDRESS_REG_UREG
;
719 return info
.type
== ADDRESS_REG_REG
;
722 /* Provide a buffer for a th.lrX/th.lurX/th.srX/th.surX instruction
723 for the given MODE. If LOAD is true, a load instruction will be
724 provided (otherwise, a store instruction). If X is not suitable
/* NOTE(review): the end of this comment, the return type, the static
   `format' buffer, rows of the mnemonic table, braces and the final
   `return format;' are missing from this extraction.  */
728 th_memidx_output_index (rtx dest
, rtx src
, machine_mode mode
, bool load
)
730 struct riscv_address_info info
;
732 rtx output_operands
[2];
733 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
736 if (!th_memidx_classify_address_index (&info
, x
, mode
, reload_completed
))
/* Table row index: log2 of the access size (0=byte .. 3=dword).  */
739 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ());
740 bool uindex
= info
.type
== ADDRESS_REG_UREG
;
742 const char *const insn
[][4] = {
744 "th.s%srb\t%%z1,%%0",
745 "th.s%srh\t%%z1,%%0",
746 "th.s%srw\t%%z1,%%0",
750 "th.l%srbu\t%%0,%%1",
751 "th.l%srhu\t%%0,%%1",
/* "u" selects the zero-extended (unsigned index) variant.  */
757 snprintf (format
, sizeof (format
), insn
[load
][index
], uindex
? "u" : "");
758 output_operands
[0] = dest
;
759 output_operands
[1] = src
;
760 output_asm_insn (format
, output_operands
);
764 /* Provide a buffer for a th.flX/th.fluX/th.fsX/th.fsuX instruction
765 for the given MODE. If LOAD is true, a load instruction will be
766 provided (otherwise, a store instruction). If X is not suitable
/* NOTE(review): the end of this comment, the return type, the static
   `format' buffer, part of the mnemonic table (the `th.fl%srd' row),
   braces and the final `return format;' are missing from this
   extraction.  */
770 th_fmemidx_output_index (rtx dest
, rtx src
, machine_mode mode
, bool load
)
772 struct riscv_address_info info
;
774 rtx output_operands
[2];
775 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
778 if (!th_memidx_classify_address_index (&info
, x
, mode
, false))
/* Only SF (index 0) and DF (index 1) exist, hence the "- 2".  */
781 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ()) - 2;
782 bool uindex
= info
.type
== ADDRESS_REG_UREG
;
784 const char *const insn
[][2] = {
786 "th.fs%srw\t%%z1,%%0",
787 "th.fs%srd\t%%z1,%%0"
790 "th.fl%srw\t%%0,%%1",
/* "u" selects the zero-extended (unsigned index) variant.  */
795 snprintf (format
, sizeof (format
), insn
[load
][index
], uindex
? "u" : "");
796 output_operands
[0] = dest
;
797 output_operands
[1] = src
;
798 output_asm_insn (format
, output_operands
);
802 /* Return true if X is a valid address for T-Head's memory addressing modes
803 for machine mode MODE. If it is, fill in INFO appropriately (if non-NULL).
804 If STRICT_P is true then REG_OK_STRICT is in effect. */
/* NOTE(review): the return type, braces, the switch's `case' labels
   (presumably PLUS and PRE/POST_MODIFY), `return true/false' statements
   and the `default' are missing from this extraction.  */
807 th_classify_address (struct riscv_address_info
*info
, rtx x
,
808 machine_mode mode
, bool strict_p
)
810 switch (GET_CODE (x
))
/* Scaled register-offset addressing.  */
813 if (th_memidx_classify_address_index (info
, x
, mode
, strict_p
))
/* Pre/post-modify addressing.  */
819 if (th_memidx_classify_address_modify (info
, x
, mode
, strict_p
))
830 /* Provide a string containing a XTheadMemIdx instruction for the given
831 MODE from the provided SRC to the provided DEST.
832 A pointer to a NULL-terminated string containing the instruction will
833 be returned if a suitable instruction is available. Otherwise, this
834 function returns NULL. */
/* NOTE(review): the return type, the `machine_mode mode' declaration,
   braces, `return insn' / `return NULL' statements and the bodies
   following each inner `if' are missing from this extraction.  */
837 th_output_move (rtx dest
, rtx src
)
839 enum rtx_code dest_code
, src_code
;
841 const char *insn
= NULL
;
843 dest_code
= GET_CODE (dest
);
844 src_code
= GET_CODE (src
);
845 mode
= GET_MODE (dest
);
/* Modes must agree unless SRC is the zero constant.  */
847 if (!(mode
== GET_MODE (src
) || src
== CONST0_RTX (mode
)))
850 if (dest_code
== REG
&& src_code
== MEM
)
/* Loads: integer modes, or float values loaded into GP registers.  */
852 if (GET_MODE_CLASS (mode
) == MODE_INT
853 || (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GP_REG_P (REGNO (dest
))))
855 if ((insn
= th_memidx_output_index (dest
, src
, mode
, true)))
857 if ((insn
= th_memidx_output_modify (dest
, src
, mode
, true)))
860 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& HARDFP_REG_P (REGNO (dest
)))
862 if ((insn
= th_fmemidx_output_index (dest
, src
, mode
, true)))
866 else if (dest_code
== MEM
&& (src_code
== REG
|| src
== CONST0_RTX (mode
)))
/* Stores: integer modes, zero stores, or float values from GP regs.  */
868 if (GET_MODE_CLASS (mode
) == MODE_INT
869 || src
== CONST0_RTX (mode
)
870 || (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GP_REG_P (REGNO (src
))))
872 if ((insn
= th_memidx_output_index (dest
, src
, mode
, false)))
874 if ((insn
= th_memidx_output_modify (dest
, src
, mode
, false)))
877 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& HARDFP_REG_P (REGNO (src
)))
879 if ((insn
= th_fmemidx_output_index (dest
, src
, mode
, false)))
886 /* Implement TARGET_PRINT_OPERAND_ADDRESS for XTheadMemIdx. */
/* NOTE(review): the return type, braces, the `switch (addr.type)'
   statement around the case labels, the gcc_unreachable/fallback path
   and the case for ADDRESS_REG_WB header are missing from this
   extraction — confirm against upstream.  */
889 th_print_operand_address (FILE *file
, machine_mode mode
, rtx x
)
891 struct riscv_address_info addr
;
893 if (!th_classify_address (&addr
, x
, mode
, reload_completed
))
898 case ADDRESS_REG_REG
:
899 case ADDRESS_REG_UREG
:
/* Indexed form: "base,index,shift".  */
900 fprintf (file
, "%s,%s,%u", reg_names
[REGNO (addr
.reg
)],
901 reg_names
[REGNO (addr
.offset
)], addr
.shift
);
/* Writeback form: "(base),scaled_offset,shift".  */
905 fprintf (file
, "(%s),%ld,%u", reg_names
[REGNO (addr
.reg
)],
906 INTVAL (addr
.offset
) >> addr
.shift
, addr
.shift
);
916 /* Number array of registers X1, X5-X7, X10-X17, X28-X31, to be
917 operated on by instruction th.ipush/th.ipop in XTheadInt. */
/* NOTE(review): the array declaration head (orig lines ~918-920,
   including the entry for X1/RETURN_ADDR per the comment above) and the
   closing brace are missing from this extraction.  */
921 T0_REGNUM
, T1_REGNUM
, T2_REGNUM
,
922 A0_REGNUM
, A1_REGNUM
, A2_REGNUM
, A3_REGNUM
,
923 A4_REGNUM
, A5_REGNUM
, A6_REGNUM
, A7_REGNUM
,
924 T3_REGNUM
, T4_REGNUM
, T5_REGNUM
, T6_REGNUM
,
927 /* If MASK contains registers X1, X5-X7, X10-X17, X28-X31, then
928 return the mask composed of these registers, otherwise return
/* NOTE(review): the end of this comment, the return type, braces and
   the early `return 0' statements (after the target check and inside
   the loop when a required register is absent) are missing from this
   extraction — confirm against upstream.  */
932 th_int_get_mask (unsigned int mask
)
934 unsigned int xtheadint_mask
= 0;
/* th.ipush/th.ipop are rv32 XTheadInt instructions only.  */
936 if (!TARGET_XTHEADINT
|| TARGET_64BIT
)
939 for (unsigned int i
= 0; i
< ARRAY_SIZE (th_int_regs
); i
++)
941 if (!BITSET_P (mask
, th_int_regs
[i
]))
944 xtheadint_mask
|= (1 << th_int_regs
[i
]);
947 return xtheadint_mask
; /* Usually 0xf003fce2. */
950 /* Returns the occupied frame needed to save registers X1, X5-X7,
/* NOTE(review): the end of this comment and the return type/braces are
   missing from this extraction.  One word per register in th_int_regs.  */
954 th_int_get_save_adjustment (void)
956 gcc_assert (TARGET_XTHEADINT
&& !TARGET_64BIT
);
957 return ARRAY_SIZE (th_int_regs
) * UNITS_PER_WORD
;
961 th_int_adjust_cfi_prologue (unsigned int mask
)
963 gcc_assert (TARGET_XTHEADINT
&& !TARGET_64BIT
);
965 rtx dwarf
= NULL_RTX
;
966 rtx adjust_sp_rtx
, reg
, mem
, insn
;
967 int saved_size
= ARRAY_SIZE (th_int_regs
) * UNITS_PER_WORD
;
968 int offset
= saved_size
;
970 for (int regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
971 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
973 offset
-= UNITS_PER_WORD
;
974 reg
= gen_rtx_REG (SImode
, regno
);
975 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
979 insn
= gen_rtx_SET (mem
, reg
);
980 dwarf
= alloc_reg_note (REG_CFA_OFFSET
, insn
, dwarf
);
983 /* Debug info for adjust sp. */
985 gen_rtx_SET (stack_pointer_rtx
,
986 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
987 stack_pointer_rtx
, GEN_INT (-saved_size
)));
988 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, adjust_sp_rtx
, dwarf
);