Add hppa*-*-hpux* to targets which do not support split DWARF
[official-gcc.git] / gcc / config / riscv / thead.cc
blob0b7e4d8f0306e2dadf94707e6b05f7fb93cd11b7
1 /* Subroutines used for code generation for RISC-V.
2 Copyright (C) 2023-2024 Free Software Foundation, Inc.
3 Contributed by Christoph Müllner (christoph.muellner@vrull.eu).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #define IN_TARGET_CODE 1
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "target.h"
27 #include "backend.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "explow.h"
31 #include "memmodel.h"
32 #include "emit-rtl.h"
33 #include "optabs.h"
34 #include "poly-int.h"
35 #include "output.h"
36 #include "regs.h"
37 #include "riscv-protos.h"
/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */

static void
split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
    {
      /* reg+const form: hand back both parts.  */
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      /* Any other form is treated as a bare base with zero offset.  */
      *base_ptr = x;
      *offset_ptr = 0;
    }
}
/* Output a mempair instruction with the provided OPERANDS.
   LOAD_P is true if we have a pair of loads (stores otherwise).
   MODE is the access mode (DI or SI).
   CODE is the extension code (UNKNOWN, SIGN_EXTEND or ZERO_EXTEND).
   This instruction does not handle invalid inputs gracefully,
   but is full of assertions to ensure that only valid instructions
   are emitted.  */

const char *
th_mempair_output_move (rtx operands[4], bool load_p,
			machine_mode mode, RTX_CODE code)
{
  rtx reg1, reg2, mem1, mem2, base1, base2;
  HOST_WIDE_INT offset1, offset2;
  rtx output_operands[5];
  const char* format;

  gcc_assert (mode == SImode || mode == DImode);

  /* Paired 64-bit access instructions have a fixed shift amount of 4.
     Paired 32-bit access instructions have a fixed shift amount of 3.  */
  unsigned shamt = (mode == DImode) ? 4 : 3;

  if (load_p)
    {
      /* For loads the registers are operands 0/2 and the mems 1/3.  */
      reg1 = copy_rtx (operands[0]);
      reg2 = copy_rtx (operands[2]);
      mem1 = copy_rtx (operands[1]);
      mem2 = copy_rtx (operands[3]);

      if (mode == SImode)
	{
	  if (code == ZERO_EXTEND)
	    format = "th.lwud\t%0, %1, (%2), %3, %4";
	  else //SIGN_EXTEND or UNKNOWN
	    format = "th.lwd\t%0, %1, (%2), %3, %4";
	}
      else
	format = "th.ldd\t%0, %1, (%2), %3, %4";
    }
  else
    {
      /* For stores the mems are operands 0/2 and the registers 1/3.  */
      reg1 = copy_rtx (operands[1]);
      reg2 = copy_rtx (operands[3]);
      mem1 = copy_rtx (operands[0]);
      mem2 = copy_rtx (operands[2]);

      if (mode == SImode)
	format = "th.swd\t%z0, %z1, (%2), %3, %4";
      else
	format = "th.sdd\t%z0, %z1, (%2), %3, %4";
    }

  /* Decompose both addresses and verify that the two accesses are
     back-to-back off the same base register.  */
  split_plus (XEXP (mem1, 0), &base1, &offset1);
  split_plus (XEXP (mem2, 0), &base2, &offset2);
  gcc_assert (rtx_equal_p (base1, base2));
  auto size1 = MEM_SIZE (mem1);
  auto size2 = MEM_SIZE (mem2);
  gcc_assert (known_eq (size1, size2));
  gcc_assert (known_eq (offset1 + size1, offset2));

  /* The encoded immediate is the first offset scaled down by SHAMT.  */
  HOST_WIDE_INT imm2 = offset1 >> shamt;

  /* Make sure all mempair instruction constraints are met.  */
  gcc_assert (imm2 >= 0 && imm2 < 4);
  gcc_assert ((imm2 << shamt) == offset1);
  gcc_assert (REG_P (reg1));
  gcc_assert (REG_P (reg2));
  gcc_assert (REG_P (base1));
  if (load_p)
    {
      /* A load pair must not clobber its own address register or
	 write the same destination twice.  */
      gcc_assert (REGNO (reg1) != REGNO (reg2));
      gcc_assert (REGNO (reg1) != REGNO (base1));
      gcc_assert (REGNO (reg2) != REGNO (base1));
    }

  /* Output the mempair instruction.  */
  output_operands[0] = copy_rtx (reg1);
  output_operands[1] = copy_rtx (reg2);
  output_operands[2] = copy_rtx (base1);
  output_operands[3] = gen_rtx_CONST_INT (mode, imm2);
  output_operands[4] = gen_rtx_CONST_INT (mode, shamt);
  output_asm_insn (format, output_operands);

  return "";
}
/* Analyse if a pair of loads/stores MEM1 and MEM2 with given MODE
   are consecutive so they can be merged into a mempair instruction.
   REVERSED will be set to true, if a reversal of the accesses is
   required (false otherwise).  Returns true if the accesses can be
   merged (even if reversing is necessary) and false if not.  */

static bool
th_mempair_check_consecutive_mems (machine_mode mode, rtx *mem1, rtx *mem2,
				   bool *reversed)
{
  rtx base1, base2, offset1, offset2;
  extract_base_offset_in_addr (*mem1, &base1, &offset1);
  extract_base_offset_in_addr (*mem2, &base2, &offset2);

  /* Make sure both mems are in base+offset form.  */
  if (!base1 || !base2)
    return false;

  /* If both mems use the same base register, just check the offsets.  */
  if (rtx_equal_p (base1, base2))
    {
      auto size = GET_MODE_SIZE (mode);

      /* MEM1 sits immediately below MEM2: already in ascending order.  */
      if (known_eq (UINTVAL (offset1) + size, UINTVAL (offset2)))
	{
	  *reversed = false;
	  return true;
	}

      /* MEM2 sits immediately below MEM1: consecutive, but reversed.  */
      if (known_eq (UINTVAL (offset2) + size, UINTVAL (offset1)))
	{
	  *reversed = true;
	  return true;
	}

      return false;
    }

  return false;
}
/* Check if the given MEM can be used to define the address of a mempair
   instruction.  */

static bool
th_mempair_operand_p (rtx mem, machine_mode mode)
{
  if (!MEM_SIZE_KNOWN_P (mem))
    return false;

  /* Only DI or SI mempair instructions exist.  */
  gcc_assert (mode == SImode || mode == DImode);
  auto mem_sz = MEM_SIZE (mem);
  auto mode_sz = GET_MODE_SIZE (mode);
  if (!known_eq (mem_sz, mode_sz))
    return false;

  /* Paired 64-bit access instructions have a fixed shift amount of 4.
     Paired 32-bit access instructions have a fixed shift amount of 3.  */
  machine_mode mem_mode = GET_MODE (mem);
  unsigned shamt = (mem_mode == DImode) ? 4 : 3;

  rtx base;
  HOST_WIDE_INT offset;
  split_plus (XEXP (mem, 0), &base, &offset);
  HOST_WIDE_INT imm2 = offset >> shamt;

  /* The scaled offset must fit in the 2-bit immediate field...  */
  if (imm2 < 0 || imm2 >= 4)
    return false;

  /* ...and the offset must be an exact multiple of the scale.  */
  if ((imm2 << shamt) != offset)
    return false;

  return true;
}
218 static bool
219 th_mempair_load_overlap_p (rtx reg1, rtx reg2, rtx mem)
221 if (REGNO (reg1) == REGNO (reg2))
222 return true;
224 if (reg_overlap_mentioned_p (reg1, mem))
225 return true;
227 rtx base;
228 HOST_WIDE_INT offset;
229 split_plus (XEXP (mem, 0), &base, &offset);
231 if (!REG_P (base))
232 return true;
234 if (REG_P (base))
236 if (REGNO (base) == REGNO (reg1)
237 || REGNO (base) == REGNO (reg2))
238 return true;
241 return false;
/* Given OPERANDS of consecutive load/store, check if we can merge
   them into load-pair or store-pair instructions.
   LOAD_P is true if they are load instructions.
   MODE is the mode of memory operation.  */

bool
th_mempair_operands_p (rtx operands[4], bool load_p,
		       machine_mode mode)
{
  rtx mem_1, mem_2, reg_1, reg_2;

  if (load_p)
    {
      reg_1 = operands[0];
      mem_1 = operands[1];
      reg_2 = operands[2];
      mem_2 = operands[3];
      if (!REG_P (reg_1) || !REG_P (reg_2))
	return false;
      /* Destination registers must not clobber each other or the
	 base registers of either memory operand.  */
      if (th_mempair_load_overlap_p (reg_1, reg_2, mem_1))
	return false;
      if (th_mempair_load_overlap_p (reg_1, reg_2, mem_2))
	return false;
    }
  else
    {
      mem_1 = operands[0];
      reg_1 = operands[1];
      mem_2 = operands[2];
      reg_2 = operands[3];
    }

  /* Check if the registers are GP registers.  */
  if (!REG_P (reg_1) || !GP_REG_P (REGNO (reg_1))
      || !REG_P (reg_2) || !GP_REG_P (REGNO (reg_2)))
    return false;

  /* The mems cannot be volatile.  */
  if (!MEM_P (mem_1) || !MEM_P (mem_2))
    return false;
  if (MEM_VOLATILE_P (mem_1) || MEM_VOLATILE_P (mem_2))
    return false;

  /* If we have slow unaligned access, we only accept aligned memory.  */
  if (riscv_slow_unaligned_access_p
      && known_lt (MEM_ALIGN (mem_1), GET_MODE_SIZE (mode) * BITS_PER_UNIT))
    return false;

  /* Check if the addresses are in the form of [base+offset].  */
  bool reversed = false;
  if (!th_mempair_check_consecutive_mems (mode, &mem_1, &mem_2, &reversed))
    return false;

  /* The first memory accesses must be a mempair operand.  */
  if ((!reversed && !th_mempair_operand_p (mem_1, mode))
      || (reversed && !th_mempair_operand_p (mem_2, mode)))
    return false;

  /* The operands must be of the same size.  */
  gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1)),
			GET_MODE_SIZE (GET_MODE (mem_2))));

  return true;
}
309 /* Given OPERANDS of consecutive load/store that can be merged,
310 swap them if they are not in ascending order. */
312 void
313 th_mempair_order_operands (rtx operands[4], bool load_p, machine_mode mode)
315 int mem_op = load_p ? 1 : 0;
316 bool reversed = false;
317 if (!th_mempair_check_consecutive_mems (mode,
318 operands + mem_op,
319 operands + mem_op + 2,
320 &reversed))
321 gcc_unreachable ();
323 if (reversed)
325 /* Irrespective of whether this is a load or a store,
326 we do the same swap. */
327 std::swap (operands[0], operands[2]);
328 std::swap (operands[1], operands[3]);
/* Similar like riscv_save_reg, but saves two registers to memory
   and marks the resulting instruction as frame-related.  */

static void
th_mempair_save_regs (rtx operands[4])
{
  rtx set1 = gen_rtx_SET (operands[0], operands[1]);
  rtx set2 = gen_rtx_SET (operands[2], operands[3]);
  rtx dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
  rtx insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set1, set2)));
  RTX_FRAME_RELATED_P (insn) = 1;

  /* Attach a note spelling out both stores individually so the
     unwinder does not need to interpret the PARALLEL itself.  */
  XVECEXP (dwarf, 0, 0) = copy_rtx (set1);
  XVECEXP (dwarf, 0, 1) = copy_rtx (set2);
  RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 0)) = 1;
  RTX_FRAME_RELATED_P (XVECEXP (dwarf, 0, 1)) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
}
351 /* Similar like riscv_restore_reg, but restores two registers from memory
352 and marks the instruction frame-related. */
354 static void
355 th_mempair_restore_regs (rtx operands[4])
357 rtx set1 = gen_rtx_SET (operands[0], operands[1]);
358 rtx set2 = gen_rtx_SET (operands[2], operands[3]);
359 rtx insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set1, set2)));
360 RTX_FRAME_RELATED_P (insn) = 1;
361 add_reg_note (insn, REG_CFA_RESTORE, operands[0]);
362 add_reg_note (insn, REG_CFA_RESTORE, operands[2]);
365 /* Prepare the OPERANDS array to emit a mempair instruction using the
366 provided information. No checks are performed, the resulting array
367 should be validated using th_mempair_operands_p(). */
369 void
370 th_mempair_prepare_save_restore_operands (rtx operands[4],
371 bool load_p, machine_mode mode,
372 int regno, HOST_WIDE_INT offset,
373 int regno2, HOST_WIDE_INT offset2)
375 int reg_op = load_p ? 0 : 1;
376 int mem_op = load_p ? 1 : 0;
378 rtx mem1 = plus_constant (mode, stack_pointer_rtx, offset);
379 mem1 = gen_frame_mem (mode, mem1);
380 rtx mem2 = plus_constant (mode, stack_pointer_rtx, offset2);
381 mem2 = gen_frame_mem (mode, mem2);
383 operands[reg_op] = gen_rtx_REG (mode, regno);
384 operands[mem_op] = mem1;
385 operands[2 + reg_op] = gen_rtx_REG (mode, regno2);
386 operands[2 + mem_op] = mem2;
389 /* Emit a mempair instruction to save/restore two registers to/from stack. */
391 void
392 th_mempair_save_restore_regs (rtx operands[4], bool load_p,
393 machine_mode mode)
395 gcc_assert (th_mempair_operands_p (operands, load_p, mode));
397 th_mempair_order_operands (operands, load_p, mode);
399 if (load_p)
400 th_mempair_restore_regs (operands);
401 else
402 th_mempair_save_regs (operands);
405 /* Return true if X can be represented as signed immediate of NBITS bits.
406 The immediate is assumed to be shifted by LSHAMT bits left. */
408 static bool
409 valid_signed_immediate (rtx x, unsigned nbits, unsigned lshamt)
411 if (GET_CODE (x) != CONST_INT)
412 return false;
414 HOST_WIDE_INT v = INTVAL (x);
416 HOST_WIDE_INT vunshifted = v >> lshamt;
418 /* Make sure we did not shift out any bits. */
419 if (vunshifted << lshamt != v)
420 return false;
422 unsigned HOST_WIDE_INT imm_reach = 1LL << nbits;
423 return ((unsigned HOST_WIDE_INT) vunshifted + imm_reach/2 < imm_reach);
426 /* Return the address RTX of a move to/from memory
427 instruction. */
429 static rtx
430 th_get_move_mem_addr (rtx dest, rtx src, bool load)
432 rtx mem;
434 if (load)
435 mem = src;
436 else
437 mem = dest;
439 gcc_assert (GET_CODE (mem) == MEM);
440 return XEXP (mem, 0);
/* Return true if X is a valid address for T-Head's memory addressing modes
   with pre/post modification for machine mode MODE.
   If it is, fill in INFO appropriately (if non-NULL).
   If STRICT_P is true then REG_OK_STRICT is in effect.  */

static bool
th_memidx_classify_address_modify (struct riscv_address_info *info, rtx x,
				   machine_mode mode, bool strict_p)
{
  if (!TARGET_XTHEADMEMIDX)
    return false;

  /* No 64-bit accesses on 32-bit targets.  */
  if (!TARGET_64BIT && mode == DImode)
    return false;

  /* Only integral modes up to 8 bytes are supported.  */
  if (!(INTEGRAL_MODE_P (mode) && GET_MODE_SIZE (mode).to_constant () <= 8))
    return false;

  if (GET_CODE (x) != POST_MODIFY
      && GET_CODE (x) != PRE_MODIFY)
    return false;

  /* A {POST,PRE}_MODIFY must be (reg, (plus reg const_int)) with the
     same register in both places.  */
  rtx reg = XEXP (x, 0);
  rtx exp = XEXP (x, 1);
  rtx expreg = XEXP (exp, 0);
  rtx expoff = XEXP (exp, 1);

  if (GET_CODE (exp) != PLUS
      || !rtx_equal_p (expreg, reg)
      || !CONST_INT_P (expoff)
      || !riscv_valid_base_register_p (reg, mode, strict_p))
    return false;

  /* The offset is calculated as (sign_extend(imm5) << imm2).
     Try each of the four possible shift amounts.  */
  const int shamt_bits = 2;
  for (int shamt = 0; shamt < (1 << shamt_bits); shamt++)
    {
      const int nbits = 5;
      if (valid_signed_immediate (expoff, nbits, shamt))
	{
	  if (info)
	    {
	      info->type = ADDRESS_REG_WB;
	      info->reg = reg;
	      info->offset = expoff;
	      info->shift = shamt;
	    }
	  return true;
	}
    }

  return false;
}
497 /* Return TRUE if X is a MEM with a legitimate modify address. */
499 bool
500 th_memidx_legitimate_modify_p (rtx x)
502 if (!MEM_P (x))
503 return false;
505 /* Get the mode from the MEM and unpack it. */
506 machine_mode mode = GET_MODE (x);
507 x = XEXP (x, 0);
509 return th_memidx_classify_address_modify (NULL, x, mode, reload_completed);
512 /* Return TRUE if X is a MEM with a legitimate modify address
513 and the address is POST_MODIFY (if POST is true) or a PRE_MODIFY
514 (otherwise). */
516 bool
517 th_memidx_legitimate_modify_p (rtx x, bool post)
519 if (!th_memidx_legitimate_modify_p (x))
520 return false;
522 /* Unpack the MEM and check the code. */
523 x = XEXP (x, 0);
524 if (post)
525 return GET_CODE (x) == POST_MODIFY;
526 else
527 return GET_CODE (x) == PRE_MODIFY;
/* Provide a buffer for a th.lXia/th.lXib/th.sXia/th.sXib instruction
   for the given MODE.  If LOAD is true, a load instruction will be
   provided (otherwise, a store instruction).  If X is not suitable
   return NULL.  */

static const char *
th_memidx_output_modify (rtx dest, rtx src, machine_mode mode, bool load)
{
  char format[24];
  rtx output_operands[2];
  rtx x = th_get_move_mem_addr (dest, src, load);

  /* Validate x.  */
  if (!th_memidx_classify_address_modify (NULL, x, mode, reload_completed))
    return NULL;

  /* Column index is log2 of the access size (0..3 = byte..doubleword).  */
  int index = exact_log2 (GET_MODE_SIZE (mode).to_constant ());
  bool post = GET_CODE (x) == POST_MODIFY;

  /* Row 0 holds stores, row 1 holds loads; the %s is filled with "a"
     (post-increment) or "b" (pre-increment).  */
  const char *const insn[][4] = {
    {
      "th.sbi%s\t%%z1,%%0",
      "th.shi%s\t%%z1,%%0",
      "th.swi%s\t%%z1,%%0",
      "th.sdi%s\t%%z1,%%0"
    },
    {
      "th.lbui%s\t%%0,%%1",
      "th.lhui%s\t%%0,%%1",
      "th.lwi%s\t%%0,%%1",
      "th.ldi%s\t%%0,%%1"
    }
  };

  snprintf (format, sizeof (format), insn[load][index], post ? "a" : "b");
  output_operands[0] = dest;
  output_operands[1] = src;
  output_asm_insn (format, output_operands);
  return "";
}
571 static bool
572 is_memidx_mode (machine_mode mode)
574 if (mode == QImode || mode == HImode || mode == SImode)
575 return true;
577 if (mode == DImode && TARGET_64BIT)
578 return true;
580 return false;
583 static bool
584 is_fmemidx_mode (machine_mode mode)
586 if (mode == SFmode && TARGET_HARD_FLOAT)
587 return true;
589 if (mode == DFmode && TARGET_DOUBLE_FLOAT)
590 return true;
592 return false;
595 /* Return true if X is a valid address for T-Head's memory addressing modes
596 with scaled register offsets for machine mode MODE.
597 If it is, fill in INFO appropriately (if non-NULL).
598 If STRICT_P is true then REG_OK_STRICT is in effect. */
600 static bool
601 th_memidx_classify_address_index (struct riscv_address_info *info, rtx x,
602 machine_mode mode, bool strict_p)
604 /* Ensure that the mode is supported. */
605 if (!(TARGET_XTHEADMEMIDX && is_memidx_mode (mode))
606 && !(TARGET_XTHEADMEMIDX
607 && TARGET_XTHEADFMEMIDX && is_fmemidx_mode (mode)))
608 return false;
610 if (GET_CODE (x) != PLUS)
611 return false;
613 rtx reg = XEXP (x, 0);
614 enum riscv_address_type type;
615 rtx offset = XEXP (x, 1);
616 int shift;
618 if (!riscv_valid_base_register_p (reg, mode, strict_p))
619 return false;
621 /* (reg:X) */
622 if (REG_P (offset)
623 && GET_MODE (offset) == Xmode)
625 type = ADDRESS_REG_REG;
626 shift = 0;
627 offset = offset;
629 /* (zero_extend:DI (reg:SI)) */
630 else if (GET_CODE (offset) == ZERO_EXTEND
631 && GET_MODE (offset) == DImode
632 && GET_MODE (XEXP (offset, 0)) == SImode)
634 type = ADDRESS_REG_UREG;
635 shift = 0;
636 offset = XEXP (offset, 0);
638 /* (ashift:X (reg:X) (const_int shift)) */
639 else if (GET_CODE (offset) == ASHIFT
640 && GET_MODE (offset) == Xmode
641 && REG_P (XEXP (offset, 0))
642 && GET_MODE (XEXP (offset, 0)) == Xmode
643 && CONST_INT_P (XEXP (offset, 1))
644 && IN_RANGE (INTVAL (XEXP (offset, 1)), 0, 3))
646 type = ADDRESS_REG_REG;
647 shift = INTVAL (XEXP (offset, 1));
648 offset = XEXP (offset, 0);
650 /* (ashift:DI (zero_extend:DI (reg:SI)) (const_int shift)) */
651 else if (GET_CODE (offset) == ASHIFT
652 && GET_MODE (offset) == DImode
653 && GET_CODE (XEXP (offset, 0)) == ZERO_EXTEND
654 && GET_MODE (XEXP (offset, 0)) == DImode
655 && GET_MODE (XEXP (XEXP (offset, 0), 0)) == SImode
656 && CONST_INT_P (XEXP (offset, 1))
657 && IN_RANGE(INTVAL (XEXP (offset, 1)), 0, 3))
659 type = ADDRESS_REG_UREG;
660 shift = INTVAL (XEXP (offset, 1));
661 offset = XEXP (XEXP (offset, 0), 0);
663 else
664 return false;
666 if (!strict_p && GET_CODE (offset) == SUBREG)
667 offset = SUBREG_REG (offset);
669 if (!REG_P (offset)
670 || !riscv_regno_mode_ok_for_base_p (REGNO (offset), mode, strict_p))
671 return false;
673 if (info)
675 info->reg = reg;
676 info->type = type;
677 info->offset = offset;
678 info->shift = shift;
680 return true;
683 /* Return TRUE if X is a MEM with a legitimate indexed address. */
685 bool
686 th_memidx_legitimate_index_p (rtx x)
688 if (!MEM_P (x))
689 return false;
691 /* Get the mode from the MEM and unpack it. */
692 machine_mode mode = GET_MODE (x);
693 x = XEXP (x, 0);
695 return th_memidx_classify_address_index (NULL, x, mode, reload_completed);
698 /* Return TRUE if X is a MEM with a legitimate indexed address
699 and the offset register is zero-extended (if UINDEX is true)
700 or sign-extended (otherwise). */
702 bool
703 th_memidx_legitimate_index_p (rtx x, bool uindex)
705 if (!MEM_P (x))
706 return false;
708 /* Get the mode from the MEM and unpack it. */
709 machine_mode mode = GET_MODE (x);
710 x = XEXP (x, 0);
712 struct riscv_address_info info;
713 if (!th_memidx_classify_address_index (&info, x, mode, reload_completed))
714 return false;
716 if (uindex)
717 return info.type == ADDRESS_REG_UREG;
718 else
719 return info.type == ADDRESS_REG_REG;
/* Provide a buffer for a th.lrX/th.lurX/th.srX/th.surX instruction
   for the given MODE.  If LOAD is true, a load instruction will be
   provided (otherwise, a store instruction).  If X is not suitable
   return NULL.  */

static const char *
th_memidx_output_index (rtx dest, rtx src, machine_mode mode, bool load)
{
  struct riscv_address_info info;
  char format[24];
  rtx output_operands[2];
  rtx x = th_get_move_mem_addr (dest, src, load);

  /* Validate x.  */
  if (!th_memidx_classify_address_index (&info, x, mode, reload_completed))
    return NULL;

  /* Column index is log2 of the access size (0..3 = byte..doubleword).  */
  int index = exact_log2 (GET_MODE_SIZE (mode).to_constant ());
  bool uindex = info.type == ADDRESS_REG_UREG;

  /* Row 0 holds stores, row 1 holds loads; the %s receives "u" for
     zero-extended index registers, otherwise nothing.  */
  const char *const insn[][4] = {
    {
      "th.s%srb\t%%z1,%%0",
      "th.s%srh\t%%z1,%%0",
      "th.s%srw\t%%z1,%%0",
      "th.s%srd\t%%z1,%%0"
    },
    {
      "th.l%srbu\t%%0,%%1",
      "th.l%srhu\t%%0,%%1",
      "th.l%srw\t%%0,%%1",
      "th.l%srd\t%%0,%%1"
    }
  };

  snprintf (format, sizeof (format), insn[load][index], uindex ? "u" : "");
  output_operands[0] = dest;
  output_operands[1] = src;
  output_asm_insn (format, output_operands);
  return "";
}
/* Provide a buffer for a th.flX/th.fluX/th.fsX/th.fsuX instruction
   for the given MODE.  If LOAD is true, a load instruction will be
   provided (otherwise, a store instruction).  If X is not suitable
   return NULL.  */

static const char *
th_fmemidx_output_index (rtx dest, rtx src, machine_mode mode, bool load)
{
  struct riscv_address_info info;
  char format[24];
  rtx output_operands[2];
  rtx x = th_get_move_mem_addr (dest, src, load);

  /* Validate x.  */
  if (!th_memidx_classify_address_index (&info, x, mode, false))
    return NULL;

  /* Column index: 0 for SFmode (size 4), 1 for DFmode (size 8).  */
  int index = exact_log2 (GET_MODE_SIZE (mode).to_constant ()) - 2;
  bool uindex = info.type == ADDRESS_REG_UREG;

  /* Row 0 holds stores, row 1 holds loads; the %s receives "u" for
     zero-extended index registers, otherwise nothing.  */
  const char *const insn[][2] = {
    {
      "th.fs%srw\t%%z1,%%0",
      "th.fs%srd\t%%z1,%%0"
    },
    {
      "th.fl%srw\t%%0,%%1",
      "th.fl%srd\t%%0,%%1"
    }
  };

  snprintf (format, sizeof (format), insn[load][index], uindex ? "u" : "");
  output_operands[0] = dest;
  output_operands[1] = src;
  output_asm_insn (format, output_operands);
  return "";
}
802 /* Return true if X is a valid address for T-Head's memory addressing modes
803 for machine mode MODE. If it is, fill in INFO appropriately (if non-NULL).
804 If STRICT_P is true then REG_OK_STRICT is in effect. */
806 bool
807 th_classify_address (struct riscv_address_info *info, rtx x,
808 machine_mode mode, bool strict_p)
810 switch (GET_CODE (x))
812 case PLUS:
813 if (th_memidx_classify_address_index (info, x, mode, strict_p))
814 return true;
815 break;
817 case POST_MODIFY:
818 case PRE_MODIFY:
819 if (th_memidx_classify_address_modify (info, x, mode, strict_p))
820 return true;
821 break;
823 default:
824 return false;
827 return false;
/* Provide a string containing a XTheadMemIdx instruction for the given
   MODE from the provided SRC to the provided DEST.
   A pointer to a NULL-terminated string containing the instruction will
   be returned if a suitable instruction is available.  Otherwise, this
   function returns NULL.  */

const char *
th_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  machine_mode mode;
  const char *insn = NULL;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);

  /* Source and destination must agree in mode (constant zero source
     is also accepted).  */
  if (!(mode == GET_MODE (src) || src == CONST0_RTX (mode)))
    return NULL;

  if (dest_code == REG && src_code == MEM)
    {
      /* Load: integer modes, or FP values loaded into GPRs, use the
	 integer memidx instructions.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  || (GET_MODE_CLASS (mode) == MODE_FLOAT && GP_REG_P (REGNO (dest))))
	{
	  if ((insn = th_memidx_output_index (dest, src, mode, true)))
	    return insn;
	  if ((insn = th_memidx_output_modify (dest, src, mode, true)))
	    return insn;
	}
      /* FP loads into FPRs use the XTheadFMemIdx instructions.  */
      else if (GET_MODE_CLASS (mode) == MODE_FLOAT && HARDFP_REG_P (REGNO (dest)))
	{
	  if ((insn = th_fmemidx_output_index (dest, src, mode, true)))
	    return insn;
	}
    }
  else if (dest_code == MEM && (src_code == REG || src == CONST0_RTX (mode)))
    {
      /* Store: integer modes, constant zero, or FP values in GPRs.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  || src == CONST0_RTX (mode)
	  || (GET_MODE_CLASS (mode) == MODE_FLOAT && GP_REG_P (REGNO (src))))
	{
	  if ((insn = th_memidx_output_index (dest, src, mode, false)))
	    return insn;
	  if ((insn = th_memidx_output_modify (dest, src, mode, false)))
	    return insn;
	}
      /* FP stores from FPRs use the XTheadFMemIdx instructions.  */
      else if (GET_MODE_CLASS (mode) == MODE_FLOAT && HARDFP_REG_P (REGNO (src)))
	{
	  if ((insn = th_fmemidx_output_index (dest, src, mode, false)))
	    return insn;
	}
    }
  return NULL;
}
886 /* Implement TARGET_PRINT_OPERAND_ADDRESS for XTheadMemIdx. */
888 bool
889 th_print_operand_address (FILE *file, machine_mode mode, rtx x)
891 struct riscv_address_info addr;
893 if (!th_classify_address (&addr, x, mode, reload_completed))
894 return false;
896 switch (addr.type)
898 case ADDRESS_REG_REG:
899 case ADDRESS_REG_UREG:
900 fprintf (file, "%s,%s,%u", reg_names[REGNO (addr.reg)],
901 reg_names[REGNO (addr.offset)], addr.shift);
902 return true;
904 case ADDRESS_REG_WB:
905 fprintf (file, "(%s),%ld,%u", reg_names[REGNO (addr.reg)],
906 INTVAL (addr.offset) >> addr.shift, addr.shift);
907 return true;
909 default:
910 gcc_unreachable ();
913 gcc_unreachable ();
/* Number array of registers X1, X5-X7, X10-X17, X28-X31, to be
   operated on by instruction th.ipush/th.ipop in XTheadInt.  */

int th_int_regs[] ={
  RETURN_ADDR_REGNUM,
  T0_REGNUM, T1_REGNUM, T2_REGNUM,
  A0_REGNUM, A1_REGNUM, A2_REGNUM, A3_REGNUM,
  A4_REGNUM, A5_REGNUM, A6_REGNUM, A7_REGNUM,
  T3_REGNUM, T4_REGNUM, T5_REGNUM, T6_REGNUM,
};
927 /* If MASK contains registers X1, X5-X7, X10-X17, X28-X31, then
928 return the mask composed of these registers, otherwise return
929 zero. */
931 unsigned int
932 th_int_get_mask (unsigned int mask)
934 unsigned int xtheadint_mask = 0;
936 if (!TARGET_XTHEADINT || TARGET_64BIT)
937 return 0;
939 for (unsigned int i = 0; i < ARRAY_SIZE (th_int_regs); i++)
941 if (!BITSET_P (mask, th_int_regs[i]))
942 return 0;
944 xtheadint_mask |= (1 << th_int_regs[i]);
947 return xtheadint_mask; /* Usually 0xf003fce2. */
950 /* Returns the occupied frame needed to save registers X1, X5-X7,
951 X10-X17, X28-X31. */
953 unsigned int
954 th_int_get_save_adjustment (void)
956 gcc_assert (TARGET_XTHEADINT && !TARGET_64BIT);
957 return ARRAY_SIZE (th_int_regs) * UNITS_PER_WORD;
961 th_int_adjust_cfi_prologue (unsigned int mask)
963 gcc_assert (TARGET_XTHEADINT && !TARGET_64BIT);
965 rtx dwarf = NULL_RTX;
966 rtx adjust_sp_rtx, reg, mem, insn;
967 int saved_size = ARRAY_SIZE (th_int_regs) * UNITS_PER_WORD;
968 int offset = saved_size;
970 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
971 if (BITSET_P (mask, regno - GP_REG_FIRST))
973 offset -= UNITS_PER_WORD;
974 reg = gen_rtx_REG (SImode, regno);
975 mem = gen_frame_mem (SImode, plus_constant (Pmode,
976 stack_pointer_rtx,
977 offset));
979 insn = gen_rtx_SET (mem, reg);
980 dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
983 /* Debug info for adjust sp. */
984 adjust_sp_rtx =
985 gen_rtx_SET (stack_pointer_rtx,
986 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
987 stack_pointer_rtx, GEN_INT (-saved_size)));
988 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, dwarf);
990 return dwarf;