/* Auxiliary functions for pipeline descriptions pattern of Andes
   NDS32 cpu for GNU compiler
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Contributed by Andes Technology Corporation.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* ------------------------------------------------------------------------ */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "insn-attr.h"
#include "insn-codes.h"
#include "target.h"

#include "nds32-protos.h"

/* ------------------------------------------------------------------------ */

namespace nds32 {
namespace scheduling {

/* Classify the memory access direction.  The direction is unknown if the
   offset is not a constant value.  */
enum memory_access_direction
{
  MEM_ACCESS_DIR_POS,
  MEM_ACCESS_DIR_NEG,
  MEM_ACCESS_DIR_UNKNOWN
};

/* A safe wrapper to the function reg_overlap_mentioned_p ().  */
bool
reg_overlap_p (rtx x, rtx in)
{
  if (x == NULL_RTX || in == NULL_RTX)
    return false;

  return static_cast <bool> (reg_overlap_mentioned_p (x, in));
}

/* Determine the memory access direction of a load/store insn.  */
memory_access_direction
determine_access_direction (rtx_insn *insn)
{
  int post_update_rtx_index;
  rtx plus_rtx;
  rtx mem_rtx;
  rtx offset_rtx;

  switch (get_attr_type (insn))
    {
    case TYPE_LOAD_MULTIPLE:
      gcc_assert (parallel_elements (insn) >= 2);

      post_update_rtx_index = find_post_update_rtx (insn);
      if (post_update_rtx_index != -1)
	plus_rtx = SET_SRC (parallel_element (insn, post_update_rtx_index));
      else
	{
	  /* (parallel
	       [(set (reg) (mem (reg)))              : index 0
		(set (reg) (mem (plus (reg) (...)))) : index 1
		...])  */
	  mem_rtx = SET_SRC (parallel_element (insn, 1));
	  if (GET_CODE (mem_rtx) == UNSPEC)
	    mem_rtx = XVECEXP (mem_rtx, 0, 0);
	  gcc_assert (MEM_P (mem_rtx));
	  plus_rtx = XEXP (mem_rtx, 0);
	}
      break;

    case TYPE_STORE_MULTIPLE:
      gcc_assert (parallel_elements (insn) >= 2);

      post_update_rtx_index = find_post_update_rtx (insn);
      if (post_update_rtx_index != -1)
	plus_rtx = SET_SRC (parallel_element (insn, post_update_rtx_index));
      else
	{
	  /* (parallel
	       [(set (mem (reg)) (reg))              : index 0
		(set (mem (plus (reg) (...))) (reg)) : index 1
		...])  */
	  mem_rtx = SET_DEST (parallel_element (insn, 1));
	  if (GET_CODE (mem_rtx) == UNSPEC)
	    mem_rtx = XVECEXP (mem_rtx, 0, 0);
	  gcc_assert (MEM_P (mem_rtx));
	  plus_rtx = XEXP (mem_rtx, 0);
	}
      break;

    case TYPE_LOAD:
    case TYPE_STORE:
      mem_rtx = extract_mem_rtx (insn);

      switch (GET_CODE (XEXP (mem_rtx, 0)))
	{
	case POST_INC:
	  /* (mem (post_inc (...)))  */
	  return MEM_ACCESS_DIR_POS;

	case POST_DEC:
	  /* (mem (post_dec (...)))  */
	  return MEM_ACCESS_DIR_NEG;

	case PLUS:
	  /* (mem (plus (reg) (...)))  */
	  plus_rtx = XEXP (mem_rtx, 0);
	  break;

	case POST_MODIFY:
	  /* (mem (post_modify (reg) (plus (reg) (...))))  */
	  plus_rtx = XEXP (XEXP (mem_rtx, 0), 1);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (GET_CODE (plus_rtx) == PLUS);

  offset_rtx = XEXP (plus_rtx, 1);
  if (GET_CODE (offset_rtx) == CONST_INT)
    {
      if (INTVAL (offset_rtx) < 0)
	return MEM_ACCESS_DIR_NEG;
      else
	return MEM_ACCESS_DIR_POS;
    }

  return MEM_ACCESS_DIR_UNKNOWN;
}

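/* For example, an address of the form (post_inc (...)) is always classified
   as MEM_ACCESS_DIR_POS and (post_dec (...)) as MEM_ACCESS_DIR_NEG, while for
   (plus (reg) (const_int N)) the sign of N decides the direction; if the
   offset is a register rather than a CONST_INT, the direction is reported as
   MEM_ACCESS_DIR_UNKNOWN.  */
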
/* Return the nth load/store operation in the real micro-operation
   accessing order.  */
rtx
extract_nth_access_rtx (rtx_insn *insn, int n)
{
  int n_elems = parallel_elements (insn);
  int post_update_rtx_index = find_post_update_rtx (insn);
  memory_access_direction direction = determine_access_direction (insn);

  gcc_assert (direction != MEM_ACCESS_DIR_UNKNOWN);

  /* Reverse the order if the direction is negative.  */
  if (direction == MEM_ACCESS_DIR_NEG)
    n = -1 * n - 1;

  /* Step over the post-update rtx, which is not a real memory access.  */
  if (post_update_rtx_index != -1)
    {
      if (n >= 0 && post_update_rtx_index <= n)
	++n;
      else if (n < 0 && post_update_rtx_index >= n + n_elems)
	--n;
    }

  return parallel_element (insn, n);
}

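/* A worked example for extract_nth_access_rtx: for a decrementing access
   (MEM_ACCESS_DIR_NEG) the PARALLEL elements are listed in the reverse of the
   real access order, so the mapping n = -1 * n - 1 turns n = 0 into -1 (the
   last element), n = 1 into -2, and so on; parallel_element () treats such
   negative indices as counting from the end.  When a post-update SET is
   present, the index is additionally stepped over it, since that element is
   not a real memory access.  */
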
/* Returns the register operated on by the nth load/store operation in the
   real micro-operation accessing order.  This function assumes INSN must be
   a multiple-word load/store insn.  */
rtx
extract_nth_lmsw_access_reg (rtx_insn *insn, int n)
{
  rtx nth_rtx = extract_nth_access_rtx (insn, n);

  if (nth_rtx == NULL_RTX)
    return NULL_RTX;

  switch (get_attr_type (insn))
    {
    case TYPE_LOAD_MULTIPLE:
      return SET_DEST (nth_rtx);

    case TYPE_STORE_MULTIPLE:
      return SET_SRC (nth_rtx);

    default:
      gcc_unreachable ();
    }
}

/* Returns the register operated on by the nth load/store operation in the
   real micro-operation accessing order.  This function assumes INSN must be
   a double-word load/store insn.  */
rtx
extract_nth_ls2_access_reg (rtx_insn *insn, int n)
{
  rtx reg;
  machine_mode mode;

  if (post_update_insn_p (insn))
    {
      memory_access_direction direction = determine_access_direction (insn);
      gcc_assert (direction != MEM_ACCESS_DIR_UNKNOWN);

      /* Reverse the order if the direction is negative.  */
      if (direction == MEM_ACCESS_DIR_NEG)
	n = -1 * n - 1;
    }

  /* Handle the out-of-range case.  */
  if (n < -2 || n > 1)
    return NULL_RTX;

  /* Convert the index to a positive one.  */
  if (n < 0)
    n = 2 + n;

  switch (get_attr_type (insn))
    {
    case TYPE_LOAD:
      reg = SET_DEST (PATTERN (insn));
      break;

    case TYPE_STORE:
      reg = SET_SRC (PATTERN (insn));
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);

  switch (GET_MODE (reg))
    {
    case E_DImode:
      mode = SImode;
      break;

    case E_DFmode:
      mode = SFmode;
      break;

    default:
      gcc_unreachable ();
    }

  if (n == 0)
    return gen_lowpart (mode, reg);
  else
    return gen_highpart (mode, reg);
}

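/* For example, when extract_nth_ls2_access_reg is applied to a double-word
   load whose destination is a DImode register, the two micro-operations
   operate on the two SImode halves: after the direction and range adjustments
   above, n == 0 selects gen_lowpart (SImode, reg) and n == 1 selects
   gen_highpart (SImode, reg).  */
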
/* Returns the register operated on by the nth load/store operation in the
   real micro-operation accessing order.  */
rtx
extract_nth_access_reg (rtx_insn *insn, int index)
{
  switch (GET_CODE (PATTERN (insn)))
    {
    case PARALLEL:
      return extract_nth_lmsw_access_reg (insn, index);

    case SET:
      return extract_nth_ls2_access_reg (insn, index);

    default:
      gcc_unreachable ();
    }
}

/* Determine whether a latency occurs when the consumer PBSADA_INSN uses the
   value of DEF_REG in its Ra or Rb fields.  */
bool
pbsada_insn_ra_rb_dep_reg_p (rtx pbsada_insn, rtx def_reg)
{
  rtx unspec_rtx = SET_SRC (PATTERN (pbsada_insn));
  gcc_assert (GET_CODE (unspec_rtx) == UNSPEC);

  rtx pbsada_ra = XVECEXP (unspec_rtx, 0, 0);
  rtx pbsada_rb = XVECEXP (unspec_rtx, 0, 1);

  if (rtx_equal_p (def_reg, pbsada_ra)
      || rtx_equal_p (def_reg, pbsada_rb))
    return true;

  return false;
}

/* Check if INSN is a movd44 insn consuming DEF_REG.  */
bool
movd44_even_dep_p (rtx_insn *insn, rtx def_reg)
{
  if (!movd44_insn_p (insn))
    return false;

  rtx use_rtx = SET_SRC (PATTERN (insn));

  if (REG_P (def_reg))
    {
      return rtx_equal_p (def_reg, use_rtx);
    }
  else if (GET_CODE (def_reg) == SUBREG
	   && GET_MODE (def_reg) == SImode
	   && rtx_equal_p (SUBREG_REG (def_reg), use_rtx))
    {
      if (TARGET_BIG_ENDIAN && SUBREG_BYTE (def_reg) == 4)
	return true;

      if (!TARGET_BIG_ENDIAN && SUBREG_BYTE (def_reg) == 0)
	return true;

      return false;
    }

  return false;
}

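/* Put differently, the checks above identify DEF_REG as the even half of the
   DImode source: the SImode SUBREG at byte offset 0 on little-endian targets
   and at byte offset 4 on big-endian ones.  */
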
} // namespace scheduling
} // namespace nds32

/* ------------------------------------------------------------------------ */

using namespace nds32;
using namespace nds32::scheduling;

namespace { // anonymous namespace

/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at AG (II).  */
bool
n8_consumed_by_addr_in_p (rtx_insn *consumer, rtx def_reg)
{
  rtx use_rtx;

  switch (get_attr_type (consumer))
    {
    case TYPE_BRANCH:
      use_rtx = extract_branch_target_rtx (consumer);
      break;

    case TYPE_LOAD:
      if (load_single_p (consumer))
	use_rtx = extract_mem_rtx (consumer);
      else
	use_rtx = extract_base_reg (consumer);
      break;

    case TYPE_STORE:
      if (store_single_p (consumer)
	  && (!post_update_insn_p (consumer)
	      || immed_offset_p (extract_mem_rtx (consumer))))
	use_rtx = extract_mem_rtx (consumer);
      else
	use_rtx = extract_base_reg (consumer);
      break;

    case TYPE_LOAD_MULTIPLE:
    case TYPE_STORE_MULTIPLE:
      use_rtx = extract_base_reg (consumer);
      break;

    default:
      gcc_unreachable ();
    }

  return reg_overlap_p (def_reg, use_rtx);
}

/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at EX.  */
bool
n8_consumed_by_ex_p (rtx_insn *consumer, rtx def_reg)
{
  rtx use_rtx;

  switch (get_attr_type (consumer))
    {
    case TYPE_ALU:
      if (movd44_even_dep_p (consumer, def_reg))
	return true;

      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_MUL:
      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_MAC:
      use_rtx = extract_mac_non_acc_rtx (consumer);
      break;

    /* Some special instructions, divmodsi4 and udivmodsi4, produce two
       results, the quotient and the remainder.  It requires two micro-
       operations in order to write two registers.  We have to check the
       dependency from the producer to the first micro-operation.  */
    case TYPE_DIV:
      if (INSN_CODE (consumer) == CODE_FOR_divmodsi4
	  || INSN_CODE (consumer) == CODE_FOR_udivmodsi4)
	use_rtx = SET_SRC (parallel_element (consumer, 0));
      else
	use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_BRANCH:
      use_rtx = extract_branch_condition_rtx (consumer);
      break;

    case TYPE_STORE:
      /* exclude ST_!bi_RR */
      if (!post_update_insn_p (consumer)
	  && !immed_offset_p (extract_mem_rtx (consumer)))
	return false;

      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_STORE_MULTIPLE:
      use_rtx = extract_nth_access_rtx (consumer, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return reg_overlap_p (def_reg, use_rtx);
}

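/* For reference, divmodsi4 and udivmodsi4 expand to PARALLEL rtx of roughly
   the shape
     (parallel [(set (reg) (div (reg) (reg)))    ; quotient,  element 0
                (set (reg) (mod (reg) (reg)))    ; remainder, element 1
                ...])
   (udiv/umod for the unsigned variant), which is why the checks above read
   the first micro-operation through parallel_element (consumer, 0).  */
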
/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at AG (II).  */
bool
e8_consumed_by_addr_in_p (rtx_insn *consumer, rtx def_reg)
{
  return n8_consumed_by_addr_in_p (consumer, def_reg);
}

/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at EX.  */
bool
e8_consumed_by_ex_p (rtx_insn *consumer, rtx def_reg)
{
  rtx use_rtx;

  switch (get_attr_type (consumer))
    {
    case TYPE_ALU:
    case TYPE_STORE:
      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_MUL:
    case TYPE_MAC:
    case TYPE_DIV:
    case TYPE_BRANCH:
    case TYPE_STORE_MULTIPLE:
      return n8_consumed_by_ex_p (consumer, def_reg);

    default:
      gcc_unreachable ();
    }

  return reg_overlap_p (def_reg, use_rtx);
}

/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at EX.  */
bool
n9_2r1w_consumed_by_ex_dep_p (rtx_insn *consumer, rtx def_reg)
{
  rtx use_rtx;

  switch (get_attr_type (consumer))
    {
    case TYPE_ALU:
      if (movd44_even_dep_p (consumer, def_reg))
	return true;

      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_PBSAD:
    case TYPE_MUL:
      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_ALU_SHIFT:
      use_rtx = extract_shift_reg (consumer);
      break;

    case TYPE_PBSADA:
      return pbsada_insn_ra_rb_dep_reg_p (consumer, def_reg);

    case TYPE_MAC:
      use_rtx = PATTERN (consumer);
      break;

    case TYPE_DIV:
      if (INSN_CODE (consumer) == CODE_FOR_divmodsi4
	  || INSN_CODE (consumer) == CODE_FOR_udivmodsi4)
	use_rtx = SET_SRC (parallel_element (consumer, 0));
      else
	use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_MMU:
      if (GET_CODE (PATTERN (consumer)) == SET)
	use_rtx = SET_SRC (PATTERN (consumer));
      else
	return true;
      break;

    case TYPE_LOAD:
      /* ADDR_IN_bi_Ra, ADDR_IN_!bi */
      if (post_update_insn_p (consumer))
	use_rtx = extract_base_reg (consumer);
      else
	use_rtx = extract_mem_rtx (consumer);
      break;

    case TYPE_STORE:
      /* ADDR_IN_bi_Ra, ADDR_IN_!bi */
      if (post_update_insn_p (consumer))
	use_rtx = extract_base_reg (consumer);
      else
	use_rtx = extract_mem_rtx (consumer);

      if (reg_overlap_p (def_reg, use_rtx))
	return true;

      /* exclude ST_!bi_RR */
      if (!post_update_insn_p (consumer)
	  && !immed_offset_p (extract_mem_rtx (consumer)))
	return false;

      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_LOAD_MULTIPLE:
      use_rtx = extract_base_reg (consumer);
      break;

    case TYPE_STORE_MULTIPLE:
      use_rtx = extract_base_reg (consumer);
      if (reg_overlap_p (def_reg, use_rtx))
	return true;

      use_rtx = extract_nth_access_rtx (consumer, 0);
      break;

    case TYPE_BRANCH:
      use_rtx = PATTERN (consumer);
      break;

    default:
      gcc_unreachable ();
    }

  if (reg_overlap_p (def_reg, use_rtx))
    return true;

  return false;
}

/* Check the dependency between the producer defining DEF_REG and CONSUMER
   requiring input operand at EX.  */
bool
n9_3r2w_consumed_by_ex_dep_p (rtx_insn *consumer, rtx def_reg)
{
  rtx use_rtx;

  switch (get_attr_type (consumer))
    {
    case TYPE_ALU:
    case TYPE_PBSAD:
    case TYPE_MUL:
      use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_ALU_SHIFT:
      use_rtx = extract_shift_reg (consumer);
      break;

    case TYPE_PBSADA:
      return pbsada_insn_ra_rb_dep_reg_p (consumer, def_reg);

    case TYPE_MAC:
      use_rtx = extract_mac_non_acc_rtx (consumer);
      break;

    /* Some special instructions, divmodsi4 and udivmodsi4, produce two
       results, the quotient and the remainder.  In 2R1W configuration,
       it requires two micro-operations in order to write two registers.
       We have to check the dependency from the producer to the first
       micro-operation.  */
    case TYPE_DIV:
      if (INSN_CODE (consumer) == CODE_FOR_divmodsi4
	  || INSN_CODE (consumer) == CODE_FOR_udivmodsi4)
	use_rtx = SET_SRC (parallel_element (consumer, 0));
      else
	use_rtx = SET_SRC (PATTERN (consumer));
      break;

    case TYPE_MMU:
      if (GET_CODE (PATTERN (consumer)) == SET)
	use_rtx = SET_SRC (PATTERN (consumer));
      else
	return true;
      break;

    case TYPE_LOAD:
    case TYPE_STORE:
      use_rtx = extract_mem_rtx (consumer);
      break;

    case TYPE_LOAD_MULTIPLE:
    case TYPE_STORE_MULTIPLE:
      use_rtx = extract_base_reg (consumer);
      break;

    case TYPE_BRANCH:
      use_rtx = PATTERN (consumer);
      break;

    default:
      gcc_unreachable ();
    }

  if (reg_overlap_p (def_reg, use_rtx))
    return true;

  return false;
}

} // anonymous namespace

/* ------------------------------------------------------------------------ */

/* Guard functions for N8 core.  */

bool
nds32_n8_load_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  if (post_update_insn_p (producer))
    return false;

  rtx def_reg = SET_DEST (PATTERN (producer));

  return n8_consumed_by_addr_in_p (consumer, def_reg);
}

bool
nds32_n8_load_bi_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  if (!post_update_insn_p (producer))
    return false;

  rtx def_reg = SET_DEST (PATTERN (producer));

  return n8_consumed_by_addr_in_p (consumer, def_reg);
}

bool
nds32_n8_load_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  if (post_update_insn_p (producer))
    return false;

  rtx def_reg = SET_DEST (PATTERN (producer));

  return n8_consumed_by_ex_p (consumer, def_reg);
}

bool
nds32_n8_ex_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg;

  switch (get_attr_type (producer))
    {
    case TYPE_ALU:
      if (movd44_insn_p (producer))
	def_reg = extract_movd44_odd_reg (producer);
      else
	def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_MUL:
    case TYPE_MAC:
      def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_DIV:
      if (INSN_CODE (producer) == CODE_FOR_divmodsi4
	  || INSN_CODE (producer) == CODE_FOR_udivmodsi4)
	def_reg = SET_DEST (parallel_element (producer, 1));
      else
	def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_LOAD:
    case TYPE_STORE:
    case TYPE_LOAD_MULTIPLE:
    case TYPE_STORE_MULTIPLE:
      if (!post_update_insn_p (producer))
	return false;

      def_reg = extract_base_reg (producer);
      break;

    default:
      gcc_unreachable ();
    }

  return n8_consumed_by_addr_in_p (consumer, def_reg);
}

bool
nds32_n8_last_load_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  /* If PRODUCER is a post-update LMW insn, the last micro-operation updates
     the base register and the result is ready in EX stage, so we don't need
     to handle that case in this guard function and the corresponding bypass
     rule.  */
  if (post_update_insn_p (producer))
    return false;

  rtx last_def_reg = extract_nth_access_reg (producer, -1);

  if (last_def_reg == NULL_RTX)
    return false;

  gcc_assert (REG_P (last_def_reg) || GET_CODE (last_def_reg) == SUBREG);

  return n8_consumed_by_addr_in_p (consumer, last_def_reg);
}

bool
nds32_n8_last_load_two_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  int index = -2;

  /* If PRODUCER is a post-update insn, there is an additional one micro-
     operation inserted in the end, so the last memory access operation should
     be handled by this guard function and the corresponding bypass rule.  */
  if (post_update_insn_p (producer))
    index = -1;

  rtx last_two_def_reg = extract_nth_access_reg (producer, index);

  if (last_two_def_reg == NULL_RTX)
    return false;

  gcc_assert (REG_P (last_two_def_reg)
	      || GET_CODE (last_two_def_reg) == SUBREG);

  return n8_consumed_by_addr_in_p (consumer, last_two_def_reg);
}

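/* For example, for a plain LMW of four words the second-to-last memory access
   is the element at index -2, but when a base-update micro-operation is
   appended at the end, the element at index -1 is already the second-to-last
   memory access, hence the switch of INDEX from -2 to -1 above.  */
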
bool
nds32_n8_last_load_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  /* If PRODUCER is a post-update LMW insn, the last micro-operation updates
     the base register and the result is ready in EX stage, so we don't need
     to handle that case in this guard function and the corresponding bypass
     rule.  */
  if (post_update_insn_p (producer))
    return false;

  rtx last_def_reg = extract_nth_access_reg (producer, -1);

  if (last_def_reg == NULL_RTX)
    return false;

  gcc_assert (REG_P (last_def_reg) || GET_CODE (last_def_reg) == SUBREG);

  return n8_consumed_by_ex_p (consumer, last_def_reg);
}

/* Guard functions for E8 cores.  */

bool
nds32_e8_load_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg = SET_DEST (PATTERN (producer));

  return e8_consumed_by_addr_in_p (consumer, def_reg);
}

bool
nds32_e8_load_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg = SET_DEST (PATTERN (producer));

  return e8_consumed_by_ex_p (consumer, def_reg);
}

bool
nds32_e8_ex_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg;

  switch (get_attr_type (producer))
    {
    case TYPE_ALU:
      /* No data hazards if AGEN's input is produced by MOVI or SETHI.  */
      if (GET_CODE (PATTERN (producer)) == SET)
	{
	  rtx dest = SET_DEST (PATTERN (producer));
	  rtx src = SET_SRC (PATTERN (producer));

	  if ((REG_P (dest) || GET_CODE (dest) == SUBREG)
	      && (GET_CODE (src) == CONST_INT || GET_CODE (src) == HIGH))
	    return false;
	}

      def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_MUL:
    case TYPE_MAC:
      def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_DIV:
      if (INSN_CODE (producer) == CODE_FOR_divmodsi4
	  || INSN_CODE (producer) == CODE_FOR_udivmodsi4)
	{
	  rtx def_reg1 = SET_DEST (parallel_element (producer, 0));
	  rtx def_reg2 = SET_DEST (parallel_element (producer, 1));

	  return (e8_consumed_by_addr_in_p (consumer, def_reg1)
		  || e8_consumed_by_addr_in_p (consumer, def_reg2));
	}

      def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_LOAD:
    case TYPE_STORE:
    case TYPE_LOAD_MULTIPLE:
    case TYPE_STORE_MULTIPLE:
      if (!post_update_insn_p (producer))
	return false;

      def_reg = extract_base_reg (producer);
      break;

    default:
      gcc_unreachable ();
    }

  return e8_consumed_by_addr_in_p (consumer, def_reg);
}

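/* For reference, the two exempted producers above have the single-SET forms
     movi:  (set (reg) (const_int ...))
     sethi: (set (reg) (high ...))
   which is what the CONST_INT/HIGH test recognizes.  */
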
bool
nds32_e8_last_load_to_ii_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx last_def_reg = extract_nth_access_reg (producer, -1);

  if (last_def_reg == NULL_RTX)
    return false;

  gcc_assert (REG_P (last_def_reg) || GET_CODE (last_def_reg) == SUBREG);

  return e8_consumed_by_addr_in_p (consumer, last_def_reg);
}

bool
nds32_e8_last_load_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx last_def_reg = extract_nth_access_reg (producer, -1);

  if (last_def_reg == NULL_RTX)
    return false;

  gcc_assert (REG_P (last_def_reg) || GET_CODE (last_def_reg) == SUBREG);

  return e8_consumed_by_ex_p (consumer, last_def_reg);
}

/* Guard functions for N9 cores.  */

/* Check dependencies from MM to EX.  */
bool
nds32_n9_2r1w_mm_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg;

  switch (get_attr_type (producer))
    {
    case TYPE_LOAD:
      if (post_update_insn_p (producer))
	return false;

      def_reg = SET_DEST (PATTERN (producer));
      break;

    case TYPE_MUL:
    case TYPE_MAC:
      def_reg = SET_DEST (PATTERN (producer));
      break;

    default:
      gcc_unreachable ();
    }

  return n9_2r1w_consumed_by_ex_dep_p (consumer, def_reg);
}

/* Check dependencies from MM to EX.  */
bool
nds32_n9_3r2w_mm_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx def_reg;

  switch (get_attr_type (producer))
    {
    case TYPE_LOAD:
    case TYPE_MUL:
    case TYPE_MAC:
      def_reg = SET_DEST (PATTERN (producer));
      break;

    /* Some special instructions, divmodsi4 and udivmodsi4, produce two
       results, the quotient and the remainder.  We have to handle them
       individually.  */
    case TYPE_DIV:
      if (INSN_CODE (producer) == CODE_FOR_divmodsi4
	  || INSN_CODE (producer) == CODE_FOR_udivmodsi4)
	{
	  rtx def_reg1 = SET_DEST (parallel_element (producer, 0));
	  rtx def_reg2 = SET_DEST (parallel_element (producer, 1));

	  return (n9_3r2w_consumed_by_ex_dep_p (consumer, def_reg1)
		  || n9_3r2w_consumed_by_ex_dep_p (consumer, def_reg2));
	}

      def_reg = SET_DEST (PATTERN (producer));
      break;

    default:
      gcc_unreachable ();
    }

  return n9_3r2w_consumed_by_ex_dep_p (consumer, def_reg);
}

/* Check dependencies from LMW(N, N) to EX.  */
bool
nds32_n9_last_load_to_ex_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx last_def_reg = extract_nth_access_reg (producer, -1);

  if (nds32_register_ports_config == REG_PORT_2R1W)
    {
      /* The base-update micro operation occupies the last cycle.  */
      if (post_update_insn_p (producer))
	return false;

      /* When the base register is in the list of a load multiple insn and the
	 access order of the base register is not the last one, we need an
	 additional micro operation to commit the load result to the base
	 register -- we can treat the base register as the last defined
	 register.  */
      size_t i;
      size_t n_elems = parallel_elements (producer);
      rtx base_reg = extract_base_reg (producer);

      for (i = 0; i < n_elems; ++i)
	{
	  rtx load_rtx = extract_nth_access_rtx (producer, i);
	  rtx list_element = SET_DEST (load_rtx);

	  if (rtx_equal_p (base_reg, list_element) && i != n_elems - 1)
	    {
	      last_def_reg = base_reg;
	      break;
	    }
	}

      return n9_2r1w_consumed_by_ex_dep_p (consumer, last_def_reg);
    }
  else
    return n9_3r2w_consumed_by_ex_dep_p (consumer, last_def_reg);
}

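/* As an example of the 2R1W special case above: if the base register also
   appears in the load list at some position other than the last access, e.g.
     (parallel [(set (reg 6) (mem ...))
                (set (reg 2) (mem ...))    ; the base register
                (set (reg 7) (mem ...))])
   with base register (reg 2), the loop replaces LAST_DEF_REG with the base
   register, since an extra micro operation commits its loaded value.  */
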
/* ------------------------------------------------------------------------ */