/* Auxiliary functions for expand movmem, setmem, cmpmem, load_multiple
   and store_multiple pattern of Andes NDS32 cpu for GNU compiler
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Contributed by Andes Technology Corporation.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* ------------------------------------------------------------------------ */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "explow.h"

/* ------------------------------------------------------------------------ */
/* Functions to expand load_multiple and store_multiple.
   They are auxiliary functions that help create the rtx templates.
   Check the nds32-multiple.md file for the patterns.  */
static rtx
nds32_expand_load_multiple (int base_regno, int count,
                            rtx base_addr, rtx basemem,
                            bool update_base_reg_p,
                            rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  /* Generate an unaligned load for a single word, so that the load cannot
     be pulled out of the parallel; otherwise it would become a plain lwi
     and the unaligned access would be lost.  */
  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
        {
          *update_base_reg = gen_reg_rtx (SImode);
          return gen_unaligned_load_update_base_w (*update_base_reg,
                                                   reg, base_addr);
        }
      else
        return gen_unaligned_load_w (reg, gen_rtx_MEM (SImode, base_addr));
    }
  /* Create the pattern that is presented in nds32-multiple.md.  */
  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset = count * 4;
      new_addr = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }
  for (par_index = 0; par_index < count; par_index++)
    {
      offset = par_index * 4;
      /* Load 4 bytes (one word) into each register.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem = adjust_automodify_address_nv (basemem, SImode,
                                          new_addr, offset);
      reg = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, (par_index + start_idx)) = gen_rtx_SET (reg, mem);
    }

  return result;
}
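
/* For illustration only (hypothetical operands): a call such as
   nds32_expand_load_multiple (0, 2, base, basemem, false, NULL)
   is expected to build a PARALLEL of roughly this shape:

     (parallel [(set (reg:SI 0) (mem:SI (reg base)))
                (set (reg:SI 1) (mem:SI (plus (reg base) (const_int 4))))])

   With update_base_reg_p, an extra leading element
   (set (reg update) (plus (reg base) (const_int 8))) is prepended,
   so that the load_multiple patterns in nds32-multiple.md can match it.  */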

static rtx
nds32_expand_store_multiple (int base_regno, int count,
                             rtx base_addr, rtx basemem,
                             bool update_base_reg_p,
                             rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  /* As in the load case, use an unaligned store for a single word so that
     the store cannot be pulled out of the parallel and lose the unaligned
     access.  */
  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
        {
          *update_base_reg = gen_reg_rtx (SImode);
          return gen_unaligned_store_update_base_w (*update_base_reg,
                                                    base_addr, reg);
        }
      else
        return gen_unaligned_store_w (gen_rtx_MEM (SImode, base_addr), reg);
    }
  /* Create the pattern that is presented in nds32-multiple.md.  */
  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset = count * 4;
      new_addr = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }
  for (par_index = 0; par_index < count; par_index++)
    {
      offset = par_index * 4;
      /* Store 4 bytes (one word) from each register to memory.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem = adjust_automodify_address_nv (basemem, SImode,
                                          new_addr, offset);
      reg = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, par_index + start_idx) = gen_rtx_SET (mem, reg);
    }

  return result;
}
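
/* For illustration only (hypothetical operands):
   nds32_expand_store_multiple (0, 2, base, basemem, false, NULL)
   should produce roughly

     (parallel [(set (mem:SI (reg base)) (reg:SI 0))
                (set (mem:SI (plus (reg base) (const_int 4))) (reg:SI 1))])

   i.e. the same shape as the load case with each SET reversed, which the
   store_multiple patterns in nds32-multiple.md are expected to match.  */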

/* Function to move block memory content by
   using load_multiple and store_multiple.
   This is an auxiliary extern function to help create rtx templates.
   Check the nds32-multiple.md file for the patterns.  */
int
nds32_expand_movmemqi (rtx dstmem, rtx srcmem, rtx total_bytes, rtx alignment)
{
  HOST_WIDE_INT in_words, out_words;
  rtx dst_base_reg, src_base_reg;
  int maximum_bytes;
  /* Because the reduced register set has only a few registers
     (r0~r5, r6~r10, r15, r28~r31, where 'r15' and 'r28~r31'
     cannot be used for register allocation),
     using 8 registers (32 bytes) for moving a memory block
     may easily consume all of them.
     That makes register allocation/spilling hard to work.
     So we only allow at most 4 registers (16 bytes) for
     moving a memory block under the reduced register set.  */
  if (TARGET_REDUCED_REGS)
    maximum_bytes = 16;
  else
    maximum_bytes = 32;
  /* 1. Total_bytes must be a compile-time integer constant.
     2. Alignment must be a compile-time integer constant.
     3. At most 4 or 8 registers: 4 * 4 = 16 bytes, 8 * 4 = 32 bytes.
     4. The block size must be a multiple of 4 bytes.
     5. The alignment must be a multiple of 4 bytes.  */
  if (GET_CODE (total_bytes) != CONST_INT
      || GET_CODE (alignment) != CONST_INT
      || INTVAL (total_bytes) > maximum_bytes
      || INTVAL (total_bytes) & 3
      || INTVAL (alignment) & 3)
    return 0;
  dst_base_reg = copy_to_mode_reg (SImode, XEXP (dstmem, 0));
  src_base_reg = copy_to_mode_reg (SImode, XEXP (srcmem, 0));

  out_words = in_words = INTVAL (total_bytes) / UNITS_PER_WORD;
  emit_insn (nds32_expand_load_multiple (0, in_words, src_base_reg,
                                         srcmem, false, NULL));
  emit_insn (nds32_expand_store_multiple (0, out_words, dst_base_reg,
                                          dstmem, false, NULL));
  /* Successfully created the patterns, return 1.  */
  return 1;
}
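
/* Worked example (for illustration only): a 12-byte block copy with
   4-byte alignment passes the checks above, giving
   in_words = out_words = 12 / UNITS_PER_WORD = 3, so the expansion is a
   single 3-word load-multiple from the source block into r0-r2 followed
   by a single 3-word store-multiple from r0-r2 to the destination.  */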

/* ------------------------------------------------------------------------ */