mips.c (mips_file_start): Add ".previous" directives to both ".section"s.
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
70 XVECEXP (X, 0, 0)
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
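/* For illustration, such a wrapper has the form
   (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + TYPE), where TYPE
   is an enum mips_symbol_type value; UNSPEC_ADDRESS recovers the
   symbol_ref and UNSPEC_ADDRESS_TYPE recovers TYPE.  */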
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
91 part of the frame. */
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
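/* As a worked example, a normal-mode frame of 0x9000 bytes would
   typically be allocated in two steps: a first adjustment of 0x7ff0,
   which keeps the register save offsets within SMALL_OPERAND range,
   followed by a second adjustment of the remaining 0x1010 bytes.  */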
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
99 (INSN_P (INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
110 : (INSN))
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
116 : (INSN))
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
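/* A minimal usage sketch of the iterator above (process_insn is a
   hypothetical callback):

     rtx subinsn;
     FOR_EACH_SUBINSN (subinsn, insn)
       if (USEFUL_INSN_P (subinsn))
         process_insn (subinsn);

   For an INSN that is not a delayed-branch SEQUENCE, the loop body runs
   exactly once, with SUBINSN == INSN.  */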
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
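/* For example, BITSET_P (cfun->machine->frame.mask, 31) tests whether
   the return-address register $31 is in the GPR save mask.  */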
128 /* Classifies an address.
130 ADDRESS_REG
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
134 ADDRESS_LO_SUM
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
138 ADDRESS_CONST_INT
139 A signed 16-bit constant address.
141 ADDRESS_SYMBOLIC:
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
144 ADDRESS_REG,
145 ADDRESS_LO_SUM,
146 ADDRESS_CONST_INT,
147 ADDRESS_SYMBOLIC
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
162 MIPS_SF_FTYPE_V2SF,
163 MIPS_SF_FTYPE_SF,
164 MIPS_SF_FTYPE_SF_SF,
165 MIPS_DF_FTYPE_DF,
166 MIPS_DF_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
169 MIPS_DI_FTYPE_DI_SI,
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_DI_SI,
174 MIPS_SI_FTYPE_PTR_SI,
175 MIPS_SI_FTYPE_SI,
176 MIPS_SI_FTYPE_SI_SI,
177 MIPS_SI_FTYPE_V2HI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
181 MIPS_SI_FTYPE_VOID,
182 MIPS_V2HI_FTYPE_SI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_SI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_SI_SI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
205 /* The last type. */
206 MIPS_MAX_FTYPE_MAX
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
215 MIPS_BUILTIN_DIRECT,
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
225 MIPS_BUILTIN_MOVF,
226 MIPS_BUILTIN_MOVT,
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
252 MACRO (f), \
253 MACRO (un), \
254 MACRO (eq), \
255 MACRO (ueq), \
256 MACRO (olt), \
257 MACRO (ult), \
258 MACRO (ole), \
259 MACRO (ule), \
260 MACRO (sf), \
261 MACRO (ngle), \
262 MACRO (seq), \
263 MACRO (ngl), \
264 MACRO (lt), \
265 MACRO (nge), \
266 MACRO (le), \
267 MACRO (ngt)
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
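/* For illustration, the two expansions above produce the enumerators
   MIPS_FP_COND_f, MIPS_FP_COND_un, ..., MIPS_FP_COND_ngt and the
   parallel strings "f", "un", ..., "ngt", so that
   mips_fp_conditions[MIPS_FP_COND_ueq] is "ueq".  */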
 281 /* A function to save or restore a register. The first argument is the
 282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
289 struct mips_sim;
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
324 const char **);
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
401 tree, int *, int);
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
405 const_tree, bool);
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
407 const_tree, bool);
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
409 tree, bool);
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_scalar_mode_supported_p (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
420 rtx, tree);
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
423 rtx, tree);
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (const_tree, const_tree);
428 static void mips_set_mips16_mode (int);
429 static void mips_insert_attributes (tree, tree *);
430 static tree mips_merge_decl_attributes (tree, tree);
431 static void mips_set_current_function (tree);
432 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
433 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
434 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
436 /* Structure to be filled in by compute_frame_size with register
437 save masks, and offsets for the current function. */
439 struct mips_frame_info GTY(())
441 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
442 HOST_WIDE_INT var_size; /* # bytes that variables take up */
443 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
444 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
445 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
446 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
447 unsigned int mask; /* mask of saved gp registers */
448 unsigned int fmask; /* mask of saved fp registers */
449 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
450 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
451 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
452 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
453 bool initialized; /* true if frame size already calculated */
454 int num_gp; /* number of gp registers saved */
455 int num_fp; /* number of fp registers saved */
458 struct machine_function GTY(()) {
459 /* Pseudo-reg holding the value of $28 in a mips16 function which
460 refers to GP relative global variables. */
461 rtx mips16_gp_pseudo_rtx;
463 /* The number of extra stack bytes taken up by register varargs.
464 This area is allocated by the callee at the very top of the frame. */
465 int varargs_size;
467 /* Current frame information, calculated by compute_frame_size. */
468 struct mips_frame_info frame;
470 /* The register to use as the global pointer within this function. */
471 unsigned int global_pointer;
473 /* True if mips_adjust_insn_length should ignore an instruction's
474 hazard attribute. */
475 bool ignore_hazard_length_p;
477 /* True if the whole function is suitable for .set noreorder and
478 .set nomacro. */
479 bool all_noreorder_p;
481 /* True if the function is known to have an instruction that needs $gp. */
482 bool has_gp_insn_p;
484 /* True if we have emitted an instruction to initialize
485 mips16_gp_pseudo_rtx. */
486 bool initialized_mips16_gp_pseudo_p;
489 /* Information about a single argument. */
490 struct mips_arg_info
492 /* True if the argument is passed in a floating-point register, or
493 would have been if we hadn't run out of registers. */
494 bool fpr_p;
496 /* The number of words passed in registers, rounded up. */
497 unsigned int reg_words;
499 /* For EABI, the offset of the first register from GP_ARG_FIRST or
500 FP_ARG_FIRST. For other ABIs, the offset of the first register from
501 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
502 comment for details).
504 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
505 on the stack. */
506 unsigned int reg_offset;
508 /* The number of words that must be passed on the stack, rounded up. */
509 unsigned int stack_words;
511 /* The offset from the start of the stack overflow area of the argument's
512 first stack word. Only meaningful when STACK_WORDS is nonzero. */
513 unsigned int stack_offset;
517 /* Information about an address described by mips_address_type.
519 ADDRESS_CONST_INT
520 No fields are used.
522 ADDRESS_REG
523 REG is the base register and OFFSET is the constant offset.
525 ADDRESS_LO_SUM
526 REG is the register that contains the high part of the address,
527 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
528 is the type of OFFSET's symbol.
530 ADDRESS_SYMBOLIC
531 SYMBOL_TYPE is the type of symbol being referenced. */
533 struct mips_address_info
535 enum mips_address_type type;
536 rtx reg;
537 rtx offset;
538 enum mips_symbol_type symbol_type;
542 /* One stage in a constant building sequence. These sequences have
543 the form:
545 A = VALUE[0]
546 A = A CODE[1] VALUE[1]
547 A = A CODE[2] VALUE[2]
550 where A is an accumulator, each CODE[i] is a binary rtl operation
551 and each VALUE[i] is a constant integer. */
552 struct mips_integer_op {
553 enum rtx_code code;
554 unsigned HOST_WIDE_INT value;
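/* For example, loading the 32-bit constant 0x12345678 could be described
   by two entries: VALUE[0] = 0x12340000 (loaded with LUI), then
   CODE[1] = IOR, VALUE[1] = 0x5678 (applied with ORI).  */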
558 /* The largest number of operations needed to load an integer constant.
559 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
560 When the lowest bit is clear, we can try, but reject a sequence with
561 an extra SLL at the end. */
562 #define MIPS_MAX_INTEGER_OPS 7
564 /* Information about a MIPS16e SAVE or RESTORE instruction. */
565 struct mips16e_save_restore_info {
566 /* The number of argument registers saved by a SAVE instruction.
567 0 for RESTORE instructions. */
568 unsigned int nargs;
570 /* Bit X is set if the instruction saves or restores GPR X. */
571 unsigned int mask;
573 /* The total number of bytes to allocate. */
574 HOST_WIDE_INT size;
577 /* Global variables for machine-dependent things. */
579 /* Threshold for data being put into the small data/bss area, instead
580 of the normal data area. */
581 int mips_section_threshold = -1;
583 /* Count the number of .file directives, so that .loc is up to date. */
584 int num_source_filenames = 0;
 586 /* Count the number of sdb-related labels generated (to find block
 587 start and end boundaries). */
588 int sdb_label_count = 0;
590 /* Next label # for each statement for Silicon Graphics IRIS systems. */
591 int sym_lineno = 0;
593 /* Name of the file containing the current function. */
594 const char *current_function_file = "";
596 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
597 int set_noreorder;
598 int set_noat;
599 int set_nomacro;
600 int set_volatile;
602 /* The next branch instruction is a branch likely, not branch normal. */
603 int mips_branch_likely;
605 /* The operands passed to the last cmpMM expander. */
606 rtx cmp_operands[2];
608 /* The target cpu for code generation. */
609 enum processor_type mips_arch;
610 const struct mips_cpu_info *mips_arch_info;
612 /* The target cpu for optimization and scheduling. */
613 enum processor_type mips_tune;
614 const struct mips_cpu_info *mips_tune_info;
616 /* Which instruction set architecture to use. */
617 int mips_isa;
619 /* Which ABI to use. */
620 int mips_abi = MIPS_ABI_DEFAULT;
622 /* Cost information to use. */
623 const struct mips_rtx_cost_data *mips_cost;
625 /* Remember the ambient target flags, excluding mips16. */
626 static int mips_base_target_flags;
627 /* The mips16 command-line target flags only. */
628 static bool mips_base_mips16;
629 /* Similar copies of option settings. */
630 static int mips_base_schedule_insns; /* flag_schedule_insns */
631 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
632 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
633 static int mips_base_align_loops; /* align_loops */
634 static int mips_base_align_jumps; /* align_jumps */
635 static int mips_base_align_functions; /* align_functions */
636 static GTY(()) int mips16_flipper;
 638 /* The -mcode-readable setting. */
639 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
641 /* The -mllsc setting. */
642 enum mips_llsc_setting mips_llsc = LLSC_DEFAULT;
644 /* The architecture selected by -mipsN. */
645 static const struct mips_cpu_info *mips_isa_info;
647 /* If TRUE, we split addresses into their high and low parts in the RTL. */
648 int mips_split_addresses;
650 /* Mode used for saving/restoring general purpose registers. */
651 static enum machine_mode gpr_mode;
 653 /* Array indicating whether a given hard register can support
 654 a given mode. */
655 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
657 /* List of all MIPS punctuation characters used by print_operand. */
658 char mips_print_operand_punct[256];
660 /* Map GCC register number to debugger register number. */
661 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
662 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
664 /* A copy of the original flag_delayed_branch: see override_options. */
665 static int mips_flag_delayed_branch;
667 static GTY (()) int mips_output_filename_first_time = 1;
669 /* mips_split_p[X] is true if symbols of type X can be split by
670 mips_split_symbol(). */
671 bool mips_split_p[NUM_SYMBOL_TYPES];
673 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
674 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
675 if they are matched by a special .md file pattern. */
676 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
678 /* Likewise for HIGHs. */
679 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
681 /* Map hard register number to register class */
682 const enum reg_class mips_regno_to_class[] =
684 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
685 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
686 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
687 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
688 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
689 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
690 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
691 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
695 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
696 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
697 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
698 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
699 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
700 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
701 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
702 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
703 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
707 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
708 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
709 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
710 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
711 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
715 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
716 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
717 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
718 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
719 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
723 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
724 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
725 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
726 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
727 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
728 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
729 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
730 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
733 /* Table of machine dependent attributes. */
734 const struct attribute_spec mips_attribute_table[] =
736 { "long_call", 0, 0, false, true, true, NULL },
737 { "far", 0, 0, false, true, true, NULL },
738 { "near", 0, 0, false, true, true, NULL },
739 /* Switch MIPS16 ASE on and off per-function. We would really like
740 to make these type attributes, but GCC doesn't provide the hooks
741 we need to support the right conversion rules. As declaration
742 attributes, they affect code generation but don't carry other
743 semantics. */
744 { "mips16", 0, 0, true, false, false, NULL },
745 { "nomips16", 0, 0, true, false, false, NULL },
746 { NULL, 0, 0, false, false, false, NULL }
749 /* A table describing all the processors gcc knows about. Names are
750 matched in the order listed. The first mention of an ISA level is
751 taken as the canonical name for that ISA.
753 To ease comparison, please keep this table in the same order
754 as gas's mips_cpu_info_table[]. Please also make sure that
755 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
756 options correctly. */
757 const struct mips_cpu_info mips_cpu_info_table[] = {
758 /* Entries for generic ISAs */
759 { "mips1", PROCESSOR_R3000, 1 },
760 { "mips2", PROCESSOR_R6000, 2 },
761 { "mips3", PROCESSOR_R4000, 3 },
762 { "mips4", PROCESSOR_R8000, 4 },
763 { "mips32", PROCESSOR_4KC, 32 },
764 { "mips32r2", PROCESSOR_M4K, 33 },
765 { "mips64", PROCESSOR_5KC, 64 },
767 /* MIPS I */
768 { "r3000", PROCESSOR_R3000, 1 },
769 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
770 { "r3900", PROCESSOR_R3900, 1 },
772 /* MIPS II */
773 { "r6000", PROCESSOR_R6000, 2 },
775 /* MIPS III */
776 { "r4000", PROCESSOR_R4000, 3 },
777 { "vr4100", PROCESSOR_R4100, 3 },
778 { "vr4111", PROCESSOR_R4111, 3 },
779 { "vr4120", PROCESSOR_R4120, 3 },
780 { "vr4130", PROCESSOR_R4130, 3 },
781 { "vr4300", PROCESSOR_R4300, 3 },
782 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
783 { "r4600", PROCESSOR_R4600, 3 },
784 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
785 { "r4650", PROCESSOR_R4650, 3 },
787 /* MIPS IV */
788 { "r8000", PROCESSOR_R8000, 4 },
789 { "vr5000", PROCESSOR_R5000, 4 },
790 { "vr5400", PROCESSOR_R5400, 4 },
791 { "vr5500", PROCESSOR_R5500, 4 },
792 { "rm7000", PROCESSOR_R7000, 4 },
793 { "rm9000", PROCESSOR_R9000, 4 },
795 /* MIPS32 */
796 { "4kc", PROCESSOR_4KC, 32 },
797 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
798 { "4kp", PROCESSOR_4KP, 32 },
799 { "4ksc", PROCESSOR_4KC, 32 },
801 /* MIPS32 Release 2 */
802 { "m4k", PROCESSOR_M4K, 33 },
803 { "4kec", PROCESSOR_4KC, 33 },
804 { "4kem", PROCESSOR_4KC, 33 },
805 { "4kep", PROCESSOR_4KP, 33 },
806 { "4ksd", PROCESSOR_4KC, 33 },
808 { "24kc", PROCESSOR_24KC, 33 },
809 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
810 { "24kf", PROCESSOR_24KF2_1, 33 },
811 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
812 { "24kfx", PROCESSOR_24KF1_1, 33 },
813 { "24kx", PROCESSOR_24KF1_1, 33 },
815 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
816 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
817 { "24kef", PROCESSOR_24KF2_1, 33 },
818 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
819 { "24kefx", PROCESSOR_24KF1_1, 33 },
820 { "24kex", PROCESSOR_24KF1_1, 33 },
822 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
823 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
824 { "34kf", PROCESSOR_24KF2_1, 33 },
825 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
826 { "34kfx", PROCESSOR_24KF1_1, 33 },
827 { "34kx", PROCESSOR_24KF1_1, 33 },
829 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
830 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
831 { "74kf", PROCESSOR_74KF2_1, 33 },
832 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
833 { "74kfx", PROCESSOR_74KF1_1, 33 },
834 { "74kx", PROCESSOR_74KF1_1, 33 },
835 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
837 /* MIPS64 */
838 { "5kc", PROCESSOR_5KC, 64 },
839 { "5kf", PROCESSOR_5KF, 64 },
840 { "20kc", PROCESSOR_20KC, 64 },
841 { "sb1", PROCESSOR_SB1, 64 },
842 { "sb1a", PROCESSOR_SB1A, 64 },
843 { "sr71000", PROCESSOR_SR71000, 64 },
845 /* End marker */
846 { 0, 0, 0 }
849 /* Default costs. If these are used for a processor we should look
850 up the actual costs. */
851 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
852 COSTS_N_INSNS (7), /* fp_mult_sf */ \
853 COSTS_N_INSNS (8), /* fp_mult_df */ \
854 COSTS_N_INSNS (23), /* fp_div_sf */ \
855 COSTS_N_INSNS (36), /* fp_div_df */ \
856 COSTS_N_INSNS (10), /* int_mult_si */ \
857 COSTS_N_INSNS (10), /* int_mult_di */ \
858 COSTS_N_INSNS (69), /* int_div_si */ \
859 COSTS_N_INSNS (69), /* int_div_di */ \
860 2, /* branch_cost */ \
861 4 /* memory_latency */
863 /* Need to replace these with the costs of calling the appropriate
864 libgcc routine. */
865 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
866 COSTS_N_INSNS (256), /* fp_mult_sf */ \
867 COSTS_N_INSNS (256), /* fp_mult_df */ \
868 COSTS_N_INSNS (256), /* fp_div_sf */ \
869 COSTS_N_INSNS (256) /* fp_div_df */
871 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
873 COSTS_N_INSNS (1), /* fp_add */
874 COSTS_N_INSNS (1), /* fp_mult_sf */
875 COSTS_N_INSNS (1), /* fp_mult_df */
876 COSTS_N_INSNS (1), /* fp_div_sf */
877 COSTS_N_INSNS (1), /* fp_div_df */
878 COSTS_N_INSNS (1), /* int_mult_si */
879 COSTS_N_INSNS (1), /* int_mult_di */
880 COSTS_N_INSNS (1), /* int_div_si */
881 COSTS_N_INSNS (1), /* int_div_di */
882 2, /* branch_cost */
883 4 /* memory_latency */
886 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
888 { /* R3000 */
889 COSTS_N_INSNS (2), /* fp_add */
890 COSTS_N_INSNS (4), /* fp_mult_sf */
891 COSTS_N_INSNS (5), /* fp_mult_df */
892 COSTS_N_INSNS (12), /* fp_div_sf */
893 COSTS_N_INSNS (19), /* fp_div_df */
894 COSTS_N_INSNS (12), /* int_mult_si */
895 COSTS_N_INSNS (12), /* int_mult_di */
896 COSTS_N_INSNS (35), /* int_div_si */
897 COSTS_N_INSNS (35), /* int_div_di */
898 1, /* branch_cost */
899 4 /* memory_latency */
902 { /* 4KC */
903 SOFT_FP_COSTS,
904 COSTS_N_INSNS (6), /* int_mult_si */
905 COSTS_N_INSNS (6), /* int_mult_di */
906 COSTS_N_INSNS (36), /* int_div_si */
907 COSTS_N_INSNS (36), /* int_div_di */
908 1, /* branch_cost */
909 4 /* memory_latency */
911 { /* 4KP */
912 SOFT_FP_COSTS,
913 COSTS_N_INSNS (36), /* int_mult_si */
914 COSTS_N_INSNS (36), /* int_mult_di */
915 COSTS_N_INSNS (37), /* int_div_si */
916 COSTS_N_INSNS (37), /* int_div_di */
917 1, /* branch_cost */
918 4 /* memory_latency */
920 { /* 5KC */
921 SOFT_FP_COSTS,
922 COSTS_N_INSNS (4), /* int_mult_si */
923 COSTS_N_INSNS (11), /* int_mult_di */
924 COSTS_N_INSNS (36), /* int_div_si */
925 COSTS_N_INSNS (68), /* int_div_di */
926 1, /* branch_cost */
927 4 /* memory_latency */
929 { /* 5KF */
930 COSTS_N_INSNS (4), /* fp_add */
931 COSTS_N_INSNS (4), /* fp_mult_sf */
932 COSTS_N_INSNS (5), /* fp_mult_df */
933 COSTS_N_INSNS (17), /* fp_div_sf */
934 COSTS_N_INSNS (32), /* fp_div_df */
935 COSTS_N_INSNS (4), /* int_mult_si */
936 COSTS_N_INSNS (11), /* int_mult_di */
937 COSTS_N_INSNS (36), /* int_div_si */
938 COSTS_N_INSNS (68), /* int_div_di */
939 1, /* branch_cost */
940 4 /* memory_latency */
942 { /* 20KC */
943 COSTS_N_INSNS (4), /* fp_add */
944 COSTS_N_INSNS (4), /* fp_mult_sf */
945 COSTS_N_INSNS (5), /* fp_mult_df */
946 COSTS_N_INSNS (17), /* fp_div_sf */
947 COSTS_N_INSNS (32), /* fp_div_df */
948 COSTS_N_INSNS (4), /* int_mult_si */
949 COSTS_N_INSNS (7), /* int_mult_di */
950 COSTS_N_INSNS (42), /* int_div_si */
951 COSTS_N_INSNS (72), /* int_div_di */
952 1, /* branch_cost */
953 4 /* memory_latency */
955 { /* 24KC */
956 SOFT_FP_COSTS,
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (5), /* int_mult_di */
959 COSTS_N_INSNS (41), /* int_div_si */
960 COSTS_N_INSNS (41), /* int_div_di */
961 1, /* branch_cost */
962 4 /* memory_latency */
964 { /* 24KF2_1 */
965 COSTS_N_INSNS (8), /* fp_add */
966 COSTS_N_INSNS (8), /* fp_mult_sf */
967 COSTS_N_INSNS (10), /* fp_mult_df */
968 COSTS_N_INSNS (34), /* fp_div_sf */
969 COSTS_N_INSNS (64), /* fp_div_df */
970 COSTS_N_INSNS (5), /* int_mult_si */
971 COSTS_N_INSNS (5), /* int_mult_di */
972 COSTS_N_INSNS (41), /* int_div_si */
973 COSTS_N_INSNS (41), /* int_div_di */
974 1, /* branch_cost */
975 4 /* memory_latency */
977 { /* 24KF1_1 */
978 COSTS_N_INSNS (4), /* fp_add */
979 COSTS_N_INSNS (4), /* fp_mult_sf */
980 COSTS_N_INSNS (5), /* fp_mult_df */
981 COSTS_N_INSNS (17), /* fp_div_sf */
982 COSTS_N_INSNS (32), /* fp_div_df */
983 COSTS_N_INSNS (5), /* int_mult_si */
984 COSTS_N_INSNS (5), /* int_mult_di */
985 COSTS_N_INSNS (41), /* int_div_si */
986 COSTS_N_INSNS (41), /* int_div_di */
987 1, /* branch_cost */
988 4 /* memory_latency */
990 { /* 74KC */
991 SOFT_FP_COSTS,
992 COSTS_N_INSNS (5), /* int_mult_si */
993 COSTS_N_INSNS (5), /* int_mult_di */
994 COSTS_N_INSNS (41), /* int_div_si */
995 COSTS_N_INSNS (41), /* int_div_di */
996 1, /* branch_cost */
997 4 /* memory_latency */
999 { /* 74KF2_1 */
1000 COSTS_N_INSNS (8), /* fp_add */
1001 COSTS_N_INSNS (8), /* fp_mult_sf */
1002 COSTS_N_INSNS (10), /* fp_mult_df */
1003 COSTS_N_INSNS (34), /* fp_div_sf */
1004 COSTS_N_INSNS (64), /* fp_div_df */
1005 COSTS_N_INSNS (5), /* int_mult_si */
1006 COSTS_N_INSNS (5), /* int_mult_di */
1007 COSTS_N_INSNS (41), /* int_div_si */
1008 COSTS_N_INSNS (41), /* int_div_di */
1009 1, /* branch_cost */
1010 4 /* memory_latency */
1012 { /* 74KF1_1 */
1013 COSTS_N_INSNS (4), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (5), /* fp_mult_df */
1016 COSTS_N_INSNS (17), /* fp_div_sf */
1017 COSTS_N_INSNS (32), /* fp_div_df */
1018 COSTS_N_INSNS (5), /* int_mult_si */
1019 COSTS_N_INSNS (5), /* int_mult_di */
1020 COSTS_N_INSNS (41), /* int_div_si */
1021 COSTS_N_INSNS (41), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1025 { /* 74KF3_2 */
1026 COSTS_N_INSNS (6), /* fp_add */
1027 COSTS_N_INSNS (6), /* fp_mult_sf */
1028 COSTS_N_INSNS (7), /* fp_mult_df */
1029 COSTS_N_INSNS (25), /* fp_div_sf */
1030 COSTS_N_INSNS (48), /* fp_div_df */
1031 COSTS_N_INSNS (5), /* int_mult_si */
1032 COSTS_N_INSNS (5), /* int_mult_di */
1033 COSTS_N_INSNS (41), /* int_div_si */
1034 COSTS_N_INSNS (41), /* int_div_di */
1035 1, /* branch_cost */
1036 4 /* memory_latency */
1038 { /* M4k */
1039 DEFAULT_COSTS
1041 { /* R3900 */
1042 COSTS_N_INSNS (2), /* fp_add */
1043 COSTS_N_INSNS (4), /* fp_mult_sf */
1044 COSTS_N_INSNS (5), /* fp_mult_df */
1045 COSTS_N_INSNS (12), /* fp_div_sf */
1046 COSTS_N_INSNS (19), /* fp_div_df */
1047 COSTS_N_INSNS (2), /* int_mult_si */
1048 COSTS_N_INSNS (2), /* int_mult_di */
1049 COSTS_N_INSNS (35), /* int_div_si */
1050 COSTS_N_INSNS (35), /* int_div_di */
1051 1, /* branch_cost */
1052 4 /* memory_latency */
1054 { /* R6000 */
1055 COSTS_N_INSNS (3), /* fp_add */
1056 COSTS_N_INSNS (5), /* fp_mult_sf */
1057 COSTS_N_INSNS (6), /* fp_mult_df */
1058 COSTS_N_INSNS (15), /* fp_div_sf */
1059 COSTS_N_INSNS (16), /* fp_div_df */
1060 COSTS_N_INSNS (17), /* int_mult_si */
1061 COSTS_N_INSNS (17), /* int_mult_di */
1062 COSTS_N_INSNS (38), /* int_div_si */
1063 COSTS_N_INSNS (38), /* int_div_di */
1064 2, /* branch_cost */
1065 6 /* memory_latency */
1067 { /* R4000 */
1068 COSTS_N_INSNS (6), /* fp_add */
1069 COSTS_N_INSNS (7), /* fp_mult_sf */
1070 COSTS_N_INSNS (8), /* fp_mult_df */
1071 COSTS_N_INSNS (23), /* fp_div_sf */
1072 COSTS_N_INSNS (36), /* fp_div_df */
1073 COSTS_N_INSNS (10), /* int_mult_si */
1074 COSTS_N_INSNS (10), /* int_mult_di */
1075 COSTS_N_INSNS (69), /* int_div_si */
1076 COSTS_N_INSNS (69), /* int_div_di */
1077 2, /* branch_cost */
1078 6 /* memory_latency */
1080 { /* R4100 */
1081 DEFAULT_COSTS
1083 { /* R4111 */
1084 DEFAULT_COSTS
1086 { /* R4120 */
1087 DEFAULT_COSTS
1089 { /* R4130 */
1090 /* The only costs that appear to be updated here are
1091 integer multiplication. */
1092 SOFT_FP_COSTS,
1093 COSTS_N_INSNS (4), /* int_mult_si */
1094 COSTS_N_INSNS (6), /* int_mult_di */
1095 COSTS_N_INSNS (69), /* int_div_si */
1096 COSTS_N_INSNS (69), /* int_div_di */
1097 1, /* branch_cost */
1098 4 /* memory_latency */
1100 { /* R4300 */
1101 DEFAULT_COSTS
1103 { /* R4600 */
1104 DEFAULT_COSTS
1106 { /* R4650 */
1107 DEFAULT_COSTS
1109 { /* R5000 */
1110 COSTS_N_INSNS (6), /* fp_add */
1111 COSTS_N_INSNS (4), /* fp_mult_sf */
1112 COSTS_N_INSNS (5), /* fp_mult_df */
1113 COSTS_N_INSNS (23), /* fp_div_sf */
1114 COSTS_N_INSNS (36), /* fp_div_df */
1115 COSTS_N_INSNS (5), /* int_mult_si */
1116 COSTS_N_INSNS (5), /* int_mult_di */
1117 COSTS_N_INSNS (36), /* int_div_si */
1118 COSTS_N_INSNS (36), /* int_div_di */
1119 1, /* branch_cost */
1120 4 /* memory_latency */
1122 { /* R5400 */
1123 COSTS_N_INSNS (6), /* fp_add */
1124 COSTS_N_INSNS (5), /* fp_mult_sf */
1125 COSTS_N_INSNS (6), /* fp_mult_df */
1126 COSTS_N_INSNS (30), /* fp_div_sf */
1127 COSTS_N_INSNS (59), /* fp_div_df */
1128 COSTS_N_INSNS (3), /* int_mult_si */
1129 COSTS_N_INSNS (4), /* int_mult_di */
1130 COSTS_N_INSNS (42), /* int_div_si */
1131 COSTS_N_INSNS (74), /* int_div_di */
1132 1, /* branch_cost */
1133 4 /* memory_latency */
1135 { /* R5500 */
1136 COSTS_N_INSNS (6), /* fp_add */
1137 COSTS_N_INSNS (5), /* fp_mult_sf */
1138 COSTS_N_INSNS (6), /* fp_mult_df */
1139 COSTS_N_INSNS (30), /* fp_div_sf */
1140 COSTS_N_INSNS (59), /* fp_div_df */
1141 COSTS_N_INSNS (5), /* int_mult_si */
1142 COSTS_N_INSNS (9), /* int_mult_di */
1143 COSTS_N_INSNS (42), /* int_div_si */
1144 COSTS_N_INSNS (74), /* int_div_di */
1145 1, /* branch_cost */
1146 4 /* memory_latency */
1148 { /* R7000 */
1149 /* The only costs that are changed here are
1150 integer multiplication. */
1151 COSTS_N_INSNS (6), /* fp_add */
1152 COSTS_N_INSNS (7), /* fp_mult_sf */
1153 COSTS_N_INSNS (8), /* fp_mult_df */
1154 COSTS_N_INSNS (23), /* fp_div_sf */
1155 COSTS_N_INSNS (36), /* fp_div_df */
1156 COSTS_N_INSNS (5), /* int_mult_si */
1157 COSTS_N_INSNS (9), /* int_mult_di */
1158 COSTS_N_INSNS (69), /* int_div_si */
1159 COSTS_N_INSNS (69), /* int_div_di */
1160 1, /* branch_cost */
1161 4 /* memory_latency */
1163 { /* R8000 */
1164 DEFAULT_COSTS
1166 { /* R9000 */
1167 /* The only costs that are changed here are
1168 integer multiplication. */
1169 COSTS_N_INSNS (6), /* fp_add */
1170 COSTS_N_INSNS (7), /* fp_mult_sf */
1171 COSTS_N_INSNS (8), /* fp_mult_df */
1172 COSTS_N_INSNS (23), /* fp_div_sf */
1173 COSTS_N_INSNS (36), /* fp_div_df */
1174 COSTS_N_INSNS (3), /* int_mult_si */
1175 COSTS_N_INSNS (8), /* int_mult_di */
1176 COSTS_N_INSNS (69), /* int_div_si */
1177 COSTS_N_INSNS (69), /* int_div_di */
1178 1, /* branch_cost */
1179 4 /* memory_latency */
1181 { /* SB1 */
1182 /* These costs are the same as the SB-1A below. */
1183 COSTS_N_INSNS (4), /* fp_add */
1184 COSTS_N_INSNS (4), /* fp_mult_sf */
1185 COSTS_N_INSNS (4), /* fp_mult_df */
1186 COSTS_N_INSNS (24), /* fp_div_sf */
1187 COSTS_N_INSNS (32), /* fp_div_df */
1188 COSTS_N_INSNS (3), /* int_mult_si */
1189 COSTS_N_INSNS (4), /* int_mult_di */
1190 COSTS_N_INSNS (36), /* int_div_si */
1191 COSTS_N_INSNS (68), /* int_div_di */
1192 1, /* branch_cost */
1193 4 /* memory_latency */
1195 { /* SB1-A */
1196 /* These costs are the same as the SB-1 above. */
1197 COSTS_N_INSNS (4), /* fp_add */
1198 COSTS_N_INSNS (4), /* fp_mult_sf */
1199 COSTS_N_INSNS (4), /* fp_mult_df */
1200 COSTS_N_INSNS (24), /* fp_div_sf */
1201 COSTS_N_INSNS (32), /* fp_div_df */
1202 COSTS_N_INSNS (3), /* int_mult_si */
1203 COSTS_N_INSNS (4), /* int_mult_di */
1204 COSTS_N_INSNS (36), /* int_div_si */
1205 COSTS_N_INSNS (68), /* int_div_di */
1206 1, /* branch_cost */
1207 4 /* memory_latency */
1209 { /* SR71000 */
1210 DEFAULT_COSTS
1214 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1215 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1216 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1217 static const unsigned char mips16e_s2_s8_regs[] = {
1218 30, 23, 22, 21, 20, 19, 18
1220 static const unsigned char mips16e_a0_a3_regs[] = {
1221 4, 5, 6, 7
1224 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1225 ordered from the uppermost in memory to the lowest in memory. */
1226 static const unsigned char mips16e_save_restore_regs[] = {
1227 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1230 /* Initialize the GCC target structure. */
1231 #undef TARGET_ASM_ALIGNED_HI_OP
1232 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1233 #undef TARGET_ASM_ALIGNED_SI_OP
1234 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1235 #undef TARGET_ASM_ALIGNED_DI_OP
1236 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1238 #undef TARGET_ASM_FUNCTION_PROLOGUE
1239 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1240 #undef TARGET_ASM_FUNCTION_EPILOGUE
1241 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1242 #undef TARGET_ASM_SELECT_RTX_SECTION
1243 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1244 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1245 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1247 #undef TARGET_SCHED_INIT
1248 #define TARGET_SCHED_INIT mips_sched_init
1249 #undef TARGET_SCHED_REORDER
1250 #define TARGET_SCHED_REORDER mips_sched_reorder
1251 #undef TARGET_SCHED_REORDER2
1252 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1253 #undef TARGET_SCHED_VARIABLE_ISSUE
1254 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1255 #undef TARGET_SCHED_ADJUST_COST
1256 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1257 #undef TARGET_SCHED_ISSUE_RATE
1258 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1259 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1260 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1261 mips_multipass_dfa_lookahead
1263 #undef TARGET_DEFAULT_TARGET_FLAGS
1264 #define TARGET_DEFAULT_TARGET_FLAGS \
1265 (TARGET_DEFAULT \
1266 | TARGET_CPU_DEFAULT \
1267 | TARGET_ENDIAN_DEFAULT \
1268 | TARGET_FP_EXCEPTIONS_DEFAULT \
1269 | MASK_CHECK_ZERO_DIV \
1270 | MASK_FUSED_MADD)
1271 #undef TARGET_HANDLE_OPTION
1272 #define TARGET_HANDLE_OPTION mips_handle_option
1274 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1275 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1277 #undef TARGET_INSERT_ATTRIBUTES
1278 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
1279 #undef TARGET_MERGE_DECL_ATTRIBUTES
1280 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
1281 #undef TARGET_SET_CURRENT_FUNCTION
1282 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1284 #undef TARGET_VALID_POINTER_MODE
1285 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1286 #undef TARGET_RTX_COSTS
1287 #define TARGET_RTX_COSTS mips_rtx_costs
1288 #undef TARGET_ADDRESS_COST
1289 #define TARGET_ADDRESS_COST mips_address_cost
1291 #undef TARGET_IN_SMALL_DATA_P
1292 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1294 #undef TARGET_MACHINE_DEPENDENT_REORG
1295 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1297 #undef TARGET_ASM_FILE_START
1298 #define TARGET_ASM_FILE_START mips_file_start
1299 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1300 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1302 #undef TARGET_INIT_LIBFUNCS
1303 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1305 #undef TARGET_BUILD_BUILTIN_VA_LIST
1306 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1307 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1308 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1310 #undef TARGET_PROMOTE_FUNCTION_ARGS
1311 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1312 #undef TARGET_PROMOTE_FUNCTION_RETURN
1313 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1314 #undef TARGET_PROMOTE_PROTOTYPES
1315 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1317 #undef TARGET_RETURN_IN_MEMORY
1318 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1319 #undef TARGET_RETURN_IN_MSB
1320 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1322 #undef TARGET_ASM_OUTPUT_MI_THUNK
1323 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1324 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1325 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1327 #undef TARGET_SETUP_INCOMING_VARARGS
1328 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1329 #undef TARGET_STRICT_ARGUMENT_NAMING
1330 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1331 #undef TARGET_MUST_PASS_IN_STACK
1332 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1333 #undef TARGET_PASS_BY_REFERENCE
1334 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1335 #undef TARGET_CALLEE_COPIES
1336 #define TARGET_CALLEE_COPIES mips_callee_copies
1337 #undef TARGET_ARG_PARTIAL_BYTES
1338 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1340 #undef TARGET_MODE_REP_EXTENDED
1341 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1343 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1344 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1346 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1347 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1349 #undef TARGET_INIT_BUILTINS
1350 #define TARGET_INIT_BUILTINS mips_init_builtins
1351 #undef TARGET_EXPAND_BUILTIN
1352 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1354 #undef TARGET_HAVE_TLS
1355 #define TARGET_HAVE_TLS HAVE_AS_TLS
1357 #undef TARGET_CANNOT_FORCE_CONST_MEM
1358 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1360 #undef TARGET_ENCODE_SECTION_INFO
1361 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1363 #undef TARGET_ATTRIBUTE_TABLE
1364 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1365 /* All our function attributes are related to how out-of-line copies should
1366 be compiled or called. They don't in themselves prevent inlining. */
1367 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
1368 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
1370 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1371 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1373 #undef TARGET_MIN_ANCHOR_OFFSET
1374 #define TARGET_MIN_ANCHOR_OFFSET -32768
1375 #undef TARGET_MAX_ANCHOR_OFFSET
1376 #define TARGET_MAX_ANCHOR_OFFSET 32767
1377 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1378 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1379 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1380 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1382 #undef TARGET_COMP_TYPE_ATTRIBUTES
1383 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1385 #ifdef HAVE_AS_DTPRELWORD
1386 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1387 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1388 #endif
1390 struct gcc_target targetm = TARGET_INITIALIZER;
1393 /* Predicates to test for presence of "near" and "far"/"long_call"
1394 attributes on the given TYPE. */
1396 static bool
1397 mips_near_type_p (const_tree type)
1399 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1402 static bool
1403 mips_far_type_p (const_tree type)
1405 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1406 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1409 /* Similar predicates for "mips16"/"nomips16" attributes. */
1411 static bool
1412 mips_mips16_decl_p (const_tree decl)
1414 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1417 static bool
1418 mips_nomips16_decl_p (const_tree decl)
1420 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1423 /* Return 0 if the attributes for two types are incompatible, 1 if they
1424 are compatible, and 2 if they are nearly compatible (which causes a
1425 warning to be generated). */
1427 static int
1428 mips_comp_type_attributes (const_tree type1, const_tree type2)
1430 /* Check for mismatch of non-default calling convention. */
1431 if (TREE_CODE (type1) != FUNCTION_TYPE)
1432 return 1;
1434 /* Disallow mixed near/far attributes. */
1435 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1436 return 0;
1437 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1438 return 0;
1440 return 1;
1443 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1444 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1446 static void
1447 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1449 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1451 *base_ptr = XEXP (x, 0);
1452 *offset_ptr = INTVAL (XEXP (x, 1));
1454 else
1456 *base_ptr = x;
1457 *offset_ptr = 0;
1461 /* Return true if SYMBOL_REF X is associated with a global symbol
1462 (in the STB_GLOBAL sense). */
1464 static bool
1465 mips_global_symbol_p (const_rtx x)
1467 const_tree const decl = SYMBOL_REF_DECL (x);
1469 if (!decl)
1470 return !SYMBOL_REF_LOCAL_P (x);
1472 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1473 or weak symbols. Relocations in the object file will be against
1474 the target symbol, so it's that symbol's binding that matters here. */
1475 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1478 /* Return true if SYMBOL_REF X binds locally. */
1480 static bool
1481 mips_symbol_binds_local_p (const_rtx x)
1483 return (SYMBOL_REF_DECL (x)
1484 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1485 : SYMBOL_REF_LOCAL_P (x));
1488 /* Return true if rtx constants of mode MODE should be put into a small
1489 data section. */
1491 static bool
1492 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1494 return (!TARGET_EMBEDDED_DATA
1495 && TARGET_LOCAL_SDATA
1496 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1499 /* Return the method that should be used to access SYMBOL_REF or
1500 LABEL_REF X in context CONTEXT. */
1502 static enum mips_symbol_type
1503 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1505 if (TARGET_RTP_PIC)
1506 return SYMBOL_GOT_DISP;
1508 if (GET_CODE (x) == LABEL_REF)
1510 /* LABEL_REFs are used for jump tables as well as text labels.
1511 Only return SYMBOL_PC_RELATIVE if we know the label is in
1512 the text section. */
1513 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1514 return SYMBOL_PC_RELATIVE;
1515 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1516 return SYMBOL_GOT_PAGE_OFST;
1517 return SYMBOL_ABSOLUTE;
1520 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1522 if (SYMBOL_REF_TLS_MODEL (x))
1523 return SYMBOL_TLS;
1525 if (CONSTANT_POOL_ADDRESS_P (x))
1527 if (TARGET_MIPS16_TEXT_LOADS)
1528 return SYMBOL_PC_RELATIVE;
1530 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1531 return SYMBOL_PC_RELATIVE;
1533 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1534 return SYMBOL_GP_RELATIVE;
1537 /* Do not use small-data accesses for weak symbols; they may end up
1538 being zero. */
1539 if (TARGET_GPOPT
1540 && SYMBOL_REF_SMALL_P (x)
1541 && !SYMBOL_REF_WEAK (x))
1542 return SYMBOL_GP_RELATIVE;
1544 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1545 is in effect. */
1546 if (TARGET_ABICALLS
1547 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1549 /* There are three cases to consider:
1551 - o32 PIC (either with or without explicit relocs)
1552 - n32/n64 PIC without explicit relocs
1553 - n32/n64 PIC with explicit relocs
1555 In the first case, both local and global accesses will use an
1556 R_MIPS_GOT16 relocation. We must correctly predict which of
1557 the two semantics (local or global) the assembler and linker
1558 will apply. The choice depends on the symbol's binding rather
1559 than its visibility.
1561 In the second case, the assembler will not use R_MIPS_GOT16
1562 relocations, but it chooses between local and global accesses
1563 in the same way as for o32 PIC.
1565 In the third case we have more freedom since both forms of
1566 access will work for any kind of symbol. However, there seems
1567 little point in doing things differently. */
1568 if (mips_global_symbol_p (x))
1569 return SYMBOL_GOT_DISP;
1571 return SYMBOL_GOT_PAGE_OFST;
1574 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1575 return SYMBOL_FORCE_TO_MEM;
1576 return SYMBOL_ABSOLUTE;
1579 /* Classify symbolic expression X, given that it appears in context
1580 CONTEXT. */
1582 static enum mips_symbol_type
1583 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1585 rtx offset;
1587 split_const (x, &x, &offset);
1588 if (UNSPEC_ADDRESS_P (x))
1589 return UNSPEC_ADDRESS_TYPE (x);
1591 return mips_classify_symbol (x, context);
1594 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1595 is the alignment (in bytes) of SYMBOL_REF X. */
1597 static bool
1598 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1600 /* If for some reason we can't get the alignment for the
1601 symbol, initializing this to one means we will only accept
1602 a zero offset. */
1603 HOST_WIDE_INT align = 1;
1604 tree t;
1606 /* Get the alignment of the symbol we're referring to. */
1607 t = SYMBOL_REF_DECL (x);
1608 if (t)
1609 align = DECL_ALIGN_UNIT (t);
1611 return offset >= 0 && offset < align;
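/* For example (purely illustrative): if X refers to a variable whose
   DECL_ALIGN_UNIT is 8, offsets 0 through 7 are accepted and 8 is not;
   if no decl is attached to X, only a zero offset is accepted.  */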
1614 /* Return true if X is a symbolic constant that can be used in context
1615 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1617 bool
1618 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1619 enum mips_symbol_type *symbol_type)
1621 rtx offset;
1623 split_const (x, &x, &offset);
1624 if (UNSPEC_ADDRESS_P (x))
1626 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1627 x = UNSPEC_ADDRESS (x);
1629 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1631 *symbol_type = mips_classify_symbol (x, context);
1632 if (*symbol_type == SYMBOL_TLS)
1633 return false;
1635 else
1636 return false;
1638 if (offset == const0_rtx)
1639 return true;
1641 /* Check whether a nonzero offset is valid for the underlying
1642 relocations. */
1643 switch (*symbol_type)
1645 case SYMBOL_ABSOLUTE:
1646 case SYMBOL_FORCE_TO_MEM:
1647 case SYMBOL_32_HIGH:
1648 case SYMBOL_64_HIGH:
1649 case SYMBOL_64_MID:
1650 case SYMBOL_64_LOW:
1651 /* If the target has 64-bit pointers and the object file only
1652 supports 32-bit symbols, the values of those symbols will be
1653 sign-extended. In this case we can't allow an arbitrary offset
1654 in case the 32-bit value X + OFFSET has a different sign from X. */
1655 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1656 return offset_within_block_p (x, INTVAL (offset));
1658 /* In other cases the relocations can handle any offset. */
1659 return true;
1661 case SYMBOL_PC_RELATIVE:
1662 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1663 In this case, we no longer have access to the underlying constant,
1664 but the original symbol-based access was known to be valid. */
1665 if (GET_CODE (x) == LABEL_REF)
1666 return true;
1668 /* Fall through. */
1670 case SYMBOL_GP_RELATIVE:
1671 /* Make sure that the offset refers to something within the
1672 same object block. This should guarantee that the final
1673 PC- or GP-relative offset is within the 16-bit limit. */
1674 return offset_within_block_p (x, INTVAL (offset));
1676 case SYMBOL_GOT_PAGE_OFST:
1677 case SYMBOL_GOTOFF_PAGE:
1678 /* If the symbol is global, the GOT entry will contain the symbol's
1679 address, and we will apply a 16-bit offset after loading it.
1680 If the symbol is local, the linker should provide enough local
1681 GOT entries for a 16-bit offset, but larger offsets may lead
1682 to GOT overflow. */
1683 return SMALL_INT (offset);
1685 case SYMBOL_TPREL:
1686 case SYMBOL_DTPREL:
1687 /* There is no carry between the HI and LO REL relocations, so the
1688 offset is only valid if we know it won't lead to such a carry. */
1689 return mips_offset_within_alignment_p (x, INTVAL (offset));
1691 case SYMBOL_GOT_DISP:
1692 case SYMBOL_GOTOFF_DISP:
1693 case SYMBOL_GOTOFF_CALL:
1694 case SYMBOL_GOTOFF_LOADGP:
1695 case SYMBOL_TLSGD:
1696 case SYMBOL_TLSLDM:
1697 case SYMBOL_GOTTPREL:
1698 case SYMBOL_TLS:
1699 case SYMBOL_HALF:
1700 return false;
1702 gcc_unreachable ();
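/* Illustrative example (assuming o32 -mabicalls): given
   (const (plus (symbol_ref "x") (const_int 4))), a locally-binding "x"
   classifies as SYMBOL_GOT_PAGE_OFST and the offset of 4 passes the
   SMALL_INT test, whereas a global "x" classifies as SYMBOL_GOT_DISP,
   which accepts no offset at all.  */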
1706 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1709 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1711 if (!HARD_REGISTER_NUM_P (regno))
1713 if (!strict)
1714 return true;
1715 regno = reg_renumber[regno];
1718 /* These fake registers will be eliminated to either the stack or
1719 hard frame pointer, both of which are usually valid base registers.
1720 Reload deals with the cases where the eliminated form isn't valid. */
1721 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1722 return true;
1724 /* In mips16 mode, the stack pointer can only address word and doubleword
1725 values, nothing smaller. There are two problems here:
1727 (a) Instantiating virtual registers can introduce new uses of the
1728 stack pointer. If these virtual registers are valid addresses,
1729 the stack pointer should be too.
1731 (b) Most uses of the stack pointer are not made explicit until
1732 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1733 We don't know until that stage whether we'll be eliminating to the
1734 stack pointer (which needs the restriction) or the hard frame
1735 pointer (which doesn't).
1737 All in all, it seems more consistent to only enforce this restriction
1738 during and after reload. */
1739 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1740 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1742 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1746 /* Return true if X is a valid base register for the given mode.
1747 Allow only hard registers if STRICT. */
1749 static bool
1750 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1752 if (!strict && GET_CODE (x) == SUBREG)
1753 x = SUBREG_REG (x);
1755 return (REG_P (x)
1756 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1760 /* Return true if X is a valid address for machine mode MODE. If it is,
1761 fill in INFO appropriately. STRICT is true if we should only accept
1762 hard base registers. */
1764 static bool
1765 mips_classify_address (struct mips_address_info *info, rtx x,
1766 enum machine_mode mode, int strict)
1768 switch (GET_CODE (x))
1770 case REG:
1771 case SUBREG:
1772 info->type = ADDRESS_REG;
1773 info->reg = x;
1774 info->offset = const0_rtx;
1775 return mips_valid_base_register_p (info->reg, mode, strict);
1777 case PLUS:
1778 info->type = ADDRESS_REG;
1779 info->reg = XEXP (x, 0);
1780 info->offset = XEXP (x, 1);
1781 return (mips_valid_base_register_p (info->reg, mode, strict)
1782 && const_arith_operand (info->offset, VOIDmode));
1784 case LO_SUM:
1785 info->type = ADDRESS_LO_SUM;
1786 info->reg = XEXP (x, 0);
1787 info->offset = XEXP (x, 1);
1788 /* We have to trust the creator of the LO_SUM to do something vaguely
1789 sane. Target-independent code that creates a LO_SUM should also
1790 create and verify the matching HIGH. Target-independent code that
1791 adds an offset to a LO_SUM must prove that the offset will not
1792 induce a carry. Failure to do either of these things would be
1793 a bug, and we are not required to check for it here. The MIPS
1794 backend itself should only create LO_SUMs for valid symbolic
1795 constants, with the high part being either a HIGH or a copy
1796 of _gp. */
1797 info->symbol_type
1798 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1799 return (mips_valid_base_register_p (info->reg, mode, strict)
1800 && mips_symbol_insns (info->symbol_type, mode) > 0
1801 && mips_lo_relocs[info->symbol_type] != 0);
1803 case CONST_INT:
1804 /* Small-integer addresses don't occur very often, but they
1805 are legitimate if $0 is a valid base register. */
1806 info->type = ADDRESS_CONST_INT;
1807 return !TARGET_MIPS16 && SMALL_INT (x);
1809 case CONST:
1810 case LABEL_REF:
1811 case SYMBOL_REF:
1812 info->type = ADDRESS_SYMBOLIC;
1813 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1814 &info->symbol_type)
1815 && mips_symbol_insns (info->symbol_type, mode) > 0
1816 && !mips_split_p[info->symbol_type]);
1818 default:
1819 return false;
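/* Some illustrative inputs (a sketch, not an exhaustive list):

     (reg $4)                           -> ADDRESS_REG, offset 0
     (plus (reg $4) (const_int 16))     -> ADDRESS_REG, offset 16
     (lo_sum (reg $4) (symbol_ref "x")) -> ADDRESS_LO_SUM
     (const_int 64)                     -> ADDRESS_CONST_INT, non-MIPS16 only
     (symbol_ref "x")                   -> ADDRESS_SYMBOLIC, if the symbol
                                           type is valid in addresses and
                                           does not need to be split.  */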
1823 /* Return true if X is a thread-local symbol. */
1825 static bool
1826 mips_tls_operand_p (rtx x)
1828 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1831 /* Return true if X can not be forced into a constant pool. */
1833 static int
1834 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1836 return mips_tls_operand_p (*x);
1839 /* Return true if X can not be forced into a constant pool. */
1841 static bool
1842 mips_cannot_force_const_mem (rtx x)
1844 rtx base, offset;
1846 if (!TARGET_MIPS16)
1848 /* As an optimization, reject constants that mips_legitimize_move
1849 can expand inline.
1851 Suppose we have a multi-instruction sequence that loads constant C
1852 into register R. If R does not get allocated a hard register, and
1853 R is used in an operand that allows both registers and memory
1854 references, reload will consider forcing C into memory and using
1855 one of the instruction's memory alternatives. Returning false
1856 here will force it to use an input reload instead. */
1857 if (GET_CODE (x) == CONST_INT)
1858 return true;
1860 split_const (x, &base, &offset);
1861 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1862 return true;
1865 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1866 return true;
1868 return false;
1871 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1872 constants when we're using a per-function constant pool. */
1874 static bool
1875 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1876 const_rtx x ATTRIBUTE_UNUSED)
1878 return !TARGET_MIPS16_PCREL_LOADS;
1881 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1882 single instruction. We rely on the fact that, in the worst case,
1883 all instructions involved in a MIPS16 address calculation are
1884 extended ones. */
1886 static int
1887 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1889 switch (type)
1891 case SYMBOL_ABSOLUTE:
1892 /* When using 64-bit symbols, we need 5 preparatory instructions,
1893 such as:
1895 lui $at,%highest(symbol)
1896 daddiu $at,$at,%higher(symbol)
1897 dsll $at,$at,16
1898 daddiu $at,$at,%hi(symbol)
1899 dsll $at,$at,16
1901 The final address is then $at + %lo(symbol). With 32-bit
1902 symbols we just need a preparatory lui for normal mode and
1903 a preparatory "li; sll" for MIPS16. */
1904 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1906 case SYMBOL_GP_RELATIVE:
1907 /* Treat GP-relative accesses as taking a single instruction on
1908 MIPS16 too; the copy of $gp can often be shared. */
1909 return 1;
1911 case SYMBOL_PC_RELATIVE:
1912 /* PC-relative constants can only be used with addiupc,
1913 lwpc and ldpc. */
1914 if (mode == MAX_MACHINE_MODE
1915 || GET_MODE_SIZE (mode) == 4
1916 || GET_MODE_SIZE (mode) == 8)
1917 return 1;
1919 /* The constant must be loaded using addiupc first. */
1920 return 0;
1922 case SYMBOL_FORCE_TO_MEM:
1923 /* LEAs will be converted into constant-pool references by
1924 mips_reorg. */
1925 if (mode == MAX_MACHINE_MODE)
1926 return 1;
1928 /* The constant must be loaded from the constant pool. */
1929 return 0;
1931 case SYMBOL_GOT_DISP:
1932 /* The constant will have to be loaded from the GOT before it
1933 is used in an address. */
1934 if (mode != MAX_MACHINE_MODE)
1935 return 0;
1937 /* Fall through. */
1939 case SYMBOL_GOT_PAGE_OFST:
1940 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1941 the local/global classification is accurate. See override_options
1942 for details.
1944 The worst cases are:
1946 (1) For local symbols when generating o32 or o64 code. The assembler
1947 will use:
1949 lw $at,%got(symbol)
1952 ...and the final address will be $at + %lo(symbol).
1954 (2) For global symbols when -mxgot. The assembler will use:
1956 lui $at,%got_hi(symbol)
1957 (d)addu $at,$at,$gp
1959 ...and the final address will be $at + %got_lo(symbol). */
1960 return 3;
1962 case SYMBOL_GOTOFF_PAGE:
1963 case SYMBOL_GOTOFF_DISP:
1964 case SYMBOL_GOTOFF_CALL:
1965 case SYMBOL_GOTOFF_LOADGP:
1966 case SYMBOL_32_HIGH:
1967 case SYMBOL_64_HIGH:
1968 case SYMBOL_64_MID:
1969 case SYMBOL_64_LOW:
1970 case SYMBOL_TLSGD:
1971 case SYMBOL_TLSLDM:
1972 case SYMBOL_DTPREL:
1973 case SYMBOL_GOTTPREL:
1974 case SYMBOL_TPREL:
1975 case SYMBOL_HALF:
1976 /* A 16-bit constant formed by a single relocation, or a 32-bit
1977 constant formed from a high 16-bit relocation and a low 16-bit
1978 relocation. Use mips_split_p to determine which. */
1979 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1981 case SYMBOL_TLS:
1982 /* We don't treat a bare TLS symbol as a constant. */
1983 return 0;
1985 gcc_unreachable ();
1988 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1989 to load symbols of type TYPE into a register. Return 0 if the given
1990 type of symbol cannot be used as an immediate operand.
1992 Otherwise, return the number of instructions needed to load or store
1993 values of mode MODE to or from addresses of type TYPE. Return 0 if
1994 the given type of symbol is not valid in addresses.
1996 In both cases, treat extended MIPS16 instructions as two instructions. */
1998 static int
1999 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
2001 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
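/* Worked example (illustrative): a SYMBOL_ABSOLUTE address with 32-bit
   symbols counts as 2 instructions in normal mode (the preparatory lui
   plus the instruction that applies %lo), and as 3 * 2 = 6 in MIPS16
   mode, since mips_symbol_insns_1 returns 3 there and each instruction
   is treated as extended.  */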
2004 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2006 bool
2007 mips_stack_address_p (rtx x, enum machine_mode mode)
2009 struct mips_address_info addr;
2011 return (mips_classify_address (&addr, x, mode, false)
2012 && addr.type == ADDRESS_REG
2013 && addr.reg == stack_pointer_rtx);
2016 /* Return true if a value at OFFSET bytes from BASE can be accessed
2017 using an unextended mips16 instruction. MODE is the mode of the
2018 value.
2020 Usually the offset in an unextended instruction is a 5-bit field.
2021 The offset is unsigned and shifted left once for HIs, twice
2022 for SIs, and so on. An exception is SImode accesses off the
2023 stack pointer, which have an 8-bit immediate field. */
2025 static bool
2026 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2028 if (TARGET_MIPS16
2029 && GET_CODE (offset) == CONST_INT
2030 && INTVAL (offset) >= 0
2031 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2033 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2034 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2035 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2037 return false;
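/* Illustrative ranges implied by the checks above: SImode accesses off
   $sp accept word-aligned offsets 0..1020 (less than 256 * 4), other
   SImode accesses accept word-aligned offsets 0..124, and HImode
   accesses accept even offsets 0..62.  */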
2041 /* Return the number of instructions needed to load or store a value
2042 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2043 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2044 otherwise assume that a single load or store is enough.
2046 For mips16 code, count extended instructions as two instructions. */
2049 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2051 struct mips_address_info addr;
2052 int factor;
2054 /* BLKmode is used for single unaligned loads and stores and should
2055 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2056 meaningless, so we have to single it out as a special case one way
2057 or the other.) */
2058 if (mode != BLKmode && might_split_p)
2059 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2060 else
2061 factor = 1;
2063 if (mips_classify_address (&addr, x, mode, false))
2064 switch (addr.type)
2066 case ADDRESS_REG:
2067 if (TARGET_MIPS16
2068 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2069 return factor * 2;
2070 return factor;
2072 case ADDRESS_LO_SUM:
2073 return (TARGET_MIPS16 ? factor * 2 : factor);
2075 case ADDRESS_CONST_INT:
2076 return factor;
2078 case ADDRESS_SYMBOLIC:
2079 return factor * mips_symbol_insns (addr.symbol_type, mode);
2081 return 0;
2085 /* Likewise for constant X. */
2088 mips_const_insns (rtx x)
2090 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2091 enum mips_symbol_type symbol_type;
2092 rtx offset;
2094 switch (GET_CODE (x))
2096 case HIGH:
2097 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2098 &symbol_type)
2099 || !mips_split_p[symbol_type])
2100 return 0;
2102 /* This is simply an lui for normal mode. It is an extended
2103 "li" followed by an extended "sll" for MIPS16. */
2104 return TARGET_MIPS16 ? 4 : 1;
2106 case CONST_INT:
2107 if (TARGET_MIPS16)
2108 /* Unsigned 8-bit constants can be loaded using an unextended
2109 LI instruction. Unsigned 16-bit constants can be loaded
2110 using an extended LI. Negative constants must be loaded
2111 using LI and then negated. */
2112 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2113 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2114 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2115 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2116 : 0);
2118 return mips_build_integer (codes, INTVAL (x));
2120 case CONST_DOUBLE:
2121 case CONST_VECTOR:
2122 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2124 case CONST:
2125 if (CONST_GP_P (x))
2126 return 1;
2128 /* See if we can refer to X directly. */
2129 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2130 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2132 /* Otherwise try splitting the constant into a base and offset.
2133 16-bit offsets can be added using an extra addiu. Larger offsets
2134 must be calculated separately and then added to the base. */
2135 split_const (x, &x, &offset);
2136 if (offset != 0)
2138 int n = mips_const_insns (x);
2139 if (n != 0)
2141 if (SMALL_INT (offset))
2142 return n + 1;
2143 else
2144 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2147 return 0;
2149 case SYMBOL_REF:
2150 case LABEL_REF:
2151 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2152 MAX_MACHINE_MODE);
2154 default:
2155 return 0;
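/* Worked example (illustrative, non-MIPS16): (const_int 4) is a
   SMALL_OPERAND and needs 1 instruction, while (const_int 0x12345678)
   needs 2 (an lui of 0x1234 followed by an ori of 0x5678), which is
   what mips_build_integer reports.  */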
2160 /* Return the number of instructions needed to implement INSN,
2161 given that it loads from or stores to MEM. Count extended
2162 mips16 instructions as two instructions. */
2165 mips_load_store_insns (rtx mem, rtx insn)
2167 enum machine_mode mode;
2168 bool might_split_p;
2169 rtx set;
2171 gcc_assert (MEM_P (mem));
2172 mode = GET_MODE (mem);
2174 /* Try to prove that INSN does not need to be split. */
2175 might_split_p = true;
2176 if (GET_MODE_BITSIZE (mode) == 64)
2178 set = single_set (insn);
2179 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2180 might_split_p = false;
2183 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2187 /* Return the number of instructions needed for an integer division. */
2190 mips_idiv_insns (void)
2192 int count;
2194 count = 1;
2195 if (TARGET_CHECK_ZERO_DIV)
2197 if (GENERATE_DIVIDE_TRAPS)
2198 count++;
2199 else
2200 count += 2;
2203 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2204 count++;
2205 return count;
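/* For example (illustrative): with -mcheck-zero-division on a target
   that can generate divide traps, the count is 2 (the divide plus the
   trap); without trap support it is 3, and working around the
   R4000/R4400 errata adds one more.  */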
2208 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2209 returns a nonzero value if X is a legitimate address for a memory
2210 operand of the indicated MODE. STRICT is nonzero if this function
2211 is called during reload. */
2213 bool
2214 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2216 struct mips_address_info addr;
2218 return mips_classify_address (&addr, x, mode, strict);
2221 /* Emit a move from SRC to DEST. Assume that the move expanders can
2222 handle all moves if !can_create_pseudo_p (). The distinction is
2223 important because, unlike emit_move_insn, the move expanders know
2224 how to force Pmode objects into the constant pool even when the
2225 constant pool address is not itself legitimate. */
2228 mips_emit_move (rtx dest, rtx src)
2230 return (can_create_pseudo_p ()
2231 ? emit_move_insn (dest, src)
2232 : emit_move_insn_1 (dest, src));
2235 /* Copy VALUE to a register and return that register. If new pseudos
2236 are allowed, copy it into a new register, otherwise use DEST. */
2238 static rtx
2239 mips_force_temporary (rtx dest, rtx value)
2241 if (can_create_pseudo_p ())
2242 return force_reg (Pmode, value);
2243 else
2245 mips_emit_move (copy_rtx (dest), value);
2246 return dest;
2251 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2252 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2253 constant in that context and can be split into a high part and a LO_SUM.
2254 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2255 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2257 TEMP is as for mips_force_temporary and is used to load the high
2258 part into a register. */
2260 bool
2261 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2263 enum mips_symbol_context context;
2264 enum mips_symbol_type symbol_type;
2265 rtx high;
2267 context = (mode == MAX_MACHINE_MODE
2268 ? SYMBOL_CONTEXT_LEA
2269 : SYMBOL_CONTEXT_MEM);
2270 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2271 || mips_symbol_insns (symbol_type, mode) == 0
2272 || !mips_split_p[symbol_type])
2273 return false;
2275 if (lo_sum_out)
2277 if (symbol_type == SYMBOL_GP_RELATIVE)
2279 if (!can_create_pseudo_p ())
2281 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2282 high = temp;
2284 else
2285 high = mips16_gp_pseudo_reg ();
2287 else
2289 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2290 high = mips_force_temporary (temp, high);
2292 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2294 return true;
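/* Illustrative expansion (a sketch for a splittable SYMBOL_ABSOLUTE
   address): for ADDR == (symbol_ref "x"), the high part
   (high (symbol_ref "x")) is forced into TEMP -- an lui of %hi(x) --
   and *LO_SUM_OUT becomes (lo_sum TEMP (symbol_ref "x")), which later
   prints as a %lo(x) reference.  */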
2298 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2299 and add CONST_INT OFFSET to the result. */
2301 static rtx
2302 mips_unspec_address_offset (rtx base, rtx offset,
2303 enum mips_symbol_type symbol_type)
2305 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2306 UNSPEC_ADDRESS_FIRST + symbol_type);
2307 if (offset != const0_rtx)
2308 base = gen_rtx_PLUS (Pmode, base, offset);
2309 return gen_rtx_CONST (Pmode, base);
2312 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2313 type SYMBOL_TYPE. */
2316 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2318 rtx base, offset;
2320 split_const (address, &base, &offset);
2321 return mips_unspec_address_offset (base, offset, symbol_type);
2325 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2326 high part to BASE and return the result. Just return BASE otherwise.
2327 TEMP is available as a temporary register if needed.
2329 The returned expression can be used as the first operand to a LO_SUM. */
2331 static rtx
2332 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2333 enum mips_symbol_type symbol_type)
2335 if (mips_split_p[symbol_type])
2337 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2338 addr = mips_force_temporary (temp, addr);
2339 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2341 return base;
2345 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2346 mips_force_temporary; it is only needed when OFFSET is not a
2347 SMALL_OPERAND. */
2349 static rtx
2350 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2352 if (!SMALL_OPERAND (offset))
2354 rtx high;
2355 if (TARGET_MIPS16)
2357 /* Load the full offset into a register so that we can use
2358 an unextended instruction for the address itself. */
2359 high = GEN_INT (offset);
2360 offset = 0;
2362 else
2364 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2365 high = GEN_INT (CONST_HIGH_PART (offset));
2366 offset = CONST_LOW_PART (offset);
2368 high = mips_force_temporary (temp, high);
2369 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2371 return plus_constant (reg, offset);
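/* Worked example (illustrative, non-MIPS16): for OFFSET == 0x12345,
   CONST_HIGH_PART gives 0x10000 and CONST_LOW_PART gives 0x2345, so the
   0x10000 is added into TEMP and the function returns TEMP + 0x2345.
   MIPS16 instead loads the full 0x12345 into a register, adds it to REG
   and returns that sum with no residual offset.  */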
2374 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2375 referencing, and TYPE is the symbol type to use (either global
2376 dynamic or local dynamic). V0 is an RTX for the return value
2377 location. The entire insn sequence is returned. */
2379 static GTY(()) rtx mips_tls_symbol;
2381 static rtx
2382 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2384 rtx insn, loc, tga, a0;
2386 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2388 if (!mips_tls_symbol)
2389 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2391 loc = mips_unspec_address (sym, type);
2393 start_sequence ();
2395 emit_insn (gen_rtx_SET (Pmode, a0,
2396 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2397 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2398 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2399 CONST_OR_PURE_CALL_P (insn) = 1;
2400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2401 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2402 insn = get_insns ();
2404 end_sequence ();
2406 return insn;
2409 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2410 return value will be a valid address and move_operand (either a REG
2411 or a LO_SUM). */
2413 static rtx
2414 mips_legitimize_tls_address (rtx loc)
2416 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2417 enum tls_model model;
2419 if (TARGET_MIPS16)
2421 sorry ("MIPS16 TLS");
2422 return gen_reg_rtx (Pmode);
2425 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2426 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2428 model = SYMBOL_REF_TLS_MODEL (loc);
2429 /* Only TARGET_ABICALLS code can have more than one module; other
2430 code must be static and should not use a GOT. All TLS models
2431 reduce to local exec in this situation. */
2432 if (!TARGET_ABICALLS)
2433 model = TLS_MODEL_LOCAL_EXEC;
2435 switch (model)
2437 case TLS_MODEL_GLOBAL_DYNAMIC:
2438 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2439 dest = gen_reg_rtx (Pmode);
2440 emit_libcall_block (insn, dest, v0, loc);
2441 break;
2443 case TLS_MODEL_LOCAL_DYNAMIC:
2444 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2445 tmp1 = gen_reg_rtx (Pmode);
2447 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2448 share the LDM result with other LD model accesses. */
2449 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2450 UNSPEC_TLS_LDM);
2451 emit_libcall_block (insn, tmp1, v0, eqv);
2453 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2454 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2455 mips_unspec_address (loc, SYMBOL_DTPREL));
2456 break;
2458 case TLS_MODEL_INITIAL_EXEC:
2459 tmp1 = gen_reg_rtx (Pmode);
2460 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2461 if (Pmode == DImode)
2463 emit_insn (gen_tls_get_tp_di (v1));
2464 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2466 else
2468 emit_insn (gen_tls_get_tp_si (v1));
2469 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2471 dest = gen_reg_rtx (Pmode);
2472 emit_insn (gen_add3_insn (dest, tmp1, v1));
2473 break;
2475 case TLS_MODEL_LOCAL_EXEC:
2476 if (Pmode == DImode)
2477 emit_insn (gen_tls_get_tp_di (v1));
2478 else
2479 emit_insn (gen_tls_get_tp_si (v1));
2481 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2482 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2483 mips_unspec_address (loc, SYMBOL_TPREL));
2484 break;
2486 default:
2487 gcc_unreachable ();
2490 return dest;
2493 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2494 be legitimized in a way that the generic machinery might not expect,
2495 put the new address in *XLOC and return true. MODE is the mode of
2496 the memory being accessed. */
2498 bool
2499 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2501 if (mips_tls_operand_p (*xloc))
2503 *xloc = mips_legitimize_tls_address (*xloc);
2504 return true;
2507 /* See if the address can split into a high part and a LO_SUM. */
2508 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2509 return true;
2511 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2513 /* Handle REG + CONSTANT using mips_add_offset. */
2514 rtx reg;
2516 reg = XEXP (*xloc, 0);
2517 if (!mips_valid_base_register_p (reg, mode, 0))
2518 reg = copy_to_mode_reg (Pmode, reg);
2519 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2520 return true;
2523 return false;
2527 /* Subroutine of mips_build_integer (with the same interface).
2528 Assume that the final action in the sequence should be a left shift. */
2530 static unsigned int
2531 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2533 unsigned int i, shift;
2535 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2536 since signed numbers are easier to load than unsigned ones. */
2537 shift = 0;
2538 while ((value & 1) == 0)
2539 value /= 2, shift++;
2541 i = mips_build_integer (codes, value);
2542 codes[i].code = ASHIFT;
2543 codes[i].value = shift;
2544 return i + 1;
2548 /* As for mips_build_shift, but assume that the final action will be
2549 an IOR or PLUS operation. */
2551 static unsigned int
2552 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2554 unsigned HOST_WIDE_INT high;
2555 unsigned int i;
2557 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2558 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2560 /* The constant is too complex to load with a simple lui/ori pair
2561 so our goal is to clear as many trailing zeros as possible.
2562 In this case, we know bit 16 is set and that the low 16 bits
2563 form a negative number. If we subtract that number from VALUE,
2564 we will clear at least the lowest 17 bits, maybe more. */
2565 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2566 codes[i].code = PLUS;
2567 codes[i].value = CONST_LOW_PART (value);
2569 else
2571 i = mips_build_integer (codes, high);
2572 codes[i].code = IOR;
2573 codes[i].value = value & 0xffff;
2575 return i + 1;
2579 /* Fill CODES with a sequence of rtl operations to load VALUE.
2580 Return the number of operations needed. */
2582 static unsigned int
2583 mips_build_integer (struct mips_integer_op *codes,
2584 unsigned HOST_WIDE_INT value)
2586 if (SMALL_OPERAND (value)
2587 || SMALL_OPERAND_UNSIGNED (value)
2588 || LUI_OPERAND (value))
2590 /* The value can be loaded with a single instruction. */
2591 codes[0].code = UNKNOWN;
2592 codes[0].value = value;
2593 return 1;
2595 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2597 /* Either the constant is a simple LUI/ORI combination or its
2598 lowest bit is set. We don't want to shift in this case. */
2599 return mips_build_lower (codes, value);
2601 else if ((value & 0xffff) == 0)
2603 /* The constant will need at least three actions. The lowest
2604 16 bits are clear, so the final action will be a shift. */
2605 return mips_build_shift (codes, value);
2607 else
2609 /* The final action could be a shift, add or inclusive OR.
2610 Rather than use a complex condition to select the best
2611 approach, try both mips_build_shift and mips_build_lower
2612 and pick the one that gives the shortest sequence.
2613 Note that this case is only used once per constant. */
2614 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2615 unsigned int cost, alt_cost;
2617 cost = mips_build_shift (codes, value);
2618 alt_cost = mips_build_lower (alt_codes, value);
2619 if (alt_cost < cost)
2621 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2622 cost = alt_cost;
2624 return cost;
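/* Worked example (illustrative): VALUE == 0x12345678 takes the
   mips_build_lower path and yields two operations,
   codes[0] = { UNKNOWN, 0x12340000 } and codes[1] = { IOR, 0x5678 },
   i.e. an lui followed by an ori.  */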
2629 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2631 void
2632 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2634 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2635 enum machine_mode mode;
2636 unsigned int i, cost;
2637 rtx x;
2639 mode = GET_MODE (dest);
2640 cost = mips_build_integer (codes, value);
2642 /* Apply each binary operation to X. Invariant: X is a legitimate
2643 source operand for a SET pattern. */
2644 x = GEN_INT (codes[0].value);
2645 for (i = 1; i < cost; i++)
2647 if (!can_create_pseudo_p ())
2649 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2650 x = temp;
2652 else
2653 x = force_reg (mode, x);
2654 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2657 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2661 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2662 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2663 move_operand. */
2665 static void
2666 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2668 rtx base, offset;
2670 /* Split moves of big integers into smaller pieces. */
2671 if (splittable_const_int_operand (src, mode))
2673 mips_move_integer (dest, dest, INTVAL (src));
2674 return;
2677 /* Split moves of symbolic constants into high/low pairs. */
2678 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2680 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2681 return;
2684 if (mips_tls_operand_p (src))
2686 mips_emit_move (dest, mips_legitimize_tls_address (src));
2687 return;
2690 /* If we have (const (plus symbol offset)), and that expression cannot
2691 be forced into memory, load the symbol first and add in the offset.
2692 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2693 forced into memory, as it usually produces better code. */
2694 split_const (src, &base, &offset);
2695 if (offset != const0_rtx
2696 && (targetm.cannot_force_const_mem (src)
2697 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2699 base = mips_force_temporary (dest, base);
2700 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2701 return;
2704 src = force_const_mem (mode, src);
2706 /* When using explicit relocs, constant pool references are sometimes
2707 not legitimate addresses. */
2708 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2709 mips_emit_move (dest, src);
2713 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2714 sequence that is valid. */
2716 bool
2717 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2719 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2721 mips_emit_move (dest, force_reg (mode, src));
2722 return true;
2725 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2726 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2727 && REG_P (src) && MD_REG_P (REGNO (src))
2728 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2730 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2731 if (GET_MODE_SIZE (mode) <= 4)
2732 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2733 gen_rtx_REG (SImode, REGNO (src)),
2734 gen_rtx_REG (SImode, other_regno)));
2735 else
2736 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2737 gen_rtx_REG (DImode, REGNO (src)),
2738 gen_rtx_REG (DImode, other_regno)));
2739 return true;
2742 /* We need to deal with constants that would be legitimate
2743 immediate_operands but not legitimate move_operands. */
2744 if (CONSTANT_P (src) && !move_operand (src, mode))
2746 mips_legitimize_const_move (mode, dest, src);
2747 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2748 return true;
2750 return false;
2753 /* We need a lot of little routines to check constant values on the
2754 mips16. These are used to figure out how long the instruction will
2755 be. It would be much better to do this using constraints, but
2756 there aren't nearly enough letters available. */
2758 static int
2759 m16_check_op (rtx op, int low, int high, int mask)
2761 return (GET_CODE (op) == CONST_INT
2762 && INTVAL (op) >= low
2763 && INTVAL (op) <= high
2764 && (INTVAL (op) & mask) == 0);
2768 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2770 return m16_check_op (op, 0x1, 0x8, 0);
2774 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2776 return m16_check_op (op, - 0x8, 0x7, 0);
2780 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2782 return m16_check_op (op, - 0x7, 0x8, 0);
2786 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2788 return m16_check_op (op, - 0x10, 0xf, 0);
2792 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2794 return m16_check_op (op, - 0xf, 0x10, 0);
2798 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2800 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2804 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2806 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2810 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2812 return m16_check_op (op, - 0x80, 0x7f, 0);
2816 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2818 return m16_check_op (op, - 0x7f, 0x80, 0);
2822 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2824 return m16_check_op (op, 0x0, 0xff, 0);
2828 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2830 return m16_check_op (op, - 0xff, 0x0, 0);
2834 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2836 return m16_check_op (op, - 0x1, 0xfe, 0);
2840 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2842 return m16_check_op (op, 0x0, 0xff << 2, 3);
2846 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2848 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2852 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2854 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2858 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2860 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
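/* Illustrative reading of the predicates above: m16_uimm8_1 accepts
   0..255, m16_simm8_1 accepts -128..127, and m16_uimm8_4 accepts
   multiples of 4 in the range 0..0x3fc, matching the immediate fields
   of the corresponding unextended MIPS16 instructions.  */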
2863 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2864 address instruction. */
2866 static bool
2867 mips_lwxs_address_p (rtx addr)
2869 if (ISA_HAS_LWXS
2870 && GET_CODE (addr) == PLUS
2871 && REG_P (XEXP (addr, 1)))
2873 rtx offset = XEXP (addr, 0);
2874 if (GET_CODE (offset) == MULT
2875 && REG_P (XEXP (offset, 0))
2876 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2877 && INTVAL (XEXP (offset, 1)) == 4)
2878 return true;
2880 return false;
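/* Sketch of an address that matches (illustrative):
     (plus (mult (reg $5) (const_int 4)) (reg $4))
   i.e. base + index * 4, which the load-scaled-indexed instruction lwxs
   can handle directly when ISA_HAS_LWXS.  */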
2883 /* The cost of loading values from the constant pool. It should be
2884 larger than the cost of any constant we want to synthesize inline. */
2886 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2888 /* Return the cost of X when used as an operand to the MIPS16 instruction
2889 that implements CODE. Return -1 if there is no such instruction, or if
2890 X is not a valid immediate operand for it. */
2892 static int
2893 mips16_constant_cost (int code, HOST_WIDE_INT x)
2895 switch (code)
2897 case ASHIFT:
2898 case ASHIFTRT:
2899 case LSHIFTRT:
2900 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2901 other shifts are extended. The shift patterns truncate the shift
2902 count to the right size, so there are no out-of-range values. */
2903 if (IN_RANGE (x, 1, 8))
2904 return 0;
2905 return COSTS_N_INSNS (1);
2907 case PLUS:
2908 if (IN_RANGE (x, -128, 127))
2909 return 0;
2910 if (SMALL_OPERAND (x))
2911 return COSTS_N_INSNS (1);
2912 return -1;
2914 case LEU:
2915 /* Like LE, but reject the always-true case. */
2916 if (x == -1)
2917 return -1;
2918 case LE:
2919 /* We add 1 to the immediate and use SLT. */
2920 x += 1;
2921 case XOR:
2922 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2923 case LT:
2924 case LTU:
2925 if (IN_RANGE (x, 0, 255))
2926 return 0;
2927 if (SMALL_OPERAND_UNSIGNED (x))
2928 return COSTS_N_INSNS (1);
2929 return -1;
2931 case EQ:
2932 case NE:
2933 /* Equality comparisons with 0 are cheap. */
2934 if (x == 0)
2935 return 0;
2936 return -1;
2938 default:
2939 return -1;
2943 /* Return true if there is a non-MIPS16 instruction that implements CODE
2944 and if that instruction accepts X as an immediate operand. */
2946 static int
2947 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2949 switch (code)
2951 case ASHIFT:
2952 case ASHIFTRT:
2953 case LSHIFTRT:
2954 /* All shift counts are truncated to a valid constant. */
2955 return true;
2957 case ROTATE:
2958 case ROTATERT:
2959 /* Likewise rotates, if the target supports rotates at all. */
2960 return ISA_HAS_ROR;
2962 case AND:
2963 case IOR:
2964 case XOR:
2965 /* These instructions take 16-bit unsigned immediates. */
2966 return SMALL_OPERAND_UNSIGNED (x);
2968 case PLUS:
2969 case LT:
2970 case LTU:
2971 /* These instructions take 16-bit signed immediates. */
2972 return SMALL_OPERAND (x);
2974 case EQ:
2975 case NE:
2976 case GT:
2977 case GTU:
2978 /* The "immediate" forms of these instructions are really
2979 implemented as comparisons with register 0. */
2980 return x == 0;
2982 case GE:
2983 case GEU:
2984 /* Likewise, meaning that the only valid immediate operand is 1. */
2985 return x == 1;
2987 case LE:
2988 /* We add 1 to the immediate and use SLT. */
2989 return SMALL_OPERAND (x + 1);
2991 case LEU:
2992 /* Likewise SLTU, but reject the always-true case. */
2993 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2995 case SIGN_EXTRACT:
2996 case ZERO_EXTRACT:
2997 /* The bit position and size are immediate operands. */
2998 return ISA_HAS_EXT_INS;
3000 default:
3001 /* By default assume that $0 can be used for 0. */
3002 return x == 0;
3006 /* Return the cost of binary operation X, given that the instruction
3007 sequence for a word-sized or smaller operation has cost SINGLE_COST
3008 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3010 static int
3011 mips_binary_cost (rtx x, int single_cost, int double_cost)
3013 int cost;
3015 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3016 cost = double_cost;
3017 else
3018 cost = single_cost;
3019 return (cost
3020 + rtx_cost (XEXP (x, 0), 0)
3021 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3024 /* Return the cost of floating-point multiplications of mode MODE. */
3026 static int
3027 mips_fp_mult_cost (enum machine_mode mode)
3029 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3032 /* Return the cost of floating-point divisions of mode MODE. */
3034 static int
3035 mips_fp_div_cost (enum machine_mode mode)
3037 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3040 /* Return the cost of sign-extending OP to mode MODE, not including the
3041 cost of OP itself. */
3043 static int
3044 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3046 if (MEM_P (op))
3047 /* Extended loads are as cheap as unextended ones. */
3048 return 0;
3050 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3051 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3052 return 0;
3054 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3055 /* We can use SEB or SEH. */
3056 return COSTS_N_INSNS (1);
3058 /* We need to use a shift left and a shift right. */
3059 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
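/* For example (illustrative): sign-extending a QImode register to
   SImode without SEB takes a shift left by 24 and an arithmetic shift
   right by 24, the COSTS_N_INSNS (2) case above; the MIPS16 figure of
   4 reflects both shifts being extended instructions.  */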
3062 /* Return the cost of zero-extending OP to mode MODE, not including the
3063 cost of OP itself. */
3065 static int
3066 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3068 if (MEM_P (op))
3069 /* Extended loads are as cheap as unextended ones. */
3070 return 0;
3072 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3073 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3074 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3076 if (GENERATE_MIPS16E)
3077 /* We can use ZEB or ZEH. */
3078 return COSTS_N_INSNS (1);
3080 if (TARGET_MIPS16)
3081 /* We need to load 0xff or 0xffff into a register and use AND. */
3082 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3084 /* We can use ANDI. */
3085 return COSTS_N_INSNS (1);
3088 /* Implement TARGET_RTX_COSTS. */
3090 static bool
3091 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3093 enum machine_mode mode = GET_MODE (x);
3094 bool float_mode_p = FLOAT_MODE_P (mode);
3095 int cost;
3096 rtx addr;
3098 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3099 appear in the instruction stream, and the cost of a comparison is
3100 really the cost of the branch or scc condition. At the time of
3101 writing, gcc only uses an explicit outer COMPARE code when optabs
3102 is testing whether a constant is expensive enough to force into a
3103 register. We want optabs to pass such constants through the MIPS
3104 expanders instead, so make all constants very cheap here. */
3105 if (outer_code == COMPARE)
3107 gcc_assert (CONSTANT_P (x));
3108 *total = 0;
3109 return true;
3112 switch (code)
3114 case CONST_INT:
3115 /* Treat *clear_upper32-style ANDs as having zero cost in the
3116 second operand. The cost is entirely in the first operand.
3118 ??? This is needed because we would otherwise try to CSE
3119 the constant operand. Although that's the right thing for
3120 instructions that continue to be a register operation throughout
3121 compilation, it is disastrous for instructions that could
3122 later be converted into a memory operation. */
3123 if (TARGET_64BIT
3124 && outer_code == AND
3125 && UINTVAL (x) == 0xffffffff)
3127 *total = 0;
3128 return true;
3131 if (TARGET_MIPS16)
3133 cost = mips16_constant_cost (outer_code, INTVAL (x));
3134 if (cost >= 0)
3136 *total = cost;
3137 return true;
3140 else
3142 /* When not optimizing for size, we care more about the cost
3143 of hot code, and hot code is often in a loop. If a constant
3144 operand needs to be forced into a register, we will often be
3145 able to hoist the constant load out of the loop, so the load
3146 should not contribute to the cost. */
3147 if (!optimize_size
3148 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3150 *total = 0;
3151 return true;
3154 /* Fall through. */
3156 case CONST:
3157 case SYMBOL_REF:
3158 case LABEL_REF:
3159 case CONST_DOUBLE:
3160 if (force_to_mem_operand (x, VOIDmode))
3162 *total = COSTS_N_INSNS (1);
3163 return true;
3165 cost = mips_const_insns (x);
3166 if (cost > 0)
3168 /* If the constant is likely to be stored in a GPR, SETs of
3169 single-insn constants are as cheap as register sets; we
3170 never want to CSE them.
3172 Don't reduce the cost of storing a floating-point zero in
3173 FPRs. If we have a zero in an FPR for other reasons, we
3174 can get better cfg-cleanup and delayed-branch results by
3175 using it consistently, rather than using $0 sometimes and
3176 an FPR at other times. Also, moves between floating-point
3177 registers are sometimes cheaper than (D)MTC1 $0. */
3178 if (cost == 1
3179 && outer_code == SET
3180 && !(float_mode_p && TARGET_HARD_FLOAT))
3181 cost = 0;
3182 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3183 want to CSE the constant itself. It is usually better to
3184 have N copies of the last operation in the sequence and one
3185 shared copy of the other operations. (Note that this is
3186 not true for MIPS16 code, where the final operation in the
3187 sequence is often an extended instruction.)
3189 Also, if we have a CONST_INT, we don't know whether it is
3190 for a word or doubleword operation, so we cannot rely on
3191 the result of mips_build_integer. */
3192 else if (!TARGET_MIPS16
3193 && (outer_code == SET || mode == VOIDmode))
3194 cost = 1;
3195 *total = COSTS_N_INSNS (cost);
3196 return true;
3198 /* The value will need to be fetched from the constant pool. */
3199 *total = CONSTANT_POOL_COST;
3200 return true;
3202 case MEM:
3203 /* If the address is legitimate, return the number of
3204 instructions it needs. */
3205 addr = XEXP (x, 0);
3206 cost = mips_address_insns (addr, mode, true);
3207 if (cost > 0)
3209 *total = COSTS_N_INSNS (cost + 1);
3210 return true;
3212 /* Check for a scaled indexed address. */
3213 if (mips_lwxs_address_p (addr))
3215 *total = COSTS_N_INSNS (2);
3216 return true;
3218 /* Otherwise use the default handling. */
3219 return false;
3221 case FFS:
3222 *total = COSTS_N_INSNS (6);
3223 return false;
3225 case NOT:
3226 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3227 return false;
3229 case AND:
3230 /* Check for a *clear_upper32 pattern and treat it like a zero
3231 extension. See the pattern's comment for details. */
3232 if (TARGET_64BIT
3233 && mode == DImode
3234 && CONST_INT_P (XEXP (x, 1))
3235 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3237 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3238 + rtx_cost (XEXP (x, 0), 0));
3239 return true;
3241 /* Fall through. */
3243 case IOR:
3244 case XOR:
3245 /* Double-word operations use two single-word operations. */
3246 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3247 return true;
3249 case ASHIFT:
3250 case ASHIFTRT:
3251 case LSHIFTRT:
3252 case ROTATE:
3253 case ROTATERT:
3254 if (CONSTANT_P (XEXP (x, 1)))
3255 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3256 else
3257 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3258 return true;
3260 case ABS:
3261 if (float_mode_p)
3262 *total = mips_cost->fp_add;
3263 else
3264 *total = COSTS_N_INSNS (4);
3265 return false;
3267 case LO_SUM:
3268 /* Low-part immediates need an extended MIPS16 instruction. */
3269 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3270 + rtx_cost (XEXP (x, 0), 0));
3271 return true;
3273 case LT:
3274 case LTU:
3275 case LE:
3276 case LEU:
3277 case GT:
3278 case GTU:
3279 case GE:
3280 case GEU:
3281 case EQ:
3282 case NE:
3283 case UNORDERED:
3284 case LTGT:
3285 /* Branch comparisons have VOIDmode, so use the first operand's
3286 mode instead. */
3287 mode = GET_MODE (XEXP (x, 0));
3288 if (FLOAT_MODE_P (mode))
3290 *total = mips_cost->fp_add;
3291 return false;
3293 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3294 return true;
3296 case MINUS:
3297 if (float_mode_p
3298 && ISA_HAS_NMADD_NMSUB
3299 && TARGET_FUSED_MADD
3300 && !HONOR_NANS (mode)
3301 && !HONOR_SIGNED_ZEROS (mode))
3303 /* See if we can use NMADD or NMSUB. See mips.md for the
3304 associated patterns. */
3305 rtx op0 = XEXP (x, 0);
3306 rtx op1 = XEXP (x, 1);
3307 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3309 *total = (mips_fp_mult_cost (mode)
3310 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3311 + rtx_cost (XEXP (op0, 1), 0)
3312 + rtx_cost (op1, 0));
3313 return true;
3315 if (GET_CODE (op1) == MULT)
3317 *total = (mips_fp_mult_cost (mode)
3318 + rtx_cost (op0, 0)
3319 + rtx_cost (XEXP (op1, 0), 0)
3320 + rtx_cost (XEXP (op1, 1), 0));
3321 return true;
3324 /* Fall through. */
3326 case PLUS:
3327 if (float_mode_p)
3329 if (ISA_HAS_FP4
3330 && TARGET_FUSED_MADD
3331 && GET_CODE (XEXP (x, 0)) == MULT)
3332 *total = 0;
3333 else
3334 *total = mips_cost->fp_add;
3335 return false;
3338 /* Double-word operations require three single-word operations and
3339 an SLTU. The MIPS16 version then needs to move the result of
3340 the SLTU from $24 to a MIPS16 register. */
3341 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3342 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3343 return true;
3345 case NEG:
3346 if (float_mode_p
3347 && ISA_HAS_NMADD_NMSUB
3348 && TARGET_FUSED_MADD
3349 && !HONOR_NANS (mode)
3350 && !HONOR_SIGNED_ZEROS (mode))
3352 /* See if we can use NMADD or NMSUB. See mips.md for the
3353 associated patterns. */
3354 rtx op = XEXP (x, 0);
3355 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3356 && GET_CODE (XEXP (op, 0)) == MULT)
3358 *total = (mips_fp_mult_cost (mode)
3359 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3360 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3361 + rtx_cost (XEXP (op, 1), 0));
3362 return true;
3366 if (float_mode_p)
3367 *total = mips_cost->fp_add;
3368 else
3369 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3370 return false;
3372 case MULT:
3373 if (float_mode_p)
3374 *total = mips_fp_mult_cost (mode);
3375 else if (mode == DImode && !TARGET_64BIT)
3376 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3377 where the mulsidi3 always includes an MFHI and an MFLO. */
3378 *total = (optimize_size
3379 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3380 : mips_cost->int_mult_si * 3 + 6);
3381 else if (optimize_size)
3382 *total = (ISA_HAS_MUL3 ? 1 : 2);
3383 else if (mode == DImode)
3384 *total = mips_cost->int_mult_di;
3385 else
3386 *total = mips_cost->int_mult_si;
3387 return false;
3389 case DIV:
3390 /* Check for a reciprocal. */
3391 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3393 if (ISA_HAS_FP4
3394 && flag_unsafe_math_optimizations
3395 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3397 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3398 division as being free. */
3399 *total = rtx_cost (XEXP (x, 1), 0);
3400 return true;
3402 if (!ISA_MIPS1)
3404 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3405 return true;
3408 /* Fall through. */
3410 case SQRT:
3411 case MOD:
3412 if (float_mode_p)
3414 *total = mips_fp_div_cost (mode);
3415 return false;
3417 /* Fall through. */
3419 case UDIV:
3420 case UMOD:
3421 if (optimize_size)
3423 /* It is our responsibility to make division by a power of 2
3424 as cheap as 2 register additions if we want the division
3425 expanders to be used for such operations; see the setting
3426 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3427 should always produce shorter code than using
3428 expand_sdiv2_pow2. */
3429 if (TARGET_MIPS16
3430 && CONST_INT_P (XEXP (x, 1))
3431 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3433 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3434 return true;
3436 *total = COSTS_N_INSNS (mips_idiv_insns ());
3438 else if (mode == DImode)
3439 *total = mips_cost->int_div_di;
3440 else
3441 *total = mips_cost->int_div_si;
3442 return false;
3444 case SIGN_EXTEND:
3445 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3446 return false;
3448 case ZERO_EXTEND:
3449 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3450 return false;
3452 case FLOAT:
3453 case UNSIGNED_FLOAT:
3454 case FIX:
3455 case FLOAT_EXTEND:
3456 case FLOAT_TRUNCATE:
3457 *total = mips_cost->fp_add;
3458 return false;
3460 default:
3461 return false;
3465 /* Provide the costs of an addressing mode that contains ADDR.
3466 If ADDR is not a valid address, its cost is irrelevant. */
3468 static int
3469 mips_address_cost (rtx addr)
3471 return mips_address_insns (addr, SImode, false);
3474 /* Return one word of double-word value OP, taking into account the fixed
3475 endianness of certain registers. HIGH_P is true to select the high part,
3476 false to select the low part. */
3479 mips_subword (rtx op, int high_p)
3481 unsigned int byte;
3482 enum machine_mode mode;
3484 mode = GET_MODE (op);
3485 if (mode == VOIDmode)
3486 mode = DImode;
3488 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3489 byte = UNITS_PER_WORD;
3490 else
3491 byte = 0;
3493 if (FP_REG_RTX_P (op))
3494 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3496 if (MEM_P (op))
3497 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3499 return simplify_gen_subreg (word_mode, op, mode, byte);
3503 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3505 bool
3506 mips_split_64bit_move_p (rtx dest, rtx src)
3508 if (TARGET_64BIT)
3509 return false;
3511 /* FP->FP moves can be done in a single instruction. */
3512 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3513 return false;
3515 /* Check for floating-point loads and stores. They can be done using
3516 ldc1 and sdc1 on MIPS II and above. */
3517 if (mips_isa > 1)
3519 if (FP_REG_RTX_P (dest) && MEM_P (src))
3520 return false;
3521 if (FP_REG_RTX_P (src) && MEM_P (dest))
3522 return false;
3524 return true;
3528 /* Split a 64-bit move from SRC to DEST assuming that
3529 mips_split_64bit_move_p holds.
3531 Moves into and out of FPRs cause some difficulty here. Such moves
3532 will always be DFmode, since paired FPRs are not allowed to store
3533 DImode values. The most natural representation would be two separate
3534 32-bit moves, such as:
3536 (set (reg:SI $f0) (mem:SI ...))
3537 (set (reg:SI $f1) (mem:SI ...))
3539 However, the second insn is invalid because odd-numbered FPRs are
3540 not allowed to store independent values. Use the patterns load_df_low,
3541 load_df_high and store_df_high instead. */
3543 void
3544 mips_split_64bit_move (rtx dest, rtx src)
3546 if (FP_REG_RTX_P (dest))
3548 /* Loading an FPR from memory or from GPRs. */
3549 if (ISA_HAS_MXHC1)
3551 dest = gen_lowpart (DFmode, dest);
3552 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3553 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3554 copy_rtx (dest)));
3556 else
3558 emit_insn (gen_load_df_low (copy_rtx (dest),
3559 mips_subword (src, 0)));
3560 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3561 copy_rtx (dest)));
3564 else if (FP_REG_RTX_P (src))
3566 /* Storing an FPR into memory or GPRs. */
3567 if (ISA_HAS_MXHC1)
3569 src = gen_lowpart (DFmode, src);
3570 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3571 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3573 else
3575 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3576 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3579 else
3581 /* The operation can be split into two normal moves. Decide in
3582 which order to do them. */
3583 rtx low_dest;
3585 low_dest = mips_subword (dest, 0);
3586 if (REG_P (low_dest)
3587 && reg_overlap_mentioned_p (low_dest, src))
3589 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3590 mips_emit_move (low_dest, mips_subword (src, 0));
3592 else
3594 mips_emit_move (low_dest, mips_subword (src, 0));
3595 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3600 /* Return the appropriate instructions to move SRC into DEST. Assume
3601 that SRC is operand 1 and DEST is operand 0. */
3603 const char *
3604 mips_output_move (rtx dest, rtx src)
3606 enum rtx_code dest_code, src_code;
3607 enum mips_symbol_type symbol_type;
3608 bool dbl_p;
3610 dest_code = GET_CODE (dest);
3611 src_code = GET_CODE (src);
3612 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3614 if (dbl_p && mips_split_64bit_move_p (dest, src))
3615 return "#";
3617 if ((src_code == REG && GP_REG_P (REGNO (src)))
3618 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3620 if (dest_code == REG)
3622 if (GP_REG_P (REGNO (dest)))
3623 return "move\t%0,%z1";
3625 if (MD_REG_P (REGNO (dest)))
3626 return "mt%0\t%z1";
3628 if (DSP_ACC_REG_P (REGNO (dest)))
3630 static char retval[] = "mt__\t%z1,%q0";
3631 retval[2] = reg_names[REGNO (dest)][4];
3632 retval[3] = reg_names[REGNO (dest)][5];
3633 return retval;
3636 if (FP_REG_P (REGNO (dest)))
3637 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3639 if (ALL_COP_REG_P (REGNO (dest)))
3641 static char retval[] = "dmtc_\t%z1,%0";
3643 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3644 return (dbl_p ? retval : retval + 1);
3647 if (dest_code == MEM)
3648 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3650 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3652 if (src_code == REG)
3654 if (DSP_ACC_REG_P (REGNO (src)))
3656 static char retval[] = "mf__\t%0,%q1";
3657 retval[2] = reg_names[REGNO (src)][4];
3658 retval[3] = reg_names[REGNO (src)][5];
3659 return retval;
3662 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3663 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3665 if (FP_REG_P (REGNO (src)))
3666 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3668 if (ALL_COP_REG_P (REGNO (src)))
3670 static char retval[] = "dmfc_\t%0,%1";
3672 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3673 return (dbl_p ? retval : retval + 1);
3677 if (src_code == MEM)
3678 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3680 if (src_code == CONST_INT)
3682 /* Don't use the X format, because that will give out of
3683 range numbers for 64-bit hosts and 32-bit targets. */
3684 if (!TARGET_MIPS16)
3685 return "li\t%0,%1\t\t\t# %X1";
3687 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3688 return "li\t%0,%1";
3690 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3691 return "#";
3694 if (src_code == HIGH)
3695 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3697 if (CONST_GP_P (src))
3698 return "move\t%0,%1";
3700 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3701 && mips_lo_relocs[symbol_type] != 0)
3703 /* A signed 16-bit constant formed by applying a relocation
3704 operator to a symbolic address. */
3705 gcc_assert (!mips_split_p[symbol_type]);
3706 return "li\t%0,%R1";
3709 if (symbolic_operand (src, VOIDmode))
3711 gcc_assert (TARGET_MIPS16
3712 ? TARGET_MIPS16_TEXT_LOADS
3713 : !TARGET_EXPLICIT_RELOCS);
3714 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3717 if (src_code == REG && FP_REG_P (REGNO (src)))
3719 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3721 if (GET_MODE (dest) == V2SFmode)
3722 return "mov.ps\t%0,%1";
3723 else
3724 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3727 if (dest_code == MEM)
3728 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3730 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3732 if (src_code == MEM)
3733 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3735 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3737 static char retval[] = "l_c_\t%0,%1";
3739 retval[1] = (dbl_p ? 'd' : 'w');
3740 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3741 return retval;
3743 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3745 static char retval[] = "s_c_\t%1,%0";
3747 retval[1] = (dbl_p ? 'd' : 'w');
3748 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3749 return retval;
3751 gcc_unreachable ();
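/* Illustrative summary, not from the original source, of the templates
   chosen above: a GPR-to-GPR copy yields "move", a GPR load from memory
   yields "lw" or "ld" depending on DBL_P, a DFmode FPR-to-FPR copy
   yields "mov.d", and any 64-bit move accepted by
   mips_split_64bit_move_p returns "#" so that it is split into two
   word-sized moves later.  */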
3754 /* Restore $gp from its save slot. Valid only when using o32 or
3755 o64 abicalls. */
3757 void
3758 mips_restore_gp (void)
3760 rtx address, slot;
3762 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3764 address = mips_add_offset (pic_offset_table_rtx,
3765 frame_pointer_needed
3766 ? hard_frame_pointer_rtx
3767 : stack_pointer_rtx,
3768 current_function_outgoing_args_size);
3769 slot = gen_rtx_MEM (Pmode, address);
3771 mips_emit_move (pic_offset_table_rtx, slot);
3772 if (!TARGET_EXPLICIT_RELOCS)
3773 emit_insn (gen_blockage ());
3776 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3778 static void
3779 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3781 emit_insn (gen_rtx_SET (VOIDmode, target,
3782 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3785 /* Return true if CMP1 is a suitable second operand for relational
3786 operator CODE. See also the *sCC patterns in mips.md. */
3788 static bool
3789 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3791 switch (code)
3793 case GT:
3794 case GTU:
3795 return reg_or_0_operand (cmp1, VOIDmode);
3797 case GE:
3798 case GEU:
3799 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3801 case LT:
3802 case LTU:
3803 return arith_operand (cmp1, VOIDmode);
3805 case LE:
3806 return sle_operand (cmp1, VOIDmode);
3808 case LEU:
3809 return sleu_operand (cmp1, VOIDmode);
3811 default:
3812 gcc_unreachable ();
3816 /* Canonicalize LE or LEU comparisons into LT comparisons when
3817 possible to avoid extra instructions or inverting the
3818 comparison. */
3820 static bool
3821 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3822 enum machine_mode mode)
3824 HOST_WIDE_INT original, plus_one;
3826 if (GET_CODE (*cmp1) != CONST_INT)
3827 return false;
3829 original = INTVAL (*cmp1);
3830 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3832 switch (*code)
3834 case LE:
3835 if (original < plus_one)
3837 *code = LT;
3838 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3839 return true;
3841 break;
3843 case LEU:
3844 if (plus_one != 0)
3846 *code = LTU;
3847 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3848 return true;
3850 break;
3852 default:
3853 return false;
3856 return false;
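/* Worked examples, added for illustration: in SImode, (le x 100)
   becomes an LT comparison against 101 and (leu x 0xfffe) becomes an
   LTU comparison against 0xffff, with the new constant forced into a
   register.  (le x 0x7fffffff) is left alone because adding one
   overflows the mode, and (leu x 0xffffffff) because PLUS_ONE wraps
   to zero.  */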
3860 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3861 result in TARGET. CMP0 and TARGET are register_operands that have
3862 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3863 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3865 static void
3866 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3867 rtx target, rtx cmp0, rtx cmp1)
3869 /* First see if there is a MIPS instruction that can do this operation
3870 with CMP1 in its current form. If not, try to canonicalize the
3871 comparison to LT. If that fails, try doing the same for the
3872 inverse operation. If that also fails, force CMP1 into a register
3873 and try again. */
3874 if (mips_relational_operand_ok_p (code, cmp1))
3875 mips_emit_binary (code, target, cmp0, cmp1);
3876 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3877 mips_emit_binary (code, target, cmp0, cmp1);
3878 else
3880 enum rtx_code inv_code = reverse_condition (code);
3881 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3883 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3884 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3886 else if (invert_ptr == 0)
3888 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3889 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3890 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3892 else
3894 *invert_ptr = !*invert_ptr;
3895 mips_emit_binary (inv_code, target, cmp0, cmp1);
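/* Illustrative example, not in the original source: for a GE
   comparison against a register, GE itself is not directly available
   and cannot be canonicalized, so the code above falls back on the
   inverse LT: an "slt" into a temporary followed by an "xori" with 1
   when INVERT_PTR is null, or just the "slt" with *INVERT_PTR flipped
   when the caller can absorb the inversion.  */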
3900 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3901 The register will have the same mode as CMP0. */
3903 static rtx
3904 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3906 if (cmp1 == const0_rtx)
3907 return cmp0;
3909 if (uns_arith_operand (cmp1, VOIDmode))
3910 return expand_binop (GET_MODE (cmp0), xor_optab,
3911 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3913 return expand_binop (GET_MODE (cmp0), sub_optab,
3914 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
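/* Examples, added for illustration: comparing X against the constant
   200 yields a single "xori" because 200 fits in an unsigned 16-bit
   immediate, whereas 0x12345 does not, so the constant is loaded into
   a register and a "subu"/"dsubu" is used instead.  */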
3917 /* Convert *CODE into a code that can be used in a floating-point
3918 scc instruction (c.<cond>.<fmt>). Return true if the values of
3919 the condition code registers will be inverted, with 0 indicating
3920 that the condition holds. */
3922 static bool
3923 mips_reverse_fp_cond_p (enum rtx_code *code)
3925 switch (*code)
3927 case NE:
3928 case LTGT:
3929 case ORDERED:
3930 *code = reverse_condition_maybe_unordered (*code);
3931 return true;
3933 default:
3934 return false;
3938 /* Convert a comparison into something that can be used in a branch or
3939 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3940 being compared and *CODE is the code used to compare them.
3942 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3943 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3944 otherwise any standard branch condition can be used. The standard branch
3945 conditions are:
3947 - EQ/NE between two registers.
3948 - any comparison between a register and zero. */
3950 static void
3951 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3953 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3955 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3957 *op0 = cmp_operands[0];
3958 *op1 = cmp_operands[1];
3960 else if (*code == EQ || *code == NE)
3962 if (need_eq_ne_p)
3964 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3965 *op1 = const0_rtx;
3967 else
3969 *op0 = cmp_operands[0];
3970 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3973 else
3975 /* The comparison needs a separate scc instruction. Store the
3976 result of the scc in *OP0 and compare it against zero. */
3977 bool invert = false;
3978 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3979 *op1 = const0_rtx;
3980 mips_emit_int_relational (*code, &invert, *op0,
3981 cmp_operands[0], cmp_operands[1]);
3982 *code = (invert ? EQ : NE);
3985 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3987 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3988 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3989 *code = NE;
3990 *op1 = const0_rtx;
3992 else
3994 enum rtx_code cmp_code;
3996 /* Floating-point tests use a separate c.cond.fmt comparison to
3997 set a condition code register. The branch or conditional move
3998 will then compare that register against zero.
4000 Set CMP_CODE to the code of the comparison instruction and
4001 *CODE to the code that the branch or move should use. */
4002 cmp_code = *code;
4003 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
4004 *op0 = (ISA_HAS_8CC
4005 ? gen_reg_rtx (CCmode)
4006 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4007 *op1 = const0_rtx;
4008 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4012 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4013 Store the result in TARGET and return true if successful.
4015 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
4017 bool
4018 mips_emit_scc (enum rtx_code code, rtx target)
4020 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4021 return false;
4023 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
4024 if (code == EQ || code == NE)
4026 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4027 mips_emit_binary (code, target, zie, const0_rtx);
4029 else
4030 mips_emit_int_relational (code, 0, target,
4031 cmp_operands[0], cmp_operands[1]);
4032 return true;
4035 /* Emit the common code for doing conditional branches.
4036 operands[0] is the label to jump to.
4037 The comparison operands are saved away by cmp{si,di,sf,df}. */
4039 void
4040 gen_conditional_branch (rtx *operands, enum rtx_code code)
4042 rtx op0, op1, condition;
4044 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4045 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4046 emit_jump_insn (gen_condjump (condition, operands[0]));
4049 /* Implement:
4051 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4052 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4054 void
4055 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4056 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4058 rtx cmp_result;
4059 bool reversed_p;
4061 reversed_p = mips_reverse_fp_cond_p (&cond);
4062 cmp_result = gen_reg_rtx (CCV2mode);
4063 emit_insn (gen_scc_ps (cmp_result,
4064 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4065 if (reversed_p)
4066 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4067 cmp_result));
4068 else
4069 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4070 cmp_result));
4073 /* Emit the common code for conditional moves. OPERANDS is the array
4074 of operands passed to the conditional move define_expand. */
4076 void
4077 gen_conditional_move (rtx *operands)
4079 enum rtx_code code;
4080 rtx op0, op1;
4082 code = GET_CODE (operands[1]);
4083 mips_emit_compare (&code, &op0, &op1, true);
4084 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4085 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4086 gen_rtx_fmt_ee (code,
4087 GET_MODE (op0),
4088 op0, op1),
4089 operands[2], operands[3])));
4092 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4093 the conditional_trap expander. */
4095 void
4096 mips_gen_conditional_trap (rtx *operands)
4098 rtx op0, op1;
4099 enum rtx_code cmp_code = GET_CODE (operands[0]);
4100 enum machine_mode mode = GET_MODE (cmp_operands[0]);
4102 /* MIPS conditional trap machine instructions don't have GT or LE
4103 flavors, so we must swap the operands and convert to LT and
4104 GE, respectively. */
4105 switch (cmp_code)
4107 case GT: cmp_code = LT; break;
4108 case LE: cmp_code = GE; break;
4109 case GTU: cmp_code = LTU; break;
4110 case LEU: cmp_code = GEU; break;
4111 default: break;
4113 if (cmp_code == GET_CODE (operands[0]))
4115 op0 = cmp_operands[0];
4116 op1 = cmp_operands[1];
4118 else
4120 op0 = cmp_operands[1];
4121 op1 = cmp_operands[0];
4123 op0 = force_reg (mode, op0);
4124 if (!arith_operand (op1, mode))
4125 op1 = force_reg (mode, op1);
4127 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4128 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4129 operands[1]));
4132 /* Return true if function DECL is a MIPS16 function. Return the ambient
4133 setting if DECL is null. */
4135 static bool
4136 mips_use_mips16_mode_p (tree decl)
4138 if (decl)
4140 /* Nested functions must use the same frame pointer as their
4141 parent and must therefore use the same ISA mode. */
4142 tree parent = decl_function_context (decl);
4143 if (parent)
4144 decl = parent;
4145 if (mips_mips16_decl_p (decl))
4146 return true;
4147 if (mips_nomips16_decl_p (decl))
4148 return false;
4150 return mips_base_mips16;
4153 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4155 static bool
4156 mips_ok_for_lazy_binding_p (rtx x)
4158 return (TARGET_USE_GOT
4159 && GET_CODE (x) == SYMBOL_REF
4160 && !mips_symbol_binds_local_p (x));
4163 /* Load function address ADDR into register DEST. SIBCALL_P is true
4164 if the address is needed for a sibling call. */
4166 static void
4167 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
4169 /* If we're generating PIC, and this call is to a global function,
4170 try to allow its address to be resolved lazily. This isn't
4171 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4172 to the stub would be our caller's gp, not ours. */
4173 if (TARGET_EXPLICIT_RELOCS
4174 && !(sibcall_p && TARGET_CALL_SAVED_GP)
4175 && mips_ok_for_lazy_binding_p (addr))
4177 rtx high, lo_sum_symbol;
4179 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
4180 addr, SYMBOL_GOTOFF_CALL);
4181 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
4182 if (Pmode == SImode)
4183 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
4184 else
4185 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
4187 else
4188 mips_emit_move (dest, addr);
4192 /* Expand a call or call_value instruction. RESULT is where the
4193 result will go (null for calls), ADDR is the address of the
4194 function, ARGS_SIZE is the size of the arguments and AUX is
4195 the value passed to us by mips_function_arg. SIBCALL_P is true
4196 if we are expanding a sibling call, false if we're expanding
4197 a normal call. */
4199 void
4200 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
4202 rtx orig_addr, pattern, insn;
4204 orig_addr = addr;
4205 if (!call_insn_operand (addr, VOIDmode))
4207 addr = gen_reg_rtx (Pmode);
4208 mips_load_call_address (addr, orig_addr, sibcall_p);
4211 if (TARGET_MIPS16
4212 && TARGET_HARD_FLOAT_ABI
4213 && build_mips16_call_stub (result, addr, args_size,
4214 aux == 0 ? 0 : (int) GET_MODE (aux)))
4215 return;
4217 if (result == 0)
4218 pattern = (sibcall_p
4219 ? gen_sibcall_internal (addr, args_size)
4220 : gen_call_internal (addr, args_size));
4221 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
4223 rtx reg1, reg2;
4225 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
4226 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
4227 pattern =
4228 (sibcall_p
4229 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
4230 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
4232 else
4233 pattern = (sibcall_p
4234 ? gen_sibcall_value_internal (result, addr, args_size)
4235 : gen_call_value_internal (result, addr, args_size));
4237 insn = emit_call_insn (pattern);
4239 /* Lazy-binding stubs require $gp to be valid on entry. */
4240 if (mips_ok_for_lazy_binding_p (orig_addr))
4241 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4245 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4247 static bool
4248 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4250 if (!TARGET_SIBCALLS)
4251 return false;
4253 /* We can't do a sibcall if the called function is a MIPS16 function
4254 because there is no direct "jx" instruction equivalent to "jalx" to
4255 switch the ISA mode. */
4256 if (mips_use_mips16_mode_p (decl))
4257 return false;
4259 /* Otherwise OK. */
4260 return true;
4263 /* Emit code to move general operand SRC into condition-code
4264 register DEST. SCRATCH is a scratch TFmode float register.
4265 The sequence is:
4267 FP1 = SRC
4268 FP2 = 0.0f
4269 DEST = FP2 < FP1
4271 where FP1 and FP2 are single-precision float registers
4272 taken from SCRATCH. */
4274 void
4275 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
4277 rtx fp1, fp2;
4279 /* Change the source to SFmode. */
4280 if (MEM_P (src))
4281 src = adjust_address (src, SFmode, 0);
4282 else if (REG_P (src) || GET_CODE (src) == SUBREG)
4283 src = gen_rtx_REG (SFmode, true_regnum (src));
4285 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
4286 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
4288 mips_emit_move (copy_rtx (fp1), src);
4289 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
4290 emit_insn (gen_slt_sf (dest, fp2, fp1));
4293 /* Emit code to change the current function's return address to
4294 ADDRESS. SCRATCH is available as a scratch register, if needed.
4295 ADDRESS and SCRATCH are both word-mode GPRs. */
4297 void
4298 mips_set_return_address (rtx address, rtx scratch)
4300 rtx slot_address;
4302 compute_frame_size (get_frame_size ());
4303 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
4304 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
4305 cfun->machine->frame.gp_sp_offset);
4307 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
4310 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4311 Assume that the areas do not overlap. */
4313 static void
4314 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
4316 HOST_WIDE_INT offset, delta;
4317 unsigned HOST_WIDE_INT bits;
4318 int i;
4319 enum machine_mode mode;
4320 rtx *regs;
4322 /* Work out how many bits to move at a time. If both operands have
4323 half-word alignment, it is usually better to move in half words.
4324 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4325 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4326 Otherwise move word-sized chunks. */
4327 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
4328 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
4329 bits = BITS_PER_WORD / 2;
4330 else
4331 bits = BITS_PER_WORD;
4333 mode = mode_for_size (bits, MODE_INT, 0);
4334 delta = bits / BITS_PER_UNIT;
4336 /* Allocate a buffer for the temporary registers. */
4337 regs = alloca (sizeof (rtx) * length / delta);
4339 /* Load as many BITS-sized chunks as possible. Use a normal load if
4340 the source has enough alignment, otherwise use left/right pairs. */
4341 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4343 regs[i] = gen_reg_rtx (mode);
4344 if (MEM_ALIGN (src) >= bits)
4345 mips_emit_move (regs[i], adjust_address (src, mode, offset));
4346 else
4348 rtx part = adjust_address (src, BLKmode, offset);
4349 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
4350 gcc_unreachable ();
4354 /* Copy the chunks to the destination. */
4355 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4356 if (MEM_ALIGN (dest) >= bits)
4357 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
4358 else
4360 rtx part = adjust_address (dest, BLKmode, offset);
4361 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
4362 gcc_unreachable ();
4365 /* Mop up any left-over bytes. */
4366 if (offset < length)
4368 src = adjust_address (src, BLKmode, offset);
4369 dest = adjust_address (dest, BLKmode, offset);
4370 move_by_pieces (dest, src, length - offset,
4371 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
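/* Stand-alone sketch, not part of the port, of the copy strategy above,
   assuming 4-byte words and <stddef.h>/<string.h>.  The real code loads
   every chunk into a register before storing any of them, to give the
   scheduler more freedom; this sketch interleaves the two steps for
   brevity.  The trailing byte loop plays the role of move_by_pieces.  */
#if 0
static void
copy_in_word_chunks (unsigned char *dest, const unsigned char *src,
                     size_t length)
{
  size_t offset = 0;
  unsigned int chunk;

  for (; offset + sizeof (chunk) <= length; offset += sizeof (chunk))
    {
      memcpy (&chunk, src + offset, sizeof (chunk));
      memcpy (dest + offset, &chunk, sizeof (chunk));
    }
  for (; offset < length; offset++)
    dest[offset] = src[offset];
}
#endif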
4375 #define MAX_MOVE_REGS 4
4376 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4379 /* Helper function for doing a loop-based block operation on memory
4380 reference MEM. Each iteration of the loop will operate on LENGTH
4381 bytes of MEM.
4383 Create a new base register for use within the loop and point it to
4384 the start of MEM. Create a new memory reference that uses this
4385 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4387 static void
4388 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4389 rtx *loop_reg, rtx *loop_mem)
4391 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4393 /* Although the new mem does not refer to a known location,
4394 it does keep up to LENGTH bytes of alignment. */
4395 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4396 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4400 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4401 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4402 memory regions do not overlap. */
4404 static void
4405 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4407 rtx label, src_reg, dest_reg, final_src;
4408 HOST_WIDE_INT leftover;
4410 leftover = length % MAX_MOVE_BYTES;
4411 length -= leftover;
4413 /* Create registers and memory references for use within the loop. */
4414 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4415 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4417 /* Calculate the value that SRC_REG should have after the last iteration
4418 of the loop. */
4419 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4420 0, 0, OPTAB_WIDEN);
4422 /* Emit the start of the loop. */
4423 label = gen_label_rtx ();
4424 emit_label (label);
4426 /* Emit the loop body. */
4427 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4429 /* Move on to the next block. */
4430 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4431 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4433 /* Emit the loop condition. */
4434 if (Pmode == DImode)
4435 emit_insn (gen_cmpdi (src_reg, final_src));
4436 else
4437 emit_insn (gen_cmpsi (src_reg, final_src));
4438 emit_jump_insn (gen_bne (label));
4440 /* Mop up any left-over bytes. */
4441 if (leftover)
4442 mips_block_move_straight (dest, src, leftover);
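/* Worked example, illustration only: with 4-byte words MAX_MOVE_BYTES
   is 16, so a 100-byte copy runs the loop above six times (96 bytes,
   four words per iteration) and the final mips_block_move_straight
   call handles the remaining 4 bytes.  */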
4446 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4448 void
4449 mips_expand_synci_loop (rtx begin, rtx end)
4451 rtx inc, label, cmp, cmp_result;
4453 /* Load INC with the cache line size (rdhwr INC,$1). */
4454 inc = gen_reg_rtx (SImode);
4455 emit_insn (gen_rdhwr (inc, const1_rtx));
4457 /* Loop back to here. */
4458 label = gen_label_rtx ();
4459 emit_label (label);
4461 emit_insn (gen_synci (begin));
4463 cmp = gen_reg_rtx (Pmode);
4464 mips_emit_binary (GTU, cmp, begin, end);
4466 mips_emit_binary (PLUS, begin, begin, inc);
4468 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4469 emit_jump_insn (gen_condjump (cmp_result, label));
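/* C-level sketch, not part of the port, of the loop shape emitted
   above; CACHE_LINE stands in for the rdhwr result, and the comparison
   is made against the address that was just flushed, before the
   increment.  */
#if 0
static void
synci_loop_shape (unsigned long begin, unsigned long end,
                  unsigned long cache_line)
{
  int done;

  do
    {
      /* synci 0(begin) */
      done = begin > end;
      begin += cache_line;
    }
  while (!done);
}
#endif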
4472 /* Expand a movmemsi instruction. */
4474 bool
4475 mips_expand_block_move (rtx dest, rtx src, rtx length)
4477 if (GET_CODE (length) == CONST_INT)
4479 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4481 mips_block_move_straight (dest, src, INTVAL (length));
4482 return true;
4484 else if (optimize)
4486 mips_block_move_loop (dest, src, INTVAL (length));
4487 return true;
4490 return false;
4493 /* Argument support functions. */
4495 /* Initialize CUMULATIVE_ARGS for a function. */
4497 void
4498 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4499 rtx libname ATTRIBUTE_UNUSED)
4501 static CUMULATIVE_ARGS zero_cum;
4502 tree param, next_param;
4504 *cum = zero_cum;
4505 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4507 /* Determine if this function has variable arguments. This is
4508 indicated by the last argument being 'void_type_node' if there
4509 are no variable arguments. The standard MIPS calling sequence
4510 passes all arguments in the general purpose registers in this case. */
4512 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4513 param != 0; param = next_param)
4515 next_param = TREE_CHAIN (param);
4516 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4517 cum->gp_reg_found = 1;
4522 /* Fill INFO with information about a single argument. CUM is the
4523 cumulative state for earlier arguments. MODE is the mode of this
4524 argument and TYPE is its type (if known). NAMED is true if this
4525 is a named (fixed) argument rather than a variable one. */
4527 static void
4528 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4529 tree type, int named, struct mips_arg_info *info)
4531 bool doubleword_aligned_p;
4532 unsigned int num_bytes, num_words, max_regs;
4534 /* Work out the size of the argument. */
4535 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4536 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4538 /* Decide whether it should go in a floating-point register, assuming
4539 one is free. Later code checks for availability.
4541 The checks against UNITS_PER_FPVALUE handle the soft-float and
4542 single-float cases. */
4543 switch (mips_abi)
4545 case ABI_EABI:
4546 /* The EABI conventions have traditionally been defined in terms
4547 of TYPE_MODE, regardless of the actual type. */
4548 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4549 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4550 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4551 break;
4553 case ABI_32:
4554 case ABI_O64:
4555 /* Only leading floating-point scalars are passed in
4556 floating-point registers. We also handle vector floats the same
4557 way, which is OK because they are not covered by the standard ABI. */
4558 info->fpr_p = (!cum->gp_reg_found
4559 && cum->arg_number < 2
4560 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4561 || VECTOR_FLOAT_TYPE_P (type))
4562 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4563 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4564 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4565 break;
4567 case ABI_N32:
4568 case ABI_64:
4569 /* Scalar and complex floating-point types are passed in
4570 floating-point registers. */
4571 info->fpr_p = (named
4572 && (type == 0 || FLOAT_TYPE_P (type))
4573 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4574 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4575 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4576 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4578 /* ??? According to the ABI documentation, the real and imaginary
4579 parts of complex floats should be passed in individual registers.
4580 The real and imaginary parts of stack arguments are supposed
4581 to be contiguous and there should be an extra word of padding
4582 at the end.
4584 This has two problems. First, it makes it impossible to use a
4585 single "void *" va_list type, since register and stack arguments
4586 are passed differently. (At the time of writing, MIPSpro cannot
4587 handle complex float varargs correctly.) Second, it's unclear
4588 what should happen when there is only one register free.
4590 For now, we assume that named complex floats should go into FPRs
4591 if there are two FPRs free, otherwise they should be passed in the
4592 same way as a struct containing two floats. */
4593 if (info->fpr_p
4594 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4595 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4597 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4598 info->fpr_p = false;
4599 else
4600 num_words = 2;
4602 break;
4604 default:
4605 gcc_unreachable ();
4608 /* See whether the argument has doubleword alignment. */
4609 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4611 /* Set REG_OFFSET to the register count we're interested in.
4612 The EABI allocates the floating-point registers separately,
4613 but the other ABIs allocate them like integer registers. */
4614 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4615 ? cum->num_fprs
4616 : cum->num_gprs);
4618 /* Advance to an even register if the argument is doubleword-aligned. */
4619 if (doubleword_aligned_p)
4620 info->reg_offset += info->reg_offset & 1;
4622 /* Work out the offset of a stack argument. */
4623 info->stack_offset = cum->stack_words;
4624 if (doubleword_aligned_p)
4625 info->stack_offset += info->stack_offset & 1;
4627 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4629 /* Partition the argument between registers and stack. */
4630 info->reg_words = MIN (num_words, max_regs);
4631 info->stack_words = num_words - info->reg_words;
4635 /* INFO describes an argument that is passed in a single-register value.
4636 Return the register it uses, assuming that FPRs are available if
4637 HARD_FLOAT_P. */
4639 static unsigned int
4640 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4642 if (!info->fpr_p || !hard_float_p)
4643 return GP_ARG_FIRST + info->reg_offset;
4644 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4645 /* In o32, the second argument is always passed in $f14
4646 for TARGET_DOUBLE_FLOAT, regardless of whether the
4647 first argument was a word or doubleword. */
4648 return FP_ARG_FIRST + 2;
4649 else
4650 return FP_ARG_FIRST + info->reg_offset;
4653 /* Implement FUNCTION_ARG_ADVANCE. */
4655 void
4656 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4657 tree type, int named)
4659 struct mips_arg_info info;
4661 mips_arg_info (cum, mode, type, named, &info);
4663 if (!info.fpr_p)
4664 cum->gp_reg_found = true;
4666 /* See the comment above the cumulative args structure in mips.h
4667 for an explanation of what this code does. It assumes the O32
4668 ABI, which passes at most 2 arguments in float registers. */
4669 if (cum->arg_number < 2 && info.fpr_p)
4670 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4672 if (mips_abi != ABI_EABI || !info.fpr_p)
4673 cum->num_gprs = info.reg_offset + info.reg_words;
4674 else if (info.reg_words > 0)
4675 cum->num_fprs += MAX_FPRS_PER_FMT;
4677 if (info.stack_words > 0)
4678 cum->stack_words = info.stack_offset + info.stack_words;
4680 cum->arg_number++;
4683 /* Implement FUNCTION_ARG. */
4685 struct rtx_def *
4686 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4687 tree type, int named)
4689 struct mips_arg_info info;
4691 /* We will be called with a mode of VOIDmode after the last argument
4692 has been seen. Whatever we return will be passed to the call
4693 insn. If we need a mips16 fp_code, return a REG with the code
4694 stored as the mode. */
4695 if (mode == VOIDmode)
4697 if (TARGET_MIPS16 && cum->fp_code != 0)
4698 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4700 else
4701 return 0;
4704 mips_arg_info (cum, mode, type, named, &info);
4706 /* Return straight away if the whole argument is passed on the stack. */
4707 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4708 return 0;
4710 if (type != 0
4711 && TREE_CODE (type) == RECORD_TYPE
4712 && TARGET_NEWABI
4713 && TYPE_SIZE_UNIT (type)
4714 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4715 && named)
4717 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4718 structure contains a double in its entirety, then that 64-bit
4719 chunk is passed in a floating point register. */
4720 tree field;
4722 /* First check to see if there is any such field. */
4723 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4724 if (TREE_CODE (field) == FIELD_DECL
4725 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4726 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4727 && host_integerp (bit_position (field), 0)
4728 && int_bit_position (field) % BITS_PER_WORD == 0)
4729 break;
4731 if (field != 0)
4733 /* Now handle the special case by returning a PARALLEL
4734 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4735 chunks are passed in registers. */
4736 unsigned int i;
4737 HOST_WIDE_INT bitpos;
4738 rtx ret;
4740 /* assign_parms checks the mode of ENTRY_PARM, so we must
4741 use the actual mode here. */
4742 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4744 bitpos = 0;
4745 field = TYPE_FIELDS (type);
4746 for (i = 0; i < info.reg_words; i++)
4748 rtx reg;
4750 for (; field; field = TREE_CHAIN (field))
4751 if (TREE_CODE (field) == FIELD_DECL
4752 && int_bit_position (field) >= bitpos)
4753 break;
4755 if (field
4756 && int_bit_position (field) == bitpos
4757 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4758 && !TARGET_SOFT_FLOAT
4759 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4760 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4761 else
4762 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4764 XVECEXP (ret, 0, i)
4765 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4766 GEN_INT (bitpos / BITS_PER_UNIT));
4768 bitpos += BITS_PER_WORD;
4770 return ret;
4774 /* Handle the n32/n64 conventions for passing complex floating-point
4775 arguments in FPR pairs. The real part goes in the lower register
4776 and the imaginary part goes in the upper register. */
4777 if (TARGET_NEWABI
4778 && info.fpr_p
4779 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4781 rtx real, imag;
4782 enum machine_mode inner;
4783 int reg;
4785 inner = GET_MODE_INNER (mode);
4786 reg = FP_ARG_FIRST + info.reg_offset;
4787 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4789 /* Real part in registers, imaginary part on stack. */
4790 gcc_assert (info.stack_words == info.reg_words);
4791 return gen_rtx_REG (inner, reg);
4793 else
4795 gcc_assert (info.stack_words == 0);
4796 real = gen_rtx_EXPR_LIST (VOIDmode,
4797 gen_rtx_REG (inner, reg),
4798 const0_rtx);
4799 imag = gen_rtx_EXPR_LIST (VOIDmode,
4800 gen_rtx_REG (inner,
4801 reg + info.reg_words / 2),
4802 GEN_INT (GET_MODE_SIZE (inner)));
4803 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4807 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4811 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4813 static int
4814 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4815 enum machine_mode mode, tree type, bool named)
4817 struct mips_arg_info info;
4819 mips_arg_info (cum, mode, type, named, &info);
4820 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4824 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4825 PARM_BOUNDARY bits of alignment, but will be given anything up
4826 to STACK_BOUNDARY bits if the type requires it. */
4828 int
4829 function_arg_boundary (enum machine_mode mode, tree type)
4831 unsigned int alignment;
4833 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4834 if (alignment < PARM_BOUNDARY)
4835 alignment = PARM_BOUNDARY;
4836 if (alignment > STACK_BOUNDARY)
4837 alignment = STACK_BOUNDARY;
4838 return alignment;
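/* Worked example, illustration only: under o32, a "long long" or
   "double" argument reports a 64-bit boundary here, which is what makes
   DOUBLEWORD_ALIGNED_P true in mips_arg_info and steers the argument
   into an even-numbered register or an aligned stack slot, while a
   "char" argument is rounded up to PARM_BOUNDARY.  */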
4841 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4842 upward rather than downward. In other words, return true if the
4843 first byte of the stack slot has useful data, false if the last
4844 byte does. */
4846 bool
4847 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4849 /* On little-endian targets, the first byte of every stack argument
4850 is passed in the first byte of the stack slot. */
4851 if (!BYTES_BIG_ENDIAN)
4852 return true;
4854 /* Otherwise, integral types are padded downward: the last byte of a
4855 stack argument is passed in the last byte of the stack slot. */
4856 if (type != 0
4857 ? (INTEGRAL_TYPE_P (type)
4858 || POINTER_TYPE_P (type)
4859 || FIXED_POINT_TYPE_P (type))
4860 : (GET_MODE_CLASS (mode) == MODE_INT
4861 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4862 return false;
4864 /* Big-endian o64 pads floating-point arguments downward. */
4865 if (mips_abi == ABI_O64)
4866 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4867 return false;
4869 /* Other types are padded upward for o32, o64, n32 and n64. */
4870 if (mips_abi != ABI_EABI)
4871 return true;
4873 /* Arguments smaller than a stack slot are padded downward. */
4874 if (mode != BLKmode)
4875 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4876 else
4877 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
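/* Examples, added for illustration: on a big-endian o32 target a
   "short" stack argument is padded downward and so occupies the last
   two bytes of its word-sized slot, whereas a three-byte structure is
   padded upward and occupies the first three bytes; on little-endian
   targets everything is padded upward.  */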
4881 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4882 if the least significant byte of the register has useful data. Return
4883 the opposite if the most significant byte does. */
4885 bool
4886 mips_pad_reg_upward (enum machine_mode mode, tree type)
4888 /* No shifting is required for floating-point arguments. */
4889 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4890 return !BYTES_BIG_ENDIAN;
4892 /* Otherwise, apply the same padding to register arguments as we do
4893 to stack arguments. */
4894 return mips_pad_arg_upward (mode, type);
4897 static void
4898 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4899 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4900 int no_rtl)
4902 CUMULATIVE_ARGS local_cum;
4903 int gp_saved, fp_saved;
4905 /* The caller has advanced CUM up to, but not beyond, the last named
4906 argument. Advance a local copy of CUM past the last "real" named
4907 argument, to find out how many registers are left over. */
4909 local_cum = *cum;
4910 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4912 /* Find out how many registers we need to save. */
4913 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4914 fp_saved = (EABI_FLOAT_VARARGS_P
4915 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4916 : 0);
4918 if (!no_rtl)
4920 if (gp_saved > 0)
4922 rtx ptr, mem;
4924 ptr = plus_constant (virtual_incoming_args_rtx,
4925 REG_PARM_STACK_SPACE (cfun->decl)
4926 - gp_saved * UNITS_PER_WORD);
4927 mem = gen_rtx_MEM (BLKmode, ptr);
4928 set_mem_alias_set (mem, get_varargs_alias_set ());
4930 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4931 mem, gp_saved);
4933 if (fp_saved > 0)
4935 /* We can't use move_block_from_reg, because it will use
4936 the wrong mode. */
4937 enum machine_mode mode;
4938 int off, i;
4940 /* Set OFF to the offset from virtual_incoming_args_rtx of
4941 the first float register. The FP save area lies below
4942 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4943 off = -gp_saved * UNITS_PER_WORD;
4944 off &= ~(UNITS_PER_FPVALUE - 1);
4945 off -= fp_saved * UNITS_PER_FPREG;
4947 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4949 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4950 i += MAX_FPRS_PER_FMT)
4952 rtx ptr, mem;
4954 ptr = plus_constant (virtual_incoming_args_rtx, off);
4955 mem = gen_rtx_MEM (mode, ptr);
4956 set_mem_alias_set (mem, get_varargs_alias_set ());
4957 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4958 off += UNITS_PER_HWFPVALUE;
4962 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4963 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4964 + fp_saved * UNITS_PER_FPREG);
4967 /* Create the va_list data type.
4968 We keep 3 pointers, and two offsets.
4969 Two pointers are to the overflow area, which starts at the CFA.
4970 One of these is constant, for addressing into the GPR save area below it.
4971 The other is advanced up the stack through the overflow region.
4972 The third pointer is to the GPR save area. Since the FPR save area
4973 is just below it, we can address FPR slots off this pointer.
4974 We also keep two one-byte offsets, which are to be subtracted from the
4975 constant pointers to yield addresses in the GPR and FPR save areas.
4976 These are downcounted as float or non-float arguments are used,
4977 and when they get to zero, the argument must be obtained from the
4978 overflow region.
4979 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4980 pointer is enough. It's started at the GPR save area, and is
4981 advanced, period.
4982 Note that the GPR save area is not constant size, due to optimization
4983 in the prologue. Hence, we can't use a design with two pointers
4984 and two offsets, although we could have designed this with two pointers
4985 and three offsets. */
4987 static tree
4988 mips_build_builtin_va_list (void)
4990 if (EABI_FLOAT_VARARGS_P)
4992 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4993 tree array, index;
4995 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4997 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4998 ptr_type_node);
4999 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5000 ptr_type_node);
5001 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5002 ptr_type_node);
5003 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5004 unsigned_char_type_node);
5005 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5006 unsigned_char_type_node);
5007 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5008 warn on every user file. */
5009 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5010 array = build_array_type (unsigned_char_type_node,
5011 build_index_type (index));
5012 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5014 DECL_FIELD_CONTEXT (f_ovfl) = record;
5015 DECL_FIELD_CONTEXT (f_gtop) = record;
5016 DECL_FIELD_CONTEXT (f_ftop) = record;
5017 DECL_FIELD_CONTEXT (f_goff) = record;
5018 DECL_FIELD_CONTEXT (f_foff) = record;
5019 DECL_FIELD_CONTEXT (f_res) = record;
5021 TYPE_FIELDS (record) = f_ovfl;
5022 TREE_CHAIN (f_ovfl) = f_gtop;
5023 TREE_CHAIN (f_gtop) = f_ftop;
5024 TREE_CHAIN (f_ftop) = f_goff;
5025 TREE_CHAIN (f_goff) = f_foff;
5026 TREE_CHAIN (f_foff) = f_res;
5028 layout_type (record);
5029 return record;
5031 else if (TARGET_IRIX && TARGET_IRIX6)
5032 /* On IRIX 6, this type is 'char *'. */
5033 return build_pointer_type (char_type_node);
5034 else
5035 /* Otherwise, we use 'void *'. */
5036 return ptr_type_node;
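/* C-level picture, illustration only and assuming 32-bit pointers, of
   the EABI record built above; the field names are the ones passed to
   build_decl.  */
#if 0
struct illustrative_mips_eabi_va_list
{
  void *__overflow_argptr;      /* next stack (overflow) argument */
  void *__gpr_top;              /* top of the GPR save area */
  void *__fpr_top;              /* top of the FPR save area */
  unsigned char __gpr_offset;   /* bytes left in the GPR save area */
  unsigned char __fpr_offset;   /* bytes left in the FPR save area */
  unsigned char __reserved[2];  /* pads the record to pointer size */
};
#endif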
5039 /* Implement va_start. */
5041 void
5042 mips_va_start (tree valist, rtx nextarg)
5044 if (EABI_FLOAT_VARARGS_P)
5046 const CUMULATIVE_ARGS *cum;
5047 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5048 tree ovfl, gtop, ftop, goff, foff;
5049 tree t;
5050 int gpr_save_area_size;
5051 int fpr_save_area_size;
5052 int fpr_offset;
5054 cum = &current_function_args_info;
5055 gpr_save_area_size
5056 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5057 fpr_save_area_size
5058 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5060 f_ovfl = TYPE_FIELDS (va_list_type_node);
5061 f_gtop = TREE_CHAIN (f_ovfl);
5062 f_ftop = TREE_CHAIN (f_gtop);
5063 f_goff = TREE_CHAIN (f_ftop);
5064 f_foff = TREE_CHAIN (f_goff);
5066 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5067 NULL_TREE);
5068 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5069 NULL_TREE);
5070 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5071 NULL_TREE);
5072 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5073 NULL_TREE);
5074 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5075 NULL_TREE);
5077 /* Emit code to initialize OVFL, which points to the next varargs
5078 stack argument. CUM->STACK_WORDS gives the number of stack
5079 words used by named arguments. */
5080 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5081 if (cum->stack_words > 0)
5082 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5083 size_int (cum->stack_words * UNITS_PER_WORD));
5084 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5085 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5087 /* Emit code to initialize GTOP, the top of the GPR save area. */
5088 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5089 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
5090 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5092 /* Emit code to initialize FTOP, the top of the FPR save area.
5093 This address is gpr_save_area_bytes below GTOP, rounded
5094 down to the next fp-aligned boundary. */
5095 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5096 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5097 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
5098 if (fpr_offset)
5099 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5100 size_int (-fpr_offset));
5101 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
5102 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5104 /* Emit code to initialize GOFF, the offset from GTOP of the
5105 next GPR argument. */
5106 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
5107 build_int_cst (NULL_TREE, gpr_save_area_size));
5108 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5110 /* Likewise emit code to initialize FOFF, the offset from FTOP
5111 of the next FPR argument. */
5112 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
5113 build_int_cst (NULL_TREE, fpr_save_area_size));
5114 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5116 else
5118 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5119 std_expand_builtin_va_start (valist, nextarg);
5123 /* Implement va_arg. */
5125 static tree
5126 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5128 HOST_WIDE_INT size, rsize;
5129 tree addr;
5130 bool indirect;
5132 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5134 if (indirect)
5135 type = build_pointer_type (type);
5137 size = int_size_in_bytes (type);
5138 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5140 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
5141 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5142 else
5144 /* Not a simple merged stack. */
5146 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5147 tree ovfl, top, off, align;
5148 HOST_WIDE_INT osize;
5149 tree t, u;
5151 f_ovfl = TYPE_FIELDS (va_list_type_node);
5152 f_gtop = TREE_CHAIN (f_ovfl);
5153 f_ftop = TREE_CHAIN (f_gtop);
5154 f_goff = TREE_CHAIN (f_ftop);
5155 f_foff = TREE_CHAIN (f_goff);
5157 /* We maintain separate pointers and offsets for floating-point
5158 and integer arguments, but we need similar code in both cases.
5159 Let:
5161 TOP be the top of the register save area;
5162 OFF be the offset from TOP of the next register;
5163 ADDR_RTX be the address of the argument;
5164 RSIZE be the number of bytes used to store the argument
5165 when it's in the register save area;
5166 OSIZE be the number of bytes used to store it when it's
5167 in the stack overflow area; and
5168 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5170 The code we want is:
5172 1: off &= -rsize; // round down
5173 2: if (off != 0)
5174 3: {
5175 4: addr_rtx = top - off;
5176 5: off -= rsize;
5177 6: }
5178 7: else
5179 8: {
5180 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5181 10: addr_rtx = ovfl + PADDING;
5182 11: ovfl += osize;
5183 14: }
5185 [1] and [9] can sometimes be optimized away. */
5187 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5188 NULL_TREE);
5190 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5191 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5193 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5194 NULL_TREE);
5195 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5196 NULL_TREE);
5198 /* When floating-point registers are saved to the stack,
5199 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5200 of the float's precision. */
5201 rsize = UNITS_PER_HWFPVALUE;
5203 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5204 (= PARM_BOUNDARY bits). This can be different from RSIZE
5205 in two cases:
5207 (1) On 32-bit targets when TYPE is a structure such as:
5209 struct s { float f; };
5211 Such structures are passed in paired FPRs, so RSIZE
5212 will be 8 bytes. However, the structure only takes
5213 up 4 bytes of memory, so OSIZE will only be 4.
5215 (2) In combinations such as -mgp64 -msingle-float
5216 -fshort-double. Doubles passed in registers
5217 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5218 but those passed on the stack take up
5219 UNITS_PER_WORD bytes. */
5220 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5222 else
5224 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5225 NULL_TREE);
5226 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5227 NULL_TREE);
5228 if (rsize > UNITS_PER_WORD)
5230 /* [1] Emit code for: off &= -rsize. */
5231 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5232 build_int_cst (NULL_TREE, -rsize));
5233 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5234 gimplify_and_add (t, pre_p);
5236 osize = rsize;
5239 /* [2] Emit code to branch if off == 0. */
5240 t = build2 (NE_EXPR, boolean_type_node, off,
5241 build_int_cst (TREE_TYPE (off), 0));
5242 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5244 /* [5] Emit code for: off -= rsize. We do this as a form of
5245 post-increment not available to C. Also widen for the
5246 coming pointer arithmetic. */
5247 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5248 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5249 t = fold_convert (sizetype, t);
5250 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5252 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5253 the argument has RSIZE - SIZE bytes of leading padding. */
5254 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5255 if (BYTES_BIG_ENDIAN && rsize > size)
5257 u = size_int (rsize - size);
5258 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5260 COND_EXPR_THEN (addr) = t;
5262 if (osize > UNITS_PER_WORD)
5264 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5265 u = size_int (osize - 1);
5266 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5267 t = fold_convert (sizetype, t);
5268 u = size_int (-osize);
5269 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5270 t = fold_convert (TREE_TYPE (ovfl), t);
5271 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5273 else
5274 align = NULL;
5276 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5277 post-increment ovfl by osize. On big-endian machines,
5278 the argument has OSIZE - SIZE bytes of leading padding. */
5279 u = fold_convert (TREE_TYPE (ovfl),
5280 build_int_cst (NULL_TREE, osize));
5281 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5282 if (BYTES_BIG_ENDIAN && osize > size)
5284 u = size_int (osize - size);
5285 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5288 /* String [9] and [10,11] together. */
5289 if (align)
5290 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5291 COND_EXPR_ELSE (addr) = t;
5293 addr = fold_convert (build_pointer_type (type), addr);
5294 addr = build_va_arg_indirect_ref (addr);
5297 if (indirect)
5298 addr = build_va_arg_indirect_ref (addr);
5300 return addr;
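/* Stand-alone sketch, not part of the port, of the word-sized integer
   case handled in tree form above, assuming 4-byte words and ignoring
   the big-endian padding adjustments.  It follows steps [2]-[11] of
   the pseudo-code in the comment: take the argument from the register
   save area while the offset is nonzero, otherwise from the overflow
   area.  */
#if 0
static void *
illustrative_eabi_va_arg_word (char *gpr_top, unsigned char *gpr_offset,
                               char **overflow_argptr)
{
  void *addr;

  if (*gpr_offset != 0)
    {
      addr = gpr_top - *gpr_offset;
      *gpr_offset -= 4;         /* rsize */
    }
  else
    {
      addr = *overflow_argptr;
      *overflow_argptr += 4;    /* osize */
    }
  return addr;
}
#endif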
5303 /* Return true if it is possible to use left/right accesses for a
5304 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5305 returning true, update *OP, *LEFT and *RIGHT as follows:
5307 *OP is a BLKmode reference to the whole field.
5309 *LEFT is a QImode reference to the first byte if big endian or
5310 the last byte if little endian. This address can be used in the
5311 left-side instructions (lwl, swl, ldl, sdl).
5313 *RIGHT is a QImode reference to the opposite end of the field and
5314 can be used in the corresponding right-side instructions (lwr, swr, ldr, sdr). */
5316 static bool
5317 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5318 rtx *left, rtx *right)
5320 rtx first, last;
5322 /* Check that the operand really is a MEM. Not all of the extv and
5323 extzv predicates check this for us. */
5324 if (!MEM_P (*op))
5325 return false;
5327 /* Check that the size is valid. */
5328 if (width != 32 && (!TARGET_64BIT || width != 64))
5329 return false;
5331 /* We can only access byte-aligned values. Since we are always passed
5332 a reference to the first byte of the field, it is not necessary to
5333 do anything with BITPOS after this check. */
5334 if (bitpos % BITS_PER_UNIT != 0)
5335 return false;
5337 /* Reject aligned bitfields: we want to use a normal load or store
5338 instead of a left/right pair. */
5339 if (MEM_ALIGN (*op) >= width)
5340 return false;
5342 /* Adjust *OP to refer to the whole field. This also has the effect
5343 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5344 *op = adjust_address (*op, BLKmode, 0);
5345 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5347 /* Get references to both ends of the field. We deliberately don't
5348 use the original QImode *OP for FIRST since the new BLKmode one
5349 might have a simpler address. */
5350 first = adjust_address (*op, QImode, 0);
5351 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5353 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5354 be the upper word and RIGHT the lower word. */
5355 if (TARGET_BIG_ENDIAN)
5356 *left = first, *right = last;
5357 else
5358 *left = last, *right = first;
5360 return true;
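/* Example, added for illustration: for a 32-bit field at byte address A
   on a big-endian target, *LEFT refers to the byte at A and *RIGHT to
   the byte at A + 3, matching the "lwl dest,0(base)" / "lwr dest,3(base)"
   pair produced for the load expander below; a little-endian target
   swaps the two references.  */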
5364 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5365 Return true on success. We only handle cases where zero_extract is
5366 equivalent to sign_extract. */
5368 bool
5369 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5371 rtx left, right, temp;
5373 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5374 paradoxical word_mode subreg. This is the only case in which
5375 we allow the destination to be larger than the source. */
5376 if (GET_CODE (dest) == SUBREG
5377 && GET_MODE (dest) == DImode
5378 && SUBREG_BYTE (dest) == 0
5379 && GET_MODE (SUBREG_REG (dest)) == SImode)
5380 dest = SUBREG_REG (dest);
5382 /* After the above adjustment, the destination must be the same
5383 width as the source. */
5384 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5385 return false;
5387 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5388 return false;
5390 temp = gen_reg_rtx (GET_MODE (dest));
5391 if (GET_MODE (dest) == DImode)
5393 emit_insn (gen_mov_ldl (temp, src, left));
5394 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5396 else
5398 emit_insn (gen_mov_lwl (temp, src, left));
5399 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5401 return true;
5405 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5406 true on success. */
5408 bool
5409 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5411 rtx left, right;
5412 enum machine_mode mode;
5414 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5415 return false;
5417 mode = mode_for_size (width, MODE_INT, 0);
5418 src = gen_lowpart (mode, src);
5420 if (mode == DImode)
5422 emit_insn (gen_mov_sdl (dest, src, left));
5423 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5425 else
5427 emit_insn (gen_mov_swl (dest, src, left));
5428 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5430 return true;
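/* A minimal user-level sketch (a hypothetical test case, not code from
   this file) of where the two expanders above come into play:

        struct __attribute__ ((packed)) rec { char tag; int value; };

        int  get (struct rec *r)        { return r->value; }
        void put (struct rec *r, int v) { r->value = v; }

   Accesses to the packed "value" field cannot assume natural alignment,
   so they are candidates for the lwl/lwr and swl/swr pairs, subject to
   the checks in mips_get_unaligned_mem.  */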
5433 /* Return true if X is a MEM with the same size as MODE. */
5435 bool
5436 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5438 rtx size;
5440 if (!MEM_P (x))
5441 return false;
5443 size = MEM_SIZE (x);
5444 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5447 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5448 source of an "ext" instruction or the destination of an "ins"
5449 instruction. OP must be a register operand and the following
5450 conditions must hold:
5452 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5453 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5454 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5456 Also reject lengths equal to a word as they are better handled
5457 by the move patterns. */
5459 bool
5460 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5462 HOST_WIDE_INT len, pos;
5464 if (!ISA_HAS_EXT_INS
5465 || !register_operand (op, VOIDmode)
5466 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5467 return false;
5469 len = INTVAL (size);
5470 pos = INTVAL (position);
5472 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5473 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5474 return false;
5476 return true;
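/* Roughly the kind of source-level access that can satisfy the checks
   above (the positions and sizes are examples only):

        struct bits { unsigned int lo : 3; unsigned int mid : 8; };

   Reading "mid" is a candidate for a single "ext" such as
   "ext $2, $4, 3, 8", and assigning to it for a matching "ins",
   provided ISA_HAS_EXT_INS holds and the operand is in a register.  */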
5479 /* Set up globals to generate code for the ISA or processor
5480 described by INFO. */
5482 static void
5483 mips_set_architecture (const struct mips_cpu_info *info)
5485 if (info != 0)
5487 mips_arch_info = info;
5488 mips_arch = info->cpu;
5489 mips_isa = info->isa;
5494 /* Likewise for tuning. */
5496 static void
5497 mips_set_tune (const struct mips_cpu_info *info)
5499 if (info != 0)
5501 mips_tune_info = info;
5502 mips_tune = info->cpu;
5506 /* Initialize mips_split_addresses from the associated command-line
5507 settings.
5509 mips_split_addresses is a half-way house between explicit
5510 relocations and the traditional assembler macros. It can
5511 split absolute 32-bit symbolic constants into a high/lo_sum
5512 pair but uses macros for other sorts of access.
5514 Like explicit relocation support for REL targets, it relies
5515 on GNU extensions in the assembler and the linker.
5517 Although this code should work for -O0, it has traditionally
5518 been treated as an optimization. */
5520 static void
5521 mips_init_split_addresses (void)
5523 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5524 && optimize && !flag_pic
5525 && !ABI_HAS_64BIT_SYMBOLS)
5526 mips_split_addresses = 1;
5527 else
5528 mips_split_addresses = 0;
5531 /* (Re-)Initialize information about relocs. */
5533 static void
5534 mips_init_relocs (void)
5536 memset (mips_split_p, '\0', sizeof (mips_split_p));
5537 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5538 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5540 if (ABI_HAS_64BIT_SYMBOLS)
5542 if (TARGET_EXPLICIT_RELOCS)
5544 mips_split_p[SYMBOL_64_HIGH] = true;
5545 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5546 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5548 mips_split_p[SYMBOL_64_MID] = true;
5549 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5550 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5552 mips_split_p[SYMBOL_64_LOW] = true;
5553 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5554 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5556 mips_split_p[SYMBOL_ABSOLUTE] = true;
5557 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5560 else
5562 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5564 mips_split_p[SYMBOL_ABSOLUTE] = true;
5565 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5566 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5568 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5572 if (TARGET_MIPS16)
5574 /* The high part is provided by a pseudo copy of $gp. */
5575 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5576 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5579 if (TARGET_EXPLICIT_RELOCS)
5581 /* Small data constants are kept whole until after reload,
5582 then lowered by mips_rewrite_small_data. */
5583 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5585 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5586 if (TARGET_NEWABI)
5588 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5589 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5591 else
5593 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5594 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5597 if (TARGET_XGOT)
5599 /* The HIGH and LO_SUM are matched by special .md patterns. */
5600 mips_split_p[SYMBOL_GOT_DISP] = true;
5602 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5603 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5604 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5606 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5607 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5608 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5610 else
5612 if (TARGET_NEWABI)
5613 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5614 else
5615 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5616 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5620 if (TARGET_NEWABI)
5622 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5623 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5624 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5627 /* Thread-local relocation operators. */
5628 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5629 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5630 mips_split_p[SYMBOL_DTPREL] = 1;
5631 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5632 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5633 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5634 mips_split_p[SYMBOL_TPREL] = 1;
5635 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5636 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5638 mips_lo_relocs[SYMBOL_HALF] = "%half(";
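/* For reference, hand-written flavours of the splits these operators
   describe (register choices are arbitrary):

     absolute 32-bit address:
        lui     $2, %hi(sym)
        addiu   $2, $2, %lo(sym)

     GOT page/offset access on the new ABIs:
        lw      $2, %got_page(sym)($gp)
        addiu   $2, $2, %got_ofst(sym)

   The operator actually chosen for a given SYMBOL_* type follows the
   tables built above; this is only an illustrative sketch.  */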
5641 static GTY(()) int was_mips16_p = -1;
5643 /* Set up the target-dependent global state so that it matches the
5644 current function's ISA mode. */
5646 static void
5647 mips_set_mips16_mode (int mips16_p)
5649 if (mips16_p == was_mips16_p)
5650 return;
5652 /* Restore base settings of various flags. */
5653 target_flags = mips_base_target_flags;
5654 align_loops = mips_base_align_loops;
5655 align_jumps = mips_base_align_jumps;
5656 align_functions = mips_base_align_functions;
5657 flag_schedule_insns = mips_base_schedule_insns;
5658 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5659 flag_move_loop_invariants = mips_base_move_loop_invariants;
5660 flag_delayed_branch = mips_flag_delayed_branch;
5662 if (mips16_p)
5664 /* Select mips16 instruction set. */
5665 target_flags |= MASK_MIPS16;
5667 /* Don't run the scheduler before reload, since it tends to
5668 increase register pressure. */
5669 flag_schedule_insns = 0;
5671 /* Don't do hot/cold partitioning. The constant layout code expects
5672 the whole function to be in a single section. */
5673 flag_reorder_blocks_and_partition = 0;
5675 /* Don't move loop invariants, because it tends to increase
5676 register pressure. It also introduces an extra move in cases
5677 where the constant is the first operand in a two-operand binary
5678 instruction, or when it forms a register argument to a function
5679 call. */
5680 flag_move_loop_invariants = 0;
5682 /* Silently disable -mexplicit-relocs since it doesn't apply
5683 to mips16 code. Even so, it would be overly pedantic to warn
5684 about "-mips16 -mexplicit-relocs", especially given that
5685 we use a %gprel() operator. */
5686 target_flags &= ~MASK_EXPLICIT_RELOCS;
5688 /* Silently disable DSP extensions. */
5689 target_flags &= ~MASK_DSP;
5690 target_flags &= ~MASK_DSPR2;
5692 /* Experiments suggest we get the best overall results from using
5693 the range of an unextended lw or sw. Code that makes heavy use
5694 of byte or short accesses can do better with ranges of 0...31
5695 and 0...63 respectively, but most code is sensitive to the range
5696 of lw and sw instead. */
5697 targetm.min_anchor_offset = 0;
5698 targetm.max_anchor_offset = 127;
5700 if (flag_pic || TARGET_ABICALLS)
5701 sorry ("MIPS16 PIC");
5703 else
5705 /* Reset to select base non-mips16 ISA. */
5706 target_flags &= ~MASK_MIPS16;
5708 /* When using explicit relocs, we call dbr_schedule from within
5709 mips_reorg. */
5710 if (TARGET_EXPLICIT_RELOCS)
5711 flag_delayed_branch = 0;
5713 /* Provide default values for align_* for 64-bit targets. */
5714 if (TARGET_64BIT)
5716 if (align_loops == 0)
5717 align_loops = 8;
5718 if (align_jumps == 0)
5719 align_jumps = 8;
5720 if (align_functions == 0)
5721 align_functions = 8;
5724 targetm.min_anchor_offset = TARGET_MIN_ANCHOR_OFFSET;
5725 targetm.max_anchor_offset = TARGET_MAX_ANCHOR_OFFSET;
5728 /* (Re)initialize mips target internals for new ISA. */
5729 mips_init_split_addresses ();
5730 mips_init_relocs ();
5732 if (was_mips16_p >= 0)
5733 /* Reinitialize target-dependent state. */
5734 target_reinit ();
5736 was_mips16_p = TARGET_MIPS16;
5739 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
5740 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
5742 struct mflip_mips16_entry GTY (()) {
5743 const char *name;
5744 bool mips16_p;
5746 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
5748 /* Hash table callbacks for mflip_mips16_htab. */
5750 static hashval_t
5751 mflip_mips16_htab_hash (const void *entry)
5753 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
5756 static int
5757 mflip_mips16_htab_eq (const void *entry, const void *name)
5759 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
5760 (const char *) name) == 0;
5763 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
5764 for -mflip-mips16. Return true if it should use "mips16" and false if
5765 it should use "nomips16". */
5767 static bool
5768 mflip_mips16_use_mips16_p (tree decl)
5770 struct mflip_mips16_entry *entry;
5771 const char *name;
5772 hashval_t hash;
5773 void **slot;
5775 /* Use the opposite of the command-line setting for anonymous decls. */
5776 if (!DECL_NAME (decl))
5777 return !mips_base_mips16;
5779 if (!mflip_mips16_htab)
5780 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
5781 mflip_mips16_htab_eq, NULL);
5783 name = IDENTIFIER_POINTER (DECL_NAME (decl));
5784 hash = htab_hash_string (name);
5785 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
5786 entry = (struct mflip_mips16_entry *) *slot;
5787 if (!entry)
5789 mips16_flipper = !mips16_flipper;
5790 entry = GGC_NEW (struct mflip_mips16_entry);
5791 entry->name = name;
5792 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
5793 *slot = entry;
5795 return entry->mips16_p;
5798 /* Implement TARGET_INSERT_ATTRIBUTES. */
5800 static void
5801 mips_insert_attributes (tree decl, tree *attributes)
5803 const char *name;
5804 bool mips16_p, nomips16_p;
5806 /* Check for "mips16" and "nomips16" attributes. */
5807 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
5808 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
5809 if (TREE_CODE (decl) != FUNCTION_DECL)
5811 if (mips16_p)
5812 error ("%qs attribute only applies to functions", "mips16");
5813 if (nomips16_p)
5814 error ("%qs attribute only applies to functions", "nomips16");
5816 else
5818 mips16_p |= mips_mips16_decl_p (decl);
5819 nomips16_p |= mips_nomips16_decl_p (decl);
5820 if (mips16_p || nomips16_p)
5822 /* DECL cannot be simultaneously mips16 and nomips16. */
5823 if (mips16_p && nomips16_p)
5824 error ("%qs cannot have both %<mips16%> and "
5825 "%<nomips16%> attributes",
5826 IDENTIFIER_POINTER (DECL_NAME (decl)));
5828 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
5830 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
5831 "mips16" attribute, arbitrarily pick one. We must pick the same
5832 setting for duplicate declarations of a function. */
5833 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
5834 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
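/* A hypothetical user-level example of what the hook above validates.
   The attribute names are the real ones handled here; the declarations
   themselves are illustrative:

        void f (void) __attribute__ ((mips16));
        void g (void) __attribute__ ((nomips16));

   Applying either attribute to a non-function, or both attributes to
   the same function, is rejected with the errors above.  */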
5839 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
5841 static tree
5842 mips_merge_decl_attributes (tree olddecl, tree newdecl)
5844 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
5845 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
5846 error ("%qs redeclared with conflicting %qs attributes",
5847 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
5848 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
5849 error ("%qs redeclared with conflicting %qs attributes",
5850 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
5852 return merge_attributes (DECL_ATTRIBUTES (olddecl),
5853 DECL_ATTRIBUTES (newdecl));
5856 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5857 function should use the MIPS16 ISA and switch modes accordingly. */
5859 static void
5860 mips_set_current_function (tree fndecl)
5862 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
5865 /* Implement TARGET_HANDLE_OPTION. */
5867 static bool
5868 mips_handle_option (size_t code, const char *arg, int value)
5870 switch (code)
5872 case OPT_mabi_:
5873 if (strcmp (arg, "32") == 0)
5874 mips_abi = ABI_32;
5875 else if (strcmp (arg, "o64") == 0)
5876 mips_abi = ABI_O64;
5877 else if (strcmp (arg, "n32") == 0)
5878 mips_abi = ABI_N32;
5879 else if (strcmp (arg, "64") == 0)
5880 mips_abi = ABI_64;
5881 else if (strcmp (arg, "eabi") == 0)
5882 mips_abi = ABI_EABI;
5883 else
5884 return false;
5885 return true;
5887 case OPT_march_:
5888 case OPT_mtune_:
5889 return mips_parse_cpu (arg) != 0;
5891 case OPT_mips:
5892 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5893 return mips_isa_info != 0;
5895 case OPT_mno_flush_func:
5896 mips_cache_flush_func = NULL;
5897 return true;
5899 case OPT_mcode_readable_:
5900 if (strcmp (arg, "yes") == 0)
5901 mips_code_readable = CODE_READABLE_YES;
5902 else if (strcmp (arg, "pcrel") == 0)
5903 mips_code_readable = CODE_READABLE_PCREL;
5904 else if (strcmp (arg, "no") == 0)
5905 mips_code_readable = CODE_READABLE_NO;
5906 else
5907 return false;
5908 return true;
5910 case OPT_mllsc:
5911 mips_llsc = value ? LLSC_YES : LLSC_NO;
5912 return true;
5914 default:
5915 return true;
5919 /* Set up the threshold for data to go into the small data area, instead
5920 of the normal data area, and detect any conflicts in the switches. */
5922 void
5923 override_options (void)
5925 int i, start, regno;
5926 enum machine_mode mode;
5928 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5929 SUBTARGET_OVERRIDE_OPTIONS;
5930 #endif
5932 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5934 /* The following code determines the architecture and register size.
5935 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5936 The GAS and GCC code should be kept in sync as much as possible. */
5938 if (mips_arch_string != 0)
5939 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5941 if (mips_isa_info != 0)
5943 if (mips_arch_info == 0)
5944 mips_set_architecture (mips_isa_info);
5945 else if (mips_arch_info->isa != mips_isa_info->isa)
5946 error ("-%s conflicts with the other architecture options, "
5947 "which specify a %s processor",
5948 mips_isa_info->name,
5949 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5952 if (mips_arch_info == 0)
5954 #ifdef MIPS_CPU_STRING_DEFAULT
5955 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5956 #else
5957 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5958 #endif
5961 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5962 error ("-march=%s is not compatible with the selected ABI",
5963 mips_arch_info->name);
5965 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5966 if (mips_tune_string != 0)
5967 mips_set_tune (mips_parse_cpu (mips_tune_string));
5969 if (mips_tune_info == 0)
5970 mips_set_tune (mips_arch_info);
5972 /* Set cost structure for the processor. */
5973 if (optimize_size)
5974 mips_cost = &mips_rtx_cost_optimize_size;
5975 else
5976 mips_cost = &mips_rtx_cost_data[mips_tune];
5978 /* If the user hasn't specified a branch cost, use the processor's
5979 default. */
5980 if (mips_branch_cost == 0)
5981 mips_branch_cost = mips_cost->branch_cost;
5983 if ((target_flags_explicit & MASK_64BIT) != 0)
5985 /* The user specified the size of the integer registers. Make sure
5986 it agrees with the ABI and ISA. */
5987 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5988 error ("-mgp64 used with a 32-bit processor");
5989 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5990 error ("-mgp32 used with a 64-bit ABI");
5991 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5992 error ("-mgp64 used with a 32-bit ABI");
5994 else
5996 /* Infer the integer register size from the ABI and processor.
5997 Restrict ourselves to 32-bit registers if that's all the
5998 processor has, or if the ABI cannot handle 64-bit registers. */
5999 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
6000 target_flags &= ~MASK_64BIT;
6001 else
6002 target_flags |= MASK_64BIT;
6005 if ((target_flags_explicit & MASK_FLOAT64) != 0)
6007 /* Really, -mfp32 and -mfp64 are ornamental options. There's
6008 only one right answer here. */
6009 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
6010 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
6011 else if (!TARGET_64BIT && TARGET_FLOAT64
6012 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
6013 error ("-mgp32 and -mfp64 can only be combined if the target"
6014 " supports the mfhc1 and mthc1 instructions");
6015 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
6016 error ("unsupported combination: %s", "-mfp64 -msingle-float");
6018 else
6020 /* -msingle-float selects 32-bit float registers. Otherwise the
6021 float registers should be the same size as the integer ones. */
6022 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
6023 target_flags |= MASK_FLOAT64;
6024 else
6025 target_flags &= ~MASK_FLOAT64;
6028 /* End of code shared with GAS. */
6030 if ((target_flags_explicit & MASK_LONG64) == 0)
6032 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
6033 target_flags |= MASK_LONG64;
6034 else
6035 target_flags &= ~MASK_LONG64;
6038 if (!TARGET_OLDABI)
6039 flag_pcc_struct_return = 0;
6041 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
6043 /* If neither -mbranch-likely nor -mno-branch-likely was given
6044 on the command line, set MASK_BRANCHLIKELY based on the target
6045 architecture.
6047 By default, we enable use of Branch Likely instructions on
6048 all architectures which support them with the following
6049 exceptions: when creating MIPS32 or MIPS64 code, and when
6050 tuning for architectures where their use tends to hurt
6051 performance.
6053 The MIPS32 and MIPS64 architecture specifications say "Software
6054 is strongly encouraged to avoid use of Branch Likely
6055 instructions, as they will be removed from a future revision
6056 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
6057 issue those instructions unless instructed to do so by
6058 -mbranch-likely. */
6059 if (ISA_HAS_BRANCHLIKELY
6060 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
6061 && !(TUNE_MIPS5500 || TUNE_SB1))
6062 target_flags |= MASK_BRANCHLIKELY;
6063 else
6064 target_flags &= ~MASK_BRANCHLIKELY;
6066 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
6067 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
6069 /* The effect of -mabicalls isn't defined for the EABI. */
6070 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
6072 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
6073 target_flags &= ~MASK_ABICALLS;
6076 /* MIPS16 cannot generate PIC yet. */
6077 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
6079 sorry ("MIPS16 PIC");
6080 target_flags &= ~MASK_ABICALLS;
6081 flag_pic = flag_pie = flag_shlib = 0;
6084 if (TARGET_ABICALLS)
6085 /* We need to set flag_pic for executables as well as DSOs
6086 because we may reference symbols that are not defined in
6087 the final executable. (MIPS does not use things like
6088 copy relocs, for example.)
6090 Also, there is a body of code that uses __PIC__ to distinguish
6091 between -mabicalls and -mno-abicalls code. */
6092 flag_pic = 1;
6094 /* -mvr4130-align is a "speed over size" optimization: it usually produces
6095 faster code, but at the expense of more nops. Enable it at -O3 and
6096 above. */
6097 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
6098 target_flags |= MASK_VR4130_ALIGN;
6100 /* Prefer a call to memcpy over inline code when optimizing for size,
6101 though see MOVE_RATIO in mips.h. */
6102 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
6103 target_flags |= MASK_MEMCPY;
6105 /* If we have a nonzero small-data limit, check that the -mgpopt
6106 setting is consistent with the other target flags. */
6107 if (mips_section_threshold > 0)
6109 if (!TARGET_GPOPT)
6111 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
6112 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
6114 TARGET_LOCAL_SDATA = false;
6115 TARGET_EXTERN_SDATA = false;
6117 else
6119 if (TARGET_VXWORKS_RTP)
6120 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
6122 if (TARGET_ABICALLS)
6123 warning (0, "cannot use small-data accesses for %qs",
6124 "-mabicalls");
6128 #ifdef MIPS_TFMODE_FORMAT
6129 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
6130 #endif
6132 /* Make sure that the user didn't turn off paired single support when
6133 MIPS-3D support is requested. */
6134 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
6135 && !TARGET_PAIRED_SINGLE_FLOAT)
6136 error ("-mips3d requires -mpaired-single");
6138 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
6139 if (TARGET_MIPS3D)
6140 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
6142 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
6143 and TARGET_HARD_FLOAT are both true. */
6144 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
6145 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
6147 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
6148 enabled. */
6149 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
6150 error ("-mips3d/-mpaired-single must be used with -mips64");
6152 /* If TARGET_DSPR2, enable MASK_DSP. */
6153 if (TARGET_DSPR2)
6154 target_flags |= MASK_DSP;
6156 mips_print_operand_punct['?'] = 1;
6157 mips_print_operand_punct['#'] = 1;
6158 mips_print_operand_punct['/'] = 1;
6159 mips_print_operand_punct['&'] = 1;
6160 mips_print_operand_punct['!'] = 1;
6161 mips_print_operand_punct['*'] = 1;
6162 mips_print_operand_punct['@'] = 1;
6163 mips_print_operand_punct['.'] = 1;
6164 mips_print_operand_punct['('] = 1;
6165 mips_print_operand_punct[')'] = 1;
6166 mips_print_operand_punct['['] = 1;
6167 mips_print_operand_punct[']'] = 1;
6168 mips_print_operand_punct['<'] = 1;
6169 mips_print_operand_punct['>'] = 1;
6170 mips_print_operand_punct['{'] = 1;
6171 mips_print_operand_punct['}'] = 1;
6172 mips_print_operand_punct['^'] = 1;
6173 mips_print_operand_punct['$'] = 1;
6174 mips_print_operand_punct['+'] = 1;
6175 mips_print_operand_punct['~'] = 1;
6176 mips_print_operand_punct['|'] = 1;
6177 mips_print_operand_punct['-'] = 1;
6179 /* Set up array to map GCC register number to debug register number.
6180 Ignore the special purpose register numbers. */
6182 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6184 mips_dbx_regno[i] = INVALID_REGNUM;
6185 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
6186 mips_dwarf_regno[i] = i;
6187 else
6188 mips_dwarf_regno[i] = INVALID_REGNUM;
6191 start = GP_DBX_FIRST - GP_REG_FIRST;
6192 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
6193 mips_dbx_regno[i] = i + start;
6195 start = FP_DBX_FIRST - FP_REG_FIRST;
6196 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
6197 mips_dbx_regno[i] = i + start;
6199 /* HI and LO debug registers use big-endian ordering. */
6200 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
6201 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
6202 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
6203 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
6204 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
6206 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
6207 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
6210 /* Set up array giving whether a given register can hold a given mode. */
6212 for (mode = VOIDmode;
6213 mode != MAX_MACHINE_MODE;
6214 mode = (enum machine_mode) ((int)mode + 1))
6216 register int size = GET_MODE_SIZE (mode);
6217 register enum mode_class class = GET_MODE_CLASS (mode);
6219 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6221 register int temp;
6223 if (mode == CCV2mode)
6224 temp = (ISA_HAS_8CC
6225 && ST_REG_P (regno)
6226 && (regno - ST_REG_FIRST) % 2 == 0);
6228 else if (mode == CCV4mode)
6229 temp = (ISA_HAS_8CC
6230 && ST_REG_P (regno)
6231 && (regno - ST_REG_FIRST) % 4 == 0);
6233 else if (mode == CCmode)
6235 if (! ISA_HAS_8CC)
6236 temp = (regno == FPSW_REGNUM);
6237 else
6238 temp = (ST_REG_P (regno) || GP_REG_P (regno)
6239 || FP_REG_P (regno));
6242 else if (GP_REG_P (regno))
6243 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
6245 else if (FP_REG_P (regno))
6246 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
6247 || (MIN_FPRS_PER_FMT == 1
6248 && size <= UNITS_PER_FPREG))
6249 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
6250 || class == MODE_VECTOR_FLOAT)
6251 && size <= UNITS_PER_FPVALUE)
6252 /* Allow integer modes that fit into a single
6253 register. We need to put integers into FPRs
6254 when using instructions like cvt and trunc.
6255 We can't allow sizes smaller than a word, because
6256 the FPU has no appropriate load/store
6257 instructions for those. */
6258 || (class == MODE_INT
6259 && size >= MIN_UNITS_PER_WORD
6260 && size <= UNITS_PER_FPREG)
6261 /* Allow TFmode for CCmode reloads. */
6262 || (ISA_HAS_8CC && mode == TFmode)));
6264 else if (ACC_REG_P (regno))
6265 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
6266 && size <= UNITS_PER_WORD * 2
6267 && (size <= UNITS_PER_WORD
6268 || regno == MD_REG_FIRST
6269 || (DSP_ACC_REG_P (regno)
6270 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
6272 else if (ALL_COP_REG_P (regno))
6273 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
6274 else
6275 temp = 0;
6277 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
6281 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6282 initialized yet, so we can't use that here. */
6283 gpr_mode = TARGET_64BIT ? DImode : SImode;
6285 /* Function to allocate machine-dependent function status. */
6286 init_machine_status = &mips_init_machine_status;
6288 /* Default to working around R4000 errata only if the processor
6289 was selected explicitly. */
6290 if ((target_flags_explicit & MASK_FIX_R4000) == 0
6291 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
6292 target_flags |= MASK_FIX_R4000;
6294 /* Default to working around R4400 errata only if the processor
6295 was selected explicitly. */
6296 if ((target_flags_explicit & MASK_FIX_R4400) == 0
6297 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
6298 target_flags |= MASK_FIX_R4400;
6300 /* Save base state of options. */
6301 mips_base_mips16 = TARGET_MIPS16;
6302 mips_base_target_flags = target_flags;
6303 mips_base_schedule_insns = flag_schedule_insns;
6304 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
6305 mips_base_move_loop_invariants = flag_move_loop_invariants;
6306 mips_base_align_loops = align_loops;
6307 mips_base_align_jumps = align_jumps;
6308 mips_base_align_functions = align_functions;
6309 mips_flag_delayed_branch = flag_delayed_branch;
6311 /* Now select the mips16 or 32-bit instruction set, as requested. */
6312 mips_set_mips16_mode (mips_base_mips16);
6315 /* Swap the register information for registers I and I + 1, which
6316 currently have the wrong endianness. Note that the registers'
6317 fixedness and call-clobberedness might have been set on the
6318 command line. */
6320 static void
6321 mips_swap_registers (unsigned int i)
6323 int tmpi;
6324 const char *tmps;
6326 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6327 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6329 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
6330 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
6331 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
6332 SWAP_STRING (reg_names[i], reg_names[i + 1]);
6334 #undef SWAP_STRING
6335 #undef SWAP_INT
6338 /* Implement CONDITIONAL_REGISTER_USAGE. */
6340 void
6341 mips_conditional_register_usage (void)
6343 if (!TARGET_DSP)
6345 int regno;
6347 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
6348 fixed_regs[regno] = call_used_regs[regno] = 1;
6350 if (!TARGET_HARD_FLOAT)
6352 int regno;
6354 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
6355 fixed_regs[regno] = call_used_regs[regno] = 1;
6356 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6357 fixed_regs[regno] = call_used_regs[regno] = 1;
6359 else if (! ISA_HAS_8CC)
6361 int regno;
6363 /* We only have a single condition code register. We
6364 implement this by hiding all the condition code registers,
6365 and generating RTL that refers directly to ST_REG_FIRST. */
6366 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6367 fixed_regs[regno] = call_used_regs[regno] = 1;
6369 /* In mips16 mode, we permit the $t temporary registers to be used
6370 for reload. We prohibit the unused $s registers, since they
6371 are call-saved, and saving them via a mips16 register would
6372 probably waste more time than just reloading the value. */
6373 if (TARGET_MIPS16)
6375 fixed_regs[18] = call_used_regs[18] = 1;
6376 fixed_regs[19] = call_used_regs[19] = 1;
6377 fixed_regs[20] = call_used_regs[20] = 1;
6378 fixed_regs[21] = call_used_regs[21] = 1;
6379 fixed_regs[22] = call_used_regs[22] = 1;
6380 fixed_regs[23] = call_used_regs[23] = 1;
6381 fixed_regs[26] = call_used_regs[26] = 1;
6382 fixed_regs[27] = call_used_regs[27] = 1;
6383 fixed_regs[30] = call_used_regs[30] = 1;
6385 /* fp20-23 are now caller saved. */
6386 if (mips_abi == ABI_64)
6388 int regno;
6389 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
6390 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6392 /* Odd registers from fp21 to fp31 are now caller saved. */
6393 if (mips_abi == ABI_N32)
6395 int regno;
6396 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
6397 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6399 /* Make sure that double-register accumulator values are correctly
6400 ordered for the current endianness. */
6401 if (TARGET_LITTLE_ENDIAN)
6403 int regno;
6404 mips_swap_registers (MD_REG_FIRST);
6405 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
6406 mips_swap_registers (regno);
6410 /* Allocate a chunk of memory for per-function machine-dependent data. */
6411 static struct machine_function *
6412 mips_init_machine_status (void)
6414 return ((struct machine_function *)
6415 ggc_alloc_cleared (sizeof (struct machine_function)));
6418 /* On the mips16, we want to allocate $24 (T_REG) before other
6419 registers for instructions for which it is possible. This helps
6420 avoid shuffling registers around in order to set up for an xor,
6421 encouraging the compiler to use a cmp instead. */
6423 void
6424 mips_order_regs_for_local_alloc (void)
6426 register int i;
6428 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6429 reg_alloc_order[i] = i;
6431 if (TARGET_MIPS16)
6433 /* It really doesn't matter where we put register 0, since it is
6434 a fixed register anyhow. */
6435 reg_alloc_order[0] = 24;
6436 reg_alloc_order[24] = 0;
6441 /* The MIPS debug format wants all automatic variables and arguments
6442 to be in terms of the virtual frame pointer (stack pointer before
6443 any adjustment in the function), while the MIPS 3.0 linker wants
6444 the frame pointer to be the stack pointer after the initial
6445 adjustment. So, we do the adjustment here. The arg pointer (which
6446 is eliminated) points to the virtual frame pointer, while the frame
6447 pointer (which may be eliminated) points to the stack pointer after
6448 the initial adjustments. */
6450 HOST_WIDE_INT
6451 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6453 rtx offset2 = const0_rtx;
6454 rtx reg = eliminate_constant_term (addr, &offset2);
6456 if (offset == 0)
6457 offset = INTVAL (offset2);
6459 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6460 || reg == hard_frame_pointer_rtx)
6462 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6463 ? compute_frame_size (get_frame_size ())
6464 : cfun->machine->frame.total_size;
6466 /* The MIPS16 frame is smaller.  */
6467 if (frame_pointer_needed && TARGET_MIPS16)
6468 frame_size -= cfun->machine->frame.args_size;
6470 offset = offset - frame_size;
6473 /* sdbout_parms does not want this to crash for unrecognized cases. */
6474 #if 0
6475 else if (reg != arg_pointer_rtx)
6476 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6477 addr);
6478 #endif
6480 return offset;
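/* A small worked example of the adjustment above, assuming a frame of
   64 bytes: an argument at offset 8 from the post-prologue $sp is
   reported at 8 - 64 = -56 relative to the pre-adjustment stack
   pointer, which is what the debug format expects.  */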
6483 /* If OP is an UNSPEC address, return the address to which it refers,
6484 otherwise return OP itself. */
6486 static rtx
6487 mips_strip_unspec_address (rtx op)
6489 rtx base, offset;
6491 split_const (op, &base, &offset);
6492 if (UNSPEC_ADDRESS_P (base))
6493 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6494 return op;
6497 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6499 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6500 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6501 'h' OP is HIGH, prints %hi(X),
6502 'd' output integer constant in decimal,
6503 'z' if the operand is 0, use $0 instead of normal operand.
6504 'D' print second part of double-word register or memory operand.
6505 'L' print low-order register of double-word register operand.
6506 'M' print high-order register of double-word register operand.
6507 'C' print part of opcode for a branch condition.
6508 'F' print part of opcode for a floating-point branch condition.
6509 'N' print part of opcode for a branch condition, inverted.
6510 'W' print part of opcode for a floating-point branch condition, inverted.
6511 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6512 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6513 't' like 'T', but with the EQ/NE cases reversed
6514 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6515 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6516 'R' print the reloc associated with LO_SUM
6517 'q' print DSP accumulator registers
6519 The punctuation characters are:
6521 '(' Turn on .set noreorder
6522 ')' Turn on .set reorder
6523 '[' Turn on .set noat
6524 ']' Turn on .set at
6525 '<' Turn on .set nomacro
6526 '>' Turn on .set macro
6527 '{' Turn on .set volatile (not GAS)
6528 '}' Turn on .set novolatile (not GAS)
6529 '&' Turn on .set noreorder if filling delay slots
6530 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6531 '!' Turn on .set nomacro if filling delay slots
6532 '#' Print nop if in a .set noreorder section.
6533 '/' Like '#', but does nothing within a delayed branch sequence
6534 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6535 '@' Print the name of the assembler temporary register (at or $1).
6536 '.' Print the name of the register with a hard-wired zero (zero or $0).
6537 '^' Print the name of the pic call-through register (t9 or $25).
6538 '$' Print the name of the stack pointer register (sp or $29).
6539 '+' Print the name of the gp register (usually gp or $28).
6540 '~' Output a branch alignment to LABEL_ALIGN(NULL).
6541 '|' Print .set push; .set mips2 if mips_llsc == LLSC_YES
6542 && !ISA_HAS_LL_SC.
6543 '-' Print .set pop under the same conditions for '|'. */
6545 void
6546 print_operand (FILE *file, rtx op, int letter)
6548 register enum rtx_code code;
6550 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6552 switch (letter)
6554 case '?':
6555 if (mips_branch_likely)
6556 putc ('l', file);
6557 break;
6559 case '@':
6560 fputs (reg_names [GP_REG_FIRST + 1], file);
6561 break;
6563 case '^':
6564 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6565 break;
6567 case '.':
6568 fputs (reg_names [GP_REG_FIRST + 0], file);
6569 break;
6571 case '$':
6572 fputs (reg_names[STACK_POINTER_REGNUM], file);
6573 break;
6575 case '+':
6576 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6577 break;
6579 case '&':
6580 if (final_sequence != 0 && set_noreorder++ == 0)
6581 fputs (".set\tnoreorder\n\t", file);
6582 break;
6584 case '*':
6585 if (final_sequence != 0)
6587 if (set_noreorder++ == 0)
6588 fputs (".set\tnoreorder\n\t", file);
6590 if (set_nomacro++ == 0)
6591 fputs (".set\tnomacro\n\t", file);
6593 break;
6595 case '!':
6596 if (final_sequence != 0 && set_nomacro++ == 0)
6597 fputs ("\n\t.set\tnomacro", file);
6598 break;
6600 case '#':
6601 if (set_noreorder != 0)
6602 fputs ("\n\tnop", file);
6603 break;
6605 case '/':
6606 /* Print an extra newline so that the delayed insn is separated
6607 from the following ones. This looks neater and is consistent
6608 with non-nop delayed sequences. */
6609 if (set_noreorder != 0 && final_sequence == 0)
6610 fputs ("\n\tnop\n", file);
6611 break;
6613 case '(':
6614 if (set_noreorder++ == 0)
6615 fputs (".set\tnoreorder\n\t", file);
6616 break;
6618 case ')':
6619 if (set_noreorder == 0)
6620 error ("internal error: %%) found without a %%( in assembler pattern");
6622 else if (--set_noreorder == 0)
6623 fputs ("\n\t.set\treorder", file);
6625 break;
6627 case '[':
6628 if (set_noat++ == 0)
6629 fputs (".set\tnoat\n\t", file);
6630 break;
6632 case ']':
6633 if (set_noat == 0)
6634 error ("internal error: %%] found without a %%[ in assembler pattern");
6635 else if (--set_noat == 0)
6636 fputs ("\n\t.set\tat", file);
6638 break;
6640 case '<':
6641 if (set_nomacro++ == 0)
6642 fputs (".set\tnomacro\n\t", file);
6643 break;
6645 case '>':
6646 if (set_nomacro == 0)
6647 error ("internal error: %%> found without a %%< in assembler pattern");
6648 else if (--set_nomacro == 0)
6649 fputs ("\n\t.set\tmacro", file);
6651 break;
6653 case '{':
6654 if (set_volatile++ == 0)
6655 fputs ("#.set\tvolatile\n\t", file);
6656 break;
6658 case '}':
6659 if (set_volatile == 0)
6660 error ("internal error: %%} found without a %%{ in assembler pattern");
6661 else if (--set_volatile == 0)
6662 fputs ("\n\t#.set\tnovolatile", file);
6664 break;
6666 case '~':
6668 if (align_labels_log > 0)
6669 ASM_OUTPUT_ALIGN (file, align_labels_log);
6671 break;
6673 case '|':
6674 if (!ISA_HAS_LL_SC)
6675 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6676 break;
6678 case '-':
6679 if (!ISA_HAS_LL_SC)
6680 fputs ("\n\t.set\tpop", file);
6681 break;
6683 default:
6684 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6685 break;
6688 return;
6691 if (! op)
6693 error ("PRINT_OPERAND null pointer");
6694 return;
6697 code = GET_CODE (op);
6699 if (letter == 'C')
6700 switch (code)
6702 case EQ: fputs ("eq", file); break;
6703 case NE: fputs ("ne", file); break;
6704 case GT: fputs ("gt", file); break;
6705 case GE: fputs ("ge", file); break;
6706 case LT: fputs ("lt", file); break;
6707 case LE: fputs ("le", file); break;
6708 case GTU: fputs ("gtu", file); break;
6709 case GEU: fputs ("geu", file); break;
6710 case LTU: fputs ("ltu", file); break;
6711 case LEU: fputs ("leu", file); break;
6712 default:
6713 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6716 else if (letter == 'N')
6717 switch (code)
6719 case EQ: fputs ("ne", file); break;
6720 case NE: fputs ("eq", file); break;
6721 case GT: fputs ("le", file); break;
6722 case GE: fputs ("lt", file); break;
6723 case LT: fputs ("ge", file); break;
6724 case LE: fputs ("gt", file); break;
6725 case GTU: fputs ("leu", file); break;
6726 case GEU: fputs ("ltu", file); break;
6727 case LTU: fputs ("geu", file); break;
6728 case LEU: fputs ("gtu", file); break;
6729 default:
6730 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6733 else if (letter == 'F')
6734 switch (code)
6736 case EQ: fputs ("c1f", file); break;
6737 case NE: fputs ("c1t", file); break;
6738 default:
6739 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6742 else if (letter == 'W')
6743 switch (code)
6745 case EQ: fputs ("c1t", file); break;
6746 case NE: fputs ("c1f", file); break;
6747 default:
6748 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6751 else if (letter == 'h')
6753 if (GET_CODE (op) == HIGH)
6754 op = XEXP (op, 0);
6756 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6759 else if (letter == 'R')
6760 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6762 else if (letter == 'Y')
6764 if (GET_CODE (op) == CONST_INT
6765 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6766 < ARRAY_SIZE (mips_fp_conditions)))
6767 fputs (mips_fp_conditions[INTVAL (op)], file);
6768 else
6769 output_operand_lossage ("invalid %%Y value");
6772 else if (letter == 'Z')
6774 if (ISA_HAS_8CC)
6776 print_operand (file, op, 0);
6777 fputc (',', file);
6781 else if (letter == 'q')
6783 int regnum;
6785 if (code != REG)
6786 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6788 regnum = REGNO (op);
6789 if (MD_REG_P (regnum))
6790 fprintf (file, "$ac0");
6791 else if (DSP_ACC_REG_P (regnum))
6792 fprintf (file, "$ac%c", reg_names[regnum][3]);
6793 else
6794 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6797 else if (code == REG || code == SUBREG)
6799 register int regnum;
6801 if (code == REG)
6802 regnum = REGNO (op);
6803 else
6804 regnum = true_regnum (op);
6806 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6807 || (letter == 'L' && WORDS_BIG_ENDIAN)
6808 || letter == 'D')
6809 regnum++;
6811 fprintf (file, "%s", reg_names[regnum]);
6814 else if (code == MEM)
6816 if (letter == 'D')
6817 output_address (plus_constant (XEXP (op, 0), 4));
6818 else
6819 output_address (XEXP (op, 0));
6822 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6823 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6825 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6826 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6828 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6829 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6831 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6832 fputs (reg_names[GP_REG_FIRST], file);
6834 else if (letter == 'd' || letter == 'x' || letter == 'X')
6835 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6837 else if (letter == 'T' || letter == 't')
6839 int truth = (code == NE) == (letter == 'T');
6840 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6843 else if (CONST_GP_P (op))
6844 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6846 else
6847 output_addr_const (file, mips_strip_unspec_address (op));
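/* For reference, operand codes such as these appear in mips.md output
   templates; the snippets below are illustrative rather than quoted
   from mips.md:

        "%z1"   prints $0 when operand 1 is the constant zero
        "%D0"   prints the second word of a double-word operand 0
        "%(" ... "%)"   bracket a sequence with .set noreorder/.set reorder

   as implemented by the cases above.  */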
6851 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6852 in context CONTEXT. RELOCS is the array of relocations to use. */
6854 static void
6855 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6856 const char **relocs)
6858 enum mips_symbol_type symbol_type;
6859 const char *p;
6861 symbol_type = mips_classify_symbolic_expression (op, context);
6862 if (relocs[symbol_type] == 0)
6863 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6865 fputs (relocs[symbol_type], file);
6866 output_addr_const (file, mips_strip_unspec_address (op));
6867 for (p = relocs[symbol_type]; *p != 0; p++)
6868 if (*p == '(')
6869 fputc (')', file);
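/* Example of the output shape produced above: with mips_lo_relocs
   selected and OP equal to (const (plus (symbol_ref "foo") (const_int 4))),
   the function prints "%lo(foo+4)", emitting one closing parenthesis for
   each opening one in the relocation string, so "%hi(%neg(%gp_rel(" is
   closed with three parentheses.  */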
6872 /* Output address operand X to FILE. */
6874 void
6875 print_operand_address (FILE *file, rtx x)
6877 struct mips_address_info addr;
6879 if (mips_classify_address (&addr, x, word_mode, true))
6880 switch (addr.type)
6882 case ADDRESS_REG:
6883 print_operand (file, addr.offset, 0);
6884 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6885 return;
6887 case ADDRESS_LO_SUM:
6888 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6889 mips_lo_relocs);
6890 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6891 return;
6893 case ADDRESS_CONST_INT:
6894 output_addr_const (file, x);
6895 fprintf (file, "(%s)", reg_names[0]);
6896 return;
6898 case ADDRESS_SYMBOLIC:
6899 output_addr_const (file, mips_strip_unspec_address (x));
6900 return;
6902 gcc_unreachable ();
6905 /* When using assembler macros, keep track of all of the small-data externs
6906 so that mips_file_end can emit the appropriate declarations for them.
6908 In most cases it would be safe (though pointless) to emit .externs
6909 for other symbols too. One exception is when an object is within
6910 the -G limit but declared by the user to be in a section other
6911 than .sbss or .sdata. */
6913 void
6914 mips_output_external (FILE *file, tree decl, const char *name)
6916 default_elf_asm_output_external (file, decl, name);
6918 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6919 set in order to avoid putting out names that are never really
6920 used. */
6921 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6923 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6925 fputs ("\t.extern\t", file);
6926 assemble_name (file, name);
6927 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6928 int_size_in_bytes (TREE_TYPE (decl)));
6930 else if (TARGET_IRIX
6931 && mips_abi == ABI_32
6932 && TREE_CODE (decl) == FUNCTION_DECL)
6934 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6935 `.global name .text' directive for every used but
6936 undefined function. If we don't, the linker may perform
6937 an optimization (skipping over the insns that set $gp)
6938 when it is unsafe. */
6939 fputs ("\t.globl ", file);
6940 assemble_name (file, name);
6941 fputs (" .text\n", file);
6946 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6947 put out a MIPS ECOFF file and a stab. */
6949 void
6950 mips_output_filename (FILE *stream, const char *name)
6953 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6954 directives. */
6955 if (write_symbols == DWARF2_DEBUG)
6956 return;
6957 else if (mips_output_filename_first_time)
6959 mips_output_filename_first_time = 0;
6960 num_source_filenames += 1;
6961 current_function_file = name;
6962 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6963 output_quoted_string (stream, name);
6964 putc ('\n', stream);
6967 /* If we are emitting stabs, let dbxout.c handle this (except for
6968 the mips_output_filename_first_time case). */
6969 else if (write_symbols == DBX_DEBUG)
6970 return;
6972 else if (name != current_function_file
6973 && strcmp (name, current_function_file) != 0)
6975 num_source_filenames += 1;
6976 current_function_file = name;
6977 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6978 output_quoted_string (stream, name);
6979 putc ('\n', stream);
6983 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6984 that should be written before the opening quote, such as "\t.ascii\t"
6985 for real string data or "\t# " for a comment. */
6987 void
6988 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6989 const char *prefix)
6991 size_t i;
6992 int cur_pos = 17;
6993 register const unsigned char *string =
6994 (const unsigned char *)string_param;
6996 fprintf (stream, "%s\"", prefix);
6997 for (i = 0; i < len; i++)
6999 register int c = string[i];
7001 if (ISPRINT (c))
7003 if (c == '\\' || c == '\"')
7005 putc ('\\', stream);
7006 cur_pos++;
7008 putc (c, stream);
7009 cur_pos++;
7011 else
7013 fprintf (stream, "\\%03o", c);
7014 cur_pos += 4;
7017 if (cur_pos > 72 && i+1 < len)
7019 cur_pos = 17;
7020 fprintf (stream, "\"\n%s\"", prefix);
7023 fprintf (stream, "\"\n");
7026 /* Implement TARGET_ASM_FILE_START. */
7028 static void
7029 mips_file_start (void)
7031 default_file_start ();
7033 if (!TARGET_IRIX)
7035 /* Generate a special section to describe the ABI switches used to
7036 produce the resultant binary. This used to be done by the assembler
7037 setting bits in the ELF header's flags field, but we have run out of
7038 bits. GDB needs this information in order to be able to correctly
7039 debug these binaries. See the function mips_gdbarch_init() in
7040 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7041 causes unnecessary IRIX 6 ld warnings. */
7042 const char * abi_string = NULL;
7044 switch (mips_abi)
7046 case ABI_32: abi_string = "abi32"; break;
7047 case ABI_N32: abi_string = "abiN32"; break;
7048 case ABI_64: abi_string = "abi64"; break;
7049 case ABI_O64: abi_string = "abiO64"; break;
7050 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
7051 default:
7052 gcc_unreachable ();
7054 /* Note - we use fprintf directly rather than calling switch_to_section
7055 because in this way we can avoid creating an allocated section. We
7056 do not want this section to take up any space in the running
7057 executable. */
7058 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7059 abi_string);
7061 /* There is no ELF header flag to distinguish long32 forms of the
7062 EABI from long64 forms. Emit a special section to help tools
7063 such as GDB. Do the same for o64, which is sometimes used with
7064 -mlong64. */
7065 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7066 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7067 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7069 #ifdef HAVE_AS_GNU_ATTRIBUTE
7070 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7071 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7072 #endif
7075 /* Generate the pseudo ops that System V.4 wants. */
7076 if (TARGET_ABICALLS)
7077 fprintf (asm_out_file, "\t.abicalls\n");
7079 if (flag_verbose_asm)
7080 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7081 ASM_COMMENT_START,
7082 mips_section_threshold, mips_arch_info->name, mips_isa);
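/* For reference, the prologue emitted above looks roughly like this for
   -mabi=32 with a hard-float, double-float configuration that uses
   abicalls (details vary with the configuration and assembler):

        .section .mdebug.abi32
        .previous
        .gnu_attribute 4, 1
        .abicalls

   The .previous directives restore the previously selected section so
   that the describing sections do not receive any subsequent output.  */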
7085 #ifdef BSS_SECTION_ASM_OP
7086 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
7087 in the use of sbss. */
7089 void
7090 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
7091 unsigned HOST_WIDE_INT size, int align)
7093 extern tree last_assemble_variable_decl;
7095 if (mips_in_small_data_p (decl))
7096 switch_to_section (get_named_section (NULL, ".sbss", 0));
7097 else
7098 switch_to_section (bss_section);
7099 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7100 last_assemble_variable_decl = decl;
7101 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
7102 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
7104 #endif
7106 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7107 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7109 void
7110 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7111 unsigned HOST_WIDE_INT size,
7112 unsigned int align)
7114 /* If the target wants uninitialized const declarations in
7115 .rdata then don't put them in .comm. */
7116 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7117 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7118 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7120 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7121 targetm.asm_out.globalize_label (stream, name);
7123 switch_to_section (readonly_data_section);
7124 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7125 mips_declare_object (stream, name, "",
7126 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7127 size);
7129 else
7130 mips_declare_common_object (stream, name, "\n\t.comm\t",
7131 size, align, true);
7134 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7135 NAME is the name of the object and ALIGN is the required alignment
7136 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7137 alignment argument. */
7139 void
7140 mips_declare_common_object (FILE *stream, const char *name,
7141 const char *init_string,
7142 unsigned HOST_WIDE_INT size,
7143 unsigned int align, bool takes_alignment_p)
7145 if (!takes_alignment_p)
7147 size += (align / BITS_PER_UNIT) - 1;
7148 size -= size % (align / BITS_PER_UNIT);
7149 mips_declare_object (stream, name, init_string,
7150 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7152 else
7153 mips_declare_object (stream, name, init_string,
7154 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7155 size, align / BITS_PER_UNIT);
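/* Example outputs from the helper above, for a hypothetical symbol:
   with TAKES_ALIGNMENT_P true it emits something like

        .comm   buf,64,8

   whereas without the alignment argument the size is first rounded up
   to a multiple of the alignment and only the "name,size" form is
   printed.  */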
7158 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7159 macros, mark the symbol as written so that mips_file_end won't emit an
7160 .extern for it. STREAM is the output file, NAME is the name of the
7161 symbol, INIT_STRING is the string that should be written before the
7162 symbol and FINAL_STRING is the string that should be written after it.
7163 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7165 void
7166 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7167 const char *final_string, ...)
7169 va_list ap;
7171 fputs (init_string, stream);
7172 assemble_name (stream, name);
7173 va_start (ap, final_string);
7174 vfprintf (stream, final_string, ap);
7175 va_end (ap);
7177 if (!TARGET_EXPLICIT_RELOCS)
7179 tree name_tree = get_identifier (name);
7180 TREE_ASM_WRITTEN (name_tree) = 1;
7184 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7185 extern int size_directive_output;
7187 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7188 definitions except that it uses mips_declare_object() to emit the label. */
7190 void
7191 mips_declare_object_name (FILE *stream, const char *name,
7192 tree decl ATTRIBUTE_UNUSED)
7194 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7195 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7196 #endif
7198 size_directive_output = 0;
7199 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7201 HOST_WIDE_INT size;
7203 size_directive_output = 1;
7204 size = int_size_in_bytes (TREE_TYPE (decl));
7205 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7208 mips_declare_object (stream, name, "", ":\n");
7211 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7213 void
7214 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7216 const char *name;
7218 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7219 if (!flag_inhibit_size_directive
7220 && DECL_SIZE (decl) != 0
7221 && !at_end && top_level
7222 && DECL_INITIAL (decl) == error_mark_node
7223 && !size_directive_output)
7225 HOST_WIDE_INT size;
7227 size_directive_output = 1;
7228 size = int_size_in_bytes (TREE_TYPE (decl));
7229 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7232 #endif
7234 /* Return true if X in context CONTEXT is a small data address that can
7235 be rewritten as a LO_SUM. */
7237 static bool
7238 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
7240 enum mips_symbol_type symbol_type;
7242 return (TARGET_EXPLICIT_RELOCS
7243 && mips_symbolic_constant_p (x, context, &symbol_type)
7244 && symbol_type == SYMBOL_GP_RELATIVE);
7248 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7249 containing MEM, or null if none. */
7251 static int
7252 mips_small_data_pattern_1 (rtx *loc, void *data)
7254 enum mips_symbol_context context;
7256 if (GET_CODE (*loc) == LO_SUM)
7257 return -1;
7259 if (MEM_P (*loc))
7261 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
7262 return 1;
7263 return -1;
7266 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7267 return mips_rewrite_small_data_p (*loc, context);
7270 /* Return true if OP refers to small data symbols directly, not through
7271 a LO_SUM. */
7273 bool
7274 mips_small_data_pattern_p (rtx op)
7276 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
7279 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7280 DATA is the containing MEM, or null if none. */
7282 static int
7283 mips_rewrite_small_data_1 (rtx *loc, void *data)
7285 enum mips_symbol_context context;
7287 if (MEM_P (*loc))
7289 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
7290 return -1;
7293 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7294 if (mips_rewrite_small_data_p (*loc, context))
7295 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
7297 if (GET_CODE (*loc) == LO_SUM)
7298 return -1;
7300 return 0;
7303 /* If possible, rewrite OP so that it refers to small data using
7304 explicit relocations. */
7306 rtx
7307 mips_rewrite_small_data (rtx op)
7309 op = copy_insn (op);
7310 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
7311 return op;
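/* As an illustration of the rewrite performed above: assuming explicit
   relocs are enabled and "x" lives in the small data section, an operand
   containing (mem (symbol_ref "x")) is rewritten so that the address
   becomes (lo_sum $gp (symbol_ref "x")), which is later printed as a
   %gp_rel access, e.g. "lw $2,%gp_rel(x)($28)"; the register and mnemonic
   shown here are purely illustrative.  */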
7314 /* Return true if the current function has an insn that implicitly
7315 refers to $gp. */
7317 static bool
7318 mips_function_has_gp_insn (void)
7320 /* Don't bother rechecking if we found one last time. */
7321 if (!cfun->machine->has_gp_insn_p)
7323 rtx insn;
7325 push_topmost_sequence ();
7326 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7327 if (INSN_P (insn)
7328 && GET_CODE (PATTERN (insn)) != USE
7329 && GET_CODE (PATTERN (insn)) != CLOBBER
7330 && (get_attr_got (insn) != GOT_UNSET
7331 || small_data_pattern (PATTERN (insn), VOIDmode)))
7332 break;
7333 pop_topmost_sequence ();
7335 cfun->machine->has_gp_insn_p = (insn != 0);
7337 return cfun->machine->has_gp_insn_p;
7341 /* Return the register that should be used as the global pointer
7342 within this function. Return 0 if the function doesn't need
7343 a global pointer. */
7345 static unsigned int
7346 mips_global_pointer (void)
7348 unsigned int regno;
7350 /* $gp is always available unless we're using a GOT. */
7351 if (!TARGET_USE_GOT)
7352 return GLOBAL_POINTER_REGNUM;
7354 /* We must always provide $gp when it is used implicitly. */
7355 if (!TARGET_EXPLICIT_RELOCS)
7356 return GLOBAL_POINTER_REGNUM;
7358 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7359 a valid gp. */
7360 if (current_function_profile)
7361 return GLOBAL_POINTER_REGNUM;
7363 /* If the function has a nonlocal goto, $gp must hold the correct
7364 global pointer for the target function. */
7365 if (current_function_has_nonlocal_goto)
7366 return GLOBAL_POINTER_REGNUM;
7368 /* If the gp is never referenced, there's no need to initialize it.
7369 Note that reload can sometimes introduce constant pool references
7370 into a function that otherwise didn't need them. For example,
7371 suppose we have an instruction like:
7373 (set (reg:DF R1) (float:DF (reg:SI R2)))
7375 If R2 turns out to be a constant such as 1, the instruction may have a
7376 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7377 using this constant if R2 doesn't get allocated to a register.
7379 In cases like these, reload will have added the constant to the pool
7380 but no instruction will yet refer to it. */
7381 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7382 && !current_function_uses_const_pool
7383 && !mips_function_has_gp_insn ())
7384 return 0;
7386 /* We need a global pointer, but perhaps we can use a call-clobbered
7387 register instead of $gp. */
7388 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7389 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7390 if (!df_regs_ever_live_p (regno)
7391 && call_really_used_regs[regno]
7392 && !fixed_regs[regno]
7393 && regno != PIC_FUNCTION_ADDR_REGNUM)
7394 return regno;
7396 return GLOBAL_POINTER_REGNUM;
7400 /* Return true if the function return value MODE will get returned in a
7401 floating-point register. */
7403 static bool
7404 mips_return_mode_in_fpr_p (enum machine_mode mode)
7406 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
7407 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
7408 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7409 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
7412 /* Return a two-character string representing a function floating-point
7413 return mode, used to name MIPS16 function stubs. */
7415 static const char *
7416 mips16_call_stub_mode_suffix (enum machine_mode mode)
7418 if (mode == SFmode)
7419 return "sf";
7420 else if (mode == DFmode)
7421 return "df";
7422 else if (mode == SCmode)
7423 return "sc";
7424 else if (mode == DCmode)
7425 return "dc";
7426 else if (mode == V2SFmode)
7427 return "df";
7428 else
7429 gcc_unreachable ();
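/* These two-character suffixes are combined with the "__mips16_ret_"
   prefix in mips_expand_epilogue below to form helper names such as
   "__mips16_ret_df" for a DFmode return value.  */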
7432 /* Return true if the current function returns its value in a floating-point
7433 register in MIPS16 mode. */
7435 static bool
7436 mips16_cfun_returns_in_fpr_p (void)
7438 tree return_type = DECL_RESULT (current_function_decl);
7439 return (TARGET_MIPS16
7440 && TARGET_HARD_FLOAT_ABI
7441 && !aggregate_value_p (return_type, current_function_decl)
7442 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7446 /* Return true if the current function must save REGNO. */
7448 static bool
7449 mips_save_reg_p (unsigned int regno)
7451 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7452 if we have not chosen a call-clobbered substitute. */
7453 if (regno == GLOBAL_POINTER_REGNUM)
7454 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7456 /* Check call-saved registers. */
7457 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7458 && !call_really_used_regs[regno])
7459 return true;
7461 /* Save both registers in an FPR pair if either one is used. This is
7462 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7463 register to be used without the even register. */
7464 if (FP_REG_P (regno)
7465 && MAX_FPRS_PER_FMT == 2
7466 && df_regs_ever_live_p (regno + 1)
7467 && !call_really_used_regs[regno + 1])
7468 return true;
7470 /* We need to save the old frame pointer before setting up a new one. */
7471 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7472 return true;
7474 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7475 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7476 return true;
7478 /* We need to save the incoming return address if it is ever clobbered
7479 within the function, if __builtin_eh_return is being used to set a
7480 different return address, or if a stub is being used to return a
7481 value in FPRs. */
7482 if (regno == GP_REG_FIRST + 31
7483 && (df_regs_ever_live_p (regno)
7484 || current_function_calls_eh_return
7485 || mips16_cfun_returns_in_fpr_p ()))
7486 return true;
7488 return false;
7491 /* Return the index of the lowest X in the range [0, SIZE) for which
7492 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7494 static unsigned int
7495 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7496 unsigned int size)
7498 unsigned int i;
7500 for (i = 0; i < size; i++)
7501 if (BITSET_P (mask, regs[i]))
7502 break;
7504 return i;
7507 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7508 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7509 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7510 the same is true for all indexes in (X, SIZE), i.e. for every index after X. */
7512 static void
7513 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7514 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7516 unsigned int i;
7518 i = mips16e_find_first_register (*mask_ptr, regs, size);
7519 for (i++; i < size; i++)
7520 if (!BITSET_P (*mask_ptr, regs[i]))
7522 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7523 *mask_ptr |= 1 << regs[i];
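/* Worked example, assuming REGS is the decreasing $s8...$s2 list used for
   MIPS16e SAVE/RESTORE: if *MASK_PTR initially contains only $s3, the
   first hit is $s3, so the loop above also adds $s2 to *MASK_PTR and grows
   *GP_REG_SIZE_PTR by one word, while the earlier entries $s4...$s8 are
   left untouched.  */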
7527 /* Return the bytes needed to compute the frame pointer from the current
7528 stack pointer. SIZE is the size (in bytes) of the local variables.
7530 MIPS stack frames look like:
7532 Before call After call
7533 high +-----------------------+ +-----------------------+
7534 mem. | | | |
7535 | caller's temps. | | caller's temps. |
7536 | | | |
7537 +-----------------------+ +-----------------------+
7538 | | | |
7539 | arguments on stack. | | arguments on stack. |
7540 | | | |
7541 +-----------------------+ +-----------------------+
7542 | 4 words to save | | 4 words to save |
7543 | arguments passed | | arguments passed |
7544 | in registers, even | | in registers, even |
7545 | if not passed. | | if not passed. |
7546 SP->+-----------------------+ VFP->+-----------------------+
7547 (VFP = SP+fp_sp_offset) | |\
7548 | fp register save | | fp_reg_size
7549 | |/
7550 SP+gp_sp_offset->+-----------------------+
7551 /| |\
7552 | | gp register save | | gp_reg_size
7553 gp_reg_rounded | | |/
7554 | +-----------------------+
7555 \| alignment padding |
7556 +-----------------------+
7557 | |\
7558 | local variables | | var_size
7559 | |/
7560 +-----------------------+
7562 | alloca allocations |
7564 +-----------------------+
7565 /| |
7566 cprestore_size | | GP save for V.4 abi |
7567 \| |
7568 +-----------------------+
7569 | |\
7570 | arguments on stack | |
7571 | | |
7572 +-----------------------+ |
7573 | 4 words to save | | args_size
7574 | arguments passed | |
7575 | in registers, even | |
7576 | if not passed. | |
7577 low | (TARGET_OLDABI only) |/
7578 memory SP->+-----------------------+
7582 HOST_WIDE_INT
7583 compute_frame_size (HOST_WIDE_INT size)
7585 unsigned int regno;
7586 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7587 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7588 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7589 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7590 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7591 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7592 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7593 unsigned int mask; /* mask of saved gp registers */
7594 unsigned int fmask; /* mask of saved fp registers */
7596 cfun->machine->global_pointer = mips_global_pointer ();
7598 gp_reg_size = 0;
7599 fp_reg_size = 0;
7600 mask = 0;
7601 fmask = 0;
7602 var_size = MIPS_STACK_ALIGN (size);
7603 args_size = current_function_outgoing_args_size;
7604 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7606 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7607 functions. If the function has local variables, we're committed
7608 to allocating it anyway. Otherwise reclaim it here. */
7609 if (var_size == 0 && current_function_is_leaf)
7610 cprestore_size = args_size = 0;
7612 /* The MIPS 3.0 linker does not like functions that dynamically
7613 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7614 looks like we are trying to create a second frame pointer to the
7615 function, so allocate some stack space to make it happy. */
7617 if (args_size == 0 && current_function_calls_alloca)
7618 args_size = 4 * UNITS_PER_WORD;
7620 total_size = var_size + args_size + cprestore_size;
7622 /* Calculate space needed for gp registers. */
7623 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7624 if (mips_save_reg_p (regno))
7626 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7627 mask |= 1 << (regno - GP_REG_FIRST);
7630 /* We need to restore these for the handler. */
7631 if (current_function_calls_eh_return)
7633 unsigned int i;
7634 for (i = 0; ; ++i)
7636 regno = EH_RETURN_DATA_REGNO (i);
7637 if (regno == INVALID_REGNUM)
7638 break;
7639 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7640 mask |= 1 << (regno - GP_REG_FIRST);
7644 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7645 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7646 save all later registers too. */
7647 if (GENERATE_MIPS16E_SAVE_RESTORE)
7649 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7650 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7651 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7652 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7655 /* This loop must iterate over the same space as its companion in
7656 mips_for_each_saved_reg. */
7657 if (TARGET_HARD_FLOAT)
7658 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7659 regno >= FP_REG_FIRST;
7660 regno -= MAX_FPRS_PER_FMT)
7661 if (mips_save_reg_p (regno))
7663 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7664 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7667 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7668 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7670 /* Add in the space required for saving incoming register arguments. */
7671 total_size += current_function_pretend_args_size;
7672 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7674 /* Save other computed information. */
7675 cfun->machine->frame.total_size = total_size;
7676 cfun->machine->frame.var_size = var_size;
7677 cfun->machine->frame.args_size = args_size;
7678 cfun->machine->frame.cprestore_size = cprestore_size;
7679 cfun->machine->frame.gp_reg_size = gp_reg_size;
7680 cfun->machine->frame.fp_reg_size = fp_reg_size;
7681 cfun->machine->frame.mask = mask;
7682 cfun->machine->frame.fmask = fmask;
7683 cfun->machine->frame.initialized = reload_completed;
7684 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7685 cfun->machine->frame.num_fp = (fp_reg_size
7686 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7688 if (mask)
7690 HOST_WIDE_INT offset;
7692 if (GENERATE_MIPS16E_SAVE_RESTORE)
7693 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7694 to be aligned at the high end with any padding at the low end.
7695 It is only safe to use this calculation for o32, where we never
7696 have pretend arguments, and where any varargs will be saved in
7697 the caller-allocated area rather than at the top of the frame. */
7698 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7699 else
7700 offset = (args_size + cprestore_size + var_size
7701 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7702 cfun->machine->frame.gp_sp_offset = offset;
7703 cfun->machine->frame.gp_save_offset = offset - total_size;
7705 else
7707 cfun->machine->frame.gp_sp_offset = 0;
7708 cfun->machine->frame.gp_save_offset = 0;
7711 if (fmask)
7713 HOST_WIDE_INT offset;
7715 offset = (args_size + cprestore_size + var_size
7716 + gp_reg_rounded + fp_reg_size
7717 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7718 cfun->machine->frame.fp_sp_offset = offset;
7719 cfun->machine->frame.fp_save_offset = offset - total_size;
7721 else
7723 cfun->machine->frame.fp_sp_offset = 0;
7724 cfun->machine->frame.fp_save_offset = 0;
7727 /* Ok, we're done. */
7728 return total_size;
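/* Worked example with purely illustrative numbers, assuming o32, no
   abicalls and GENERATE_MIPS16E_SAVE_RESTORE false: for a function with
   var_size = 16, args_size = 16, cprestore_size = 0 and only $31 and $16
   saved (gp_reg_size = 8, no FPRs), the code above computes
   total_size = 16 + 16 + 0 + 8 = 40,
   gp_sp_offset = 16 + 0 + 16 + 8 - 4 = 36 and
   gp_save_offset = 36 - 40 = -4.  */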
7731 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7732 pointer or argument pointer. TO is either the stack pointer or
7733 hard frame pointer. */
7735 HOST_WIDE_INT
7736 mips_initial_elimination_offset (int from, int to)
7738 HOST_WIDE_INT offset;
7740 compute_frame_size (get_frame_size ());
7742 /* Set OFFSET to the offset from the stack pointer. */
7743 switch (from)
7745 case FRAME_POINTER_REGNUM:
7746 offset = 0;
7747 break;
7749 case ARG_POINTER_REGNUM:
7750 offset = (cfun->machine->frame.total_size
7751 - current_function_pretend_args_size);
7752 break;
7754 default:
7755 gcc_unreachable ();
7758 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7759 offset -= cfun->machine->frame.args_size;
7761 return offset;
7764 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7765 back to a previous frame. */
7766 rtx
7767 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7769 if (count != 0)
7770 return const0_rtx;
7772 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7775 /* Use FN to save or restore register REGNO. MODE is the register's
7776 mode and OFFSET is the offset of its save slot from the current
7777 stack pointer. */
7779 static void
7780 mips_save_restore_reg (enum machine_mode mode, int regno,
7781 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7783 rtx mem;
7785 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7787 fn (gen_rtx_REG (mode, regno), mem);
7791 /* Call FN for each register that is saved by the current function.
7792 SP_OFFSET is the offset of the current stack pointer from the start
7793 of the frame. */
7795 static void
7796 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7798 enum machine_mode fpr_mode;
7799 HOST_WIDE_INT offset;
7800 int regno;
7802 /* Save registers starting from high to low. The debuggers prefer that at
7803 least the return register be stored at func+4, and saving from high to low
7804 also means that we do not need a nop in the epilogue if at least one register
7805 is restored in addition to the return address. */
7806 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7807 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7808 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7810 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7811 offset -= GET_MODE_SIZE (gpr_mode);
7814 /* This loop must iterate over the same space as its companion in
7815 compute_frame_size. */
7816 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7817 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7818 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7819 regno >= FP_REG_FIRST;
7820 regno -= MAX_FPRS_PER_FMT)
7821 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7823 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7824 offset -= GET_MODE_SIZE (fpr_mode);
7828 /* If we're generating n32 or n64 abicalls, and the current function
7829 does not use $28 as its global pointer, emit a cplocal directive.
7830 Use pic_offset_table_rtx as the argument to the directive. */
7832 static void
7833 mips_output_cplocal (void)
7835 if (!TARGET_EXPLICIT_RELOCS
7836 && cfun->machine->global_pointer > 0
7837 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7838 output_asm_insn (".cplocal %+", 0);
7841 /* Return the style of GP load sequence that is being used for the
7842 current function. */
7844 enum mips_loadgp_style
7845 mips_current_loadgp_style (void)
7847 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7848 return LOADGP_NONE;
7850 if (TARGET_RTP_PIC)
7851 return LOADGP_RTP;
7853 if (TARGET_ABSOLUTE_ABICALLS)
7854 return LOADGP_ABSOLUTE;
7856 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7859 /* The __gnu_local_gp symbol. */
7861 static GTY(()) rtx mips_gnu_local_gp;
7863 /* If we're generating n32 or n64 abicalls, emit instructions
7864 to set up the global pointer. */
7866 static void
7867 mips_emit_loadgp (void)
7869 rtx addr, offset, incoming_address, base, index;
7871 switch (mips_current_loadgp_style ())
7873 case LOADGP_ABSOLUTE:
7874 if (mips_gnu_local_gp == NULL)
7876 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7877 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7879 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7880 break;
7882 case LOADGP_NEWABI:
7883 addr = XEXP (DECL_RTL (current_function_decl), 0);
7884 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7885 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7886 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7887 if (!TARGET_EXPLICIT_RELOCS)
7888 emit_insn (gen_loadgp_blockage ());
7889 break;
7891 case LOADGP_RTP:
7892 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7893 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7894 emit_insn (gen_loadgp_rtp (base, index));
7895 if (!TARGET_EXPLICIT_RELOCS)
7896 emit_insn (gen_loadgp_blockage ());
7897 break;
7899 default:
7900 break;
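/* For illustration, in the LOADGP_ABSOLUTE case the loadgp_absolute
   pattern typically expands to a sequence along the lines of
       lui	$gp,%hi(__gnu_local_gp)
       addiu	$gp,$gp,%lo(__gnu_local_gp)
   (with daddiu on 64-bit targets); the authoritative output is the
   corresponding pattern in mips.md.  */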
7904 /* Set up the stack and frame (if desired) for the function. */
7906 static void
7907 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7909 const char *fnname;
7910 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7912 #ifdef SDB_DEBUGGING_INFO
7913 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7914 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7915 #endif
7917 /* In mips16 mode, we may need to generate a 32-bit stub to handle
7918 floating point arguments. The linker will arrange for any 32-bit
7919 functions to call this stub, which will then jump to the 16-bit
7920 function proper. */
7921 if (TARGET_MIPS16
7922 && TARGET_HARD_FLOAT_ABI
7923 && current_function_args_info.fp_code != 0)
7924 build_mips16_function_stub (file);
7926 /* Select the mips16 mode for this function. */
7927 if (TARGET_MIPS16)
7928 fprintf (file, "\t.set\tmips16\n");
7929 else
7930 fprintf (file, "\t.set\tnomips16\n");
7932 if (!FUNCTION_NAME_ALREADY_DECLARED)
7934 /* Get the function name the same way that toplev.c does before calling
7935 assemble_start_function. This is needed so that the name used here
7936 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7937 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7939 if (!flag_inhibit_size_directive)
7941 fputs ("\t.ent\t", file);
7942 assemble_name (file, fnname);
7943 fputs ("\n", file);
7946 assemble_name (file, fnname);
7947 fputs (":\n", file);
7950 /* Stop mips_file_end from treating this function as external. */
7951 if (TARGET_IRIX && mips_abi == ABI_32)
7952 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7954 if (!flag_inhibit_size_directive)
7956 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7957 fprintf (file,
7958 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7959 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7960 ", args= " HOST_WIDE_INT_PRINT_DEC
7961 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7962 (reg_names[(frame_pointer_needed)
7963 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7964 ((frame_pointer_needed && TARGET_MIPS16)
7965 ? tsize - cfun->machine->frame.args_size
7966 : tsize),
7967 reg_names[GP_REG_FIRST + 31],
7968 cfun->machine->frame.var_size,
7969 cfun->machine->frame.num_gp,
7970 cfun->machine->frame.num_fp,
7971 cfun->machine->frame.args_size,
7972 cfun->machine->frame.cprestore_size);
7974 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7975 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7976 cfun->machine->frame.mask,
7977 cfun->machine->frame.gp_save_offset);
7978 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7979 cfun->machine->frame.fmask,
7980 cfun->machine->frame.fp_save_offset);
7982 /* Require:
7983 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7984 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7987 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7989 /* Handle the initialization of $gp for SVR4 PIC. */
7990 if (!cfun->machine->all_noreorder_p)
7991 output_asm_insn ("%(.cpload\t%^%)", 0);
7992 else
7993 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7995 else if (cfun->machine->all_noreorder_p)
7996 output_asm_insn ("%(%<", 0);
7998 /* Tell the assembler which register we're using as the global
7999 pointer. This is needed for thunks, since they can use either
8000 explicit relocs or assembler macros. */
8001 mips_output_cplocal ();
8004 /* Make the last instruction frame related and note that it performs
8005 the operation described by FRAME_PATTERN. */
8007 static void
8008 mips_set_frame_expr (rtx frame_pattern)
8010 rtx insn;
8012 insn = get_last_insn ();
8013 RTX_FRAME_RELATED_P (insn) = 1;
8014 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8015 frame_pattern,
8016 REG_NOTES (insn));
8020 /* Return a frame-related rtx that stores REG at MEM.
8021 REG must be a single register. */
8023 static rtx
8024 mips_frame_set (rtx mem, rtx reg)
8026 rtx set;
8028 /* If we're saving the return address register and the dwarf return
8029 address column differs from the hard register number, adjust the
8030 note reg to refer to the former. */
8031 if (REGNO (reg) == GP_REG_FIRST + 31
8032 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
8033 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
8035 set = gen_rtx_SET (VOIDmode, mem, reg);
8036 RTX_FRAME_RELATED_P (set) = 1;
8038 return set;
8042 /* Save register REG to MEM. Make the instruction frame-related. */
8044 static void
8045 mips_save_reg (rtx reg, rtx mem)
8047 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8049 rtx x1, x2;
8051 if (mips_split_64bit_move_p (mem, reg))
8052 mips_split_64bit_move (mem, reg);
8053 else
8054 mips_emit_move (mem, reg);
8056 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8057 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8058 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8060 else
8062 if (TARGET_MIPS16
8063 && REGNO (reg) != GP_REG_FIRST + 31
8064 && !M16_REG_P (REGNO (reg)))
8066 /* Save a non-mips16 register by moving it through a temporary.
8067 We don't need to do this for $31 since there's a special
8068 instruction for it. */
8069 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8070 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8072 else
8073 mips_emit_move (mem, reg);
8075 mips_set_frame_expr (mips_frame_set (mem, reg));
8079 /* Return a move between register REGNO and memory location SP + OFFSET.
8080 Make the move a load if RESTORE_P, otherwise make it a frame-related
8081 store. */
8083 static rtx
8084 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8085 unsigned int regno)
8087 rtx reg, mem;
8089 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8090 reg = gen_rtx_REG (SImode, regno);
8091 return (restore_p
8092 ? gen_rtx_SET (VOIDmode, reg, mem)
8093 : mips_frame_set (mem, reg));
8096 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8097 The instruction must:
8099 - Allocate or deallocate SIZE bytes in total; SIZE is known
8100 to be nonzero.
8102 - Save or restore as many registers in *MASK_PTR as possible.
8103 The instruction saves the first registers at the top of the
8104 allocated area, with the other registers below it.
8106 - Save NARGS argument registers above the allocated area.
8108 (NARGS is always zero if RESTORE_P.)
8110 The SAVE and RESTORE instructions cannot save and restore all general
8111 registers, so there may be some registers left over for the caller to
8112 handle. Destructively modify *MASK_PTR so that it contains the registers
8113 that still need to be saved or restored. The caller can save these
8114 registers in the memory immediately below *OFFSET_PTR, which is a
8115 byte offset from the bottom of the allocated stack area. */
8117 static rtx
8118 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8119 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8120 HOST_WIDE_INT size)
8122 rtx pattern, set;
8123 HOST_WIDE_INT offset, top_offset;
8124 unsigned int i, regno;
8125 int n;
8127 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
8129 /* Calculate the number of elements in the PARALLEL. We need one element
8130 for the stack adjustment, one for each argument register save, and one
8131 for each additional register move. */
8132 n = 1 + nargs;
8133 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8134 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8135 n++;
8137 /* Create the final PARALLEL. */
8138 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8139 n = 0;
8141 /* Add the stack pointer adjustment. */
8142 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8143 plus_constant (stack_pointer_rtx,
8144 restore_p ? size : -size));
8145 RTX_FRAME_RELATED_P (set) = 1;
8146 XVECEXP (pattern, 0, n++) = set;
8148 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8149 top_offset = restore_p ? size : 0;
8151 /* Save the arguments. */
8152 for (i = 0; i < nargs; i++)
8154 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
8155 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8156 XVECEXP (pattern, 0, n++) = set;
8159 /* Then fill in the other register moves. */
8160 offset = top_offset;
8161 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8163 regno = mips16e_save_restore_regs[i];
8164 if (BITSET_P (*mask_ptr, regno))
8166 offset -= UNITS_PER_WORD;
8167 set = mips16e_save_restore_reg (restore_p, offset, regno);
8168 XVECEXP (pattern, 0, n++) = set;
8169 *mask_ptr &= ~(1 << regno);
8173 /* Tell the caller what offset it should use for the remaining registers. */
8174 *offset_ptr = size + (offset - top_offset);
8176 gcc_assert (n == XVECLEN (pattern, 0));
8178 return pattern;
8181 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8182 pointer. Return true if PATTERN matches the kind of instruction
8183 generated by mips16e_build_save_restore. If INFO is nonnull,
8184 initialize it when returning true. */
8186 bool
8187 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8188 struct mips16e_save_restore_info *info)
8190 unsigned int i, nargs, mask;
8191 HOST_WIDE_INT top_offset, save_offset, offset, extra;
8192 rtx set, reg, mem, base;
8193 int n;
8195 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8196 return false;
8198 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8199 top_offset = adjust > 0 ? adjust : 0;
8201 /* Interpret all other members of the PARALLEL. */
8202 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
8203 mask = 0;
8204 nargs = 0;
8205 i = 0;
8206 for (n = 1; n < XVECLEN (pattern, 0); n++)
8208 /* Check that we have a SET. */
8209 set = XVECEXP (pattern, 0, n);
8210 if (GET_CODE (set) != SET)
8211 return false;
8213 /* Check that the SET is a load (if restoring) or a store
8214 (if saving). */
8215 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8216 if (!MEM_P (mem))
8217 return false;
8219 /* Check that the address is the sum of the stack pointer and a
8220 possibly-zero constant offset. */
8221 mips_split_plus (XEXP (mem, 0), &base, &offset);
8222 if (base != stack_pointer_rtx)
8223 return false;
8225 /* Check that SET's other operand is a register. */
8226 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8227 if (!REG_P (reg))
8228 return false;
8230 /* Check for argument saves. */
8231 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
8232 && REGNO (reg) == GP_ARG_FIRST + nargs)
8233 nargs++;
8234 else if (offset == save_offset)
8236 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8237 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8238 return false;
8240 mask |= 1 << REGNO (reg);
8241 save_offset -= GET_MODE_SIZE (gpr_mode);
8243 else
8244 return false;
8247 /* Check that the restrictions on register ranges are met. */
8248 extra = 0;
8249 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8250 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8251 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8252 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8253 if (extra != 0)
8254 return false;
8256 /* Make sure that the topmost argument register is not saved twice.
8257 The checks above ensure that the same is then true for the other
8258 argument registers. */
8259 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8260 return false;
8262 /* Pass back information, if requested. */
8263 if (info)
8265 info->nargs = nargs;
8266 info->mask = mask;
8267 info->size = (adjust > 0 ? adjust : -adjust);
8270 return true;
8273 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8274 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8275 the null terminator. */
8277 static char *
8278 mips16e_add_register_range (char *s, unsigned int min_reg,
8279 unsigned int max_reg)
8281 if (min_reg != max_reg)
8282 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8283 else
8284 s += sprintf (s, ",%s", reg_names[min_reg]);
8285 return s;
8288 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8289 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8291 const char *
8292 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8294 static char buffer[300];
8296 struct mips16e_save_restore_info info;
8297 unsigned int i, end;
8298 char *s;
8300 /* Parse the pattern. */
8301 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8302 gcc_unreachable ();
8304 /* Add the mnemonic. */
8305 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8306 s += strlen (s);
8308 /* Save the arguments. */
8309 if (info.nargs > 1)
8310 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8311 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8312 else if (info.nargs == 1)
8313 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8315 /* Emit the amount of stack space to allocate or deallocate. */
8316 s += sprintf (s, "%d", (int) info.size);
8318 /* Save or restore $16. */
8319 if (BITSET_P (info.mask, 16))
8320 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8322 /* Save or restore $17. */
8323 if (BITSET_P (info.mask, 17))
8324 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8326 /* Save or restore registers in the range $s2...$s8, which
8327 mips16e_s2_s8_regs lists in decreasing order. Note that this
8328 is a software register range; the hardware registers are not
8329 numbered consecutively. */
8330 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8331 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8332 if (i < end)
8333 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8334 mips16e_s2_s8_regs[i]);
8336 /* Save or restore registers in the range $a0...$a3. */
8337 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8338 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8339 if (i < end)
8340 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8341 mips16e_a0_a3_regs[end - 1]);
8343 /* Save or restore $31. */
8344 if (BITSET_P (info.mask, 31))
8345 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8347 return buffer;
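/* For example, assuming the default numeric register names, a pattern
   with info.nargs == 2, info.size == 40 and a mask containing $16, $17
   and $31 produces the string "save $4-$5,40,$16,$17,$31" (with a tab
   after the mnemonic).  */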
8350 /* Return a simplified form of X using the register values in REG_VALUES.
8351 REG_VALUES[R] is the last value assigned to hard register R, or null
8352 if R has not been modified.
8354 This function is rather limited, but is good enough for our purposes. */
8356 static rtx
8357 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8359 rtx x0, x1;
8361 x = avoid_constant_pool_reference (x);
8363 if (UNARY_P (x))
8365 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8366 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8367 x0, GET_MODE (XEXP (x, 0)));
8370 if (ARITHMETIC_P (x))
8372 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8373 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8374 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8377 if (REG_P (x)
8378 && reg_values[REGNO (x)]
8379 && !rtx_unstable_p (reg_values[REGNO (x)]))
8380 return reg_values[REGNO (x)];
8382 return x;
8385 /* Return true if (set DEST SRC) stores an argument register into its
8386 caller-allocated save slot, storing the number of that argument
8387 register in *REGNO_PTR if so. REG_VALUES is as for
8388 mips16e_collect_propagate_value. */
8390 static bool
8391 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8392 unsigned int *regno_ptr)
8394 unsigned int argno, regno;
8395 HOST_WIDE_INT offset, required_offset;
8396 rtx addr, base;
8398 /* Check that this is a word-mode store. */
8399 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8400 return false;
8402 /* Check that the register being saved is an unmodified argument
8403 register. */
8404 regno = REGNO (src);
8405 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
8406 return false;
8407 argno = regno - GP_ARG_FIRST;
8409 /* Check whether the address is an appropriate stack pointer or
8410 frame pointer access. The frame pointer is offset from the
8411 stack pointer by the size of the outgoing arguments. */
8412 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8413 mips_split_plus (addr, &base, &offset);
8414 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8415 if (base == hard_frame_pointer_rtx)
8416 required_offset -= cfun->machine->frame.args_size;
8417 else if (base != stack_pointer_rtx)
8418 return false;
8419 if (offset != required_offset)
8420 return false;
8422 *regno_ptr = regno;
8423 return true;
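/* For example, a prologue store of the form
       (set (mem:SI (plus (reg $sp) (const_int frame.total_size)))
            (reg:SI $4))
   is recognized as the caller-allocated save of the first argument
   register, so *REGNO_PTR is set to GP_ARG_FIRST.  */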
8426 /* A subroutine of mips_expand_prologue, called only when generating
8427 MIPS16e SAVE instructions. Search the start of the function for any
8428 instructions that save argument registers into their caller-allocated
8429 save slots. Delete such instructions and return a value N such that
8430 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8431 instructions redundant. */
8433 static unsigned int
8434 mips16e_collect_argument_saves (void)
8436 rtx reg_values[FIRST_PSEUDO_REGISTER];
8437 rtx insn, next, set, dest, src;
8438 unsigned int nargs, regno;
8440 push_topmost_sequence ();
8441 nargs = 0;
8442 memset (reg_values, 0, sizeof (reg_values));
8443 for (insn = get_insns (); insn; insn = next)
8445 next = NEXT_INSN (insn);
8446 if (NOTE_P (insn))
8447 continue;
8449 if (!INSN_P (insn))
8450 break;
8452 set = PATTERN (insn);
8453 if (GET_CODE (set) != SET)
8454 break;
8456 dest = SET_DEST (set);
8457 src = SET_SRC (set);
8458 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
8460 if (!BITSET_P (cfun->machine->frame.mask, regno))
8462 delete_insn (insn);
8463 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8466 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8467 reg_values[REGNO (dest)]
8468 = mips16e_collect_propagate_value (src, reg_values);
8469 else
8470 break;
8472 pop_topmost_sequence ();
8474 return nargs;
8477 /* Expand the prologue into a bunch of separate insns. */
8479 void
8480 mips_expand_prologue (void)
8482 HOST_WIDE_INT size;
8483 unsigned int nargs;
8484 rtx insn;
8486 if (cfun->machine->global_pointer > 0)
8487 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8489 size = compute_frame_size (get_frame_size ());
8491 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8492 bytes beforehand; this is enough to cover the register save area
8493 without going out of range. */
8494 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8496 HOST_WIDE_INT step1;
8498 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8500 if (GENERATE_MIPS16E_SAVE_RESTORE)
8502 HOST_WIDE_INT offset;
8503 unsigned int mask, regno;
8505 /* Try to merge argument stores into the save instruction. */
8506 nargs = mips16e_collect_argument_saves ();
8508 /* Build the save instruction. */
8509 mask = cfun->machine->frame.mask;
8510 insn = mips16e_build_save_restore (false, &mask, &offset,
8511 nargs, step1);
8512 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8513 size -= step1;
8515 /* Check if we need to save other registers. */
8516 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8517 if (BITSET_P (mask, regno - GP_REG_FIRST))
8519 offset -= GET_MODE_SIZE (gpr_mode);
8520 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8523 else
8525 insn = gen_add3_insn (stack_pointer_rtx,
8526 stack_pointer_rtx,
8527 GEN_INT (-step1));
8528 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8529 size -= step1;
8530 mips_for_each_saved_reg (size, mips_save_reg);
8534 /* Allocate the rest of the frame. */
8535 if (size > 0)
8537 if (SMALL_OPERAND (-size))
8538 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8539 stack_pointer_rtx,
8540 GEN_INT (-size)))) = 1;
8541 else
8543 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8544 if (TARGET_MIPS16)
8546 /* There are no instructions to add or subtract registers
8547 from the stack pointer, so use the frame pointer as a
8548 temporary. We should always be using a frame pointer
8549 in this case anyway. */
8550 gcc_assert (frame_pointer_needed);
8551 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8552 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8553 hard_frame_pointer_rtx,
8554 MIPS_PROLOGUE_TEMP (Pmode)));
8555 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8557 else
8558 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8559 stack_pointer_rtx,
8560 MIPS_PROLOGUE_TEMP (Pmode)));
8562 /* Describe the combined effect of the previous instructions. */
8563 mips_set_frame_expr
8564 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8565 plus_constant (stack_pointer_rtx, -size)));
8569 /* Set up the frame pointer, if we're using one. In mips16 code,
8570 we point the frame pointer ahead of the outgoing argument area.
8571 This should allow more variables & incoming arguments to be
8572 accessed with unextended instructions. */
8573 if (frame_pointer_needed)
8575 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8577 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8578 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8579 RTX_FRAME_RELATED_P
8580 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8581 stack_pointer_rtx,
8582 offset))) = 1;
8583 else
8585 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8586 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8587 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8588 hard_frame_pointer_rtx,
8589 MIPS_PROLOGUE_TEMP (Pmode)));
8590 mips_set_frame_expr
8591 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8592 plus_constant (stack_pointer_rtx,
8593 cfun->machine->frame.args_size)));
8596 else
8597 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8598 stack_pointer_rtx)) = 1;
8601 mips_emit_loadgp ();
8603 /* If generating o32/o64 abicalls, save $gp on the stack. */
8604 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8605 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8607 /* If we are profiling, make sure no instructions are scheduled before
8608 the call to mcount. */
8610 if (current_function_profile)
8611 emit_insn (gen_blockage ());
8614 /* Do any necessary cleanup after a function to restore stack, frame,
8615 and regs. */
8617 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8619 static void
8620 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8621 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8623 /* Reinstate the normal $gp. */
8624 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8625 mips_output_cplocal ();
8627 if (cfun->machine->all_noreorder_p)
8629 /* Avoid using %>%) since it adds excess whitespace. */
8630 output_asm_insn (".set\tmacro", 0);
8631 output_asm_insn (".set\treorder", 0);
8632 set_noreorder = set_nomacro = 0;
8635 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8637 const char *fnname;
8639 /* Get the function name the same way that toplev.c does before calling
8640 assemble_start_function. This is needed so that the name used here
8641 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8642 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8643 fputs ("\t.end\t", file);
8644 assemble_name (file, fnname);
8645 fputs ("\n", file);
8649 /* Emit instructions to restore register REG from slot MEM. */
8651 static void
8652 mips_restore_reg (rtx reg, rtx mem)
8654 /* There's no mips16 instruction to load $31 directly. Load into
8655 $7 instead and adjust the return insn appropriately. */
8656 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8657 reg = gen_rtx_REG (GET_MODE (reg), 7);
8659 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8661 /* Can't restore directly; move through a temporary. */
8662 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8663 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8665 else
8666 mips_emit_move (reg, mem);
8670 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8671 if this epilogue precedes a sibling call, false if it is for a normal
8672 "epilogue" pattern. */
8674 void
8675 mips_expand_epilogue (int sibcall_p)
8677 HOST_WIDE_INT step1, step2;
8678 rtx base, target;
8680 if (!sibcall_p && mips_can_use_return_insn ())
8682 emit_jump_insn (gen_return ());
8683 return;
8686 /* In mips16 mode, if the return value should go into a floating-point
8687 register, we need to call a helper routine to copy it over. */
8688 if (mips16_cfun_returns_in_fpr_p ())
8690 char *name;
8691 rtx func;
8692 rtx insn;
8693 rtx retval;
8694 rtx call;
8695 tree id;
8696 tree return_type;
8697 enum machine_mode return_mode;
8699 return_type = DECL_RESULT (current_function_decl);
8700 return_mode = DECL_MODE (return_type);
8702 name = ACONCAT (("__mips16_ret_",
8703 mips16_call_stub_mode_suffix (return_mode),
8704 NULL));
8705 id = get_identifier (name);
8706 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8707 retval = gen_rtx_REG (return_mode, GP_RETURN);
8708 call = gen_call_value_internal (retval, func, const0_rtx);
8709 insn = emit_call_insn (call);
8710 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8713 /* Split the frame into two. STEP1 is the amount of stack we should
8714 deallocate before restoring the registers. STEP2 is the amount we
8715 should deallocate afterwards.
8717 Start off by assuming that no registers need to be restored. */
8718 step1 = cfun->machine->frame.total_size;
8719 step2 = 0;
8721 /* Work out which register holds the frame address. Account for the
8722 frame pointer offset used by mips16 code. */
8723 if (!frame_pointer_needed)
8724 base = stack_pointer_rtx;
8725 else
8727 base = hard_frame_pointer_rtx;
8728 if (TARGET_MIPS16)
8729 step1 -= cfun->machine->frame.args_size;
8732 /* If we need to restore registers, deallocate as much stack as
8733 possible in the second step without going out of range. */
8734 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8736 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8737 step1 -= step2;
8740 /* Set TARGET to BASE + STEP1. */
8741 target = base;
8742 if (step1 > 0)
8744 rtx adjust;
8746 /* Get an rtx for STEP1 that we can add to BASE. */
8747 adjust = GEN_INT (step1);
8748 if (!SMALL_OPERAND (step1))
8750 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8751 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8754 /* Normal mode code can copy the result straight into $sp. */
8755 if (!TARGET_MIPS16)
8756 target = stack_pointer_rtx;
8758 emit_insn (gen_add3_insn (target, base, adjust));
8761 /* Copy TARGET into the stack pointer. */
8762 if (target != stack_pointer_rtx)
8763 mips_emit_move (stack_pointer_rtx, target);
8765 /* If we're using addressing macros, $gp is implicitly used by all
8766 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8767 from the stack. */
8768 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8769 emit_insn (gen_blockage ());
8771 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8773 unsigned int regno, mask;
8774 HOST_WIDE_INT offset;
8775 rtx restore;
8777 /* Generate the restore instruction. */
8778 mask = cfun->machine->frame.mask;
8779 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8781 /* Restore any other registers manually. */
8782 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8783 if (BITSET_P (mask, regno - GP_REG_FIRST))
8785 offset -= GET_MODE_SIZE (gpr_mode);
8786 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8789 /* Restore the remaining registers and deallocate the final bit
8790 of the frame. */
8791 emit_insn (restore);
8793 else
8795 /* Restore the registers. */
8796 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8797 mips_restore_reg);
8799 /* Deallocate the final bit of the frame. */
8800 if (step2 > 0)
8801 emit_insn (gen_add3_insn (stack_pointer_rtx,
8802 stack_pointer_rtx,
8803 GEN_INT (step2)));
8806 /* Add in the __builtin_eh_return stack adjustment. We need to
8807 use a temporary in mips16 code. */
8808 if (current_function_calls_eh_return)
8810 if (TARGET_MIPS16)
8812 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8813 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8814 MIPS_EPILOGUE_TEMP (Pmode),
8815 EH_RETURN_STACKADJ_RTX));
8816 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8818 else
8819 emit_insn (gen_add3_insn (stack_pointer_rtx,
8820 stack_pointer_rtx,
8821 EH_RETURN_STACKADJ_RTX));
8824 if (!sibcall_p)
8826 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8827 path will restore the return address into $7 rather than $31. */
8828 if (TARGET_MIPS16
8829 && !GENERATE_MIPS16E_SAVE_RESTORE
8830 && (cfun->machine->frame.mask & RA_MASK) != 0)
8831 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8832 GP_REG_FIRST + 7)));
8833 else
8834 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8835 GP_REG_FIRST + 31)));
8839 /* Return nonzero if this function is known to have a null epilogue.
8840 This allows the optimizer to omit jumps to jumps if no stack
8841 was created. */
8843 int
8844 mips_can_use_return_insn (void)
8846 if (! reload_completed)
8847 return 0;
8849 if (df_regs_ever_live_p (31) || current_function_profile)
8850 return 0;
8852 /* In mips16 mode, a function that returns a floating point value
8853 needs to arrange to copy the return value into the floating point
8854 registers. */
8855 if (mips16_cfun_returns_in_fpr_p ())
8856 return 0;
8858 if (cfun->machine->frame.initialized)
8859 return cfun->machine->frame.total_size == 0;
8861 return compute_frame_size (get_frame_size ()) == 0;
8864 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8865 in order to avoid duplicating too much logic from elsewhere. */
8867 static void
8868 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8869 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8870 tree function)
8872 rtx this, temp1, temp2, insn, fnaddr;
8873 bool use_sibcall_p;
8875 /* Pretend to be a post-reload pass while generating rtl. */
8876 reload_completed = 1;
8878 /* Mark the end of the (empty) prologue. */
8879 emit_note (NOTE_INSN_PROLOGUE_END);
8881 /* Determine if we can use a sibcall to call FUNCTION directly. */
8882 fnaddr = XEXP (DECL_RTL (function), 0);
8883 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
8884 && const_call_insn_operand (fnaddr, Pmode));
8886 /* Determine if we need to load FNADDR from the GOT. */
8887 if (!use_sibcall_p)
8888 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
8890 case SYMBOL_GOT_PAGE_OFST:
8891 case SYMBOL_GOT_DISP:
8892 /* Pick a global pointer. Use a call-clobbered register if
8893 TARGET_CALL_SAVED_GP. */
8894 cfun->machine->global_pointer =
8895 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8896 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8898 /* Set up the global pointer for n32 or n64 abicalls. */
8899 mips_emit_loadgp ();
8900 break;
8902 default:
8903 break;
8906 /* We need two temporary registers in some cases. */
8907 temp1 = gen_rtx_REG (Pmode, 2);
8908 temp2 = gen_rtx_REG (Pmode, 3);
8910 /* Find out which register contains the "this" pointer. */
8911 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8912 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8913 else
8914 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8916 /* Add DELTA to THIS. */
8917 if (delta != 0)
8919 rtx offset = GEN_INT (delta);
8920 if (!SMALL_OPERAND (delta))
8922 mips_emit_move (temp1, offset);
8923 offset = temp1;
8925 emit_insn (gen_add3_insn (this, this, offset));
8928 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8929 if (vcall_offset != 0)
8931 rtx addr;
8933 /* Set TEMP1 to *THIS. */
8934 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8936 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8937 addr = mips_add_offset (temp2, temp1, vcall_offset);
8939 /* Load the offset and add it to THIS. */
8940 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8941 emit_insn (gen_add3_insn (this, this, temp1));
8944 /* Jump to the target function. Use a sibcall if direct jumps are
8945 allowed, otherwise load the address into a register first. */
8946 if (use_sibcall_p)
8948 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8949 SIBLING_CALL_P (insn) = 1;
8951 else
8953 /* This is messy. gas treats "la $25,foo" as part of a call
8954 sequence and may allow a global "foo" to be lazily bound.
8955 The general move patterns therefore reject this combination.
8957 In this context, lazy binding would actually be OK
8958 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8959 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8960 We must therefore load the address via a temporary
8961 register if mips_dangerous_for_la25_p.
8963 If we jump to the temporary register rather than $25, the assembler
8964 can use the move insn to fill the jump's delay slot. */
8965 if (TARGET_USE_PIC_FN_ADDR_REG
8966 && !mips_dangerous_for_la25_p (fnaddr))
8967 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8968 mips_load_call_address (temp1, fnaddr, true);
8970 if (TARGET_USE_PIC_FN_ADDR_REG
8971 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8972 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8973 emit_jump_insn (gen_indirect_jump (temp1));
8976 /* Run just enough of rest_of_compilation. This sequence was
8977 "borrowed" from alpha.c. */
8978 insn = get_insns ();
8979 insn_locators_alloc ();
8980 split_all_insns_noflow ();
8981 mips16_lay_out_constants ();
8982 shorten_branches (insn);
8983 final_start_function (insn, file, 1);
8984 final (insn, file, 1);
8985 final_end_function ();
8987 /* Clean up the vars set above. Note that final_end_function resets
8988 the global pointer for us. */
8989 reload_completed = 0;
8992 /* Implement TARGET_SELECT_RTX_SECTION. */
8994 static section *
8995 mips_select_rtx_section (enum machine_mode mode, rtx x,
8996 unsigned HOST_WIDE_INT align)
8998 /* ??? Consider using mergeable small data sections. */
8999 if (mips_rtx_constant_in_small_data_p (mode))
9000 return get_named_section (NULL, ".sdata", 0);
9002 return default_elf_select_rtx_section (mode, x, align);
9005 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
9007 The complication here is that, with the combination TARGET_ABICALLS
9008 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
9009 therefore not be included in the read-only part of a DSO. Handle such
9010 cases by selecting a normal data section instead of a read-only one.
9011 The logic apes that in default_function_rodata_section. */
9013 static section *
9014 mips_function_rodata_section (tree decl)
9016 if (!TARGET_ABICALLS || TARGET_GPWORD)
9017 return default_function_rodata_section (decl);
9019 if (decl && DECL_SECTION_NAME (decl))
9021 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9022 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
9024 char *rname = ASTRDUP (name);
9025 rname[14] = 'd';
9026 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
9028 else if (flag_function_sections && flag_data_sections
9029 && strncmp (name, ".text.", 6) == 0)
9031 char *rname = ASTRDUP (name);
9032 memcpy (rname + 1, "data", 4);
9033 return get_section (rname, SECTION_WRITE, decl);
9036 return data_section;
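/* For example, a one-only function placed in ".gnu.linkonce.t.foo" gets
   its jump tables in ".gnu.linkonce.d.foo", and with -ffunction-sections
   -fdata-sections a function in ".text.foo" gets them in ".data.foo";
   anything else falls back to the generic data section.  */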
9039 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
9040 locally-defined objects go in a small data section. It also controls
9041 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
9042 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
9044 static bool
9045 mips_in_small_data_p (const_tree decl)
9047 HOST_WIDE_INT size;
9049 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
9050 return false;
9052 /* We don't yet generate small-data references for -mabicalls or
9053 VxWorks RTP code. See the related -G handling in override_options. */
9054 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
9055 return false;
9057 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
9059 const char *name;
9061 /* Reject anything that isn't in a known small-data section. */
9062 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9063 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
9064 return false;
9066 /* If a symbol is defined externally, the assembler will use the
9067 usual -G rules when deciding how to implement macros. */
9068 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
9069 return true;
9071 else if (TARGET_EMBEDDED_DATA)
9073 /* Don't put constants into the small data section: we want them
9074 to be in ROM rather than RAM. */
9075 if (TREE_CODE (decl) != VAR_DECL)
9076 return false;
9078 if (TREE_READONLY (decl)
9079 && !TREE_SIDE_EFFECTS (decl)
9080 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
9081 return false;
9084 /* Enforce -mlocal-sdata. */
9085 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
9086 return false;
9088 /* Enforce -mextern-sdata. */
9089 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
9091 if (DECL_EXTERNAL (decl))
9092 return false;
9093 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
9094 return false;
9097 size = int_size_in_bytes (TREE_TYPE (decl));
9098 return (size > 0 && size <= mips_section_threshold);
9101 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
9102 anchors for small data: the GP register acts as an anchor in that
9103 case. We also don't want to use them for PC-relative accesses,
9104 where the PC acts as an anchor. */
9106 static bool
9107 mips_use_anchors_for_symbol_p (const_rtx symbol)
9109 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
9111 case SYMBOL_PC_RELATIVE:
9112 case SYMBOL_GP_RELATIVE:
9113 return false;
9115 default:
9116 return default_use_anchors_for_symbol_p (symbol);
9120 /* See whether VALTYPE is a record whose fields should be returned in
9121 floating-point registers. If so, return the number of fields and
9122 list them in FIELDS (which should have two elements). Return 0
9123 otherwise.
9125 For n32 & n64, a structure with one or two fields is returned in
9126 floating-point registers as long as every field has a floating-point
9127 type. */
9129 static int
9130 mips_fpr_return_fields (const_tree valtype, tree *fields)
9132 tree field;
9133 int i;
9135 if (!TARGET_NEWABI)
9136 return 0;
9138 if (TREE_CODE (valtype) != RECORD_TYPE)
9139 return 0;
9141 i = 0;
9142 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
9144 if (TREE_CODE (field) != FIELD_DECL)
9145 continue;
9147 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
9148 return 0;
9150 if (i == 2)
9151 return 0;
9153 fields[i++] = field;
9155 return i;
9159 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
9160 a value in the most significant part of $2/$3 if:
9162 - the target is big-endian;
9164 - the value has a structure or union type (we generalize this to
9165 cover aggregates from other languages too); and
9167 - the structure is not returned in floating-point registers. */
9169 static bool
9170 mips_return_in_msb (const_tree valtype)
9172 tree fields[2];
9174 return (TARGET_NEWABI
9175 && TARGET_BIG_ENDIAN
9176 && AGGREGATE_TYPE_P (valtype)
9177 && mips_fpr_return_fields (valtype, fields) == 0);
9181 /* Return a composite value in a pair of floating-point registers.
9182 MODE1 and OFFSET1 are the mode and byte offset for the first value,
9183 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
9184 complete value.
9186 For n32 & n64, $f0 always holds the first value and $f2 the second.
9187 Otherwise the values are packed together as closely as possible. */
9189 static rtx
9190 mips_return_fpr_pair (enum machine_mode mode,
9191 enum machine_mode mode1, HOST_WIDE_INT offset1,
9192 enum machine_mode mode2, HOST_WIDE_INT offset2)
9194 int inc;
9196 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
9197 return gen_rtx_PARALLEL
9198 (mode,
9199 gen_rtvec (2,
9200 gen_rtx_EXPR_LIST (VOIDmode,
9201 gen_rtx_REG (mode1, FP_RETURN),
9202 GEN_INT (offset1)),
9203 gen_rtx_EXPR_LIST (VOIDmode,
9204 gen_rtx_REG (mode2, FP_RETURN + inc),
9205 GEN_INT (offset2))));
9210 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9211 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9212 VALTYPE is null and MODE is the mode of the return value. */
9214 rtx
9215 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
9216 enum machine_mode mode)
9218 if (valtype)
9220 tree fields[2];
9221 int unsignedp;
9223 mode = TYPE_MODE (valtype);
9224 unsignedp = TYPE_UNSIGNED (valtype);
9226 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
9227 true, we must promote the mode just as PROMOTE_MODE does. */
9228 mode = promote_mode (valtype, mode, &unsignedp, 1);
9230 /* Handle structures whose fields are returned in $f0/$f2. */
9231 switch (mips_fpr_return_fields (valtype, fields))
9233 case 1:
9234 return gen_rtx_REG (mode, FP_RETURN);
9236 case 2:
9237 return mips_return_fpr_pair (mode,
9238 TYPE_MODE (TREE_TYPE (fields[0])),
9239 int_byte_position (fields[0]),
9240 TYPE_MODE (TREE_TYPE (fields[1])),
9241 int_byte_position (fields[1]));
9244 /* If a value is passed in the most significant part of a register, see
9245 whether we have to round the mode up to a whole number of words. */
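/* For example, a 5-byte structure on a 64-bit target is rounded up to
   DImode, so that its bytes occupy the most-significant end of $2.  */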
9246 if (mips_return_in_msb (valtype))
9248 HOST_WIDE_INT size = int_size_in_bytes (valtype);
9249 if (size % UNITS_PER_WORD != 0)
9251 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
9252 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
9256 /* For EABI, the class of return register depends entirely on MODE.
9257 For example, "struct { some_type x; }" and "union { some_type x; }"
9258 are returned in the same way as a bare "some_type" would be.
9259 Other ABIs only use FPRs for scalar, complex or vector types. */
9260 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
9261 return gen_rtx_REG (mode, GP_RETURN);
9264 if (!TARGET_MIPS16)
9266 /* Handle long doubles for n32 & n64. */
9267 if (mode == TFmode)
9268 return mips_return_fpr_pair (mode,
9269 DImode, 0,
9270 DImode, GET_MODE_SIZE (mode) / 2);
9272 if (mips_return_mode_in_fpr_p (mode))
9274 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9275 return mips_return_fpr_pair (mode,
9276 GET_MODE_INNER (mode), 0,
9277 GET_MODE_INNER (mode),
9278 GET_MODE_SIZE (mode) / 2);
9279 else
9280 return gen_rtx_REG (mode, FP_RETURN);
9284 return gen_rtx_REG (mode, GP_RETURN);
9287 /* Return nonzero when an argument must be passed by reference. */
9289 static bool
9290 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9291 enum machine_mode mode, const_tree type,
9292 bool named ATTRIBUTE_UNUSED)
9294 if (mips_abi == ABI_EABI)
9296 int size;
9298 /* ??? How should SCmode be handled? */
9299 if (mode == DImode || mode == DFmode
9300 || mode == DQmode || mode == UDQmode
9301 || mode == DAmode || mode == UDAmode)
9302 return 0;
9304 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
9305 return size == -1 || size > UNITS_PER_WORD;
9307 else
9309 /* If we have a variable-sized parameter, we have no choice. */
9310 return targetm.calls.must_pass_in_stack (mode, type);
9314 static bool
9315 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9316 enum machine_mode mode ATTRIBUTE_UNUSED,
9317 const_tree type ATTRIBUTE_UNUSED, bool named)
9319 return mips_abi == ABI_EABI && named;
9322 /* Return true if registers of class CLASS cannot change from mode FROM
9323 to mode TO. */
9325 bool
9326 mips_cannot_change_mode_class (enum machine_mode from,
9327 enum machine_mode to, enum reg_class class)
9329 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
9330 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
9332 if (TARGET_BIG_ENDIAN)
9334 /* When a multi-word value is stored in paired floating-point
9335 registers, the first register always holds the low word.
9336 We therefore can't allow FPRs to change between single-word
9337 and multi-word modes. */
9338 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
9339 return true;
9343 /* gcc assumes that each word of a multiword register can be accessed
9344 individually using SUBREGs. This is not true for floating-point
9345 registers if they are bigger than a word. */
9346 if (UNITS_PER_FPREG > UNITS_PER_WORD
9347 && GET_MODE_SIZE (from) > UNITS_PER_WORD
9348 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
9349 && reg_classes_intersect_p (FP_REGS, class))
9350 return true;
9352 /* Loading a 32-bit value into a 64-bit floating-point register
9353 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9354 We can't allow 64-bit float registers to change from SImode to
9355 a wider mode. */
9356 if (TARGET_64BIT
9357 && TARGET_FLOAT64
9358 && from == SImode
9359 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
9360 && reg_classes_intersect_p (FP_REGS, class))
9361 return true;
9363 return false;
9366 /* Return true if X should not be moved directly into register $25.
9367 We need this because many versions of GAS will treat "la $25,foo" as
9368 part of a call sequence and so allow a global "foo" to be lazily bound. */
9370 bool
9371 mips_dangerous_for_la25_p (rtx x)
9373 return (!TARGET_EXPLICIT_RELOCS
9374 && TARGET_USE_GOT
9375 && GET_CODE (x) == SYMBOL_REF
9376 && mips_global_symbol_p (x));
9379 /* Implement PREFERRED_RELOAD_CLASS. */
9381 enum reg_class
9382 mips_preferred_reload_class (rtx x, enum reg_class class)
9384 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9385 return LEA_REGS;
9387 if (TARGET_HARD_FLOAT
9388 && FLOAT_MODE_P (GET_MODE (x))
9389 && reg_class_subset_p (FP_REGS, class))
9390 return FP_REGS;
9392 if (reg_class_subset_p (GR_REGS, class))
9393 class = GR_REGS;
9395 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9396 class = M16_REGS;
9398 return class;
9401 /* This function returns the register class required for a secondary
9402 register when copying between one of the registers in CLASS, and X,
9403 using MODE. If IN_P is nonzero, the copy is going from X to the
9404 register, otherwise the register is the source. A return value of
9405 NO_REGS means that no secondary register is required. */
9407 enum reg_class
9408 mips_secondary_reload_class (enum reg_class class,
9409 enum machine_mode mode, rtx x, int in_p)
9411 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
9412 int regno = -1;
9413 int gp_reg_p;
9415 if (REG_P (x)|| GET_CODE (x) == SUBREG)
9416 regno = true_regnum (x);
9418 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
9420 if (mips_dangerous_for_la25_p (x))
9422 gr_regs = LEA_REGS;
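/* If CLASS includes $25, reload X through a register in LEA_REGS
   (which excludes $25) rather than loading it into $25 directly.  */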
9423 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
9424 return gr_regs;
9427 /* Copying from HI or LO to anywhere other than a general register
9428 requires a general register.
9429 This rule applies to both the original HI/LO pair and the new
9430 DSP accumulators. */
9431 if (reg_class_subset_p (class, ACC_REGS))
9433 if (TARGET_MIPS16 && in_p)
9435 /* We can't really copy to HI or LO at all in mips16 mode. */
9436 return M16_REGS;
9438 return gp_reg_p ? NO_REGS : gr_regs;
9440 if (ACC_REG_P (regno))
9442 if (TARGET_MIPS16 && ! in_p)
9444 /* We can't really copy to HI or LO at all in mips16 mode. */
9445 return M16_REGS;
9447 return class == gr_regs ? NO_REGS : gr_regs;
9450 /* We can only copy a value to a condition code register from a
9451 floating point register, and even then we require a scratch
9452 floating point register. We can only copy a value out of a
9453 condition code register into a general register. */
9454 if (class == ST_REGS)
9456 if (in_p)
9457 return FP_REGS;
9458 return gp_reg_p ? NO_REGS : gr_regs;
9460 if (ST_REG_P (regno))
9462 if (! in_p)
9463 return FP_REGS;
9464 return class == gr_regs ? NO_REGS : gr_regs;
9467 if (class == FP_REGS)
9469 if (MEM_P (x))
9471 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9472 return NO_REGS;
9474 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
9476 /* We can use the l.s and l.d macros to load floating-point
9477 constants. ??? For l.s, we could probably get better
9478 code by returning GR_REGS here. */
9479 return NO_REGS;
9481 else if (gp_reg_p || x == CONST0_RTX (mode))
9483 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9484 return NO_REGS;
9486 else if (FP_REG_P (regno))
9488 /* In this case we can use mov.s or mov.d. */
9489 return NO_REGS;
9491 else
9493 /* Otherwise, we need to reload through an integer register. */
9494 return gr_regs;
9498 /* In mips16 mode, going between memory and anything but M16_REGS
9499 requires an M16_REG. */
9500 if (TARGET_MIPS16)
9502 if (class != M16_REGS && class != M16_NA_REGS)
9504 if (gp_reg_p)
9505 return NO_REGS;
9506 return M16_REGS;
9508 if (! gp_reg_p)
9510 if (class == M16_REGS || class == M16_NA_REGS)
9511 return NO_REGS;
9512 return M16_REGS;
9516 return NO_REGS;
9519 /* Implement CLASS_MAX_NREGS.
9521 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9523 - ST_REGS always hold CCmode values, and CCmode values are
9524 considered to be 4 bytes wide.
9526 All other register classes are covered by UNITS_PER_WORD. Note that
9527 this is true even for unions of integer and float registers when the
9528 latter are smaller than the former. The only supported combination
9529 in which this occurs is -mgp64 -msingle-float, which has 64-bit
9530 words but 32-bit float registers. A word-based calculation is correct
9531 in that case since -msingle-float disallows multi-FPR values. */
9533 int
9534 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9535 enum machine_mode mode)
9537 if (class == ST_REGS)
9538 return (GET_MODE_SIZE (mode) + 3) / 4;
9539 else if (class == FP_REGS)
9540 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9541 else
9542 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9545 static bool
9546 mips_valid_pointer_mode (enum machine_mode mode)
9548 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9551 /* Target hook for vector_mode_supported_p. */
9553 static bool
9554 mips_vector_mode_supported_p (enum machine_mode mode)
9556 switch (mode)
9558 case V2SFmode:
9559 return TARGET_PAIRED_SINGLE_FLOAT;
9561 case V2HImode:
9562 case V4QImode:
9563 case V2HQmode:
9564 case V2UHQmode:
9565 case V2HAmode:
9566 case V2UHAmode:
9567 case V4QQmode:
9568 case V4UQQmode:
9569 return TARGET_DSP;
9571 default:
9572 return false;
9576 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9578 static bool
9579 mips_scalar_mode_supported_p (enum machine_mode mode)
9581 if (ALL_FIXED_POINT_MODE_P (mode)
9582 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9583 return true;
9585 return default_scalar_mode_supported_p (mode);
9588 /* If we can access small data directly (using gp-relative relocation
9589 operators), return the small data pointer; otherwise return null.
9591 For each mips16 function which refers to GP relative symbols, we
9592 use a pseudo register, initialized at the start of the function, to
9593 hold the $gp value. */
9595 static rtx
9596 mips16_gp_pseudo_reg (void)
9598 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9599 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9601 /* Don't initialize the pseudo register if we are being called from
9602 the tree optimizers' cost-calculation routines. */
9603 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9604 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9606 rtx insn, scan;
9608 /* We want to initialize this to a value which gcc will believe
9609 is constant. */
9610 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9612 push_topmost_sequence ();
9613 /* We need to emit the initialization after the FUNCTION_BEG
9614 note, so that it will be integrated. */
9615 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9616 if (NOTE_P (scan)
9617 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9618 break;
9619 if (scan == NULL_RTX)
9620 scan = get_insns ();
9621 insn = emit_insn_after (insn, scan);
9622 pop_topmost_sequence ();
9624 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9627 return cfun->machine->mips16_gp_pseudo_rtx;
9630 /* Write out code to move floating point arguments in or out of
9631 general registers. Output the instructions to FILE. FP_CODE is
9632 the code describing which arguments are present (see the comment at
9633 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9634 we are copying from the floating point registers. */
9636 static void
9637 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9639 const char *s;
9640 int gparg, fparg;
9641 unsigned int f;
9642 CUMULATIVE_ARGS cum;
9644 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9645 gcc_assert (TARGET_OLDABI);
9647 if (from_fp_p)
9648 s = "mfc1";
9649 else
9650 s = "mtc1";
9652 init_cumulative_args (&cum, NULL, NULL);
9654 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9656 enum machine_mode mode;
9657 struct mips_arg_info info;
9659 if ((f & 3) == 1)
9660 mode = SFmode;
9661 else if ((f & 3) == 2)
9662 mode = DFmode;
9663 else
9664 gcc_unreachable ();
9666 mips_arg_info (&cum, mode, NULL, true, &info);
9667 gparg = mips_arg_regno (&info, false);
9668 fparg = mips_arg_regno (&info, true);
9670 if (mode == SFmode)
9671 fprintf (file, "\t%s\t%s,%s\n", s,
9672 reg_names[gparg], reg_names[fparg]);
9673 else if (TARGET_64BIT)
9674 fprintf (file, "\td%s\t%s,%s\n", s,
9675 reg_names[gparg], reg_names[fparg]);
9676 else if (ISA_HAS_MXHC1)
9677 /* -mips32r2 -mfp64 */
9678 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9679 s,
9680 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9681 reg_names[fparg],
9682 from_fp_p ? "mfhc1" : "mthc1",
9683 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9684 reg_names[fparg]);
9685 else if (TARGET_BIG_ENDIAN)
9686 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9687 reg_names[gparg], reg_names[fparg + 1], s,
9688 reg_names[gparg + 1], reg_names[fparg]);
9689 else
9690 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9691 reg_names[gparg], reg_names[fparg], s,
9692 reg_names[gparg + 1], reg_names[fparg + 1]);
9694 function_arg_advance (&cum, mode, NULL, true);
9698 /* Build a mips16 function stub. This is used for functions which
9699 take arguments in the floating point registers. It is 32-bit code
9700 that moves the floating point args into the general registers, and
9701 then jumps to the 16-bit code. */
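/* For example, for a hypothetical function "foo" taking a single float
   argument under o32, the stub placed in section .mips16.fn.foo looks
   roughly like this (the exact registers depend on the argument list,
   and the .ent/.end markers are only emitted when the function name has
   not already been declared):

	.set	nomips16
	.ent	__fn_stub_foo
   __fn_stub_foo:
	.set	noreorder
	mfc1	$4,$f12
	.set	noat
	la	$1,foo
	jr	$1
	.set	at
	nop
	.set	reorder
	.end	__fn_stub_foo  */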
9703 static void
9704 build_mips16_function_stub (FILE *file)
9706 const char *fnname;
9707 char *secname, *stubname;
9708 tree stubid, stubdecl;
9709 int need_comma;
9710 unsigned int f;
9712 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9713 fnname = targetm.strip_name_encoding (fnname);
9714 secname = (char *) alloca (strlen (fnname) + 20);
9715 sprintf (secname, ".mips16.fn.%s", fnname);
9716 stubname = (char *) alloca (strlen (fnname) + 20);
9717 sprintf (stubname, "__fn_stub_%s", fnname);
9718 stubid = get_identifier (stubname);
9719 stubdecl = build_decl (FUNCTION_DECL, stubid,
9720 build_function_type (void_type_node, NULL_TREE));
9721 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9722 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9724 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9725 need_comma = 0;
9726 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9728 fprintf (file, "%s%s",
9729 need_comma ? ", " : "",
9730 (f & 3) == 1 ? "float" : "double");
9731 need_comma = 1;
9733 fprintf (file, ")\n");
9735 fprintf (file, "\t.set\tnomips16\n");
9736 switch_to_section (function_section (stubdecl));
9737 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9739 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9740 within a .ent, and we cannot emit another .ent. */
9741 if (!FUNCTION_NAME_ALREADY_DECLARED)
9743 fputs ("\t.ent\t", file);
9744 assemble_name (file, stubname);
9745 fputs ("\n", file);
9748 assemble_name (file, stubname);
9749 fputs (":\n", file);
9751 /* We don't want the assembler to insert any nops here. */
9752 fprintf (file, "\t.set\tnoreorder\n");
9754 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9756 fprintf (asm_out_file, "\t.set\tnoat\n");
9757 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9758 assemble_name (file, fnname);
9759 fprintf (file, "\n");
9760 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9761 fprintf (asm_out_file, "\t.set\tat\n");
9763 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9764 it with one of the mfc1 instructions, because the result is not
9765 available for one instruction, so if the very first instruction
9766 in the function refers to the register, it will see the wrong
9767 value. */
9768 fprintf (file, "\tnop\n");
9770 fprintf (file, "\t.set\treorder\n");
9772 if (!FUNCTION_NAME_ALREADY_DECLARED)
9774 fputs ("\t.end\t", file);
9775 assemble_name (file, stubname);
9776 fputs ("\n", file);
9779 switch_to_section (function_section (current_function_decl));
9782 /* We keep a list of functions for which we have already built stubs
9783 in build_mips16_call_stub. */
9785 struct mips16_stub
9787 struct mips16_stub *next;
9788 char *name;
9789 int fpret;
9792 static struct mips16_stub *mips16_stubs;
9794 /* Emit code to return a double value from a mips16 stub. GPREG is the
9795 first GP reg to use, FPREG is the first FP reg to use. */
9797 static void
9798 mips16_fpret_double (int gpreg, int fpreg)
9800 if (TARGET_64BIT)
9801 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9802 reg_names[gpreg], reg_names[fpreg]);
9803 else if (TARGET_FLOAT64)
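/* The value is in a single 64-bit FPR: mfc1 reads the low 32 bits
   and mfhc1 the high 32 bits.  */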
9805 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9806 reg_names[gpreg + WORDS_BIG_ENDIAN],
9807 reg_names[fpreg]);
9808 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9809 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9810 reg_names[fpreg]);
9812 else
9814 if (TARGET_BIG_ENDIAN)
9816 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9817 reg_names[gpreg + 0],
9818 reg_names[fpreg + 1]);
9819 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9820 reg_names[gpreg + 1],
9821 reg_names[fpreg + 0]);
9823 else
9825 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9826 reg_names[gpreg + 0],
9827 reg_names[fpreg + 0]);
9828 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9829 reg_names[gpreg + 1],
9830 reg_names[fpreg + 1]);
9835 /* Build a call stub for a mips16 call. A stub is needed if we are
9836 passing any floating point values which should go into the floating
9837 point registers. If we are, and the call turns out to be to a
9838 32-bit function, the stub will be used to move the values into the
9839 floating point registers before calling the 32-bit function. The
9840 linker will magically adjust the function call to either the 16-bit
9841 function or the 32-bit stub, depending upon where the function call
9842 is actually defined.
9844 Similarly, we need a stub if the return value might come back in a
9845 floating point register.
9847 RETVAL is the location of the return value, or null if this is
9848 a call rather than a call_value. FN is the address of the
9849 function and ARG_SIZE is the size of the arguments. FP_CODE
9850 is the code built by function_arg. This function returns a nonzero
9851 value if it builds the call instruction itself. */
9853 int
9854 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9856 int fpret = 0;
9857 const char *fnname;
9858 char *secname, *stubname;
9859 struct mips16_stub *l;
9860 tree stubid, stubdecl;
9861 int need_comma;
9862 unsigned int f;
9863 rtx insn;
9865 /* We don't need to do anything if we aren't in mips16 mode, or if
9866 we were invoked with the -msoft-float option. */
9867 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9868 return 0;
9870 /* Figure out whether the value might come back in a floating point
9871 register. */
9872 if (retval)
9873 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9875 /* We don't need to do anything if there were no floating point
9876 arguments and the value will not be returned in a floating point
9877 register. */
9878 if (fp_code == 0 && ! fpret)
9879 return 0;
9881 /* We don't need to do anything if this is a call to a special
9882 mips16 support function. */
9883 if (GET_CODE (fn) == SYMBOL_REF
9884 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9885 return 0;
9887 /* This code will only work for the o32 and o64 ABIs. The other ABIs
9888 require more sophisticated support. */
9889 gcc_assert (TARGET_OLDABI);
9891 /* If we're calling via a function pointer, then we must always call
9892 via a stub. There are magic stubs provided in libgcc.a for each
9893 of the required cases. Each of them expects the function address
9894 to arrive in register $2. */
9896 if (GET_CODE (fn) != SYMBOL_REF)
9898 char buf[30];
9899 tree id;
9900 rtx stub_fn, insn;
9902 /* ??? If this code is modified to support other ABIs, we need
9903 to handle PARALLEL return values here. */
9905 if (fpret)
9906 sprintf (buf, "__mips16_call_stub_%s_%d",
9907 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9908 fp_code);
9909 else
9910 sprintf (buf, "__mips16_call_stub_%d",
9911 fp_code);
9913 id = get_identifier (buf);
9914 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9916 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9918 if (retval == NULL_RTX)
9919 insn = gen_call_internal (stub_fn, arg_size);
9920 else
9921 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9922 insn = emit_call_insn (insn);
9924 /* Put the register usage information on the CALL. */
9925 CALL_INSN_FUNCTION_USAGE (insn) =
9926 gen_rtx_EXPR_LIST (VOIDmode,
9927 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9928 CALL_INSN_FUNCTION_USAGE (insn));
9930 /* If we are handling a floating point return value, we need to
9931 save $18 in the function prologue. Putting a note on the
9932 call will mean that df_regs_ever_live_p ($18) will be true if the
9933 call is not eliminated, and we can check that in the prologue
9934 code. */
9935 if (fpret)
9936 CALL_INSN_FUNCTION_USAGE (insn) =
9937 gen_rtx_EXPR_LIST (VOIDmode,
9938 gen_rtx_USE (VOIDmode,
9939 gen_rtx_REG (word_mode, 18)),
9940 CALL_INSN_FUNCTION_USAGE (insn));
9942 /* Return 1 to tell the caller that we've generated the call
9943 insn. */
9944 return 1;
9947 /* We know the function we are going to call. If we have already
9948 built a stub, we don't need to do anything further. */
9950 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
9951 for (l = mips16_stubs; l != NULL; l = l->next)
9952 if (strcmp (l->name, fnname) == 0)
9953 break;
9955 if (l == NULL)
9957 /* Build a special purpose stub. When the linker sees a
9958 function call in mips16 code, it will check where the target
9959 is defined. If the target is a 32-bit call, the linker will
9960 search for the section defined here. It can tell which
9961 symbol this section is associated with by looking at the
9962 relocation information (the name is unreliable, since this
9963 might be a static function). If such a section is found, the
9964 linker will redirect the call to the start of the magic
9965 section.
9967 If the function does not return a floating point value, the
9968 special stub section is named
9969 .mips16.call.FNNAME
9971 If the function does return a floating point value, the stub
9972 section is named
9973 .mips16.call.fp.FNNAME
9976 secname = (char *) alloca (strlen (fnname) + 40);
9977 sprintf (secname, ".mips16.call.%s%s",
9978 fpret ? "fp." : "",
9979 fnname);
9980 stubname = (char *) alloca (strlen (fnname) + 20);
9981 sprintf (stubname, "__call_stub_%s%s",
9982 fpret ? "fp_" : "",
9983 fnname);
9984 stubid = get_identifier (stubname);
9985 stubdecl = build_decl (FUNCTION_DECL, stubid,
9986 build_function_type (void_type_node, NULL_TREE));
9987 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9988 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9990 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9991 (fpret
9992 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9993 : ""),
9994 fnname);
9995 need_comma = 0;
9996 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9998 fprintf (asm_out_file, "%s%s",
9999 need_comma ? ", " : "",
10000 (f & 3) == 1 ? "float" : "double");
10001 need_comma = 1;
10003 fprintf (asm_out_file, ")\n");
10005 fprintf (asm_out_file, "\t.set\tnomips16\n");
10006 assemble_start_function (stubdecl, stubname);
10008 if (!FUNCTION_NAME_ALREADY_DECLARED)
10010 fputs ("\t.ent\t", asm_out_file);
10011 assemble_name (asm_out_file, stubname);
10012 fputs ("\n", asm_out_file);
10014 assemble_name (asm_out_file, stubname);
10015 fputs (":\n", asm_out_file);
10018 /* We build the stub code by hand. That's the only way we can
10019 do it, since we can't generate 32-bit code during a 16-bit
10020 compilation. */
10022 /* We don't want the assembler to insert any nops here. */
10023 fprintf (asm_out_file, "\t.set\tnoreorder\n");
10025 mips16_fp_args (asm_out_file, fp_code, 0);
10027 if (! fpret)
10029 fprintf (asm_out_file, "\t.set\tnoat\n");
10030 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
10031 fnname);
10032 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
10033 fprintf (asm_out_file, "\t.set\tat\n");
10034 /* Unfortunately, we can't fill the jump delay slot. We
10035 can't fill it with one of the mtc1 instructions, because the
10036 result is not available for one instruction, so if the
10037 very first instruction in the function refers to the
10038 register, it will see the wrong value. */
10039 fprintf (asm_out_file, "\tnop\n");
10041 else
10043 fprintf (asm_out_file, "\tmove\t%s,%s\n",
10044 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
10045 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
10046 /* As above, we can't fill the delay slot. */
10047 fprintf (asm_out_file, "\tnop\n");
10048 if (GET_MODE (retval) == SFmode)
10049 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10050 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
10051 else if (GET_MODE (retval) == SCmode)
10053 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10054 reg_names[GP_REG_FIRST + 2],
10055 reg_names[FP_REG_FIRST + 0]);
10056 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10057 reg_names[GP_REG_FIRST + 3],
10058 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
10060 else if (GET_MODE (retval) == DFmode
10061 || GET_MODE (retval) == V2SFmode)
10063 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
10065 else if (GET_MODE (retval) == DCmode)
10067 mips16_fpret_double (GP_REG_FIRST + 2,
10068 FP_REG_FIRST + 0);
10069 mips16_fpret_double (GP_REG_FIRST + 4,
10070 FP_REG_FIRST + MAX_FPRS_PER_FMT);
10072 else
10074 if (TARGET_BIG_ENDIAN)
10076 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10077 reg_names[GP_REG_FIRST + 2],
10078 reg_names[FP_REG_FIRST + 1]);
10079 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10080 reg_names[GP_REG_FIRST + 3],
10081 reg_names[FP_REG_FIRST + 0]);
10083 else
10085 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10086 reg_names[GP_REG_FIRST + 2],
10087 reg_names[FP_REG_FIRST + 0]);
10088 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10089 reg_names[GP_REG_FIRST + 3],
10090 reg_names[FP_REG_FIRST + 1]);
10093 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
10094 /* As above, we can't fill the delay slot. */
10095 fprintf (asm_out_file, "\tnop\n");
10098 fprintf (asm_out_file, "\t.set\treorder\n");
10100 #ifdef ASM_DECLARE_FUNCTION_SIZE
10101 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
10102 #endif
10104 if (!FUNCTION_NAME_ALREADY_DECLARED)
10106 fputs ("\t.end\t", asm_out_file);
10107 assemble_name (asm_out_file, stubname);
10108 fputs ("\n", asm_out_file);
10111 /* Record this stub. */
10112 l = (struct mips16_stub *) xmalloc (sizeof *l);
10113 l->name = xstrdup (fnname);
10114 l->fpret = fpret;
10115 l->next = mips16_stubs;
10116 mips16_stubs = l;
10119 /* If we expect a floating point return value, but we've built a
10120 stub which does not expect one, then we're in trouble. We can't
10121 use the existing stub, because it won't handle the floating point
10122 value. We can't build a new stub, because the linker won't know
10123 which stub to use for the various calls in this object file.
10124 Fortunately, this case is illegal, since it means that a function
10125 was declared in two different ways in a single compilation. */
10126 if (fpret && ! l->fpret)
10127 error ("cannot handle inconsistent calls to %qs", fnname);
10129 if (retval == NULL_RTX)
10130 insn = gen_call_internal_direct (fn, arg_size);
10131 else
10132 insn = gen_call_value_internal_direct (retval, fn, arg_size);
10133 insn = emit_call_insn (insn);
10135 /* If we are calling a stub which handles a floating point return
10136 value, we need to arrange to save $18 in the prologue. We do
10137 this by marking the function call as using the register. The
10138 prologue will later see that it is used, and emit code to save
10139 it. */
10140 if (l->fpret)
10141 CALL_INSN_FUNCTION_USAGE (insn) =
10142 gen_rtx_EXPR_LIST (VOIDmode,
10143 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
10144 CALL_INSN_FUNCTION_USAGE (insn));
10146 /* Return 1 to tell the caller that we've generated the call
10147 insn. */
10148 return 1;
10151 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10152 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10154 struct mips16_constant {
10155 struct mips16_constant *next;
10156 rtx value;
10157 rtx label;
10158 enum machine_mode mode;
10161 /* Information about an incomplete mips16 constant pool. FIRST is the
10162 first constant, HIGHEST_ADDRESS is the highest address that the first
10163 byte of the pool can have, and INSN_ADDRESS is the current instruction
10164 address. */
10166 struct mips16_constant_pool {
10167 struct mips16_constant *first;
10168 int highest_address;
10169 int insn_address;
10172 /* Add constant VALUE to POOL and return its label. MODE is the
10173 value's mode (used for CONST_INTs, etc.). */
10175 static rtx
10176 add_constant (struct mips16_constant_pool *pool,
10177 rtx value, enum machine_mode mode)
10179 struct mips16_constant **p, *c;
10180 bool first_of_size_p;
10182 /* See whether the constant is already in the pool. If so, return the
10183 existing label, otherwise leave P pointing to the place where the
10184 constant should be added.
10186 Keep the pool sorted in increasing order of mode size so that we can
10187 reduce the number of alignments needed. */
10188 first_of_size_p = true;
10189 for (p = &pool->first; *p != 0; p = &(*p)->next)
10191 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10192 return (*p)->label;
10193 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10194 break;
10195 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10196 first_of_size_p = false;
10199 /* In the worst case, the constant needed by the earliest instruction
10200 will end up at the end of the pool. The entire pool must then be
10201 accessible from that instruction.
10203 When adding the first constant, set the pool's highest address to
10204 the address of the first out-of-range byte. Adjust this address
10205 downwards each time a new constant is added. */
10206 if (pool->first == 0)
10207 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10208 is the address of the instruction with the lowest two bits clear.
10209 The base PC value for ld has the lowest three bits clear. Assume
10210 the worst case here. */
10211 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10212 pool->highest_address -= GET_MODE_SIZE (mode);
10213 if (first_of_size_p)
10214 /* Take into account the worst possible padding due to alignment. */
10215 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10217 /* Create a new entry. */
10218 c = (struct mips16_constant *) xmalloc (sizeof *c);
10219 c->value = value;
10220 c->mode = mode;
10221 c->label = gen_label_rtx ();
10222 c->next = *p;
10223 *p = c;
10225 return c->label;
10228 /* Output constant VALUE after instruction INSN and return the last
10229 instruction emitted. MODE is the mode of the constant. */
10231 static rtx
10232 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10234 switch (GET_MODE_CLASS (mode))
10236 case MODE_INT:
10238 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10239 return emit_insn_after (gen_consttable_int (value, size), insn);
10242 case MODE_FLOAT:
10243 return emit_insn_after (gen_consttable_float (value), insn);
10245 case MODE_VECTOR_FLOAT:
10246 case MODE_VECTOR_INT:
10248 int i;
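/* Output each element of the vector as a separate constant of the
   vector's inner mode.  */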
10249 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10250 insn = dump_constants_1 (GET_MODE_INNER (mode),
10251 CONST_VECTOR_ELT (value, i), insn);
10252 return insn;
10255 default:
10256 gcc_unreachable ();
10261 /* Dump out the constants in CONSTANTS after INSN. */
10263 static void
10264 dump_constants (struct mips16_constant *constants, rtx insn)
10266 struct mips16_constant *c, *next;
10267 int align;
10269 align = 0;
10270 for (c = constants; c != NULL; c = next)
10272 /* If necessary, increase the alignment of PC. */
10273 if (align < GET_MODE_SIZE (c->mode))
10275 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10276 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10278 align = GET_MODE_SIZE (c->mode);
10280 insn = emit_label_after (c->label, insn);
10281 insn = dump_constants_1 (c->mode, c->value, insn);
10283 next = c->next;
10284 free (c);
10287 emit_barrier_after (insn);
10290 /* Return the length of instruction INSN. */
10292 static int
10293 mips16_insn_length (rtx insn)
10295 if (JUMP_P (insn))
10297 rtx body = PATTERN (insn);
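/* For jump tables, the length is the number of entries multiplied by
   the size of one entry; ADDR_DIFF_VECs keep their entries in
   operand 1 rather than operand 0.  */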
10298 if (GET_CODE (body) == ADDR_VEC)
10299 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10300 if (GET_CODE (body) == ADDR_DIFF_VEC)
10301 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10303 return get_attr_length (insn);
10306 /* If *X is a symbolic constant that refers to the constant pool, add
10307 the constant to POOL and rewrite *X to use the constant's label. */
10309 static void
10310 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10312 rtx base, offset, label;
10314 split_const (*x, &base, &offset);
10315 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10317 label = add_constant (pool, get_pool_constant (base),
10318 get_pool_mode (base));
10319 base = gen_rtx_LABEL_REF (Pmode, label);
10320 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10324 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10325 INSN is the instruction we're rewriting and POOL points to the current
10326 constant pool. */
10327 struct mips16_rewrite_pool_refs_info {
10328 rtx insn;
10329 struct mips16_constant_pool *pool;
10332 /* Rewrite *X so that constant pool references refer to the constant's
10333 label instead. DATA points to a mips16_rewrite_pool_refs_info
10334 structure. */
10336 static int
10337 mips16_rewrite_pool_refs (rtx *x, void *data)
10339 struct mips16_rewrite_pool_refs_info *info = data;
10341 if (force_to_mem_operand (*x, Pmode))
10343 rtx mem = force_const_mem (GET_MODE (*x), *x);
10344 validate_change (info->insn, x, mem, false);
10347 if (MEM_P (*x))
10349 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10350 return -1;
10353 if (TARGET_MIPS16_TEXT_LOADS)
10354 mips16_rewrite_pool_constant (info->pool, x);
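/* Returning -1 tells for_each_rtx not to walk into the subexpressions
   of a CONST that has already been handled as a whole.  */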
10356 return GET_CODE (*x) == CONST ? -1 : 0;
10359 /* Build MIPS16 constant pools. */
10361 static void
10362 mips16_lay_out_constants (void)
10364 struct mips16_constant_pool pool;
10365 struct mips16_rewrite_pool_refs_info info;
10366 rtx insn, barrier;
10368 if (!TARGET_MIPS16_PCREL_LOADS)
10369 return;
10371 barrier = 0;
10372 memset (&pool, 0, sizeof (pool));
10373 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10375 /* Rewrite constant pool references in INSN. */
10376 if (INSN_P (insn))
10378 info.insn = insn;
10379 info.pool = &pool;
10380 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10383 pool.insn_address += mips16_insn_length (insn);
10385 if (pool.first != NULL)
10387 /* If there are no natural barriers between the first user of
10388 the pool and the highest acceptable address, we'll need to
10389 create a new instruction to jump around the constant pool.
10390 In the worst case, this instruction will be 4 bytes long.
10392 If it's too late to do this transformation after INSN,
10393 do it immediately before INSN. */
10394 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10396 rtx label, jump;
10398 label = gen_label_rtx ();
10400 jump = emit_jump_insn_before (gen_jump (label), insn);
10401 JUMP_LABEL (jump) = label;
10402 LABEL_NUSES (label) = 1;
10403 barrier = emit_barrier_after (jump);
10405 emit_label_after (label, barrier);
10406 pool.insn_address += 4;
10409 /* See whether the constant pool is now out of range of the first
10410 user. If so, output the constants after the previous barrier.
10411 Note that any instructions between BARRIER and INSN (inclusive)
10412 will use negative offsets to refer to the pool. */
10413 if (pool.insn_address > pool.highest_address)
10415 dump_constants (pool.first, barrier);
10416 pool.first = NULL;
10417 barrier = 0;
10419 else if (BARRIER_P (insn))
10420 barrier = insn;
10423 dump_constants (pool.first, get_last_insn ());
10426 /* A temporary variable used by for_each_rtx callbacks, etc. */
10427 static rtx mips_sim_insn;
10429 /* A structure representing the state of the processor pipeline.
10430 Used by the mips_sim_* family of functions. */
10431 struct mips_sim {
10432 /* The maximum number of instructions that can be issued in a cycle.
10433 (Caches mips_issue_rate.) */
10434 unsigned int issue_rate;
10436 /* The current simulation time. */
10437 unsigned int time;
10439 /* How many more instructions can be issued in the current cycle. */
10440 unsigned int insns_left;
10442 /* LAST_SET[X].INSN is the last instruction to set register X.
10443 LAST_SET[X].TIME is the time at which that instruction was issued.
10444 INSN is null if no instruction has yet set register X. */
10445 struct {
10446 rtx insn;
10447 unsigned int time;
10448 } last_set[FIRST_PSEUDO_REGISTER];
10450 /* The pipeline's current DFA state. */
10451 state_t dfa_state;
10454 /* Reset STATE to the initial simulation state. */
10456 static void
10457 mips_sim_reset (struct mips_sim *state)
10459 state->time = 0;
10460 state->insns_left = state->issue_rate;
10461 memset (&state->last_set, 0, sizeof (state->last_set));
10462 state_reset (state->dfa_state);
10465 /* Initialize STATE before its first use. DFA_STATE points to an
10466 allocated but uninitialized DFA state. */
10468 static void
10469 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10471 state->issue_rate = mips_issue_rate ();
10472 state->dfa_state = dfa_state;
10473 mips_sim_reset (state);
10476 /* Advance STATE by one clock cycle. */
10478 static void
10479 mips_sim_next_cycle (struct mips_sim *state)
10481 state->time++;
10482 state->insns_left = state->issue_rate;
10483 state_transition (state->dfa_state, 0);
10486 /* Advance simulation state STATE until instruction INSN can read
10487 register REG. */
10489 static void
10490 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10492 unsigned int i;
10494 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10495 if (state->last_set[REGNO (reg) + i].insn != 0)
10497 unsigned int t;
10499 t = state->last_set[REGNO (reg) + i].time;
10500 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10501 while (state->time < t)
10502 mips_sim_next_cycle (state);
10506 /* A for_each_rtx callback. If *X is a register, advance simulation state
10507 DATA until mips_sim_insn can read the register's value. */
10509 static int
10510 mips_sim_wait_regs_2 (rtx *x, void *data)
10512 if (REG_P (*x))
10513 mips_sim_wait_reg (data, mips_sim_insn, *x);
10514 return 0;
10517 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10519 static void
10520 mips_sim_wait_regs_1 (rtx *x, void *data)
10522 for_each_rtx (x, mips_sim_wait_regs_2, data);
10525 /* Advance simulation state STATE until all of INSN's register
10526 dependencies are satisfied. */
10528 static void
10529 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10531 mips_sim_insn = insn;
10532 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
10535 /* Advance simulation state STATE until the units required by
10536 instruction INSN are available. */
10538 static void
10539 mips_sim_wait_units (struct mips_sim *state, rtx insn)
10541 state_t tmp_state;
10543 tmp_state = alloca (state_size ());
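/* Work on a copy of the DFA state so that we can test whether INSN
   could issue in the current cycle without disturbing the real state.  */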
10544 while (state->insns_left == 0
10545 || (memcpy (tmp_state, state->dfa_state, state_size ()),
10546 state_transition (tmp_state, insn) >= 0))
10547 mips_sim_next_cycle (state);
10550 /* Advance simulation state STATE until INSN is ready to issue. */
10552 static void
10553 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
10555 mips_sim_wait_regs (state, insn);
10556 mips_sim_wait_units (state, insn);
10559 /* mips_sim_insn has just set X. Update the LAST_SET array
10560 in simulation state DATA. */
10562 static void
10563 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10565 struct mips_sim *state;
10566 unsigned int i;
10568 state = data;
10569 if (REG_P (x))
10570 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10572 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10573 state->last_set[REGNO (x) + i].time = state->time;
10577 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10578 can issue immediately (i.e., that mips_sim_wait_insn has already
10579 been called). */
10581 static void
10582 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10584 state_transition (state->dfa_state, insn);
10585 state->insns_left--;
10587 mips_sim_insn = insn;
10588 note_stores (PATTERN (insn), mips_sim_record_set, state);
10591 /* Simulate issuing a NOP in state STATE. */
10593 static void
10594 mips_sim_issue_nop (struct mips_sim *state)
10596 if (state->insns_left == 0)
10597 mips_sim_next_cycle (state);
10598 state->insns_left--;
10601 /* Update simulation state STATE so that it's ready to accept the instruction
10602 after INSN. INSN should be part of the main rtl chain, not a member of a
10603 SEQUENCE. */
10605 static void
10606 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10608 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10609 if (JUMP_P (insn))
10610 mips_sim_issue_nop (state);
10612 switch (GET_CODE (SEQ_BEGIN (insn)))
10614 case CODE_LABEL:
10615 case CALL_INSN:
10616 /* We can't predict the processor state after a call or label. */
10617 mips_sim_reset (state);
10618 break;
10620 case JUMP_INSN:
10621 /* The delay slots of branch likely instructions are only executed
10622 when the branch is taken. Therefore, if the caller has simulated
10623 the delay slot instruction, STATE does not really reflect the state
10624 of the pipeline for the instruction after the delay slot. Also,
10625 branch likely instructions tend to incur a penalty when not taken,
10626 so there will probably be an extra delay between the branch and
10627 the instruction after the delay slot. */
10628 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10629 mips_sim_reset (state);
10630 break;
10632 default:
10633 break;
10637 /* The VR4130 pipeline issues aligned pairs of instructions together,
10638 but it stalls the second instruction if it depends on the first.
10639 In order to cut down the amount of logic required, this dependence
10640 check is not based on a full instruction decode. Instead, any non-SPECIAL
10641 instruction is assumed to modify the register specified by bits 20-16
10642 (which is usually the "rt" field).
10644 In beq, beql, bne and bnel instructions, the rt field is actually an
10645 input, so we can end up with a false dependence between the branch
10646 and its delay slot. If this situation occurs in instruction INSN,
10647 try to avoid it by swapping rs and rt. */
10649 static void
10650 vr4130_avoid_branch_rt_conflict (rtx insn)
10652 rtx first, second;
10654 first = SEQ_BEGIN (insn);
10655 second = SEQ_END (insn);
10656 if (JUMP_P (first)
10657 && NONJUMP_INSN_P (second)
10658 && GET_CODE (PATTERN (first)) == SET
10659 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10660 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10662 /* Check for the right kind of condition. */
10663 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10664 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10665 && REG_P (XEXP (cond, 0))
10666 && REG_P (XEXP (cond, 1))
10667 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10668 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10670 /* SECOND mentions the rt register but not the rs register. */
10671 rtx tmp = XEXP (cond, 0);
10672 XEXP (cond, 0) = XEXP (cond, 1);
10673 XEXP (cond, 1) = tmp;
10678 /* Implement -mvr4130-align. Go through each basic block and simulate the
10679 processor pipeline. If we find that a pair of instructions could execute
10680 in parallel, and the first of those instructions is not 8-byte aligned,
10681 insert a nop to make it aligned. */
10683 static void
10684 vr4130_align_insns (void)
10686 struct mips_sim state;
10687 rtx insn, subinsn, last, last2, next;
10688 bool aligned_p;
10690 dfa_start ();
10692 /* LAST is the last instruction before INSN to have a nonzero length.
10693 LAST2 is the last such instruction before LAST. */
10694 last = 0;
10695 last2 = 0;
10697 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10698 aligned_p = true;
10700 mips_sim_init (&state, alloca (state_size ()));
10701 for (insn = get_insns (); insn != 0; insn = next)
10703 unsigned int length;
10705 next = NEXT_INSN (insn);
10707 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10708 This isn't really related to the alignment pass, but we do it on
10709 the fly to avoid a separate instruction walk. */
10710 vr4130_avoid_branch_rt_conflict (insn);
10712 if (USEFUL_INSN_P (insn))
10713 FOR_EACH_SUBINSN (subinsn, insn)
10715 mips_sim_wait_insn (&state, subinsn);
10717 /* If we want this instruction to issue in parallel with the
10718 previous one, make sure that the previous instruction is
10719 aligned. There are several reasons why this isn't worthwhile
10720 when the second instruction is a call:
10722 - Calls are less likely to be performance critical.
10723 - There's a good chance that the delay slot can execute
10724 in parallel with the call.
10725 - The return address would then be unaligned.
10727 In general, if we're going to insert a nop between instructions
10728 X and Y, it's better to insert it immediately after X. That
10729 way, if the nop makes Y aligned, it will also align any labels
10730 between X and Y. */
10731 if (state.insns_left != state.issue_rate
10732 && !CALL_P (subinsn))
10734 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10736 /* SUBINSN is the first instruction in INSN and INSN is
10737 aligned. We want to align the previous instruction
10738 instead, so insert a nop between LAST2 and LAST.
10740 Note that LAST could be either a single instruction
10741 or a branch with a delay slot. In the latter case,
10742 LAST, like INSN, is already aligned, but the delay
10743 slot must have some extra delay that stops it from
10744 issuing at the same time as the branch. We therefore
10745 insert a nop before the branch in order to align its
10746 delay slot. */
10747 emit_insn_after (gen_nop (), last2);
10748 aligned_p = false;
10750 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10752 /* SUBINSN is the delay slot of INSN, but INSN is
10753 currently unaligned. Insert a nop between
10754 LAST and INSN to align it. */
10755 emit_insn_after (gen_nop (), last);
10756 aligned_p = true;
10759 mips_sim_issue_insn (&state, subinsn);
10761 mips_sim_finish_insn (&state, insn);
10763 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10764 length = get_attr_length (insn);
10765 if (length > 0)
10767 /* If the instruction is an asm statement or multi-instruction
10768 mips.md pattern, the length is only an estimate. Insert an
10769 8-byte alignment after it so that the following instructions
10770 can be handled correctly. */
10771 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10772 && (recog_memoized (insn) < 0 || length >= 8))
10774 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10775 next = NEXT_INSN (next);
10776 mips_sim_next_cycle (&state);
10777 aligned_p = true;
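/* Otherwise an instruction whose length is an odd multiple of 4 bytes
   toggles the 8-byte alignment.  */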
10779 else if (length & 4)
10780 aligned_p = !aligned_p;
10781 last2 = last;
10782 last = insn;
10785 /* See whether INSN is an aligned label. */
10786 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10787 aligned_p = true;
10789 dfa_finish ();
10792 /* Subroutine of mips_reorg. If there is a hazard between INSN
10793 and a previous instruction, avoid it by inserting nops after
10794 instruction AFTER.
10796 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10797 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10798 before using the value of that register. *HILO_DELAY counts the
10799 number of instructions since the last hilo hazard (that is,
10800 the number of instructions since the last mflo or mfhi).
10802 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10803 for the next instruction.
10805 LO_REG is an rtx for the LO register, used in dependence checking. */
10807 static void
10808 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10809 rtx *delayed_reg, rtx lo_reg)
10811 rtx pattern, set;
10812 int nops, ninsns;
10814 if (!INSN_P (insn))
10815 return;
10817 pattern = PATTERN (insn);
10819 /* Do not put the whole function in .set noreorder if it contains
10820 an asm statement. We don't know whether there will be hazards
10821 between the asm statement and the gcc-generated code. */
10822 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10823 cfun->machine->all_noreorder_p = false;
10825 /* Ignore zero-length instructions (barriers and the like). */
10826 ninsns = get_attr_length (insn) / 4;
10827 if (ninsns == 0)
10828 return;
10830 /* Work out how many nops are needed. Note that we only care about
10831 registers that are explicitly mentioned in the instruction's pattern.
10832 It doesn't matter that calls use the argument registers or that they
10833 clobber hi and lo. */
10834 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10835 nops = 2 - *hilo_delay;
10836 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10837 nops = 1;
10838 else
10839 nops = 0;
10841 /* Insert the nops between this instruction and the previous one.
10842 Each new nop takes us further from the last hilo hazard. */
10843 *hilo_delay += nops;
10844 while (nops-- > 0)
10845 emit_insn_after (gen_hazard_nop (), after);
10847 /* Set up the state for the next instruction. */
10848 *hilo_delay += ninsns;
10849 *delayed_reg = 0;
10850 if (INSN_CODE (insn) >= 0)
10851 switch (get_attr_hazard (insn))
10853 case HAZARD_NONE:
10854 break;
10856 case HAZARD_HILO:
10857 *hilo_delay = 0;
10858 break;
10860 case HAZARD_DELAY:
10861 set = single_set (insn);
10862 gcc_assert (set != 0);
10863 *delayed_reg = SET_DEST (set);
10864 break;
10869 /* Go through the instruction stream and insert nops where necessary.
10870 See if the whole function can then be put into .set noreorder &
10871 .set nomacro. */
10873 static void
10874 mips_avoid_hazards (void)
10876 rtx insn, last_insn, lo_reg, delayed_reg;
10877 int hilo_delay, i;
10879 /* Force all instructions to be split into their final form. */
10880 split_all_insns_noflow ();
10882 /* Recalculate instruction lengths without taking nops into account. */
10883 cfun->machine->ignore_hazard_length_p = true;
10884 shorten_branches (get_insns ());
10886 cfun->machine->all_noreorder_p = true;
10888 /* Profiled functions can't be all noreorder because the profiler
10889 support uses assembler macros. */
10890 if (current_function_profile)
10891 cfun->machine->all_noreorder_p = false;
10893 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10894 we rely on the assembler to work around some errata. */
10895 if (TARGET_FIX_VR4120)
10896 cfun->machine->all_noreorder_p = false;
10898 /* The same is true for -mfix-vr4130 if we might generate mflo or
10899 mfhi instructions. Note that we avoid using mflo and mfhi if
10900 the VR4130 macc and dmacc instructions are available instead;
10901 see the *mfhilo_{si,di}_macc patterns. */
10902 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10903 cfun->machine->all_noreorder_p = false;
10905 last_insn = 0;
10906 hilo_delay = 2;
10907 delayed_reg = 0;
10908 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10910 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10911 if (INSN_P (insn))
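/* A branch with a filled delay slot appears as a SEQUENCE; check each
   instruction in the sequence individually.  */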
10913 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10914 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10915 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10916 &hilo_delay, &delayed_reg, lo_reg);
10917 else
10918 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10919 &delayed_reg, lo_reg);
10921 last_insn = insn;
10926 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10928 static void
10929 mips_reorg (void)
10931 mips16_lay_out_constants ();
10932 if (TARGET_EXPLICIT_RELOCS)
10934 if (mips_flag_delayed_branch)
10935 dbr_schedule (get_insns ());
10936 mips_avoid_hazards ();
10937 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10938 vr4130_align_insns ();
10942 /* This function does three things:
10944 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10945 - Register the mips16 hardware floating point stubs.
10946 - Register the gofast functions if selected using --enable-gofast. */
10948 #include "config/gofast.h"
10950 static void
10951 mips_init_libfuncs (void)
10953 if (TARGET_FIX_VR4120)
10955 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10956 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10959 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10961 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10962 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10963 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10964 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10966 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10967 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10968 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10969 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10970 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10971 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10972 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10974 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10975 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10976 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10978 if (TARGET_DOUBLE_FLOAT)
10980 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10981 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10982 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10983 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10985 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10986 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10987 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10988 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10989 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10990 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10991 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10993 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10994 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10996 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10997 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10998 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
11001 else
11002 gofast_maybe_init_libfuncs ();
11005 /* Return a number assessing the cost of moving a register in class
11006 FROM to class TO. The classes are expressed using the enumeration
11007 values such as `GENERAL_REGS'. A value of 2 is the default; other
11008 values are interpreted relative to that.
11010 It is not required that the cost always equal 2 when FROM is the
11011 same as TO; on some machines it is expensive to move between
11012 registers if they are not general registers.
11014 If reload sees an insn consisting of a single `set' between two
11015 hard registers, and if `REGISTER_MOVE_COST' applied to their
11016 classes returns a value of 2, reload does not check to ensure that
11017 the constraints of the insn are met. Setting a cost of other than
11018 2 will allow reload to verify that the constraints are met. You
11019 should do this if the `movM' pattern's constraints do not allow
11020 such copying.
11022 ??? We make the cost of moving from HI/LO into general
11023 registers the same as for one of moving general registers to
11024 HI/LO for TARGET_MIPS16 in order to prevent allocating a
11025 pseudo to HI/LO. This might hurt optimizations, though it
11026 isn't clear whether it is wise, and it might not work in all cases. We
11027 could solve the DImode LO reg problem by using a multiply, just
11028 like reload_{in,out}si. We could solve the SImode/HImode HI reg
11029 problem by using divide instructions. divu puts the remainder in
11030 the HI reg, so doing a divide by -1 will move the value in the HI
11031 reg for all values except -1. We could handle that case by using a
11032 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
11033 a compare/branch to test the input value to see which instruction
11034 we need to use. This gets pretty messy, but it is feasible. */
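/* A worked reading of the table implemented below (a reading aid, not a
   specification): moves within GENERAL_REGS normally cost 2 (4 under
   TARGET_MIPS16, unless one side is M16_REGS or M16_NA_REGS), moves
   between GENERAL_REGS and FP_REGS cost 4, moves between GENERAL_REGS
   and ACC_REGS cost 6 (12 under TARGET_MIPS16), FP_REGS -> ST_REGS
   costs 8 and coprocessor register moves cost 5.  */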
11036 int
11037 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
11038 enum reg_class to, enum reg_class from)
11040 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
11041 return 2;
11042 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
11043 return 2;
11044 else if (reg_class_subset_p (from, GENERAL_REGS))
11046 if (to == M16_REGS)
11047 return 2;
11048 else if (to == M16_NA_REGS)
11049 return 2;
11050 else if (reg_class_subset_p (to, GENERAL_REGS))
11052 if (TARGET_MIPS16)
11053 return 4;
11054 else
11055 return 2;
11057 else if (to == FP_REGS)
11058 return 4;
11059 else if (reg_class_subset_p (to, ACC_REGS))
11061 if (TARGET_MIPS16)
11062 return 12;
11063 else
11064 return 6;
11066 else if (reg_class_subset_p (to, ALL_COP_REGS))
11068 return 5;
11071 else if (from == FP_REGS)
11073 if (reg_class_subset_p (to, GENERAL_REGS))
11074 return 4;
11075 else if (to == FP_REGS)
11076 return 2;
11077 else if (to == ST_REGS)
11078 return 8;
11080 else if (reg_class_subset_p (from, ACC_REGS))
11082 if (reg_class_subset_p (to, GENERAL_REGS))
11084 if (TARGET_MIPS16)
11085 return 12;
11086 else
11087 return 6;
11090 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
11091 return 4;
11092 else if (reg_class_subset_p (from, ALL_COP_REGS))
11094 return 5;
11097 /* Fall through.
11098 ??? What cases are these? Shouldn't we return 2 here? */
11100 return 12;
11103 /* Return the length of INSN. LENGTH is the initial length computed by
11104 attributes in the machine-description file. */
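/* Worked example: a conditional branch whose .md length is 4 has 4 bytes
   added for its unfilled delay slot on non-MIPS16 targets, giving 8; if
   it is also marked HAZARD_HILO, another 8 bytes of nops are allowed for,
   giving 16.  Under TARGET_MIPS16 the total is then halved, per the
   two-byte-instruction assumption below.  */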
11106 int
11107 mips_adjust_insn_length (rtx insn, int length)
11109 /* An unconditional jump has an unfilled delay slot if it is not part
11110 of a sequence. A conditional jump normally has a delay slot, but
11111 does not on MIPS16. */
11112 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
11113 length += 4;
11115 /* See how many nops might be needed to avoid hardware hazards. */
11116 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
11117 switch (get_attr_hazard (insn))
11119 case HAZARD_NONE:
11120 break;
11122 case HAZARD_DELAY:
11123 length += 4;
11124 break;
11126 case HAZARD_HILO:
11127 length += 8;
11128 break;
11131 /* All MIPS16 instructions are a measly two bytes. */
11132 if (TARGET_MIPS16)
11133 length /= 2;
11135 return length;
11139 /* Return an asm sequence to start a noat block and load the address
11140 of a label into $1. */
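/* For example, the o32 explicit-relocs template returned below prints
   roughly

        .set  noat
        lw    $1,%got(label)($28)
        addiu $1,$1,%lo(label)

   where "%[" opens the noat block, "%@" is the assembler temporary $1
   and "%+" is the global pointer.  (A sketch; the exact text comes from
   the operand punctuation handlers.)  */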
11142 const char *
11143 mips_output_load_label (void)
11145 if (TARGET_EXPLICIT_RELOCS)
11146 switch (mips_abi)
11148 case ABI_N32:
11149 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
11151 case ABI_64:
11152 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
11154 default:
11155 if (ISA_HAS_LOAD_DELAY)
11156 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
11157 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
11159 else
11161 if (Pmode == DImode)
11162 return "%[dla\t%@,%0";
11163 else
11164 return "%[la\t%@,%0";
11168 /* Return the assembly code for INSN, which has the operands given by
11169 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
11170 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
11171 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
11172 version of BRANCH_IF_TRUE. */
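/* Informal picture of the long-branch expansion implemented below (not
   literal compiler output): when OPERANDS[1] is out of range of a direct
   branch, we emit

        <inverted branch to 1f>
        nop                    # or INSN's delay slot
        j     <target>         # or: load target into $1; jr $1
        nop                    # or INSN's annulled delay slot
   1:

   so the out-of-line jump is reached only when the original condition
   is true.  */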
11174 const char *
11175 mips_output_conditional_branch (rtx insn, rtx *operands,
11176 const char *branch_if_true,
11177 const char *branch_if_false)
11179 unsigned int length;
11180 rtx taken, not_taken;
11182 length = get_attr_length (insn);
11183 if (length <= 8)
11185 /* Just a simple conditional branch. */
11186 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
11187 return branch_if_true;
11190 /* Generate a reversed branch around a direct jump. This fallback does
11191 not use branch-likely instructions. */
11192 mips_branch_likely = false;
11193 not_taken = gen_label_rtx ();
11194 taken = operands[1];
11196 /* Generate the reversed branch to NOT_TAKEN. */
11197 operands[1] = not_taken;
11198 output_asm_insn (branch_if_false, operands);
11200 /* If INSN has a delay slot, we must provide delay slots for both the
11201 branch to NOT_TAKEN and the conditional jump. We must also ensure
11202 that INSN's delay slot is executed in the appropriate cases. */
11203 if (final_sequence)
11205 /* This first delay slot will always be executed, so use INSN's
11206 delay slot if it is not annulled.
11207 if (!INSN_ANNULLED_BRANCH_P (insn))
11209 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11210 asm_out_file, optimize, 1, NULL);
11211 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11213 else
11214 output_asm_insn ("nop", 0);
11215 fprintf (asm_out_file, "\n");
11218 /* Output the unconditional branch to TAKEN. */
11219 if (length <= 16)
11220 output_asm_insn ("j\t%0%/", &taken);
11221 else
11223 output_asm_insn (mips_output_load_label (), &taken);
11224 output_asm_insn ("jr\t%@%]%/", 0);
11227 /* Now deal with its delay slot; see above. */
11228 if (final_sequence)
11230 /* This delay slot will only be executed if the branch is taken.
11231 Use INSN's delay slot if it is annulled.
11232 if (INSN_ANNULLED_BRANCH_P (insn))
11234 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11235 asm_out_file, optimize, 1, NULL);
11236 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11238 else
11239 output_asm_insn ("nop", 0);
11240 fprintf (asm_out_file, "\n");
11243 /* Output NOT_TAKEN. */
11244 (*targetm.asm_out.internal_label) (asm_out_file, "L",
11245 CODE_LABEL_NUMBER (not_taken));
11246 return "";
11249 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11250 if some ordered condition is true. The condition is given by
11251 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11252 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11253 its second is always zero. */
11255 const char *
11256 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11258 const char *branch[2];
11260 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11261 Make BRANCH[0] branch on the inverse condition. */
11262 switch (GET_CODE (operands[0]))
11264 /* These cases are equivalent to comparisons against zero. */
11265 case LEU:
11266 inverted_p = !inverted_p;
11267 /* Fall through. */
11268 case GTU:
11269 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
11270 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
11271 break;
11273 /* These cases are always true or always false. */
11274 case LTU:
11275 inverted_p = !inverted_p;
11276 /* Fall through. */
11277 case GEU:
11278 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
11279 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
11280 break;
11282 default:
11283 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
11284 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
11285 break;
11287 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11290 /* Used to output div or ddiv instruction DIVISION, which has the operands
11291 given by OPERANDS. Add in a divide-by-zero check if needed.
11293 When working around R4000 and R4400 errata, we need to make sure that
11294 the division is not immediately followed by a shift[1][2]. We also
11295 need to stop the division from being put into a branch delay slot[3].
11296 The easiest way to avoid both problems is to add a nop after the
11297 division. When a divide-by-zero check is needed, this nop can be
11298 used to fill the branch delay slot.
11300 [1] If a double-word or a variable shift executes immediately
11301 after starting an integer division, the shift may give an
11302 incorrect result. See quotations of errata #16 and #28 from
11303 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11304 in mips.md for details.
11306 [2] A similar bug to [1] exists for all revisions of the
11307 R4000 and the R4400 when run in an MC configuration.
11308 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11310 "19. In this following sequence:
11312 ddiv (or ddivu or div or divu)
11313 dsll32 (or dsrl32, dsra32)
11315 if an MPT stall occurs, while the divide is slipping the cpu
11316 pipeline, then the following double shift would end up with an
11317 incorrect result.
11319 Workaround: The compiler needs to avoid generating any
11320 sequence with divide followed by extended double shift."
11322 This erratum is also present in "MIPS R4400MC Errata, Processor
11323 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11324 & 3.0" as errata #10 and #4, respectively.
11326 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11327 (also valid for MIPS R4000MC processors):
11329 "52. R4000SC: This bug does not apply for the R4000PC.
11331 There are two flavors of this bug:
11333 1) If the instruction just after divide takes an RF exception
11334 (tlb-refill, tlb-invalid) and gets an instruction cache
11335 miss (both primary and secondary) and the line which is
11336 currently in secondary cache at this index had the first
11337 data word, where the bits 5..2 are set, then R4000 would
11338 get a wrong result for the div.
11342 div r8, r9
11343 ------------------- # end-of page. -tlb-refill
11347 div r8, r9
11348 ------------------- # end-of page. -tlb-invalid
11351 2) If the divide is in the taken branch delay slot, where the
11352 target takes RF exception and gets an I-cache miss for the
11353 exception vector or where I-cache miss occurs for the
11354 target address, under the above mentioned scenarios, the
11355 div would get wrong results.
11358 j r2 # to next page mapped or unmapped
11359 div r8,r9 # this bug would be there as long
11360 # as there is an ICache miss and
11361 nop # the "data pattern" is present
11364 beq r0, r0, NextPage # to Next page
11365 div r8,r9
11368 This bug is present for div, divu, ddiv, and ddivu
11369 instructions.
11371 Workaround: For item 1), OS could make sure that the next page
11372 after the divide instruction is also mapped. For item 2), the
11373 compiler could make sure that the divide instruction is not in
11374 the branch delay slot."
11376 These processors have PRId values of 0x00004220 and 0x00004300 for
11377 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
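/* Informally, the output produced below is the division instruction
   followed by "teq <divisor>,$0,7" when TARGET_CHECK_ZERO_DIV can use
   conditional traps; on MIPS16, or when traps are unavailable, the check
   is instead a branch on the divisor around an explicit "break 7".  With
   -mfix-r4000 or -mfix-r4400 a nop directly follows the division and, as
   noted above, can double as the delay-slot filler of the zero-divide
   branch.  */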
11379 const char *
11380 mips_output_division (const char *division, rtx *operands)
11382 const char *s;
11384 s = division;
11385 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11387 output_asm_insn (s, operands);
11388 s = "nop";
11390 if (TARGET_CHECK_ZERO_DIV)
11392 if (TARGET_MIPS16)
11394 output_asm_insn (s, operands);
11395 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11397 else if (GENERATE_DIVIDE_TRAPS)
11399 output_asm_insn (s, operands);
11400 s = "teq\t%2,%.,7";
11402 else
11404 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11405 output_asm_insn (s, operands);
11406 s = "break\t7%)\n1:";
11409 return s;
11412 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11413 with a final "000" replaced by "k". Ignore case.
11415 Note: this function is shared between GCC and GAS. */
11417 static bool
11418 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11420 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11421 given++, canonical++;
11423 return ((*given == 0 && *canonical == 0)
11424 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11428 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11429 CPU name. We've traditionally allowed a lot of variation here.
11431 Note: this function is shared between GCC and GAS. */
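/* Examples of the accepted variation: "r4000", "4000", "r4k" and "4k"
   all match the canonical name "r4000", and a canonical name beginning
   with "vr" or "rm" is also matched by its bare number, with or without
   a leading "r".  */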
11433 static bool
11434 mips_matching_cpu_name_p (const char *canonical, const char *given)
11436 /* First see if the name matches exactly, or with a final "000"
11437 turned into "k". */
11438 if (mips_strict_matching_cpu_name_p (canonical, given))
11439 return true;
11441 /* If not, try comparing based on numerical designation alone.
11442 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11443 if (TOLOWER (*given) == 'r')
11444 given++;
11445 if (!ISDIGIT (*given))
11446 return false;
11448 /* Skip over some well-known prefixes in the canonical name,
11449 hoping to find a number there too. */
11450 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11451 canonical += 2;
11452 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11453 canonical += 2;
11454 else if (TOLOWER (canonical[0]) == 'r')
11455 canonical += 1;
11457 return mips_strict_matching_cpu_name_p (canonical, given);
11461 /* Return the mips_cpu_info entry for the processor or ISA given
11462 by CPU_STRING. Return null if the string isn't recognized.
11464 A similar function exists in GAS. */
11466 static const struct mips_cpu_info *
11467 mips_parse_cpu (const char *cpu_string)
11469 const struct mips_cpu_info *p;
11470 const char *s;
11472 /* In the past, we allowed upper-case CPU names, but it doesn't
11473 work well with the multilib machinery. */
11474 for (s = cpu_string; *s != 0; s++)
11475 if (ISUPPER (*s))
11477 warning (0, "the cpu name must be lower case");
11478 break;
11481 /* 'from-abi' selects the most compatible architecture for the given
11482 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11483 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11484 version. Look first at the -mgp options, if given, otherwise base
11485 the choice on MASK_64BIT in TARGET_DEFAULT. */
11486 if (strcasecmp (cpu_string, "from-abi") == 0)
11487 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11488 : ABI_NEEDS_64BIT_REGS ? 3
11489 : (TARGET_64BIT ? 3 : 1));
11491 /* 'default' has traditionally been a no-op. Probably not very useful. */
11492 if (strcasecmp (cpu_string, "default") == 0)
11493 return 0;
11495 for (p = mips_cpu_info_table; p->name != 0; p++)
11496 if (mips_matching_cpu_name_p (p->name, cpu_string))
11497 return p;
11499 return 0;
11503 /* Return the processor associated with the given ISA level, or null
11504 if the ISA isn't valid. */
11506 static const struct mips_cpu_info *
11507 mips_cpu_info_from_isa (int isa)
11509 const struct mips_cpu_info *p;
11511 for (p = mips_cpu_info_table; p->name != 0; p++)
11512 if (p->isa == isa)
11513 return p;
11515 return 0;
11518 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11519 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11520 they only hold condition code modes, and CCmode is always considered to
11521 be 4 bytes wide. All other registers are word sized. */
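/* For example, a DFmode value needs two FP registers when UNITS_PER_FPREG
   is 4 (32-bit FP registers) and one when it is 8, a CCmode value always
   occupies a single status register, and a DImode value needs two word
   registers on a 32-bit target.  */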
11523 unsigned int
11524 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11526 if (ST_REG_P (regno))
11527 return ((GET_MODE_SIZE (mode) + 3) / 4);
11528 else if (! FP_REG_P (regno))
11529 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
11530 else
11531 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
11534 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11535 all BLKmode objects are returned in memory. Under the new (N32 and
11536 64-bit MIPS ABIs) small structures are returned in a register.
11537 Objects with varying size must still be returned in memory, of
11538 course. */
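/* Reading of the test below: under the new ABIs a structure of two ints
   (8 bytes, no more than 2 * UNITS_PER_WORD) comes back in registers,
   while anything larger than two words, or of variable size
   (int_size_in_bytes == -1), is returned in memory; under the old ABIs
   every BLKmode object is returned in memory.  */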
11540 static bool
11541 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
11543 if (TARGET_OLDABI)
11544 return (TYPE_MODE (type) == BLKmode);
11545 else
11546 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
11547 || (int_size_in_bytes (type) == -1));
11550 static bool
11551 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11553 return !TARGET_OLDABI;
11556 /* Return true if INSN is a multiply-add or multiply-subtract
11557 instruction and PREV assigns to the accumulator operand. */
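/* For instance, an INSN whose single_set source is

       (plus (mult (reg A) (reg B)) (reg ACC))

   is linked to a PREV that sets ACC: the accumulator is the second
   operand of the PLUS, or the first operand of the MINUS in the
   multiply-subtract form.  (A sketch; the real patterns are in mips.md.)  */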
11559 bool
11560 mips_linked_madd_p (rtx prev, rtx insn)
11562 rtx x;
11564 x = single_set (insn);
11565 if (x == 0)
11566 return false;
11568 x = SET_SRC (x);
11570 if (GET_CODE (x) == PLUS
11571 && GET_CODE (XEXP (x, 0)) == MULT
11572 && reg_set_p (XEXP (x, 1), prev))
11573 return true;
11575 if (GET_CODE (x) == MINUS
11576 && GET_CODE (XEXP (x, 1)) == MULT
11577 && reg_set_p (XEXP (x, 0), prev))
11578 return true;
11580 return false;
11583 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11584 that may clobber hi or lo. */
11586 static rtx mips_macc_chains_last_hilo;
11588 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11589 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11591 static void
11592 mips_macc_chains_record (rtx insn)
11594 if (get_attr_may_clobber_hilo (insn))
11595 mips_macc_chains_last_hilo = insn;
11598 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11599 has NREADY elements, looking for a multiply-add or multiply-subtract
11600 instruction that is cumulative with mips_macc_chains_last_hilo.
11601 If there is one, promote it ahead of anything else that might
11602 clobber hi or lo. */
11604 static void
11605 mips_macc_chains_reorder (rtx *ready, int nready)
11607 int i, j;
11609 if (mips_macc_chains_last_hilo != 0)
11610 for (i = nready - 1; i >= 0; i--)
11611 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11613 for (j = nready - 1; j > i; j--)
11614 if (recog_memoized (ready[j]) >= 0
11615 && get_attr_may_clobber_hilo (ready[j]))
11617 mips_promote_ready (ready, i, j);
11618 break;
11620 break;
11624 /* The last instruction to be scheduled. */
11626 static rtx vr4130_last_insn;
11628 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11629 points to an rtx that is initially an instruction. Nullify the rtx
11630 if the instruction uses the value of register X. */
11632 static void
11633 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11635 rtx *insn_ptr = data;
11636 if (REG_P (x)
11637 && *insn_ptr != 0
11638 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11639 *insn_ptr = 0;
11642 /* Return true if there is true register dependence between vr4130_last_insn
11643 and INSN. */
11645 static bool
11646 vr4130_true_reg_dependence_p (rtx insn)
11648 note_stores (PATTERN (vr4130_last_insn),
11649 vr4130_true_reg_dependence_p_1, &insn);
11650 return insn == 0;
11653 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11654 the ready queue and that INSN2 is the instruction after it, return
11655 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11656 in which INSN1 and INSN2 can probably issue in parallel, but for
11657 which (INSN2, INSN1) should be less sensitive to instruction
11658 alignment than (INSN1, INSN2). See 4130.md for more details. */
11660 static bool
11661 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11663 sd_iterator_def sd_it;
11664 dep_t dep;
11666 /* Check for the following case:
11668 1) there is some other instruction X with an anti dependence on INSN1;
11669 2) X has a higher priority than INSN2; and
11670 3) X is an arithmetic instruction (and thus has no unit restrictions).
11672 If INSN1 is the last instruction blocking X, it would be better to
11673 choose (INSN1, X) over (INSN2, INSN1). */
11674 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11675 if (DEP_TYPE (dep) == REG_DEP_ANTI
11676 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11677 && recog_memoized (DEP_CON (dep)) >= 0
11678 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11679 return false;
11681 if (vr4130_last_insn != 0
11682 && recog_memoized (insn1) >= 0
11683 && recog_memoized (insn2) >= 0)
11685 /* See whether INSN1 and INSN2 use different execution units,
11686 or if they are both ALU-type instructions. If so, they can
11687 probably execute in parallel. */
11688 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11689 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11690 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11692 /* If only one of the instructions has a dependence on
11693 vr4130_last_insn, prefer to schedule the other one first. */
11694 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11695 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11696 if (dep1 != dep2)
11697 return dep1;
11699 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11700 is not an ALU-type instruction and if INSN1 uses the same
11701 execution unit. (Note that if this condition holds, we already
11702 know that INSN2 uses a different execution unit.) */
11703 if (class1 != VR4130_CLASS_ALU
11704 && recog_memoized (vr4130_last_insn) >= 0
11705 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11706 return true;
11709 return false;
11712 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11713 queue with at least two instructions. Swap the first two if
11714 vr4130_swap_insns_p says that it could be worthwhile. */
11716 static void
11717 vr4130_reorder (rtx *ready, int nready)
11719 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11720 mips_promote_ready (ready, nready - 2, nready - 1);
11723 /* Remove the instruction at index LOWER from ready queue READY and
11724 reinsert it in front of the instruction at index HIGHER. LOWER must
11725 be <= HIGHER. */
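/* Worked example: if READY is {A, B, C, D}, with the scheduler issuing
   from the end of the queue, then mips_promote_ready (ready, 1, 3)
   removes B and reinserts it at index 3, giving {A, C, D, B}, so B is
   now considered before C and D.  */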
11727 static void
11728 mips_promote_ready (rtx *ready, int lower, int higher)
11730 rtx new_head;
11731 int i;
11733 new_head = ready[lower];
11734 for (i = lower; i < higher; i++)
11735 ready[i] = ready[i + 1];
11736 ready[i] = new_head;
11739 /* If the priority of the instruction at POS2 in the ready queue READY
11740 is within LIMIT units of that of the instruction at POS1, swap the
11741 instructions if POS2 is not already less than POS1. */
11743 static void
11744 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11746 if (pos1 < pos2
11747 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11749 rtx temp;
11750 temp = ready[pos1];
11751 ready[pos1] = ready[pos2];
11752 ready[pos2] = temp;
11756 /* Record whether last 74k AGEN instruction was a load or store. */
11758 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11760 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11761 resets the state to TYPE_UNKNOWN. */
11763 static void
11764 mips_74k_agen_init (rtx insn)
11766 if (!insn || !NONJUMP_INSN_P (insn))
11767 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11768 else if (USEFUL_INSN_P (insn))
11770 enum attr_type type = get_attr_type (insn);
11771 if (type == TYPE_LOAD || type == TYPE_STORE)
11772 mips_last_74k_agen_insn = type;
11776 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11777 loads to be grouped together, and multiple stores to be grouped
11778 together. Swap things around in the ready queue to make this happen. */
11780 static void
11781 mips_74k_agen_reorder (rtx *ready, int nready)
11783 int i;
11784 int store_pos, load_pos;
11786 store_pos = -1;
11787 load_pos = -1;
11789 for (i = nready - 1; i >= 0; i--)
11791 rtx insn = ready[i];
11792 if (USEFUL_INSN_P (insn))
11793 switch (get_attr_type (insn))
11795 case TYPE_STORE:
11796 if (store_pos == -1)
11797 store_pos = i;
11798 break;
11800 case TYPE_LOAD:
11801 if (load_pos == -1)
11802 load_pos = i;
11803 break;
11805 default:
11806 break;
11810 if (load_pos == -1 || store_pos == -1)
11811 return;
11813 switch (mips_last_74k_agen_insn)
11815 case TYPE_UNKNOWN:
11816 /* Prefer to schedule loads since they have a higher latency. */
11817 case TYPE_LOAD:
11818 /* Swap loads to the front of the queue. */
11819 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11820 break;
11821 case TYPE_STORE:
11822 /* Swap stores to the front of the queue. */
11823 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11824 break;
11825 default:
11826 break;
11830 /* Implement TARGET_SCHED_INIT. */
11832 static void
11833 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11834 int max_ready ATTRIBUTE_UNUSED)
11836 mips_macc_chains_last_hilo = 0;
11837 vr4130_last_insn = 0;
11838 mips_74k_agen_init (NULL_RTX);
11841 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
11843 static int
11844 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11845 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11847 if (!reload_completed
11848 && TUNE_MACC_CHAINS
11849 && *nreadyp > 0)
11850 mips_macc_chains_reorder (ready, *nreadyp);
11851 if (reload_completed
11852 && TUNE_MIPS4130
11853 && !TARGET_VR4130_ALIGN
11854 && *nreadyp > 1)
11855 vr4130_reorder (ready, *nreadyp);
11856 if (TUNE_74K)
11857 mips_74k_agen_reorder (ready, *nreadyp);
11858 return mips_issue_rate ();
11861 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11863 static int
11864 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11865 rtx insn, int more)
11867 if (TUNE_74K)
11868 mips_74k_agen_init (insn);
11869 switch (GET_CODE (PATTERN (insn)))
11871 case USE:
11872 case CLOBBER:
11873 /* Don't count USEs and CLOBBERs against the issue rate. */
11874 break;
11876 default:
11877 more--;
11878 if (!reload_completed && TUNE_MACC_CHAINS)
11879 mips_macc_chains_record (insn);
11880 vr4130_last_insn = insn;
11881 break;
11883 return more;
11886 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11887 dependencies have no cost, except on the 20Kc where output-dependence
11888 is treated like input-dependence. */
11890 static int
11891 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11892 rtx dep ATTRIBUTE_UNUSED, int cost)
11894 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11895 && TUNE_20KC)
11896 return cost;
11897 if (REG_NOTE_KIND (link) != 0)
11898 return 0;
11899 return cost;
11902 /* Return the number of instructions that can be issued per cycle. */
11904 static int
11905 mips_issue_rate (void)
11907 switch (mips_tune)
11909 case PROCESSOR_74KC:
11910 case PROCESSOR_74KF2_1:
11911 case PROCESSOR_74KF1_1:
11912 case PROCESSOR_74KF3_2:
11913 /* The 74k is not strictly a quad-issue cpu, but it can be treated as one
11914 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11915 but in reality at most 3 insns can be issued, as floating-point
11916 loads and stores also require a slot in the AGEN pipe. */
11917 return 4;
11919 case PROCESSOR_20KC:
11920 case PROCESSOR_R4130:
11921 case PROCESSOR_R5400:
11922 case PROCESSOR_R5500:
11923 case PROCESSOR_R7000:
11924 case PROCESSOR_R9000:
11925 return 2;
11927 case PROCESSOR_SB1:
11928 case PROCESSOR_SB1A:
11929 /* This is actually 4, but we get better performance if we claim 3.
11930 This is partly because of unwanted speculative code motion with the
11931 larger number, and partly because in most common cases we can't
11932 reach the theoretical max of 4. */
11933 return 3;
11935 default:
11936 return 1;
11940 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11941 be as wide as the scheduling freedom in the DFA. */
11943 static int
11944 mips_multipass_dfa_lookahead (void)
11946 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11947 if (TUNE_SB1)
11948 return 4;
11950 return 0;
11953 /* Implements a store data bypass check. We need this because the cprestore
11954 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11955 default routine to abort. We just return false for that case. */
11956 /* ??? Should try to give a better result here than assuming false. */
11958 int
11959 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11961 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11962 return false;
11964 return ! store_data_bypass_p (out_insn, in_insn);
11967 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11968 return the first operand of the associated "pref" or "prefx" insn. */
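/* The cookie values produced below correspond to the standard "pref"
   hints: locality 0 gives WRITE + 4 (4 = load_streamed,
   5 = store_streamed), locality 1 or 2 gives WRITE itself (0 = load,
   1 = store) and locality 3 gives WRITE + 6 (6 = load_retained,
   7 = store_retained).  */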
11970 rtx
11971 mips_prefetch_cookie (rtx write, rtx locality)
11973 /* store_streamed / load_streamed. */
11974 if (INTVAL (locality) <= 0)
11975 return GEN_INT (INTVAL (write) + 4);
11977 /* store / load. */
11978 if (INTVAL (locality) <= 2)
11979 return write;
11981 /* store_retained / load_retained. */
11982 return GEN_INT (INTVAL (write) + 6);
11985 /* MIPS builtin function support. */
11987 struct builtin_description
11989 /* The code of the main .md file instruction. See mips_builtin_type
11990 for more information. */
11991 enum insn_code icode;
11993 /* The floating-point comparison code to use with ICODE, if any. */
11994 enum mips_fp_condition cond;
11996 /* The name of the builtin function. */
11997 const char *name;
11999 /* Specifies how the function should be expanded. */
12000 enum mips_builtin_type builtin_type;
12002 /* The function's prototype. */
12003 enum mips_function_type function_type;
12005 /* The target flags required for this function. */
12006 int target_flags;
12009 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
12010 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
12011 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12012 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12013 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
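/* For example,

       DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
                       MASK_PAIRED_SINGLE_FLOAT)

   expands to the table entry

       { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
         MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
         MASK_PAIRED_SINGLE_FLOAT }.  */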
12015 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
12016 TARGET_FLAGS. */
12017 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
12018 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
12019 "__builtin_mips_" #INSN "_" #COND "_s", \
12020 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
12021 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
12022 "__builtin_mips_" #INSN "_" #COND "_d", \
12023 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
12025 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
12026 The lower and upper forms require TARGET_FLAGS while the any and all
12027 forms require MASK_MIPS3D. */
12028 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
12029 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12030 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
12031 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12032 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12033 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
12034 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12035 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12036 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
12037 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
12038 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12039 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
12040 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
12042 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
12043 require MASK_MIPS3D. */
12044 #define CMP_4S_BUILTINS(INSN, COND) \
12045 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12046 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
12047 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12048 MASK_MIPS3D }, \
12049 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12050 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
12051 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12052 MASK_MIPS3D }
12054 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
12055 instruction requires TARGET_FLAGS. */
12056 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
12057 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12058 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
12059 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12060 TARGET_FLAGS }, \
12061 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12062 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
12063 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12064 TARGET_FLAGS }
12066 /* Define all the builtins related to c.cond.fmt condition COND. */
12067 #define CMP_BUILTINS(COND) \
12068 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12069 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
12070 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
12071 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12072 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
12073 CMP_4S_BUILTINS (c, COND), \
12074 CMP_4S_BUILTINS (cabs, COND)
12076 static const struct builtin_description mips_bdesc[] =
12078 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12079 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12080 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12081 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12082 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
12083 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12084 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12085 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12087 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
12088 MASK_PAIRED_SINGLE_FLOAT),
12089 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12090 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12091 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12092 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12094 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12095 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12096 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12097 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12098 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12099 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12101 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12102 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12103 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12104 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12105 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12106 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12108 MIPS_FP_CONDITIONS (CMP_BUILTINS)
12111 /* Builtin functions for the SB-1 processor. */
12113 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
12115 static const struct builtin_description sb1_bdesc[] =
12117 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
12120 /* Builtin functions for DSP ASE. */
12122 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
12123 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
12124 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
12125 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
12126 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
12128 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
12129 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
12130 builtin_description fields. */
12131 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12132 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12133 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
12135 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
12136 branch instruction. TARGET_FLAGS is a builtin_description field. */
12137 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
12138 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
12139 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
12141 static const struct builtin_description dsp_bdesc[] =
12143 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12144 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12145 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12146 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12147 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12148 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12149 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12150 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12151 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12152 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12153 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12154 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12155 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12156 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
12157 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
12158 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
12159 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12160 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12161 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12162 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12163 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12164 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12165 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12166 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12167 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12168 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12169 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12170 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12171 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12172 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12173 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12174 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12175 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12176 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12177 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12178 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12179 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12180 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12181 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12182 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12183 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12184 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12185 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12186 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
12187 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12188 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
12189 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
12190 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12191 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12192 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12193 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12194 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12195 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12196 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12197 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12198 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12199 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12200 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12201 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12202 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
12203 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
12204 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12205 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12206 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12207 BPOSGE_BUILTIN (32, MASK_DSP),
12209 /* The following are for the MIPS DSP ASE REV 2. */
12210 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
12211 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12212 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12213 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12214 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12215 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12216 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12217 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12218 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12219 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12220 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12221 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12222 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12223 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12224 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12225 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12226 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12227 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12228 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12229 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12230 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12231 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
12232 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12233 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12234 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12235 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12236 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12237 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12238 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12239 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12240 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12241 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12242 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12243 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
12246 static const struct builtin_description dsp_32only_bdesc[] =
12248 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12249 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12250 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12251 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12252 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12253 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12254 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12255 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12256 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12257 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12258 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12259 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12260 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12261 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12262 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12263 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12264 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12265 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12266 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12267 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12268 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12270 /* The following are for the MIPS DSP ASE REV 2. */
12271 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12272 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12273 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12274 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12275 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12276 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12277 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12278 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
12279 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
12280 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12281 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12282 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12283 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12284 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12285 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
12288 /* This helps provide a mapping from builtin function codes to bdesc
12289 arrays. */
12291 struct bdesc_map
12293 /* The builtin function table that this entry describes. */
12294 const struct builtin_description *bdesc;
12296 /* The number of entries in the builtin function table. */
12297 unsigned int size;
12299 /* The target processor that supports these builtin functions.
12300 PROCESSOR_MAX means we enable them for all processors. */
12301 enum processor_type proc;
12303 /* If the target has these flags, this builtin function table
12304 will not be supported. */
12305 int unsupported_target_flags;
12308 static const struct bdesc_map bdesc_arrays[] =
12310 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
12311 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
12312 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
12313 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
12314 MASK_64BIT }
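/* Builtin function codes are assigned consecutively across these arrays,
   in the order listed above; mips_expand_builtin recovers the table and
   the index by walking bdesc_arrays and subtracting each table's size.
   For example, a DECL_FUNCTION_CODE equal to ARRAY_SIZE (mips_bdesc)
   + ARRAY_SIZE (sb1_bdesc) + 2 names the third entry of dsp_bdesc.  */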
12317 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
12318 suitable for input operand OP of instruction ICODE. Return the value. */
12320 static rtx
12321 mips_prepare_builtin_arg (enum insn_code icode,
12322 unsigned int op, tree exp, unsigned int argnum)
12324 rtx value;
12325 enum machine_mode mode;
12327 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
12328 mode = insn_data[icode].operand[op].mode;
12329 if (!insn_data[icode].operand[op].predicate (value, mode))
12331 value = copy_to_mode_reg (mode, value);
12332 /* Check the predicate again. */
12333 if (!insn_data[icode].operand[op].predicate (value, mode))
12335 error ("invalid argument to builtin function");
12336 return const0_rtx;
12340 return value;
12343 /* Return an rtx suitable for output operand OP of instruction ICODE.
12344 If TARGET is non-null, try to use it where possible. */
12346 static rtx
12347 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12349 enum machine_mode mode;
12351 mode = insn_data[icode].operand[op].mode;
12352 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12353 target = gen_reg_rtx (mode);
12355 return target;
12358 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
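/* A sketch of what reaches this hook (illustrative; the documented
   prototypes are in the GCC manual): with -mdsp and a vector typedef
   such as "typedef short v2q15 __attribute__ ((vector_size (4)));",
   user code like

       v2q15 add_ph (v2q15 a, v2q15 b)
       {
         return __builtin_mips_addq_ph (a, b);
       }

   arrives as a CALL_EXPR whose DECL_FUNCTION_CODE selects the dsp_bdesc
   entry for addq_ph, which is then expanded as MIPS_BUILTIN_DIRECT.  */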
12360 rtx
12361 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12362 enum machine_mode mode ATTRIBUTE_UNUSED,
12363 int ignore ATTRIBUTE_UNUSED)
12365 enum insn_code icode;
12366 enum mips_builtin_type type;
12367 tree fndecl;
12368 unsigned int fcode;
12369 const struct builtin_description *bdesc;
12370 const struct bdesc_map *m;
12372 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12373 fcode = DECL_FUNCTION_CODE (fndecl);
12375 if (TARGET_MIPS16)
12377 error ("built-in function %qs not supported for MIPS16",
12378 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12379 return const0_rtx;
12382 bdesc = NULL;
12383 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12385 if (fcode < m->size)
12387 bdesc = m->bdesc;
12388 icode = bdesc[fcode].icode;
12389 type = bdesc[fcode].builtin_type;
12390 break;
12392 fcode -= m->size;
12394 if (bdesc == NULL)
12395 return 0;
12397 switch (type)
12399 case MIPS_BUILTIN_DIRECT:
12400 return mips_expand_builtin_direct (icode, target, exp, true);
12402 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12403 return mips_expand_builtin_direct (icode, target, exp, false);
12405 case MIPS_BUILTIN_MOVT:
12406 case MIPS_BUILTIN_MOVF:
12407 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
12408 target, exp);
12410 case MIPS_BUILTIN_CMP_ANY:
12411 case MIPS_BUILTIN_CMP_ALL:
12412 case MIPS_BUILTIN_CMP_UPPER:
12413 case MIPS_BUILTIN_CMP_LOWER:
12414 case MIPS_BUILTIN_CMP_SINGLE:
12415 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
12416 target, exp);
12418 case MIPS_BUILTIN_BPOSGE32:
12419 return mips_expand_builtin_bposge (type, target);
12421 default:
12422 return 0;
12426 /* Init builtin functions. This is called from TARGET_INIT_BUILTINS. */
12428 void
12429 mips_init_builtins (void)
12431 const struct builtin_description *d;
12432 const struct bdesc_map *m;
12433 tree types[(int) MIPS_MAX_FTYPE_MAX];
12434 tree V2SF_type_node;
12435 tree V2HI_type_node;
12436 tree V4QI_type_node;
12437 unsigned int offset;
12439 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
12440 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
12441 return;
12443 if (TARGET_PAIRED_SINGLE_FLOAT)
12445 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12447 types[MIPS_V2SF_FTYPE_V2SF]
12448 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12450 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
12451 = build_function_type_list (V2SF_type_node,
12452 V2SF_type_node, V2SF_type_node, NULL_TREE);
12454 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
12455 = build_function_type_list (V2SF_type_node,
12456 V2SF_type_node, V2SF_type_node,
12457 integer_type_node, NULL_TREE);
12459 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
12460 = build_function_type_list (V2SF_type_node,
12461 V2SF_type_node, V2SF_type_node,
12462 V2SF_type_node, V2SF_type_node, NULL_TREE);
12464 types[MIPS_V2SF_FTYPE_SF_SF]
12465 = build_function_type_list (V2SF_type_node,
12466 float_type_node, float_type_node, NULL_TREE);
12468 types[MIPS_INT_FTYPE_V2SF_V2SF]
12469 = build_function_type_list (integer_type_node,
12470 V2SF_type_node, V2SF_type_node, NULL_TREE);
12472 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
12473 = build_function_type_list (integer_type_node,
12474 V2SF_type_node, V2SF_type_node,
12475 V2SF_type_node, V2SF_type_node, NULL_TREE);
12477 types[MIPS_INT_FTYPE_SF_SF]
12478 = build_function_type_list (integer_type_node,
12479 float_type_node, float_type_node, NULL_TREE);
12481 types[MIPS_INT_FTYPE_DF_DF]
12482 = build_function_type_list (integer_type_node,
12483 double_type_node, double_type_node, NULL_TREE);
12485 types[MIPS_SF_FTYPE_V2SF]
12486 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
12488 types[MIPS_SF_FTYPE_SF]
12489 = build_function_type_list (float_type_node,
12490 float_type_node, NULL_TREE);
12492 types[MIPS_SF_FTYPE_SF_SF]
12493 = build_function_type_list (float_type_node,
12494 float_type_node, float_type_node, NULL_TREE);
12496 types[MIPS_DF_FTYPE_DF]
12497 = build_function_type_list (double_type_node,
12498 double_type_node, NULL_TREE);
12500 types[MIPS_DF_FTYPE_DF_DF]
12501 = build_function_type_list (double_type_node,
12502 double_type_node, double_type_node, NULL_TREE);
12505 if (TARGET_DSP)
12507 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
12508 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
12510 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
12511 = build_function_type_list (V2HI_type_node,
12512 V2HI_type_node, V2HI_type_node,
12513 NULL_TREE);
12515 types[MIPS_SI_FTYPE_SI_SI]
12516 = build_function_type_list (intSI_type_node,
12517 intSI_type_node, intSI_type_node,
12518 NULL_TREE);
12520 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
12521 = build_function_type_list (V4QI_type_node,
12522 V4QI_type_node, V4QI_type_node,
12523 NULL_TREE);
12525 types[MIPS_SI_FTYPE_V4QI]
12526 = build_function_type_list (intSI_type_node,
12527 V4QI_type_node,
12528 NULL_TREE);
12530 types[MIPS_V2HI_FTYPE_V2HI]
12531 = build_function_type_list (V2HI_type_node,
12532 V2HI_type_node,
12533 NULL_TREE);
12535 types[MIPS_SI_FTYPE_SI]
12536 = build_function_type_list (intSI_type_node,
12537 intSI_type_node,
12538 NULL_TREE);
12540 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
12541 = build_function_type_list (V4QI_type_node,
12542 V2HI_type_node, V2HI_type_node,
12543 NULL_TREE);
12545 types[MIPS_V2HI_FTYPE_SI_SI]
12546 = build_function_type_list (V2HI_type_node,
12547 intSI_type_node, intSI_type_node,
12548 NULL_TREE);
12550 types[MIPS_SI_FTYPE_V2HI]
12551 = build_function_type_list (intSI_type_node,
12552 V2HI_type_node,
12553 NULL_TREE);
12555 types[MIPS_V2HI_FTYPE_V4QI]
12556 = build_function_type_list (V2HI_type_node,
12557 V4QI_type_node,
12558 NULL_TREE);
12560 types[MIPS_V4QI_FTYPE_V4QI_SI]
12561 = build_function_type_list (V4QI_type_node,
12562 V4QI_type_node, intSI_type_node,
12563 NULL_TREE);
12565 types[MIPS_V2HI_FTYPE_V2HI_SI]
12566 = build_function_type_list (V2HI_type_node,
12567 V2HI_type_node, intSI_type_node,
12568 NULL_TREE);
12570 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12571 = build_function_type_list (V2HI_type_node,
12572 V4QI_type_node, V2HI_type_node,
12573 NULL_TREE);
12575 types[MIPS_SI_FTYPE_V2HI_V2HI]
12576 = build_function_type_list (intSI_type_node,
12577 V2HI_type_node, V2HI_type_node,
12578 NULL_TREE);
12580 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12581 = build_function_type_list (intDI_type_node,
12582 intDI_type_node, V4QI_type_node, V4QI_type_node,
12583 NULL_TREE);
12585 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12586 = build_function_type_list (intDI_type_node,
12587 intDI_type_node, V2HI_type_node, V2HI_type_node,
12588 NULL_TREE);
12590 types[MIPS_DI_FTYPE_DI_SI_SI]
12591 = build_function_type_list (intDI_type_node,
12592 intDI_type_node, intSI_type_node, intSI_type_node,
12593 NULL_TREE);
12595 types[MIPS_V4QI_FTYPE_SI]
12596 = build_function_type_list (V4QI_type_node,
12597 intSI_type_node,
12598 NULL_TREE);
12600 types[MIPS_V2HI_FTYPE_SI]
12601 = build_function_type_list (V2HI_type_node,
12602 intSI_type_node,
12603 NULL_TREE);
12605 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12606 = build_function_type_list (void_type_node,
12607 V4QI_type_node, V4QI_type_node,
12608 NULL_TREE);
12610 types[MIPS_SI_FTYPE_V4QI_V4QI]
12611 = build_function_type_list (intSI_type_node,
12612 V4QI_type_node, V4QI_type_node,
12613 NULL_TREE);
12615 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12616 = build_function_type_list (void_type_node,
12617 V2HI_type_node, V2HI_type_node,
12618 NULL_TREE);
12620 types[MIPS_SI_FTYPE_DI_SI]
12621 = build_function_type_list (intSI_type_node,
12622 intDI_type_node, intSI_type_node,
12623 NULL_TREE);
12625 types[MIPS_DI_FTYPE_DI_SI]
12626 = build_function_type_list (intDI_type_node,
12627 intDI_type_node, intSI_type_node,
12628 NULL_TREE);
12630 types[MIPS_VOID_FTYPE_SI_SI]
12631 = build_function_type_list (void_type_node,
12632 intSI_type_node, intSI_type_node,
12633 NULL_TREE);
12635 types[MIPS_SI_FTYPE_PTR_SI]
12636 = build_function_type_list (intSI_type_node,
12637 ptr_type_node, intSI_type_node,
12638 NULL_TREE);
12640 types[MIPS_SI_FTYPE_VOID]
12641 = build_function_type (intSI_type_node, void_list_node);
12643 if (TARGET_DSPR2)
12645 types[MIPS_V4QI_FTYPE_V4QI]
12646 = build_function_type_list (V4QI_type_node,
12647 V4QI_type_node,
12648 NULL_TREE);
12650 types[MIPS_SI_FTYPE_SI_SI_SI]
12651 = build_function_type_list (intSI_type_node,
12652 intSI_type_node, intSI_type_node,
12653 intSI_type_node, NULL_TREE);
12655 types[MIPS_DI_FTYPE_DI_USI_USI]
12656 = build_function_type_list (intDI_type_node,
12657 intDI_type_node,
12658 unsigned_intSI_type_node,
12659 unsigned_intSI_type_node, NULL_TREE);
12661 types[MIPS_DI_FTYPE_SI_SI]
12662 = build_function_type_list (intDI_type_node,
12663 intSI_type_node, intSI_type_node,
12664 NULL_TREE);
12666 types[MIPS_DI_FTYPE_USI_USI]
12667 = build_function_type_list (intDI_type_node,
12668 unsigned_intSI_type_node,
12669 unsigned_intSI_type_node, NULL_TREE);
12671 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12672 = build_function_type_list (V2HI_type_node,
12673 intSI_type_node, intSI_type_node,
12674 intSI_type_node, NULL_TREE);
12679 /* Iterate through all of the bdesc arrays, initializing all of the
12680 builtin functions. */
12682 offset = 0;
12683 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12685 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12686 && (m->unsupported_target_flags & target_flags) == 0)
12687 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12688 if ((d->target_flags & target_flags) == d->target_flags)
12689 add_builtin_function (d->name, types[d->function_type],
12690 d - m->bdesc + offset,
12691 BUILT_IN_MD, NULL, NULL);
12692 offset += m->size;
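/* Usage sketch (illustrative), assuming the documented DSP vector typedef
   and the __builtin_mips_addq_ph builtin: once the loop above has
   registered the builtins, code compiled with -mdsp can call them
   directly, for example

       typedef short v2q15 __attribute__ ((vector_size (4)));

       v2q15
       add_halves (v2q15 a, v2q15 b)
       {
         return __builtin_mips_addq_ph (a, b);
       }

   which expands to a single addq.ph instruction.  Each builtin's
   function_type field indexes the types[] array built above, and the code
   passed to add_builtin_function is its position within the concatenated
   bdesc arrays (d - m->bdesc + offset).  */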
12696 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12697 .md pattern and EXP is the CALL_EXPR with its arguments. TARGET,
12698 if nonnull, suggests a good place to put the result.
12699 HAS_TARGET is true if the builtin returns a value. */
12701 static rtx
12702 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12703 bool has_target)
12705 rtx ops[MAX_RECOG_OPERANDS];
12706 int i = 0;
12707 int j = 0;
12709 if (has_target)
12711 /* Save TARGET in ops[0]. */
12712 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12713 i = 1;
12716 /* Only fetch operands for the arguments that are actually present; some
12717 instructions have extra clobber operands with no corresponding argument. */
12718 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12719 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12721 switch (i)
12723 case 2:
12724 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12725 break;
12727 case 3:
12728 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12729 break;
12731 case 4:
12732 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12733 break;
12735 default:
12736 gcc_unreachable ();
12738 return target;
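/* Worked example (illustrative): for a direct builtin that takes two
   arguments and returns a value, the loop above produces

       ops[0] = result register derived from TARGET   (pattern operand 0)
       ops[1] = expanded argument 0                    (pattern operand 1)
       ops[2] = expanded argument 1                    (pattern operand 2)

   so I ends up as 3 and the switch emits one instance of the named
   pattern, GEN_FCN (icode) (ops[0], ops[1], ops[2]).  */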
12741 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12742 function (TYPE says which). EXP is the CALL_EXPR for the builtin
12743 call, ICODE is the instruction that should be used to compare
12744 the first two arguments, and COND is the condition it should test.
12745 TARGET, if nonnull, suggests a good place to put the result. */
12747 static rtx
12748 mips_expand_builtin_movtf (enum mips_builtin_type type,
12749 enum insn_code icode, enum mips_fp_condition cond,
12750 rtx target, tree exp)
12752 rtx cmp_result, op0, op1;
12754 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12755 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12756 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12757 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12759 icode = CODE_FOR_mips_cond_move_tf_ps;
12760 target = mips_prepare_builtin_target (icode, 0, target);
12761 if (type == MIPS_BUILTIN_MOVT)
12763 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12764 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12766 else
12768 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12769 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12771 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12772 return target;
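/* Sketch of the expansion (illustrative): for a four-argument
   __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps call (a, b, c, d),
   the code above first emits the ICODE comparison of A and B under
   condition COND, setting the paired condition codes, and then emits
   mips_cond_move_tf_ps to select between C and D under those codes; the
   operand order of C and D is swapped between the MOVT and MOVF variants
   so that one move pattern serves both.  */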
12775 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12776 into TARGET otherwise. Return TARGET. */
12778 static rtx
12779 mips_builtin_branch_and_move (rtx condition, rtx target,
12780 rtx value_if_true, rtx value_if_false)
12782 rtx true_label, done_label;
12784 true_label = gen_label_rtx ();
12785 done_label = gen_label_rtx ();
12787 /* First assume that CONDITION is false. */
12788 mips_emit_move (target, value_if_false);
12790 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12791 emit_jump_insn (gen_condjump (condition, true_label));
12792 emit_jump_insn (gen_jump (done_label));
12793 emit_barrier ();
12795 /* Fix TARGET if CONDITION is true. */
12796 emit_label (true_label);
12797 mips_emit_move (target, value_if_true);
12799 emit_label (done_label);
12800 return target;
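/* The emitted sequence is just the branchy form of a conditional move;
   in C terms (illustrative only):

       target = value_if_false;
       if (condition)
         target = value_if_true;

   The unconditional jump to DONE_LABEL plus the barrier make sure the
   fall-through (condition false) path skips the TRUE_LABEL block.  */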
12803 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12804 of the comparison instruction and COND is the condition it should test.
12805 EXP is the CALL_EXPR with its arguments, and TARGET, if nonnull,
12806 suggests a good place to put the boolean result. */
12808 static rtx
12809 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12810 enum insn_code icode, enum mips_fp_condition cond,
12811 rtx target, tree exp)
12813 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12814 int i;
12815 int j = 0;
12817 if (target == 0 || GET_MODE (target) != SImode)
12818 target = gen_reg_rtx (SImode);
12820 /* Prepare the operands to the comparison. */
12821 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12822 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12823 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12825 switch (insn_data[icode].n_operands)
12827 case 4:
12828 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12829 break;
12831 case 6:
12832 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12833 ops[3], ops[4], GEN_INT (cond)));
12834 break;
12836 default:
12837 gcc_unreachable ();
12840 /* If the comparison sets more than one register, we define the result
12841 to be 0 if all registers are false and -1 if all registers are true.
12842 The value of the complete result is indeterminate otherwise. */
12843 switch (builtin_type)
12845 case MIPS_BUILTIN_CMP_ALL:
12846 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12847 return mips_builtin_branch_and_move (condition, target,
12848 const0_rtx, const1_rtx);
12850 case MIPS_BUILTIN_CMP_UPPER:
12851 case MIPS_BUILTIN_CMP_LOWER:
12852 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12853 condition = gen_single_cc (cmp_result, offset);
12854 return mips_builtin_branch_and_move (condition, target,
12855 const1_rtx, const0_rtx);
12857 default:
12858 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12859 return mips_builtin_branch_and_move (condition, target,
12860 const1_rtx, const0_rtx);
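/* Usage sketch (illustrative), assuming the documented paired-single
   comparison builtins such as __builtin_mips_all_c_eq_ps:

       typedef float v2sf __attribute__ ((vector_size (8)));

       int
       both_halves_equal (v2sf a, v2sf b)
       {
         return __builtin_mips_all_c_eq_ps (a, b);
       }

   Here BUILTIN_TYPE is MIPS_BUILTIN_CMP_ALL, so the code above yields 1
   only if every condition-code bit written by the c.eq.ps comparison is
   set, and 0 otherwise.  */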
12864 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12865 suggests a good place to put the boolean result. */
12867 static rtx
12868 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12870 rtx condition, cmp_result;
12871 int cmp_value;
12873 if (target == 0 || GET_MODE (target) != SImode)
12874 target = gen_reg_rtx (SImode);
12876 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12878 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12879 cmp_value = 32;
12880 else
12881 gcc_unreachable ();
12883 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12884 return mips_builtin_branch_and_move (condition, target,
12885 const1_rtx, const0_rtx);
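/* Usage sketch (illustrative): the only bposge builtin handled here is
   the documented int __builtin_mips_bposge32 (void), which reports
   whether the DSP control register's pos field is at least 32, e.g.

       if (__builtin_mips_bposge32 ())
         extract_32_bits ();

   where extract_32_bits is a hypothetical helper.  The expansion reads
   the pos condition register (CCDSP_PO_REGNUM) and uses
   mips_builtin_branch_and_move to turn the >= 32 test into 0 or 1.  */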
12888 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12889 FIRST is true if this is the first time handling this decl. */
12891 static void
12892 mips_encode_section_info (tree decl, rtx rtl, int first)
12894 default_encode_section_info (decl, rtl, first);
12896 if (TREE_CODE (decl) == FUNCTION_DECL)
12898 rtx symbol = XEXP (rtl, 0);
12899 tree type = TREE_TYPE (decl);
12901 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12902 || mips_far_type_p (type))
12903 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
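/* Usage sketch (illustrative): the SYMBOL_FLAG_LONG_CALL flag set above
   is what -mlong-calls and the MIPS function attributes act through, e.g.

       void far_entry (void) __attribute__ ((long_call));
       void near_entry (void) __attribute__ ((near));

   mips_near_type_p and mips_far_type_p look for those attributes, and the
   flag tells the call expanders to load the address into a register and
   use jalr rather than a direct jal.  */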
12907 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12908 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12910 static void
12911 mips_extra_live_on_entry (bitmap regs)
12913 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12914 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
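/* For example (illustrative), the o32 -mabicalls prologue conventionally
   rebuilds $gp from the incoming function address in $25 ($t9, which is
   PIC_FUNCTION_ADDR_REGNUM):

       lui     $gp, %hi(_gp_disp)
       addiu   $gp, $gp, %lo(_gp_disp)
       addu    $gp, $gp, $25

   so the register must be treated as live on entry whenever that scheme
   is in use.  */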
12917 /* Implement TARGET_MODE_REP_EXTENDED: SImode values are represented
12918 as sign-extended to DImode. */
12919 static int
12920 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12922 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12923 return SIGN_EXTEND;
12925 return UNKNOWN;
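/* Concrete example (illustrative): on a 64-bit target the SImode value
   0x80000000 lives in a 64-bit register as 0xffffffff80000000, so an
   explicit sign-extension from SImode to DImode is a no-op; returning
   SIGN_EXTEND here lets the optimizers remove such redundant extensions.  */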
12928 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12930 static void
12931 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12933 switch (size)
12935 case 4:
12936 fputs ("\t.dtprelword\t", file);
12937 break;
12939 case 8:
12940 fputs ("\t.dtpreldword\t", file);
12941 break;
12943 default:
12944 gcc_unreachable ();
12946 output_addr_const (file, x);
12947 fputs ("+0x8000", file);
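/* Example output (illustrative): for a 4-byte entry referring to a
   thread-local variable tls_var (a hypothetical name), the code above
   prints

       .dtprelword     tls_var+0x8000

   the 0x8000 bias matching the offset the MIPS TLS ABI applies to
   DTP-relative values.  */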
12950 #include "gt-mips.h"