gcc/config/mips/mips.c
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
61 #define UNSPEC_ADDRESS_P(X) \
62 (GET_CODE (X) == UNSPEC \
63 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
64 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
68 XVECEXP (X, 0, 0)
70 /* Extract the symbol type from UNSPEC wrapper X. */
71 #define UNSPEC_ADDRESS_TYPE(X) \
72 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
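/* For example, mips_unspec_address below wraps a symbol of type TYPE as

     (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + TYPE)

   and encloses the result in a CONST, together with any constant offset.  */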
74 /* The maximum distance between the top of the stack frame and the
75 value $sp has when we save & restore registers.
77 Use a maximum gap of 0x100 in the mips16 case. We can then use
78 unextended instructions to save and restore registers, and to
79 allocate and deallocate the top part of the frame.
81 The value in the !mips16 case must be a SMALL_OPERAND and must
82 preserve the maximum stack alignment. */
83 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
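/* Note that 0x7ff0 is the largest 16-byte-aligned value that is still a
   SMALL_OPERAND (i.e. that fits in a signed 16-bit immediate).  */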
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
87 (INSN_P (INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
98 : (INSN))
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
104 : (INSN))
106 /* Execute the following loop body with SUBINSN set to each instruction
107 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
108 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
109 for ((SUBINSN) = SEQ_BEGIN (INSN); \
110 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
111 (SUBINSN) = NEXT_INSN (SUBINSN))
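/* A typical use, with SUBINSN being a scratch rtx variable:

     FOR_EACH_SUBINSN (subinsn, insn)
       if (INSN_P (subinsn))
         ...process SUBINSN...  */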
113 /* Classifies an address.
115 ADDRESS_REG
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
119 ADDRESS_LO_SUM
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
123 ADDRESS_CONST_INT
124 A signed 16-bit constant address.
 126    ADDRESS_SYMBOLIC
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
129 ADDRESS_REG,
130 ADDRESS_LO_SUM,
131 ADDRESS_CONST_INT,
132 ADDRESS_SYMBOLIC
135 /* Classifies the prototype of a builtin function. */
136 enum mips_function_type
138 MIPS_V2SF_FTYPE_V2SF,
139 MIPS_V2SF_FTYPE_V2SF_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
142 MIPS_V2SF_FTYPE_SF_SF,
143 MIPS_INT_FTYPE_V2SF_V2SF,
144 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
145 MIPS_INT_FTYPE_SF_SF,
146 MIPS_INT_FTYPE_DF_DF,
147 MIPS_SF_FTYPE_V2SF,
148 MIPS_SF_FTYPE_SF,
149 MIPS_SF_FTYPE_SF_SF,
150 MIPS_DF_FTYPE_DF,
151 MIPS_DF_FTYPE_DF_DF,
153 /* The last type. */
154 MIPS_MAX_FTYPE_MAX
157 /* Specifies how a builtin function should be converted into rtl. */
158 enum mips_builtin_type
160 /* The builtin corresponds directly to an .md pattern. The return
161 value is mapped to operand 0 and the arguments are mapped to
162 operands 1 and above. */
163 MIPS_BUILTIN_DIRECT,
165 /* The builtin corresponds to a comparison instruction followed by
166 a mips_cond_move_tf_ps pattern. The first two arguments are the
167 values to compare and the second two arguments are the vector
168 operands for the movt.ps or movf.ps instruction (in assembly order). */
169 MIPS_BUILTIN_MOVF,
170 MIPS_BUILTIN_MOVT,
172 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
173 of this instruction is the result of the comparison, which has mode
174 CCV2 or CCV4. The function arguments are mapped to operands 1 and
175 above. The function's return value is an SImode boolean that is
176 true under the following conditions:
178 MIPS_BUILTIN_CMP_ANY: one of the registers is true
179 MIPS_BUILTIN_CMP_ALL: all of the registers are true
180 MIPS_BUILTIN_CMP_LOWER: the first register is true
181 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
182 MIPS_BUILTIN_CMP_ANY,
183 MIPS_BUILTIN_CMP_ALL,
184 MIPS_BUILTIN_CMP_UPPER,
185 MIPS_BUILTIN_CMP_LOWER,
187 /* As above, but the instruction only sets a single $fcc register. */
188 MIPS_BUILTIN_CMP_SINGLE
191 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
192 #define MIPS_FP_CONDITIONS(MACRO) \
193 MACRO (f), \
194 MACRO (un), \
195 MACRO (eq), \
196 MACRO (ueq), \
197 MACRO (olt), \
198 MACRO (ult), \
199 MACRO (ole), \
200 MACRO (ule), \
201 MACRO (sf), \
202 MACRO (ngle), \
203 MACRO (seq), \
204 MACRO (ngl), \
205 MACRO (lt), \
206 MACRO (nge), \
207 MACRO (le), \
208 MACRO (ngt)
210 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
211 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
212 enum mips_fp_condition {
213 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
216 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
217 #define STRINGIFY(X) #X
218 static const char *const mips_fp_conditions[] = {
219 MIPS_FP_CONDITIONS (STRINGIFY)
 222 /* A function to save or restore a register. The first argument is the
 223 register and the second is the stack slot. */
224 typedef void (*mips_save_restore_fn) (rtx, rtx);
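/* mips_save_reg and mips_restore_reg below have this signature; they are
   the functions passed to mips_for_each_saved_reg by the prologue and
   epilogue code.  */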
226 struct mips16_constant;
227 struct mips_arg_info;
228 struct mips_address_info;
229 struct mips_integer_op;
230 struct mips_sim;
232 static enum mips_symbol_type mips_classify_symbol (rtx);
233 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
234 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
235 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
236 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
237 static bool mips_classify_address (struct mips_address_info *, rtx,
238 enum machine_mode, int);
239 static bool mips_cannot_force_const_mem (rtx);
240 static int mips_symbol_insns (enum mips_symbol_type);
241 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
242 static rtx mips_force_temporary (rtx, rtx);
243 static rtx mips_split_symbol (rtx, rtx);
244 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
245 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
246 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
247 static unsigned int mips_build_lower (struct mips_integer_op *,
248 unsigned HOST_WIDE_INT);
249 static unsigned int mips_build_integer (struct mips_integer_op *,
250 unsigned HOST_WIDE_INT);
251 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
252 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
253 static int m16_check_op (rtx, int, int, int);
254 static bool mips_rtx_costs (rtx, int, int, int *);
255 static int mips_address_cost (rtx);
256 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
257 static void mips_load_call_address (rtx, rtx, int);
258 static bool mips_function_ok_for_sibcall (tree, tree);
259 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
260 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
261 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
262 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
263 tree, int, struct mips_arg_info *);
264 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
265 static void mips_set_architecture (const struct mips_cpu_info *);
266 static void mips_set_tune (const struct mips_cpu_info *);
267 static bool mips_handle_option (size_t, const char *, int);
268 static struct machine_function *mips_init_machine_status (void);
269 static void print_operand_reloc (FILE *, rtx, const char **);
270 #if TARGET_IRIX
271 static void irix_output_external_libcall (rtx);
272 #endif
273 static void mips_file_start (void);
274 static void mips_file_end (void);
275 static bool mips_rewrite_small_data_p (rtx);
276 static int mips_small_data_pattern_1 (rtx *, void *);
277 static int mips_rewrite_small_data_1 (rtx *, void *);
278 static bool mips_function_has_gp_insn (void);
279 static unsigned int mips_global_pointer (void);
280 static bool mips_save_reg_p (unsigned int);
281 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
282 mips_save_restore_fn);
283 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
284 static void mips_output_cplocal (void);
285 static void mips_emit_loadgp (void);
286 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
287 static void mips_set_frame_expr (rtx);
288 static rtx mips_frame_set (rtx, rtx);
289 static void mips_save_reg (rtx, rtx);
290 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
291 static void mips_restore_reg (rtx, rtx);
292 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
293 HOST_WIDE_INT, tree);
294 static int symbolic_expression_p (rtx);
295 static void mips_select_rtx_section (enum machine_mode, rtx,
296 unsigned HOST_WIDE_INT);
297 static void mips_function_rodata_section (tree);
298 static bool mips_in_small_data_p (tree);
299 static int mips_fpr_return_fields (tree, tree *);
300 static bool mips_return_in_msb (tree);
301 static rtx mips_return_fpr_pair (enum machine_mode mode,
302 enum machine_mode mode1, HOST_WIDE_INT,
303 enum machine_mode mode2, HOST_WIDE_INT);
304 static rtx mips16_gp_pseudo_reg (void);
305 static void mips16_fp_args (FILE *, int, int);
306 static void build_mips16_function_stub (FILE *);
307 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
308 static void dump_constants (struct mips16_constant *, rtx);
309 static int mips16_insn_length (rtx);
310 static int mips16_rewrite_pool_refs (rtx *, void *);
311 static void mips16_lay_out_constants (void);
312 static void mips_sim_reset (struct mips_sim *);
313 static void mips_sim_init (struct mips_sim *, state_t);
314 static void mips_sim_next_cycle (struct mips_sim *);
315 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
316 static int mips_sim_wait_regs_2 (rtx *, void *);
317 static void mips_sim_wait_regs_1 (rtx *, void *);
318 static void mips_sim_wait_regs (struct mips_sim *, rtx);
319 static void mips_sim_wait_units (struct mips_sim *, rtx);
320 static void mips_sim_wait_insn (struct mips_sim *, rtx);
321 static void mips_sim_record_set (rtx, rtx, void *);
322 static void mips_sim_issue_insn (struct mips_sim *, rtx);
323 static void mips_sim_issue_nop (struct mips_sim *);
324 static void mips_sim_finish_insn (struct mips_sim *, rtx);
325 static void vr4130_avoid_branch_rt_conflict (rtx);
326 static void vr4130_align_insns (void);
327 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
328 static void mips_avoid_hazards (void);
329 static void mips_reorg (void);
330 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
331 static bool mips_matching_cpu_name_p (const char *, const char *);
332 static const struct mips_cpu_info *mips_parse_cpu (const char *);
333 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
334 static bool mips_return_in_memory (tree, tree);
335 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
336 static void mips_macc_chains_record (rtx);
337 static void mips_macc_chains_reorder (rtx *, int);
338 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
339 static bool vr4130_true_reg_dependence_p (rtx);
340 static bool vr4130_swap_insns_p (rtx, rtx);
341 static void vr4130_reorder (rtx *, int);
342 static void mips_promote_ready (rtx *, int, int);
343 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
344 static int mips_variable_issue (FILE *, int, rtx, int);
345 static int mips_adjust_cost (rtx, rtx, rtx, int);
346 static int mips_issue_rate (void);
347 static int mips_multipass_dfa_lookahead (void);
348 static void mips_init_libfuncs (void);
349 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
350 tree, int *, int);
351 static tree mips_build_builtin_va_list (void);
352 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
353 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
354 tree, bool);
355 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
356 tree, bool);
357 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
358 tree, bool);
359 static bool mips_valid_pointer_mode (enum machine_mode);
360 static bool mips_scalar_mode_supported_p (enum machine_mode);
361 static bool mips_vector_mode_supported_p (enum machine_mode);
362 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
363 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
364 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
365 static void mips_init_builtins (void);
366 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree);
367 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
368 enum insn_code, enum mips_fp_condition,
369 rtx, tree);
370 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
371 enum insn_code, enum mips_fp_condition,
372 rtx, tree);
374 /* Structure to be filled in by compute_frame_size with register
375 save masks, and offsets for the current function. */
377 struct mips_frame_info GTY(())
379 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
380 HOST_WIDE_INT var_size; /* # bytes that variables take up */
381 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
382 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
383 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
384 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
385 unsigned int mask; /* mask of saved gp registers */
386 unsigned int fmask; /* mask of saved fp registers */
387 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
388 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
389 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
390 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
391 bool initialized; /* true if frame size already calculated */
392 int num_gp; /* number of gp registers saved */
393 int num_fp; /* number of fp registers saved */
396 struct machine_function GTY(()) {
397 /* Pseudo-reg holding the value of $28 in a mips16 function which
398 refers to GP relative global variables. */
399 rtx mips16_gp_pseudo_rtx;
401 /* Current frame information, calculated by compute_frame_size. */
402 struct mips_frame_info frame;
404 /* The register to use as the global pointer within this function. */
405 unsigned int global_pointer;
407 /* True if mips_adjust_insn_length should ignore an instruction's
408 hazard attribute. */
409 bool ignore_hazard_length_p;
411 /* True if the whole function is suitable for .set noreorder and
412 .set nomacro. */
413 bool all_noreorder_p;
415 /* True if the function is known to have an instruction that needs $gp. */
416 bool has_gp_insn_p;
419 /* Information about a single argument. */
420 struct mips_arg_info
422 /* True if the argument is passed in a floating-point register, or
423 would have been if we hadn't run out of registers. */
424 bool fpr_p;
426 /* The number of words passed in registers, rounded up. */
427 unsigned int reg_words;
429 /* For EABI, the offset of the first register from GP_ARG_FIRST or
430 FP_ARG_FIRST. For other ABIs, the offset of the first register from
431 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
432 comment for details).
434 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
435 on the stack. */
436 unsigned int reg_offset;
438 /* The number of words that must be passed on the stack, rounded up. */
439 unsigned int stack_words;
441 /* The offset from the start of the stack overflow area of the argument's
442 first stack word. Only meaningful when STACK_WORDS is nonzero. */
443 unsigned int stack_offset;
447 /* Information about an address described by mips_address_type.
449 ADDRESS_CONST_INT
450 No fields are used.
452 ADDRESS_REG
453 REG is the base register and OFFSET is the constant offset.
455 ADDRESS_LO_SUM
456 REG is the register that contains the high part of the address,
457 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
458 is the type of OFFSET's symbol.
460 ADDRESS_SYMBOLIC
461 SYMBOL_TYPE is the type of symbol being referenced. */
463 struct mips_address_info
465 enum mips_address_type type;
466 rtx reg;
467 rtx offset;
468 enum mips_symbol_type symbol_type;
472 /* One stage in a constant building sequence. These sequences have
473 the form:
475 A = VALUE[0]
476 A = A CODE[1] VALUE[1]
477 A = A CODE[2] VALUE[2]
480 where A is an accumulator, each CODE[i] is a binary rtl operation
481 and each VALUE[i] is a constant integer. */
482 struct mips_integer_op {
483 enum rtx_code code;
484 unsigned HOST_WIDE_INT value;
488 /* The largest number of operations needed to load an integer constant.
489 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
490 When the lowest bit is clear, we can try, but reject a sequence with
491 an extra SLL at the end. */
492 #define MIPS_MAX_INTEGER_OPS 7
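/* For example, the 32-bit constant 0x12345678 is built in two operations:

     codes[0] = { UNKNOWN, 0x12340000 }      lui  reg,0x1234
     codes[1] = { IOR,     0x00005678 }      ori  reg,reg,0x5678  */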
495 /* Global variables for machine-dependent things. */
497 /* Threshold for data being put into the small data/bss area, instead
498 of the normal data area. */
499 int mips_section_threshold = -1;
501 /* Count the number of .file directives, so that .loc is up to date. */
502 int num_source_filenames = 0;
 504 /* Count the number of sdb-related labels generated (to find block
 505 start and end boundaries). */
506 int sdb_label_count = 0;
508 /* Next label # for each statement for Silicon Graphics IRIS systems. */
509 int sym_lineno = 0;
511 /* Linked list of all externals that are to be emitted when optimizing
512 for the global pointer if they haven't been declared by the end of
513 the program with an appropriate .comm or initialization. */
515 struct extern_list GTY (())
517 struct extern_list *next; /* next external */
518 const char *name; /* name of the external */
519 int size; /* size in bytes */
522 static GTY (()) struct extern_list *extern_head = 0;
524 /* Name of the file containing the current function. */
525 const char *current_function_file = "";
527 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
528 int set_noreorder;
529 int set_noat;
530 int set_nomacro;
531 int set_volatile;
533 /* The next branch instruction is a branch likely, not branch normal. */
534 int mips_branch_likely;
536 /* The operands passed to the last cmpMM expander. */
537 rtx cmp_operands[2];
539 /* The target cpu for code generation. */
540 enum processor_type mips_arch;
541 const struct mips_cpu_info *mips_arch_info;
543 /* The target cpu for optimization and scheduling. */
544 enum processor_type mips_tune;
545 const struct mips_cpu_info *mips_tune_info;
547 /* Which instruction set architecture to use. */
548 int mips_isa;
550 /* Which ABI to use. */
551 int mips_abi = MIPS_ABI_DEFAULT;
553 /* Whether we are generating mips16 hard float code. In mips16 mode
554 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
555 -msoft-float was not specified by the user, which means that we
556 should arrange to call mips32 hard floating point code. */
557 int mips16_hard_float;
559 /* The arguments passed to -march and -mtune. */
560 static const char *mips_arch_string;
561 static const char *mips_tune_string;
563 /* The architecture selected by -mipsN. */
564 static const struct mips_cpu_info *mips_isa_info;
566 const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;
568 /* If TRUE, we split addresses into their high and low parts in the RTL. */
569 int mips_split_addresses;
571 /* Mode used for saving/restoring general purpose registers. */
572 static enum machine_mode gpr_mode;
 574 /* mips_hard_regno_mode_ok[MODE][REGNO] is nonzero if hard register
 575 REGNO can hold a value of mode MODE. */
576 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
578 /* List of all MIPS punctuation characters used by print_operand. */
579 char mips_print_operand_punct[256];
581 /* Map GCC register number to debugger register number. */
582 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
584 /* A copy of the original flag_delayed_branch: see override_options. */
585 static int mips_flag_delayed_branch;
587 static GTY (()) int mips_output_filename_first_time = 1;
589 /* mips_split_p[X] is true if symbols of type X can be split by
590 mips_split_symbol(). */
591 static bool mips_split_p[NUM_SYMBOL_TYPES];
593 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
594 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
595 if they are matched by a special .md file pattern. */
596 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
598 /* Likewise for HIGHs. */
599 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
 601 /* Map hard register number to register class. */
602 const enum reg_class mips_regno_to_class[] =
604 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
605 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
606 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
607 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
608 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
609 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
610 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
611 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
612 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
613 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
614 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
615 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
616 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
617 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
618 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
619 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
620 HI_REG, LO_REG, NO_REGS, ST_REGS,
621 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
622 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
623 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
624 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
625 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
626 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
627 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
628 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
629 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
630 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
631 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
632 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
633 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
634 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
635 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
636 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
637 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
638 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
639 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
640 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
641 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
642 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
643 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
644 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
645 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
646 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
647 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
650 /* Map register constraint character to register class. */
651 enum reg_class mips_char_to_class[256];
653 /* A table describing all the processors gcc knows about. Names are
654 matched in the order listed. The first mention of an ISA level is
655 taken as the canonical name for that ISA.
657 To ease comparison, please keep this table in the same order as
658 gas's mips_cpu_info_table[]. */
659 const struct mips_cpu_info mips_cpu_info_table[] = {
660 /* Entries for generic ISAs */
661 { "mips1", PROCESSOR_R3000, 1 },
662 { "mips2", PROCESSOR_R6000, 2 },
663 { "mips3", PROCESSOR_R4000, 3 },
664 { "mips4", PROCESSOR_R8000, 4 },
665 { "mips32", PROCESSOR_4KC, 32 },
666 { "mips32r2", PROCESSOR_M4K, 33 },
667 { "mips64", PROCESSOR_5KC, 64 },
669 /* MIPS I */
670 { "r3000", PROCESSOR_R3000, 1 },
671 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
672 { "r3900", PROCESSOR_R3900, 1 },
674 /* MIPS II */
675 { "r6000", PROCESSOR_R6000, 2 },
677 /* MIPS III */
678 { "r4000", PROCESSOR_R4000, 3 },
679 { "vr4100", PROCESSOR_R4100, 3 },
680 { "vr4111", PROCESSOR_R4111, 3 },
681 { "vr4120", PROCESSOR_R4120, 3 },
682 { "vr4130", PROCESSOR_R4130, 3 },
683 { "vr4300", PROCESSOR_R4300, 3 },
684 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
685 { "r4600", PROCESSOR_R4600, 3 },
686 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
687 { "r4650", PROCESSOR_R4650, 3 },
689 /* MIPS IV */
690 { "r8000", PROCESSOR_R8000, 4 },
691 { "vr5000", PROCESSOR_R5000, 4 },
692 { "vr5400", PROCESSOR_R5400, 4 },
693 { "vr5500", PROCESSOR_R5500, 4 },
694 { "rm7000", PROCESSOR_R7000, 4 },
695 { "rm9000", PROCESSOR_R9000, 4 },
697 /* MIPS32 */
698 { "4kc", PROCESSOR_4KC, 32 },
699 { "4kp", PROCESSOR_4KC, 32 }, /* = 4kc */
701 /* MIPS32 Release 2 */
702 { "m4k", PROCESSOR_M4K, 33 },
704 /* MIPS64 */
705 { "5kc", PROCESSOR_5KC, 64 },
706 { "20kc", PROCESSOR_20KC, 64 },
707 { "sb1", PROCESSOR_SB1, 64 },
708 { "sr71000", PROCESSOR_SR71000, 64 },
710 /* End marker */
711 { 0, 0, 0 }
714 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
715 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
716 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
717 #endif
719 /* Initialize the GCC target structure. */
720 #undef TARGET_ASM_ALIGNED_HI_OP
721 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
722 #undef TARGET_ASM_ALIGNED_SI_OP
723 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
724 #undef TARGET_ASM_ALIGNED_DI_OP
725 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
727 #undef TARGET_ASM_FUNCTION_PROLOGUE
728 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
729 #undef TARGET_ASM_FUNCTION_EPILOGUE
730 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
731 #undef TARGET_ASM_SELECT_RTX_SECTION
732 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
733 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
734 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
736 #undef TARGET_SCHED_REORDER
737 #define TARGET_SCHED_REORDER mips_sched_reorder
738 #undef TARGET_SCHED_VARIABLE_ISSUE
739 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
740 #undef TARGET_SCHED_ADJUST_COST
741 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
742 #undef TARGET_SCHED_ISSUE_RATE
743 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
744 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
745 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
746 mips_multipass_dfa_lookahead
748 #undef TARGET_DEFAULT_TARGET_FLAGS
749 #define TARGET_DEFAULT_TARGET_FLAGS \
750 (TARGET_DEFAULT \
751 | TARGET_CPU_DEFAULT \
752 | TARGET_ENDIAN_DEFAULT \
753 | TARGET_FP_EXCEPTIONS_DEFAULT \
754 | MASK_CHECK_ZERO_DIV \
755 | MASK_FUSED_MADD)
756 #undef TARGET_HANDLE_OPTION
757 #define TARGET_HANDLE_OPTION mips_handle_option
759 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
760 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
762 #undef TARGET_VALID_POINTER_MODE
763 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
764 #undef TARGET_RTX_COSTS
765 #define TARGET_RTX_COSTS mips_rtx_costs
766 #undef TARGET_ADDRESS_COST
767 #define TARGET_ADDRESS_COST mips_address_cost
769 #undef TARGET_IN_SMALL_DATA_P
770 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
772 #undef TARGET_MACHINE_DEPENDENT_REORG
773 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
775 #undef TARGET_ASM_FILE_START
776 #undef TARGET_ASM_FILE_END
777 #define TARGET_ASM_FILE_START mips_file_start
778 #define TARGET_ASM_FILE_END mips_file_end
779 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
780 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
782 #undef TARGET_INIT_LIBFUNCS
783 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
785 #undef TARGET_BUILD_BUILTIN_VA_LIST
786 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
787 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
788 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
790 #undef TARGET_PROMOTE_FUNCTION_ARGS
791 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
792 #undef TARGET_PROMOTE_FUNCTION_RETURN
793 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
794 #undef TARGET_PROMOTE_PROTOTYPES
795 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
797 #undef TARGET_RETURN_IN_MEMORY
798 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
799 #undef TARGET_RETURN_IN_MSB
800 #define TARGET_RETURN_IN_MSB mips_return_in_msb
802 #undef TARGET_ASM_OUTPUT_MI_THUNK
803 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
804 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
805 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
807 #undef TARGET_SETUP_INCOMING_VARARGS
808 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
809 #undef TARGET_STRICT_ARGUMENT_NAMING
810 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
811 #undef TARGET_MUST_PASS_IN_STACK
812 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
813 #undef TARGET_PASS_BY_REFERENCE
814 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
815 #undef TARGET_CALLEE_COPIES
816 #define TARGET_CALLEE_COPIES mips_callee_copies
817 #undef TARGET_ARG_PARTIAL_BYTES
818 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
820 #undef TARGET_VECTOR_MODE_SUPPORTED_P
821 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
823 #undef TARGET_SCALAR_MODE_SUPPORTED_P
824 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
826 #undef TARGET_INIT_BUILTINS
827 #define TARGET_INIT_BUILTINS mips_init_builtins
828 #undef TARGET_EXPAND_BUILTIN
829 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
831 #undef TARGET_HAVE_TLS
832 #define TARGET_HAVE_TLS HAVE_AS_TLS
834 #undef TARGET_CANNOT_FORCE_CONST_MEM
835 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
837 struct gcc_target targetm = TARGET_INITIALIZER;
839 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
841 static enum mips_symbol_type
842 mips_classify_symbol (rtx x)
844 if (GET_CODE (x) == LABEL_REF)
846 if (TARGET_MIPS16)
847 return SYMBOL_CONSTANT_POOL;
848 if (TARGET_ABICALLS)
849 return SYMBOL_GOT_LOCAL;
850 return SYMBOL_GENERAL;
853 gcc_assert (GET_CODE (x) == SYMBOL_REF);
855 if (SYMBOL_REF_TLS_MODEL (x))
856 return SYMBOL_TLS;
858 if (CONSTANT_POOL_ADDRESS_P (x))
860 if (TARGET_MIPS16)
861 return SYMBOL_CONSTANT_POOL;
863 if (TARGET_ABICALLS)
864 return SYMBOL_GOT_LOCAL;
866 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
867 return SYMBOL_SMALL_DATA;
869 return SYMBOL_GENERAL;
872 if (SYMBOL_REF_SMALL_P (x))
873 return SYMBOL_SMALL_DATA;
875 if (TARGET_ABICALLS)
877 if (SYMBOL_REF_DECL (x) == 0)
878 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
880 /* There are three cases to consider:
882 - o32 PIC (either with or without explicit relocs)
883 - n32/n64 PIC without explicit relocs
884 - n32/n64 PIC with explicit relocs
886 In the first case, both local and global accesses will use an
887 R_MIPS_GOT16 relocation. We must correctly predict which of
888 the two semantics (local or global) the assembler and linker
889 will apply. The choice doesn't depend on the symbol's
890 visibility, so we deliberately ignore decl_visibility and
891 binds_local_p here.
893 In the second case, the assembler will not use R_MIPS_GOT16
894 relocations, but it chooses between local and global accesses
895 in the same way as for o32 PIC.
897 In the third case we have more freedom since both forms of
898 access will work for any kind of symbol. However, there seems
899 little point in doing things differently. */
900 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
901 return SYMBOL_GOT_GLOBAL;
903 return SYMBOL_GOT_LOCAL;
906 return SYMBOL_GENERAL;
910 /* Split X into a base and a constant offset, storing them in *BASE
911 and *OFFSET respectively. */
913 static void
914 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
916 *offset = 0;
918 if (GET_CODE (x) == CONST)
919 x = XEXP (x, 0);
921 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
923 *offset += INTVAL (XEXP (x, 1));
924 x = XEXP (x, 0);
926 *base = x;
930 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
931 to the same object as SYMBOL. */
933 static bool
934 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
936 if (GET_CODE (symbol) != SYMBOL_REF)
937 return false;
939 if (CONSTANT_POOL_ADDRESS_P (symbol)
940 && offset >= 0
941 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
942 return true;
944 if (SYMBOL_REF_DECL (symbol) != 0
945 && offset >= 0
946 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
947 return true;
949 return false;
953 /* Return true if X is a symbolic constant that can be calculated in
954 the same way as a bare symbol. If it is, store the type of the
955 symbol in *SYMBOL_TYPE. */
957 bool
958 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
960 HOST_WIDE_INT offset;
962 mips_split_const (x, &x, &offset);
963 if (UNSPEC_ADDRESS_P (x))
964 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
965 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
967 *symbol_type = mips_classify_symbol (x);
968 if (*symbol_type == SYMBOL_TLS)
969 return false;
971 else
972 return false;
974 if (offset == 0)
975 return true;
977 /* Check whether a nonzero offset is valid for the underlying
978 relocations. */
979 switch (*symbol_type)
981 case SYMBOL_GENERAL:
982 case SYMBOL_64_HIGH:
983 case SYMBOL_64_MID:
984 case SYMBOL_64_LOW:
985 /* If the target has 64-bit pointers and the object file only
986 supports 32-bit symbols, the values of those symbols will be
987 sign-extended. In this case we can't allow an arbitrary offset
988 in case the 32-bit value X + OFFSET has a different sign from X. */
989 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
990 return mips_offset_within_object_p (x, offset);
992 /* In other cases the relocations can handle any offset. */
993 return true;
995 case SYMBOL_CONSTANT_POOL:
996 /* Allow constant pool references to be converted to LABEL+CONSTANT.
997 In this case, we no longer have access to the underlying constant,
998 but the original symbol-based access was known to be valid. */
999 if (GET_CODE (x) == LABEL_REF)
1000 return true;
1002 /* Fall through. */
1004 case SYMBOL_SMALL_DATA:
1005 /* Make sure that the offset refers to something within the
1006 underlying object. This should guarantee that the final
1007 PC- or GP-relative offset is within the 16-bit limit. */
1008 return mips_offset_within_object_p (x, offset);
1010 case SYMBOL_GOT_LOCAL:
1011 case SYMBOL_GOTOFF_PAGE:
1012 /* The linker should provide enough local GOT entries for a
1013 16-bit offset. Larger offsets may lead to GOT overflow. */
1014 return SMALL_OPERAND (offset);
1016 case SYMBOL_GOT_GLOBAL:
1017 case SYMBOL_GOTOFF_GLOBAL:
1018 case SYMBOL_GOTOFF_CALL:
1019 case SYMBOL_GOTOFF_LOADGP:
1020 case SYMBOL_TLSGD:
1021 case SYMBOL_TLSLDM:
1022 case SYMBOL_DTPREL:
1023 case SYMBOL_TPREL:
1024 case SYMBOL_GOTTPREL:
1025 case SYMBOL_TLS:
1026 return false;
1028 gcc_unreachable ();
1032 /* Return true if X is a symbolic constant whose value is not split
1033 into separate relocations. */
1035 bool
1036 mips_atomic_symbolic_constant_p (rtx x)
1038 enum mips_symbol_type type;
1039 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
1043 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1046 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1048 if (regno >= FIRST_PSEUDO_REGISTER)
1050 if (!strict)
1051 return true;
1052 regno = reg_renumber[regno];
1055 /* These fake registers will be eliminated to either the stack or
1056 hard frame pointer, both of which are usually valid base registers.
1057 Reload deals with the cases where the eliminated form isn't valid. */
1058 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1059 return true;
1061 /* In mips16 mode, the stack pointer can only address word and doubleword
1062 values, nothing smaller. There are two problems here:
1064 (a) Instantiating virtual registers can introduce new uses of the
1065 stack pointer. If these virtual registers are valid addresses,
1066 the stack pointer should be too.
1068 (b) Most uses of the stack pointer are not made explicit until
1069 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1070 We don't know until that stage whether we'll be eliminating to the
1071 stack pointer (which needs the restriction) or the hard frame
1072 pointer (which doesn't).
1074 All in all, it seems more consistent to only enforce this restriction
1075 during and after reload. */
1076 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1077 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1079 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1083 /* Return true if X is a valid base register for the given mode.
1084 Allow only hard registers if STRICT. */
1086 static bool
1087 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1089 if (!strict && GET_CODE (x) == SUBREG)
1090 x = SUBREG_REG (x);
1092 return (REG_P (x)
1093 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1097 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1098 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1100 static bool
1101 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1102 enum machine_mode mode)
1104 switch (symbol_type)
1106 case SYMBOL_GENERAL:
1107 return !TARGET_MIPS16;
1109 case SYMBOL_SMALL_DATA:
1110 return true;
1112 case SYMBOL_CONSTANT_POOL:
1113 /* PC-relative addressing is only available for lw and ld. */
1114 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1116 case SYMBOL_GOT_LOCAL:
1117 return true;
1119 case SYMBOL_GOT_GLOBAL:
1120 /* The address will have to be loaded from the GOT first. */
1121 return false;
1123 case SYMBOL_TLSGD:
1124 case SYMBOL_TLSLDM:
1125 case SYMBOL_DTPREL:
1126 case SYMBOL_TPREL:
1127 case SYMBOL_GOTTPREL:
1128 case SYMBOL_TLS:
1129 return false;
1131 case SYMBOL_GOTOFF_PAGE:
1132 case SYMBOL_GOTOFF_GLOBAL:
1133 case SYMBOL_GOTOFF_CALL:
1134 case SYMBOL_GOTOFF_LOADGP:
1135 case SYMBOL_64_HIGH:
1136 case SYMBOL_64_MID:
1137 case SYMBOL_64_LOW:
1138 return true;
1140 gcc_unreachable ();
1144 /* Return true if X is a valid address for machine mode MODE. If it is,
1145 fill in INFO appropriately. STRICT is true if we should only accept
1146 hard base registers. */
1148 static bool
1149 mips_classify_address (struct mips_address_info *info, rtx x,
1150 enum machine_mode mode, int strict)
1152 switch (GET_CODE (x))
1154 case REG:
1155 case SUBREG:
1156 info->type = ADDRESS_REG;
1157 info->reg = x;
1158 info->offset = const0_rtx;
1159 return mips_valid_base_register_p (info->reg, mode, strict);
1161 case PLUS:
1162 info->type = ADDRESS_REG;
1163 info->reg = XEXP (x, 0);
1164 info->offset = XEXP (x, 1);
1165 return (mips_valid_base_register_p (info->reg, mode, strict)
1166 && const_arith_operand (info->offset, VOIDmode));
1168 case LO_SUM:
1169 info->type = ADDRESS_LO_SUM;
1170 info->reg = XEXP (x, 0);
1171 info->offset = XEXP (x, 1);
1172 return (mips_valid_base_register_p (info->reg, mode, strict)
1173 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1174 && mips_symbolic_address_p (info->symbol_type, mode)
1175 && mips_lo_relocs[info->symbol_type] != 0);
1177 case CONST_INT:
1178 /* Small-integer addresses don't occur very often, but they
1179 are legitimate if $0 is a valid base register. */
1180 info->type = ADDRESS_CONST_INT;
1181 return !TARGET_MIPS16 && SMALL_INT (x);
1183 case CONST:
1184 case LABEL_REF:
1185 case SYMBOL_REF:
1186 info->type = ADDRESS_SYMBOLIC;
1187 return (mips_symbolic_constant_p (x, &info->symbol_type)
1188 && mips_symbolic_address_p (info->symbol_type, mode)
1189 && !mips_split_p[info->symbol_type]);
1191 default:
1192 return false;
1196 /* Return true if X is a thread-local symbol. */
1198 static bool
1199 mips_tls_operand_p (rtx x)
1201 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1204 /* Return true if X can not be forced into a constant pool. */
1206 static int
1207 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1209 return mips_tls_operand_p (*x);
1212 /* Return true if X can not be forced into a constant pool. */
1214 static bool
1215 mips_cannot_force_const_mem (rtx x)
1217 if (! TARGET_HAVE_TLS)
1218 return false;
1220 return for_each_rtx (&x, &mips_tls_symbol_ref_1, 0);
1223 /* Return the number of instructions needed to load a symbol of the
1224 given type into a register. If valid in an address, the same number
1225 of instructions are needed for loads and stores. Treat extended
1226 mips16 instructions as two instructions. */
1228 static int
1229 mips_symbol_insns (enum mips_symbol_type type)
1231 switch (type)
1233 case SYMBOL_GENERAL:
1234 /* In mips16 code, general symbols must be fetched from the
1235 constant pool. */
1236 if (TARGET_MIPS16)
1237 return 0;
1239 /* When using 64-bit symbols, we need 5 preparatory instructions,
1240 such as:
1242 lui $at,%highest(symbol)
1243 daddiu $at,$at,%higher(symbol)
1244 dsll $at,$at,16
1245 daddiu $at,$at,%hi(symbol)
1246 dsll $at,$at,16
1248 The final address is then $at + %lo(symbol). With 32-bit
1249 symbols we just need a preparatory lui. */
1250 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1252 case SYMBOL_SMALL_DATA:
1253 return 1;
1255 case SYMBOL_CONSTANT_POOL:
1256 /* This case is for mips16 only. Assume we'll need an
1257 extended instruction. */
1258 return 2;
1260 case SYMBOL_GOT_LOCAL:
1261 case SYMBOL_GOT_GLOBAL:
1262 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1263 the local/global classification is accurate. See override_options
1264 for details.
1266 The worst cases are:
1268 (1) For local symbols when generating o32 or o64 code. The assembler
1269 will use:
1271 lw $at,%got(symbol)
1274 ...and the final address will be $at + %lo(symbol).
1276 (2) For global symbols when -mxgot. The assembler will use:
1278 lui $at,%got_hi(symbol)
1279 (d)addu $at,$at,$gp
1281 ...and the final address will be $at + %got_lo(symbol). */
1282 return 3;
1284 case SYMBOL_GOTOFF_PAGE:
1285 case SYMBOL_GOTOFF_GLOBAL:
1286 case SYMBOL_GOTOFF_CALL:
1287 case SYMBOL_GOTOFF_LOADGP:
1288 case SYMBOL_64_HIGH:
1289 case SYMBOL_64_MID:
1290 case SYMBOL_64_LOW:
1291 case SYMBOL_TLSGD:
1292 case SYMBOL_TLSLDM:
1293 case SYMBOL_DTPREL:
1294 case SYMBOL_GOTTPREL:
1295 case SYMBOL_TPREL:
1296 /* Check whether the offset is a 16- or 32-bit value. */
1297 return mips_split_p[type] ? 2 : 1;
1299 case SYMBOL_TLS:
1300 /* We don't treat a bare TLS symbol as a constant. */
1301 return 0;
1303 gcc_unreachable ();
 1306 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1308 bool
1309 mips_stack_address_p (rtx x, enum machine_mode mode)
1311 struct mips_address_info addr;
1313 return (mips_classify_address (&addr, x, mode, false)
1314 && addr.type == ADDRESS_REG
1315 && addr.reg == stack_pointer_rtx);
1318 /* Return true if a value at OFFSET bytes from BASE can be accessed
1319 using an unextended mips16 instruction. MODE is the mode of the
1320 value.
1322 Usually the offset in an unextended instruction is a 5-bit field.
1323 The offset is unsigned and shifted left once for HIs, twice
1324 for SIs, and so on. An exception is SImode accesses off the
1325 stack pointer, which have an 8-bit immediate field. */
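/* For example, an unextended SImode reference needs an offset in the range
   [0, 124] that is a multiple of 4, or [0, 1020] when the base register is
   the stack pointer.  */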
1327 static bool
1328 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1330 if (TARGET_MIPS16
1331 && GET_CODE (offset) == CONST_INT
1332 && INTVAL (offset) >= 0
1333 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1335 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1336 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1337 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1339 return false;
1343 /* Return the number of instructions needed to load or store a value
1344 of mode MODE at X. Return 0 if X isn't valid for MODE.
1346 For mips16 code, count extended instructions as two instructions. */
1349 mips_address_insns (rtx x, enum machine_mode mode)
1351 struct mips_address_info addr;
1352 int factor;
1354 if (mode == BLKmode)
1355 /* BLKmode is used for single unaligned loads and stores. */
1356 factor = 1;
1357 else
1358 /* Each word of a multi-word value will be accessed individually. */
1359 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1361 if (mips_classify_address (&addr, x, mode, false))
1362 switch (addr.type)
1364 case ADDRESS_REG:
1365 if (TARGET_MIPS16
1366 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1367 return factor * 2;
1368 return factor;
1370 case ADDRESS_LO_SUM:
1371 return (TARGET_MIPS16 ? factor * 2 : factor);
1373 case ADDRESS_CONST_INT:
1374 return factor;
1376 case ADDRESS_SYMBOLIC:
1377 return factor * mips_symbol_insns (addr.symbol_type);
1379 return 0;
1383 /* Likewise for constant X. */
1386 mips_const_insns (rtx x)
1388 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1389 enum mips_symbol_type symbol_type;
1390 HOST_WIDE_INT offset;
1392 switch (GET_CODE (x))
1394 case HIGH:
1395 if (TARGET_MIPS16
1396 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1397 || !mips_split_p[symbol_type])
1398 return 0;
1400 return 1;
1402 case CONST_INT:
1403 if (TARGET_MIPS16)
1404 /* Unsigned 8-bit constants can be loaded using an unextended
1405 LI instruction. Unsigned 16-bit constants can be loaded
1406 using an extended LI. Negative constants must be loaded
1407 using LI and then negated. */
1408 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1409 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1410 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1411 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1412 : 0);
1414 return mips_build_integer (codes, INTVAL (x));
1416 case CONST_DOUBLE:
1417 case CONST_VECTOR:
1418 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1420 case CONST:
1421 if (CONST_GP_P (x))
1422 return 1;
1424 /* See if we can refer to X directly. */
1425 if (mips_symbolic_constant_p (x, &symbol_type))
1426 return mips_symbol_insns (symbol_type);
1428 /* Otherwise try splitting the constant into a base and offset.
1429 16-bit offsets can be added using an extra addiu. Larger offsets
1430 must be calculated separately and then added to the base. */
1431 mips_split_const (x, &x, &offset);
1432 if (offset != 0)
1434 int n = mips_const_insns (x);
1435 if (n != 0)
1437 if (SMALL_OPERAND (offset))
1438 return n + 1;
1439 else
1440 return n + 1 + mips_build_integer (codes, offset);
1443 return 0;
1445 case SYMBOL_REF:
1446 case LABEL_REF:
1447 return mips_symbol_insns (mips_classify_symbol (x));
1449 default:
1450 return 0;
1455 /* Return the number of instructions needed for memory reference X.
1456 Count extended mips16 instructions as two instructions. */
1459 mips_fetch_insns (rtx x)
1461 gcc_assert (MEM_P (x));
1462 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1466 /* Return the number of instructions needed for an integer division. */
1469 mips_idiv_insns (void)
1471 int count;
1473 count = 1;
1474 if (TARGET_CHECK_ZERO_DIV)
1476 if (GENERATE_DIVIDE_TRAPS)
1477 count++;
1478 else
1479 count += 2;
1482 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1483 count++;
1484 return count;
1487 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1488 returns a nonzero value if X is a legitimate address for a memory
1489 operand of the indicated MODE. STRICT is nonzero if this function
1490 is called during reload. */
1492 bool
1493 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1495 struct mips_address_info addr;
1497 return mips_classify_address (&addr, x, mode, strict);
 1501 /* Copy VALUE to a register and return that register. If new pseudos
1502 are allowed, copy it into a new register, otherwise use DEST. */
1504 static rtx
1505 mips_force_temporary (rtx dest, rtx value)
1507 if (!no_new_pseudos)
1508 return force_reg (Pmode, value);
1509 else
1511 emit_move_insn (copy_rtx (dest), value);
1512 return dest;
1517 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1518 and is used to load the high part into a register. */
1520 static rtx
1521 mips_split_symbol (rtx temp, rtx addr)
1523 rtx high;
1525 if (TARGET_MIPS16)
1526 high = mips16_gp_pseudo_reg ();
1527 else
1528 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1529 return gen_rtx_LO_SUM (Pmode, high, addr);
1533 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1534 type SYMBOL_TYPE. */
1537 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1539 rtx base;
1540 HOST_WIDE_INT offset;
1542 mips_split_const (address, &base, &offset);
1543 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1544 UNSPEC_ADDRESS_FIRST + symbol_type);
1545 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1549 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1550 high part to BASE and return the result. Just return BASE otherwise.
1551 TEMP is available as a temporary register if needed.
1553 The returned expression can be used as the first operand to a LO_SUM. */
1555 static rtx
1556 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1557 enum mips_symbol_type symbol_type)
1559 if (mips_split_p[symbol_type])
1561 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1562 addr = mips_force_temporary (temp, addr);
1563 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1565 return base;
1569 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1570 mips_force_temporary; it is only needed when OFFSET is not a
1571 SMALL_OPERAND. */
1573 static rtx
1574 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1576 if (!SMALL_OPERAND (offset))
1578 rtx high;
1579 if (TARGET_MIPS16)
1581 /* Load the full offset into a register so that we can use
1582 an unextended instruction for the address itself. */
1583 high = GEN_INT (offset);
1584 offset = 0;
1586 else
1588 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1589 high = GEN_INT (CONST_HIGH_PART (offset));
1590 offset = CONST_LOW_PART (offset);
1592 high = mips_force_temporary (temp, high);
1593 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1595 return plus_constant (reg, offset);
1598 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1599 referencing, and TYPE is the symbol type to use (either global
1600 dynamic or local dynamic). V0 is an RTX for the return value
1601 location. The entire insn sequence is returned. */
1603 static GTY(()) rtx mips_tls_symbol;
1605 static rtx
1606 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
1608 rtx insn, loc, tga, a0;
1610 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
1612 if (!mips_tls_symbol)
1613 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
1615 loc = mips_unspec_address (sym, type);
1617 start_sequence ();
1619 emit_insn (gen_rtx_SET (Pmode, a0,
1620 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
1621 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
1622 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
1623 CONST_OR_PURE_CALL_P (insn) = 1;
1624 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
1625 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
1626 insn = get_insns ();
1628 end_sequence ();
1630 return insn;
1633 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
1634 return value will be a valid address and move_operand (either a REG
1635 or a LO_SUM). */
1637 static rtx
1638 mips_legitimize_tls_address (rtx loc)
1640 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
1641 enum tls_model model;
1643 v0 = gen_rtx_REG (Pmode, GP_RETURN);
1644 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
1646 model = SYMBOL_REF_TLS_MODEL (loc);
1648 switch (model)
1650 case TLS_MODEL_GLOBAL_DYNAMIC:
1651 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
1652 dest = gen_reg_rtx (Pmode);
1653 emit_libcall_block (insn, dest, v0, loc);
1654 break;
1656 case TLS_MODEL_LOCAL_DYNAMIC:
1657 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
1658 tmp1 = gen_reg_rtx (Pmode);
1660 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
1661 share the LDM result with other LD model accesses. */
1662 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1663 UNSPEC_TLS_LDM);
1664 emit_libcall_block (insn, tmp1, v0, eqv);
1666 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
1667 dest = gen_rtx_LO_SUM (Pmode, tmp2,
1668 mips_unspec_address (loc, SYMBOL_DTPREL));
1669 break;
1671 case TLS_MODEL_INITIAL_EXEC:
1672 tmp1 = gen_reg_rtx (Pmode);
1673 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
1674 if (Pmode == DImode)
1676 emit_insn (gen_tls_get_tp_di (v1));
1677 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
1679 else
1681 emit_insn (gen_tls_get_tp_si (v1));
1682 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
1684 dest = gen_reg_rtx (Pmode);
1685 emit_insn (gen_add3_insn (dest, tmp1, v1));
1686 break;
1688 case TLS_MODEL_LOCAL_EXEC:
1690 if (Pmode == DImode)
1691 emit_insn (gen_tls_get_tp_di (v1));
1692 else
1693 emit_insn (gen_tls_get_tp_si (v1));
1695 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
1696 dest = gen_rtx_LO_SUM (Pmode, tmp1,
1697 mips_unspec_address (loc, SYMBOL_TPREL));
1698 break;
1700 default:
1701 abort ();
1704 return dest;
1707 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1708 be legitimized in a way that the generic machinery might not expect,
1709 put the new address in *XLOC and return true. MODE is the mode of
1710 the memory being accessed. */
1712 bool
1713 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1715 enum mips_symbol_type symbol_type;
1717 if (mips_tls_operand_p (*xloc))
1719 *xloc = mips_legitimize_tls_address (*xloc);
1720 return true;
1723 /* See if the address can split into a high part and a LO_SUM. */
1724 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1725 && mips_symbolic_address_p (symbol_type, mode)
1726 && mips_split_p[symbol_type])
1728 *xloc = mips_split_symbol (0, *xloc);
1729 return true;
1732 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1734 /* Handle REG + CONSTANT using mips_add_offset. */
1735 rtx reg;
1737 reg = XEXP (*xloc, 0);
1738 if (!mips_valid_base_register_p (reg, mode, 0))
1739 reg = copy_to_mode_reg (Pmode, reg);
1740 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1741 return true;
1744 return false;
1748 /* Subroutine of mips_build_integer (with the same interface).
1749 Assume that the final action in the sequence should be a left shift. */
1751 static unsigned int
1752 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1754 unsigned int i, shift;
1756 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1757 since signed numbers are easier to load than unsigned ones. */
1758 shift = 0;
1759 while ((value & 1) == 0)
1760 value /= 2, shift++;
1762 i = mips_build_integer (codes, value);
1763 codes[i].code = ASHIFT;
1764 codes[i].value = shift;
1765 return i + 1;
1769 /* As for mips_build_shift, but assume that the final action will be
1770 an IOR or PLUS operation. */
1772 static unsigned int
1773 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1775 unsigned HOST_WIDE_INT high;
1776 unsigned int i;
1778 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1779 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1781 /* The constant is too complex to load with a simple lui/ori pair
1782 so our goal is to clear as many of the trailing bits as possible.
1783 In this case, we know bit 16 is set and that the low 16 bits
1784 form a negative number. If we subtract that number from VALUE,
1785 we will clear at least the lowest 17 bits, maybe more. */
1786 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1787 codes[i].code = PLUS;
1788 codes[i].value = CONST_LOW_PART (value);
1790 else
1792 i = mips_build_integer (codes, high);
1793 codes[i].code = IOR;
1794 codes[i].value = value & 0xffff;
1796 return i + 1;
1800 /* Fill CODES with a sequence of rtl operations to load VALUE.
1801 Return the number of operations needed. */
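/* A rough worked example: loading the constant 0x1234abcd yields two
   operations, which mips_move_integer below turns into something like

	lui	$7,0x1234		# codes[0]: UNKNOWN, 0x12340000
	ori	$7,$7,0xabcd		# codes[1]: IOR, 0xabcd

   (the register choice is illustrative).  A value whose low 16 bits
   are clear would instead end with an ASHIFT from mips_build_shift.  */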
1803 static unsigned int
1804 mips_build_integer (struct mips_integer_op *codes,
1805 unsigned HOST_WIDE_INT value)
1807 if (SMALL_OPERAND (value)
1808 || SMALL_OPERAND_UNSIGNED (value)
1809 || LUI_OPERAND (value))
1811 /* The value can be loaded with a single instruction. */
1812 codes[0].code = UNKNOWN;
1813 codes[0].value = value;
1814 return 1;
1816 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1818 /* Either the constant is a simple LUI/ORI combination or its
1819 lowest bit is set. We don't want to shift in this case. */
1820 return mips_build_lower (codes, value);
1822 else if ((value & 0xffff) == 0)
1824 /* The constant will need at least three actions. The lowest
1825 16 bits are clear, so the final action will be a shift. */
1826 return mips_build_shift (codes, value);
1828 else
1830 /* The final action could be a shift, add or inclusive OR.
1831 Rather than use a complex condition to select the best
1832 approach, try both mips_build_shift and mips_build_lower
1833 and pick the one that gives the shortest sequence.
1834 Note that this case is only used once per constant. */
1835 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1836 unsigned int cost, alt_cost;
1838 cost = mips_build_shift (codes, value);
1839 alt_cost = mips_build_lower (alt_codes, value);
1840 if (alt_cost < cost)
1842 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1843 cost = alt_cost;
1845 return cost;
1850 /* Move VALUE into register DEST. */
1852 static void
1853 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1855 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1856 enum machine_mode mode;
1857 unsigned int i, cost;
1858 rtx x;
1860 mode = GET_MODE (dest);
1861 cost = mips_build_integer (codes, value);
1863 /* Apply each binary operation to X. Invariant: X is a legitimate
1864 source operand for a SET pattern. */
1865 x = GEN_INT (codes[0].value);
1866 for (i = 1; i < cost; i++)
1868 if (no_new_pseudos)
1869 emit_move_insn (dest, x), x = dest;
1870 else
1871 x = force_reg (mode, x);
1872 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1875 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1879 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1880 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1881 move_operand. */
1883 static void
1884 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1886 rtx base;
1887 HOST_WIDE_INT offset;
1888 enum mips_symbol_type symbol_type;
1890 /* Split moves of big integers into smaller pieces. In mips16 code,
1891 it's better to force the constant into memory instead. */
1892 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1894 mips_move_integer (dest, INTVAL (src));
1895 return;
1898 if (mips_tls_operand_p (src))
1900 emit_move_insn (dest, mips_legitimize_tls_address (src));
1901 return;
1904 /* See if the symbol can be split. For mips16, this is often worse than
1905 forcing it into the constant pool, since it needs the single-register form
1906 of addiu or daddiu. */
1907 if (!TARGET_MIPS16
1908 && mips_symbolic_constant_p (src, &symbol_type)
1909 && mips_split_p[symbol_type])
1911 emit_move_insn (dest, mips_split_symbol (dest, src));
1912 return;
1915 /* If we have (const (plus symbol offset)), load the symbol first
1916 and then add in the offset. This is usually better than forcing
1917 the constant into memory, at least in non-mips16 code. */
1918 mips_split_const (src, &base, &offset);
1919 if (!TARGET_MIPS16
1920 && offset != 0
1921 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1923 base = mips_force_temporary (dest, base);
1924 emit_move_insn (dest, mips_add_offset (0, base, offset));
1925 return;
1928 src = force_const_mem (mode, src);
1930 /* When using explicit relocs, constant pool references are sometimes
1931 not legitimate addresses. */
1932 if (!memory_operand (src, VOIDmode))
1933 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1934 emit_move_insn (dest, src);
1938 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1939 sequence that is valid. */
1941 bool
1942 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
1944 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1946 emit_move_insn (dest, force_reg (mode, src));
1947 return true;
1950 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1951 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1952 && REG_P (src) && MD_REG_P (REGNO (src))
1953 && REG_P (dest) && GP_REG_P (REGNO (dest)))
1955 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1956 if (GET_MODE_SIZE (mode) <= 4)
1957 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1958 gen_rtx_REG (SImode, REGNO (src)),
1959 gen_rtx_REG (SImode, other_regno)));
1960 else
1961 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1962 gen_rtx_REG (DImode, REGNO (src)),
1963 gen_rtx_REG (DImode, other_regno)));
1964 return true;
1967 /* We need to deal with constants that would be legitimate
1968 immediate_operands but not legitimate move_operands. */
1969 if (CONSTANT_P (src) && !move_operand (src, mode))
1971 mips_legitimize_const_move (mode, dest, src);
1972 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1973 return true;
1975 return false;
1978 /* We need a lot of little routines to check constant values on the
1979 mips16. These are used to figure out how long the instruction will
1980 be. It would be much better to do this using constraints, but
1981 there aren't nearly enough letters available. */
1983 static int
1984 m16_check_op (rtx op, int low, int high, int mask)
1986 return (GET_CODE (op) == CONST_INT
1987 && INTVAL (op) >= low
1988 && INTVAL (op) <= high
1989 && (INTVAL (op) & mask) == 0);
1993 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1995 return m16_check_op (op, 0x1, 0x8, 0);
1999 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2001 return m16_check_op (op, - 0x8, 0x7, 0);
2005 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2007 return m16_check_op (op, - 0x7, 0x8, 0);
2011 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2013 return m16_check_op (op, - 0x10, 0xf, 0);
2017 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2019 return m16_check_op (op, - 0xf, 0x10, 0);
2023 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2025 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2029 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2031 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2035 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2037 return m16_check_op (op, - 0x80, 0x7f, 0);
2041 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2043 return m16_check_op (op, - 0x7f, 0x80, 0);
2047 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2049 return m16_check_op (op, 0x0, 0xff, 0);
2053 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2055 return m16_check_op (op, - 0xff, 0x0, 0);
2059 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2061 return m16_check_op (op, - 0x1, 0xfe, 0);
2065 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2067 return m16_check_op (op, 0x0, 0xff << 2, 3);
2071 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2073 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2077 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2079 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2083 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2085 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2088 static bool
2089 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2091 enum machine_mode mode = GET_MODE (x);
2093 switch (code)
2095 case CONST_INT:
2096 if (!TARGET_MIPS16)
2098 /* Always return 0, since non-mips16 code has no different-sized
2099 instructions and hence no per-constant cost differences
2100 (a point due to Richard Kenner). */
2101 *total = 0;
2102 return true;
2105 /* A number between 1 and 8 inclusive is efficient for a shift.
2106 Otherwise, we will need an extended instruction. */
2107 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2108 || (outer_code) == LSHIFTRT)
2110 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2111 *total = 0;
2112 else
2113 *total = COSTS_N_INSNS (1);
2114 return true;
2117 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2118 if ((outer_code) == XOR
2119 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2121 *total = 0;
2122 return true;
2125 /* We may be able to use slt or sltu for a comparison with a
2126 signed 16 bit value. (The boundary conditions aren't quite
2127 right, but this is just a heuristic anyhow.) */
2128 if (((outer_code) == LT || (outer_code) == LE
2129 || (outer_code) == GE || (outer_code) == GT
2130 || (outer_code) == LTU || (outer_code) == LEU
2131 || (outer_code) == GEU || (outer_code) == GTU)
2132 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2134 *total = 0;
2135 return true;
2138 /* Equality comparisons with 0 are cheap. */
2139 if (((outer_code) == EQ || (outer_code) == NE)
2140 && INTVAL (x) == 0)
2142 *total = 0;
2143 return true;
2146 /* Constants in the range 0...255 can be loaded with an unextended
2147 instruction. They are therefore as cheap as a register move.
2149 Given the choice between "li R1,0...255" and "move R1,R2"
2150 (where R2 is a known constant), it is usually better to use "li",
2151 since we do not want to unnecessarily extend the lifetime of R2. */
2152 if (outer_code == SET
2153 && INTVAL (x) >= 0
2154 && INTVAL (x) < 256)
2156 *total = 0;
2157 return true;
2160 /* Otherwise fall through to the handling below. */
2162 case CONST:
2163 case SYMBOL_REF:
2164 case LABEL_REF:
2165 case CONST_DOUBLE:
2166 if (LEGITIMATE_CONSTANT_P (x))
2168 *total = COSTS_N_INSNS (1);
2169 return true;
2171 else
2173 /* The value will need to be fetched from the constant pool. */
2174 *total = CONSTANT_POOL_COST;
2175 return true;
2178 case MEM:
2180 /* If the address is legitimate, return the number of
2181 instructions it needs, otherwise use the default handling. */
2182 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2183 if (n > 0)
2185 *total = COSTS_N_INSNS (1 + n);
2186 return true;
2188 return false;
2191 case FFS:
2192 *total = COSTS_N_INSNS (6);
2193 return true;
2195 case NOT:
2196 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2197 return true;
2199 case AND:
2200 case IOR:
2201 case XOR:
2202 if (mode == DImode && !TARGET_64BIT)
2204 *total = COSTS_N_INSNS (2);
2205 return true;
2207 return false;
2209 case ASHIFT:
2210 case ASHIFTRT:
2211 case LSHIFTRT:
2212 if (mode == DImode && !TARGET_64BIT)
2214 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2215 ? 4 : 12);
2216 return true;
2218 return false;
2220 case ABS:
2221 if (mode == SFmode || mode == DFmode)
2222 *total = COSTS_N_INSNS (1);
2223 else
2224 *total = COSTS_N_INSNS (4);
2225 return true;
2227 case LO_SUM:
2228 *total = COSTS_N_INSNS (1);
2229 return true;
2231 case PLUS:
2232 case MINUS:
2233 if (mode == SFmode || mode == DFmode)
2235 if (TUNE_MIPS3000 || TUNE_MIPS3900)
2236 *total = COSTS_N_INSNS (2);
2237 else if (TUNE_MIPS6000)
2238 *total = COSTS_N_INSNS (3);
2239 else if (TUNE_SB1)
2240 *total = COSTS_N_INSNS (4);
2241 else
2242 *total = COSTS_N_INSNS (6);
2243 return true;
2245 if (mode == DImode && !TARGET_64BIT)
2247 *total = COSTS_N_INSNS (4);
2248 return true;
2250 return false;
2252 case NEG:
2253 if (mode == DImode && !TARGET_64BIT)
2255 *total = COSTS_N_INSNS (4);
2256 return true;
2258 return false;
2260 case MULT:
2261 if (mode == SFmode)
2263 if (TUNE_MIPS3000
2264 || TUNE_MIPS3900
2265 || TUNE_MIPS5000
2266 || TUNE_SB1)
2267 *total = COSTS_N_INSNS (4);
2268 else if (TUNE_MIPS6000
2269 || TUNE_MIPS5400
2270 || TUNE_MIPS5500)
2271 *total = COSTS_N_INSNS (5);
2272 else
2273 *total = COSTS_N_INSNS (7);
2274 return true;
2277 if (mode == DFmode)
2279 if (TUNE_SB1)
2280 *total = COSTS_N_INSNS (4);
2281 else if (TUNE_MIPS3000
2282 || TUNE_MIPS3900
2283 || TUNE_MIPS5000)
2284 *total = COSTS_N_INSNS (5);
2285 else if (TUNE_MIPS6000
2286 || TUNE_MIPS5400
2287 || TUNE_MIPS5500)
2288 *total = COSTS_N_INSNS (6);
2289 else
2290 *total = COSTS_N_INSNS (8);
2291 return true;
2294 if (TUNE_MIPS3000)
2295 *total = COSTS_N_INSNS (12);
2296 else if (TUNE_MIPS3900)
2297 *total = COSTS_N_INSNS (2);
2298 else if (TUNE_MIPS4130)
2299 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
2300 else if (TUNE_MIPS5400 || TUNE_SB1)
2301 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
2302 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
2303 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
2304 else if (TUNE_MIPS9000)
2305 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
2306 else if (TUNE_MIPS6000)
2307 *total = COSTS_N_INSNS (17);
2308 else if (TUNE_MIPS5000)
2309 *total = COSTS_N_INSNS (5);
2310 else
2311 *total = COSTS_N_INSNS (10);
2312 return true;
2314 case DIV:
2315 case MOD:
2316 if (mode == SFmode)
2318 if (TUNE_MIPS3000
2319 || TUNE_MIPS3900)
2320 *total = COSTS_N_INSNS (12);
2321 else if (TUNE_MIPS6000)
2322 *total = COSTS_N_INSNS (15);
2323 else if (TUNE_SB1)
2324 *total = COSTS_N_INSNS (24);
2325 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2326 *total = COSTS_N_INSNS (30);
2327 else
2328 *total = COSTS_N_INSNS (23);
2329 return true;
2332 if (mode == DFmode)
2334 if (TUNE_MIPS3000
2335 || TUNE_MIPS3900)
2336 *total = COSTS_N_INSNS (19);
2337 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2338 *total = COSTS_N_INSNS (59);
2339 else if (TUNE_MIPS6000)
2340 *total = COSTS_N_INSNS (16);
2341 else if (TUNE_SB1)
2342 *total = COSTS_N_INSNS (32);
2343 else
2344 *total = COSTS_N_INSNS (36);
2345 return true;
2347 /* Fall through. */
2349 case UDIV:
2350 case UMOD:
2351 if (TUNE_MIPS3000
2352 || TUNE_MIPS3900)
2353 *total = COSTS_N_INSNS (35);
2354 else if (TUNE_MIPS6000)
2355 *total = COSTS_N_INSNS (38);
2356 else if (TUNE_MIPS5000)
2357 *total = COSTS_N_INSNS (36);
2358 else if (TUNE_SB1)
2359 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2360 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2361 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2362 else
2363 *total = COSTS_N_INSNS (69);
2364 return true;
2366 case SIGN_EXTEND:
2367 /* A sign extend from SImode to DImode in 64 bit mode is often
2368 zero instructions, because the result can often be used
2369 directly by another instruction; we'll call it one. */
2370 if (TARGET_64BIT && mode == DImode
2371 && GET_MODE (XEXP (x, 0)) == SImode)
2372 *total = COSTS_N_INSNS (1);
2373 else
2374 *total = COSTS_N_INSNS (2);
2375 return true;
2377 case ZERO_EXTEND:
2378 if (TARGET_64BIT && mode == DImode
2379 && GET_MODE (XEXP (x, 0)) == SImode)
2380 *total = COSTS_N_INSNS (2);
2381 else
2382 *total = COSTS_N_INSNS (1);
2383 return true;
2385 default:
2386 return false;
2390 /* Provide the costs of an addressing mode that contains ADDR.
2391 If ADDR is not a valid address, its cost is irrelevant. */
2393 static int
2394 mips_address_cost (rtx addr)
2396 return mips_address_insns (addr, SImode);
2399 /* Return one word of double-word value OP, taking into account the fixed
2400 endianness of certain registers. HIGH_P is true to select the high part,
2401 false to select the low part. */
2404 mips_subword (rtx op, int high_p)
2406 unsigned int byte;
2407 enum machine_mode mode;
2409 mode = GET_MODE (op);
2410 if (mode == VOIDmode)
2411 mode = DImode;
2413 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2414 byte = UNITS_PER_WORD;
2415 else
2416 byte = 0;
2418 if (REG_P (op))
2420 if (FP_REG_P (REGNO (op)))
2421 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2422 if (REGNO (op) == HI_REGNUM)
2423 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
2426 if (MEM_P (op))
2427 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2429 return simplify_gen_subreg (word_mode, op, mode, byte);
2433 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2435 bool
2436 mips_split_64bit_move_p (rtx dest, rtx src)
2438 if (TARGET_64BIT)
2439 return false;
2441 /* FP->FP moves can be done in a single instruction. */
2442 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2443 return false;
2445 /* Check for floating-point loads and stores. They can be done using
2446 ldc1 and sdc1 on MIPS II and above. */
2447 if (mips_isa > 1)
2449 if (FP_REG_RTX_P (dest) && MEM_P (src))
2450 return false;
2451 if (FP_REG_RTX_P (src) && MEM_P (dest))
2452 return false;
2454 return true;
2458 /* Split a 64-bit move from SRC to DEST assuming that
2459 mips_split_64bit_move_p holds.
2461 Moves into and out of FPRs cause some difficulty here. Such moves
2462 will always be DFmode, since paired FPRs are not allowed to store
2463 DImode values. The most natural representation would be two separate
2464 32-bit moves, such as:
2466 (set (reg:SI $f0) (mem:SI ...))
2467 (set (reg:SI $f1) (mem:SI ...))
2469 However, the second insn is invalid because odd-numbered FPRs are
2470 not allowed to store independent values. Use the patterns load_df_low,
2471 load_df_high and store_df_high instead. */
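/* For example (a sketch for a little-endian, 32-bit target): moving a
   DFmode value from GPRs $4/$5 into $f0 becomes

	mtc1	$4,$f0			# load_df_low
	mtc1	$5,$f1			# load_df_high

   A memory source on MIPS I likewise uses an lwc1 pair; on MIPS II
   and above a single ldc1 suffices, in which case no split is needed
   (see mips_split_64bit_move_p).  */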
2473 void
2474 mips_split_64bit_move (rtx dest, rtx src)
2476 if (FP_REG_RTX_P (dest))
2478 /* Loading an FPR from memory or from GPRs. */
2479 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2480 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2481 copy_rtx (dest)));
2483 else if (FP_REG_RTX_P (src))
2485 /* Storing an FPR into memory or GPRs. */
2486 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2487 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2489 else
2491 /* The operation can be split into two normal moves. Decide in
2492 which order to do them. */
2493 rtx low_dest;
2495 low_dest = mips_subword (dest, 0);
2496 if (REG_P (low_dest)
2497 && reg_overlap_mentioned_p (low_dest, src))
2499 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2500 emit_move_insn (low_dest, mips_subword (src, 0));
2502 else
2504 emit_move_insn (low_dest, mips_subword (src, 0));
2505 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2510 /* Return the appropriate instructions to move SRC into DEST. Assume
2511 that SRC is operand 1 and DEST is operand 0. */
2513 const char *
2514 mips_output_move (rtx dest, rtx src)
2516 enum rtx_code dest_code, src_code;
2517 bool dbl_p;
2519 dest_code = GET_CODE (dest);
2520 src_code = GET_CODE (src);
2521 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2523 if (dbl_p && mips_split_64bit_move_p (dest, src))
2524 return "#";
2526 if ((src_code == REG && GP_REG_P (REGNO (src)))
2527 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2529 if (dest_code == REG)
2531 if (GP_REG_P (REGNO (dest)))
2532 return "move\t%0,%z1";
2534 if (MD_REG_P (REGNO (dest)))
2535 return "mt%0\t%z1";
2537 if (FP_REG_P (REGNO (dest)))
2538 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2540 if (ALL_COP_REG_P (REGNO (dest)))
2542 static char retval[] = "dmtc_\t%z1,%0";
2544 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2545 return (dbl_p ? retval : retval + 1);
2548 if (dest_code == MEM)
2549 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2551 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2553 if (src_code == REG)
2555 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2556 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2558 if (FP_REG_P (REGNO (src)))
2559 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2561 if (ALL_COP_REG_P (REGNO (src)))
2563 static char retval[] = "dmfc_\t%0,%1";
2565 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2566 return (dbl_p ? retval : retval + 1);
2570 if (src_code == MEM)
2571 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2573 if (src_code == CONST_INT)
2575 /* Don't use the X format, because that will give out of
2576 range numbers for 64 bit hosts and 32 bit targets. */
2577 if (!TARGET_MIPS16)
2578 return "li\t%0,%1\t\t\t# %X1";
2580 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2581 return "li\t%0,%1";
2583 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2584 return "#";
2587 if (src_code == HIGH)
2588 return "lui\t%0,%h1";
2590 if (CONST_GP_P (src))
2591 return "move\t%0,%1";
2593 if (symbolic_operand (src, VOIDmode))
2594 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2596 if (src_code == REG && FP_REG_P (REGNO (src)))
2598 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2600 if (GET_MODE (dest) == V2SFmode)
2601 return "mov.ps\t%0,%1";
2602 else
2603 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2606 if (dest_code == MEM)
2607 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2609 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2611 if (src_code == MEM)
2612 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2614 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2616 static char retval[] = "l_c_\t%0,%1";
2618 retval[1] = (dbl_p ? 'd' : 'w');
2619 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2620 return retval;
2622 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2624 static char retval[] = "s_c_\t%1,%0";
2626 retval[1] = (dbl_p ? 'd' : 'w');
2627 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2628 return retval;
2630 gcc_unreachable ();
2633 /* Restore $gp from its save slot. Valid only when using o32 or
2634 o64 abicalls. */
2636 void
2637 mips_restore_gp (void)
2639 rtx address, slot;
2641 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2643 address = mips_add_offset (pic_offset_table_rtx,
2644 frame_pointer_needed
2645 ? hard_frame_pointer_rtx
2646 : stack_pointer_rtx,
2647 current_function_outgoing_args_size);
2648 slot = gen_rtx_MEM (Pmode, address);
2650 emit_move_insn (pic_offset_table_rtx, slot);
2651 if (!TARGET_EXPLICIT_RELOCS)
2652 emit_insn (gen_blockage ());
2655 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2657 static void
2658 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2660 emit_insn (gen_rtx_SET (VOIDmode, target,
2661 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2664 /* Return true if CMP1 is a suitable second operand for relational
2665 operator CODE. See also the *sCC patterns in mips.md. */
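/* For example, "x <= 100" can be implemented as "x < 101", i.e. as
   "slti $d,$x,101", which is why LE accepts sle_operand (a constant
   whose value plus one is a signed 16-bit immediate).  This is only a
   sketch; the authoritative forms are the mips.md patterns.  */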
2667 static bool
2668 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2670 switch (code)
2672 case GT:
2673 case GTU:
2674 return reg_or_0_operand (cmp1, VOIDmode);
2676 case GE:
2677 case GEU:
2678 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2680 case LT:
2681 case LTU:
2682 return arith_operand (cmp1, VOIDmode);
2684 case LE:
2685 return sle_operand (cmp1, VOIDmode);
2687 case LEU:
2688 return sleu_operand (cmp1, VOIDmode);
2690 default:
2691 gcc_unreachable ();
2695 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2696 result in TARGET. CMP0 and TARGET are register_operands that have
2697 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2698 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
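/* A sketch of the inversion trick: there is no direct way to compute
   "target = (x >= 5)", but the reversed test "x < 5" is a single
   "slti $d,$x,5".  We therefore emit the reversed comparison and
   either XOR the result with 1 or, when INVERT_PTR is supplied,
   simply record the inversion by flipping *INVERT_PTR.  */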
2700 static void
2701 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2702 rtx target, rtx cmp0, rtx cmp1)
2704 /* First see if there is a MIPS instruction that can do this operation
2705 with CMP1 in its current form. If not, try doing the same for the
2706 inverse operation. If that also fails, force CMP1 into a register
2707 and try again. */
2708 if (mips_relational_operand_ok_p (code, cmp1))
2709 mips_emit_binary (code, target, cmp0, cmp1);
2710 else
2712 enum rtx_code inv_code = reverse_condition (code);
2713 if (!mips_relational_operand_ok_p (inv_code, cmp1))
2715 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2716 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
2718 else if (invert_ptr == 0)
2720 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2721 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2722 mips_emit_binary (XOR, target, inv_target, const1_rtx);
2724 else
2726 *invert_ptr = !*invert_ptr;
2727 mips_emit_binary (inv_code, target, cmp0, cmp1);
2732 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2733 The register will have the same mode as CMP0. */
2735 static rtx
2736 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2738 if (cmp1 == const0_rtx)
2739 return cmp0;
2741 if (uns_arith_operand (cmp1, VOIDmode))
2742 return expand_binop (GET_MODE (cmp0), xor_optab,
2743 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2745 return expand_binop (GET_MODE (cmp0), sub_optab,
2746 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2749 /* Convert a comparison into something that can be used in a branch or
2750 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2751 being compared and *CODE is the code used to compare them.
2753 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2754 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2755 otherwise any standard branch condition can be used. The standard branch
2756 conditions are:
2758 - EQ/NE between two registers.
2759 - any comparison between a register and zero. */
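/* For instance (sketch): on mips16, where NEED_EQ_NE_P holds, a branch
   on "a == b" is reduced to an XOR followed by a comparison against
   zero, roughly

	xor	$2,$4,$5
	beqz	$2,label

   using mips_zero_if_equal above.  */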
2761 static void
2762 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
2764 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
2766 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2768 *op0 = cmp_operands[0];
2769 *op1 = cmp_operands[1];
2771 else if (*code == EQ || *code == NE)
2773 if (need_eq_ne_p)
2775 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2776 *op1 = const0_rtx;
2778 else
2780 *op0 = cmp_operands[0];
2781 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2784 else
2786 /* The comparison needs a separate scc instruction. Store the
2787 result of the scc in *OP0 and compare it against zero. */
2788 bool invert = false;
2789 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2790 *op1 = const0_rtx;
2791 mips_emit_int_relational (*code, &invert, *op0,
2792 cmp_operands[0], cmp_operands[1]);
2793 *code = (invert ? EQ : NE);
2796 else
2798 enum rtx_code cmp_code;
2800 /* Floating-point tests use a separate c.cond.fmt comparison to
2801 set a condition code register. The branch or conditional move
2802 will then compare that register against zero.
2804 Set CMP_CODE to the code of the comparison instruction and
2805 *CODE to the code that the branch or move should use. */
2806 switch (*code)
2808 case NE:
2809 case UNGE:
2810 case UNGT:
2811 case LTGT:
2812 case ORDERED:
2813 cmp_code = reverse_condition_maybe_unordered (*code);
2814 *code = EQ;
2815 break;
2817 default:
2818 cmp_code = *code;
2819 *code = NE;
2820 break;
2822 *op0 = (ISA_HAS_8CC
2823 ? gen_reg_rtx (CCmode)
2824 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2825 *op1 = const0_rtx;
2826 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
2830 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2831 Store the result in TARGET and return true if successful.
2833 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
2835 bool
2836 mips_emit_scc (enum rtx_code code, rtx target)
2838 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
2839 return false;
2841 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
2842 if (code == EQ || code == NE)
2844 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2845 mips_emit_binary (code, target, zie, const0_rtx);
2847 else
2848 mips_emit_int_relational (code, 0, target,
2849 cmp_operands[0], cmp_operands[1]);
2850 return true;
2853 /* Emit the common code for doing conditional branches.
2854 operands[0] is the label to jump to.
2855 The comparison operands are saved away by cmp{si,di,sf,df}. */
2857 void
2858 gen_conditional_branch (rtx *operands, enum rtx_code code)
2860 rtx op0, op1, target;
2862 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
2863 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2864 gen_rtx_fmt_ee (code, GET_MODE (op0),
2865 op0, op1),
2866 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2867 pc_rtx);
2868 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2871 /* Emit the common code for conditional moves. OPERANDS is the array
2872 of operands passed to the conditional move define_expand. */
2874 void
2875 gen_conditional_move (rtx *operands)
2877 enum rtx_code code;
2878 rtx op0, op1;
2880 code = GET_CODE (operands[1]);
2881 mips_emit_compare (&code, &op0, &op1, true);
2882 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2883 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2884 gen_rtx_fmt_ee (code,
2885 GET_MODE (op0),
2886 op0, op1),
2887 operands[2], operands[3])));
2890 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2891 the conditional_trap expander. */
2893 void
2894 mips_gen_conditional_trap (rtx *operands)
2896 rtx op0, op1;
2897 enum rtx_code cmp_code = GET_CODE (operands[0]);
2898 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2900 /* MIPS conditional trap machine instructions don't have GT or LE
2901 flavors, so we must invert the comparison and convert to LT and
2902 GE, respectively. */
2903 switch (cmp_code)
2905 case GT: cmp_code = LT; break;
2906 case LE: cmp_code = GE; break;
2907 case GTU: cmp_code = LTU; break;
2908 case LEU: cmp_code = GEU; break;
2909 default: break;
2911 if (cmp_code == GET_CODE (operands[0]))
2913 op0 = cmp_operands[0];
2914 op1 = cmp_operands[1];
2916 else
2918 op0 = cmp_operands[1];
2919 op1 = cmp_operands[0];
2921 op0 = force_reg (mode, op0);
2922 if (!arith_operand (op1, mode))
2923 op1 = force_reg (mode, op1);
2925 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2926 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2927 operands[1]));
2930 /* Load function address ADDR into register DEST. SIBCALL_P is true
2931 if the address is needed for a sibling call. */
2933 static void
2934 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2936 /* If we're generating PIC, and this call is to a global function,
2937 try to allow its address to be resolved lazily. This isn't
2938 possible for NewABI sibcalls since the value of $gp on entry
2939 to the stub would be our caller's gp, not ours. */
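/* In the common non-xgot case this is a single GOT load; the address
   load plus the eventual call then look roughly like (register names
   illustrative, $25 being the register the ABI requires for PIC calls):

	lw	$25,%call16(foo)($gp)
	jalr	$25

   where the stub fetched from the %call16 slot resolves "foo" on the
   first call.  This is only a sketch of the lazy-binding scheme.  */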
2940 if (TARGET_EXPLICIT_RELOCS
2941 && !(sibcall_p && TARGET_NEWABI)
2942 && global_got_operand (addr, VOIDmode))
2944 rtx high, lo_sum_symbol;
2946 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2947 addr, SYMBOL_GOTOFF_CALL);
2948 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
2949 if (Pmode == SImode)
2950 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2951 else
2952 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
2954 else
2955 emit_move_insn (dest, addr);
2959 /* Expand a call or call_value instruction. RESULT is where the
2960 result will go (null for calls), ADDR is the address of the
2961 function, ARGS_SIZE is the size of the arguments and AUX is
2962 the value passed to us by mips_function_arg. SIBCALL_P is true
2963 if we are expanding a sibling call, false if we're expanding
2964 a normal call. */
2966 void
2967 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2969 rtx orig_addr, pattern, insn;
2971 orig_addr = addr;
2972 if (!call_insn_operand (addr, VOIDmode))
2974 addr = gen_reg_rtx (Pmode);
2975 mips_load_call_address (addr, orig_addr, sibcall_p);
2978 if (TARGET_MIPS16
2979 && mips16_hard_float
2980 && build_mips16_call_stub (result, addr, args_size,
2981 aux == 0 ? 0 : (int) GET_MODE (aux)))
2982 return;
2984 if (result == 0)
2985 pattern = (sibcall_p
2986 ? gen_sibcall_internal (addr, args_size)
2987 : gen_call_internal (addr, args_size));
2988 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
2990 rtx reg1, reg2;
2992 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2993 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2994 pattern =
2995 (sibcall_p
2996 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2997 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2999 else
3000 pattern = (sibcall_p
3001 ? gen_sibcall_value_internal (result, addr, args_size)
3002 : gen_call_value_internal (result, addr, args_size));
3004 insn = emit_call_insn (pattern);
3006 /* Lazy-binding stubs require $gp to be valid on entry. */
3007 if (global_got_operand (orig_addr, VOIDmode))
3008 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3012 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3014 static bool
3015 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3016 tree exp ATTRIBUTE_UNUSED)
3018 return TARGET_SIBCALLS;
3021 /* Emit code to move general operand SRC into condition-code
3022 register DEST. SCRATCH is a scratch TFmode float register.
3023 The sequence is:
3025 FP1 = SRC
3026 FP2 = 0.0f
3027 DEST = FP2 < FP1
3029 where FP1 and FP2 are single-precision float registers
3030 taken from SCRATCH. */
3032 void
3033 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3035 rtx fp1, fp2;
3037 /* Change the source to SFmode. */
3038 if (MEM_P (src))
3039 src = adjust_address (src, SFmode, 0);
3040 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3041 src = gen_rtx_REG (SFmode, true_regnum (src));
3043 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3044 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3046 emit_move_insn (copy_rtx (fp1), src);
3047 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3048 emit_insn (gen_slt_sf (dest, fp2, fp1));
3051 /* Emit code to change the current function's return address to
3052 ADDRESS. SCRATCH is available as a scratch register, if needed.
3053 ADDRESS and SCRATCH are both word-mode GPRs. */
3055 void
3056 mips_set_return_address (rtx address, rtx scratch)
3058 rtx slot_address;
3060 compute_frame_size (get_frame_size ());
3061 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3062 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3063 cfun->machine->frame.gp_sp_offset);
3065 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3068 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3069 Assume that the areas do not overlap. */
3071 static void
3072 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3074 HOST_WIDE_INT offset, delta;
3075 unsigned HOST_WIDE_INT bits;
3076 int i;
3077 enum machine_mode mode;
3078 rtx *regs;
3080 /* Work out how many bits to move at a time. If both operands have
3081 half-word alignment, it is usually better to move in half words.
3082 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3083 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3084 Otherwise move word-sized chunks. */
3085 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3086 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3087 bits = BITS_PER_WORD / 2;
3088 else
3089 bits = BITS_PER_WORD;
3091 mode = mode_for_size (bits, MODE_INT, 0);
3092 delta = bits / BITS_PER_UNIT;
3094 /* Allocate a buffer for the temporary registers. */
3095 regs = alloca (sizeof (rtx) * length / delta);
3097 /* Load as many BITS-sized chunks as possible. Use a normal load if
3098 the source has enough alignment, otherwise use left/right pairs. */
3099 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3101 regs[i] = gen_reg_rtx (mode);
3102 if (MEM_ALIGN (src) >= bits)
3103 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3104 else
3106 rtx part = adjust_address (src, BLKmode, offset);
3107 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3108 gcc_unreachable ();
3112 /* Copy the chunks to the destination. */
3113 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3114 if (MEM_ALIGN (dest) >= bits)
3115 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3116 else
3118 rtx part = adjust_address (dest, BLKmode, offset);
3119 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3120 gcc_unreachable ();
3123 /* Mop up any left-over bytes. */
3124 if (offset < length)
3126 src = adjust_address (src, BLKmode, offset);
3127 dest = adjust_address (dest, BLKmode, offset);
3128 move_by_pieces (dest, src, length - offset,
3129 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3133 #define MAX_MOVE_REGS 4
3134 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3137 /* Helper function for doing a loop-based block operation on memory
3138 reference MEM. Each iteration of the loop will operate on LENGTH
3139 bytes of MEM.
3141 Create a new base register for use within the loop and point it to
3142 the start of MEM. Create a new memory reference that uses this
3143 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3145 static void
3146 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3147 rtx *loop_reg, rtx *loop_mem)
3149 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3151 /* Although the new mem does not refer to a known location,
3152 it does keep up to LENGTH bytes of alignment. */
3153 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3154 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3158 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3159 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3160 memory regions do not overlap. */
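/* A quick arithmetic sketch, assuming a 32-bit target where
   MAX_MOVE_BYTES is 4 * 4 = 16: for LENGTH = 50 we get LEFTOVER = 2,
   the loop below copies 48 bytes in three 16-byte iterations, and the
   final 2 bytes are handled by the straight-line code at the end.  */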
3162 static void
3163 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3165 rtx label, src_reg, dest_reg, final_src;
3166 HOST_WIDE_INT leftover;
3168 leftover = length % MAX_MOVE_BYTES;
3169 length -= leftover;
3171 /* Create registers and memory references for use within the loop. */
3172 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3173 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3175 /* Calculate the value that SRC_REG should have after the last iteration
3176 of the loop. */
3177 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3178 0, 0, OPTAB_WIDEN);
3180 /* Emit the start of the loop. */
3181 label = gen_label_rtx ();
3182 emit_label (label);
3184 /* Emit the loop body. */
3185 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3187 /* Move on to the next block. */
3188 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3189 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3191 /* Emit the loop condition. */
3192 if (Pmode == DImode)
3193 emit_insn (gen_cmpdi (src_reg, final_src));
3194 else
3195 emit_insn (gen_cmpsi (src_reg, final_src));
3196 emit_jump_insn (gen_bne (label));
3198 /* Mop up any left-over bytes. */
3199 if (leftover)
3200 mips_block_move_straight (dest, src, leftover);
3203 /* Expand a movmemsi instruction. */
3205 bool
3206 mips_expand_block_move (rtx dest, rtx src, rtx length)
3208 if (GET_CODE (length) == CONST_INT)
3210 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3212 mips_block_move_straight (dest, src, INTVAL (length));
3213 return true;
3215 else if (optimize)
3217 mips_block_move_loop (dest, src, INTVAL (length));
3218 return true;
3221 return false;
3224 /* Argument support functions. */
3226 /* Initialize CUMULATIVE_ARGS for a function. */
3228 void
3229 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3230 rtx libname ATTRIBUTE_UNUSED)
3232 static CUMULATIVE_ARGS zero_cum;
3233 tree param, next_param;
3235 *cum = zero_cum;
3236 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3238 /* Determine if this function has variable arguments. This is
3239 indicated by the last argument being 'void_type_node' if there
3240 are no variable arguments. The standard MIPS calling sequence
3241 passes all arguments in the general purpose registers in this case. */
3243 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3244 param != 0; param = next_param)
3246 next_param = TREE_CHAIN (param);
3247 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3248 cum->gp_reg_found = 1;
3253 /* Fill INFO with information about a single argument. CUM is the
3254 cumulative state for earlier arguments. MODE is the mode of this
3255 argument and TYPE is its type (if known). NAMED is true if this
3256 is a named (fixed) argument rather than a variable one. */
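/* Two rough o32 examples of the rules implemented below (register
   names only, details elided): for "f (int a, double b)", A goes in
   $4; B is not a leading FP scalar (a GPR has already been used), and
   being doubleword-aligned it skips $5 and lands in the pair $6/$7.
   For "g (double x, double y)", both are leading FP scalars and go in
   $f12 and $f14.  */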
3258 static void
3259 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3260 tree type, int named, struct mips_arg_info *info)
3262 bool doubleword_aligned_p;
3263 unsigned int num_bytes, num_words, max_regs;
3265 /* Work out the size of the argument. */
3266 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3267 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3269 /* Decide whether it should go in a floating-point register, assuming
3270 one is free. Later code checks for availability.
3272 The checks against UNITS_PER_FPVALUE handle the soft-float and
3273 single-float cases. */
3274 switch (mips_abi)
3276 case ABI_EABI:
3277 /* The EABI conventions have traditionally been defined in terms
3278 of TYPE_MODE, regardless of the actual type. */
3279 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3280 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3281 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3282 break;
3284 case ABI_32:
3285 case ABI_O64:
3286 /* Only leading floating-point scalars are passed in
3287 floating-point registers. We also handle vector floats the same
3288 way, which is OK because they are not covered by the standard ABI. */
3289 info->fpr_p = (!cum->gp_reg_found
3290 && cum->arg_number < 2
3291 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3292 || VECTOR_FLOAT_TYPE_P (type))
3293 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3294 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3295 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3296 break;
3298 case ABI_N32:
3299 case ABI_64:
3300 /* Scalar and complex floating-point types are passed in
3301 floating-point registers. */
3302 info->fpr_p = (named
3303 && (type == 0 || FLOAT_TYPE_P (type))
3304 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3305 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3306 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3307 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3309 /* ??? According to the ABI documentation, the real and imaginary
3310 parts of complex floats should be passed in individual registers.
3311 The real and imaginary parts of stack arguments are supposed
3312 to be contiguous and there should be an extra word of padding
3313 at the end.
3315 This has two problems. First, it makes it impossible to use a
3316 single "void *" va_list type, since register and stack arguments
3317 are passed differently. (At the time of writing, MIPSpro cannot
3318 handle complex float varargs correctly.) Second, it's unclear
3319 what should happen when there is only one register free.
3321 For now, we assume that named complex floats should go into FPRs
3322 if there are two FPRs free, otherwise they should be passed in the
3323 same way as a struct containing two floats. */
3324 if (info->fpr_p
3325 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3326 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3328 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3329 info->fpr_p = false;
3330 else
3331 num_words = 2;
3333 break;
3335 default:
3336 gcc_unreachable ();
3339 /* See whether the argument has doubleword alignment. */
3340 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3342 /* Set REG_OFFSET to the register count we're interested in.
3343 The EABI allocates the floating-point registers separately,
3344 but the other ABIs allocate them like integer registers. */
3345 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3346 ? cum->num_fprs
3347 : cum->num_gprs);
3349 /* Advance to an even register if the argument is doubleword-aligned. */
3350 if (doubleword_aligned_p)
3351 info->reg_offset += info->reg_offset & 1;
3353 /* Work out the offset of a stack argument. */
3354 info->stack_offset = cum->stack_words;
3355 if (doubleword_aligned_p)
3356 info->stack_offset += info->stack_offset & 1;
3358 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3360 /* Partition the argument between registers and stack. */
3361 info->reg_words = MIN (num_words, max_regs);
3362 info->stack_words = num_words - info->reg_words;
3366 /* Implement FUNCTION_ARG_ADVANCE. */
3368 void
3369 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3370 tree type, int named)
3372 struct mips_arg_info info;
3374 mips_arg_info (cum, mode, type, named, &info);
3376 if (!info.fpr_p)
3377 cum->gp_reg_found = true;
3379 /* See the comment above the cumulative args structure in mips.h
3380 for an explanation of what this code does. It assumes the O32
3381 ABI, which passes at most 2 arguments in float registers. */
3382 if (cum->arg_number < 2 && info.fpr_p)
3383 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
3385 if (mips_abi != ABI_EABI || !info.fpr_p)
3386 cum->num_gprs = info.reg_offset + info.reg_words;
3387 else if (info.reg_words > 0)
3388 cum->num_fprs += FP_INC;
3390 if (info.stack_words > 0)
3391 cum->stack_words = info.stack_offset + info.stack_words;
3393 cum->arg_number++;
3396 /* Implement FUNCTION_ARG. */
3398 struct rtx_def *
3399 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3400 tree type, int named)
3402 struct mips_arg_info info;
3404 /* We will be called with a mode of VOIDmode after the last argument
3405 has been seen. Whatever we return will be passed to the call
3406 insn. If we need a mips16 fp_code, return a REG with the code
3407 stored as the mode. */
3408 if (mode == VOIDmode)
3410 if (TARGET_MIPS16 && cum->fp_code != 0)
3411 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3413 else
3414 return 0;
3417 mips_arg_info (cum, mode, type, named, &info);
3419 /* Return straight away if the whole argument is passed on the stack. */
3420 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3421 return 0;
3423 if (type != 0
3424 && TREE_CODE (type) == RECORD_TYPE
3425 && TARGET_NEWABI
3426 && TYPE_SIZE_UNIT (type)
3427 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3428 && named)
3430 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3431 structure contains a double in its entirety, then that 64 bit
3432 chunk is passed in a floating point register. */
3433 tree field;
3435 /* First check to see if there is any such field. */
3436 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3437 if (TREE_CODE (field) == FIELD_DECL
3438 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3439 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3440 && host_integerp (bit_position (field), 0)
3441 && int_bit_position (field) % BITS_PER_WORD == 0)
3442 break;
3444 if (field != 0)
3446 /* Now handle the special case by returning a PARALLEL
3447 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3448 chunks are passed in registers. */
3449 unsigned int i;
3450 HOST_WIDE_INT bitpos;
3451 rtx ret;
3453 /* assign_parms checks the mode of ENTRY_PARM, so we must
3454 use the actual mode here. */
3455 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3457 bitpos = 0;
3458 field = TYPE_FIELDS (type);
3459 for (i = 0; i < info.reg_words; i++)
3461 rtx reg;
3463 for (; field; field = TREE_CHAIN (field))
3464 if (TREE_CODE (field) == FIELD_DECL
3465 && int_bit_position (field) >= bitpos)
3466 break;
3468 if (field
3469 && int_bit_position (field) == bitpos
3470 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3471 && !TARGET_SOFT_FLOAT
3472 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3473 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3474 else
3475 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3477 XVECEXP (ret, 0, i)
3478 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3479 GEN_INT (bitpos / BITS_PER_UNIT));
3481 bitpos += BITS_PER_WORD;
3483 return ret;
3487 /* Handle the n32/n64 conventions for passing complex floating-point
3488 arguments in FPR pairs. The real part goes in the lower register
3489 and the imaginary part goes in the upper register. */
3490 if (TARGET_NEWABI
3491 && info.fpr_p
3492 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3494 rtx real, imag;
3495 enum machine_mode inner;
3496 int reg;
3498 inner = GET_MODE_INNER (mode);
3499 reg = FP_ARG_FIRST + info.reg_offset;
3500 real = gen_rtx_EXPR_LIST (VOIDmode,
3501 gen_rtx_REG (inner, reg),
3502 const0_rtx);
3503 imag = gen_rtx_EXPR_LIST (VOIDmode,
3504 gen_rtx_REG (inner, reg + info.reg_words / 2),
3505 GEN_INT (GET_MODE_SIZE (inner)));
3506 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3509 if (!info.fpr_p)
3510 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3511 else if (info.reg_offset == 1)
3512 /* This code handles the special o32 case in which the second word
3513 of the argument structure is passed in floating-point registers. */
3514 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3515 else
3516 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3520 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3522 static int
3523 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3524 enum machine_mode mode, tree type, bool named)
3526 struct mips_arg_info info;
3528 mips_arg_info (cum, mode, type, named, &info);
3529 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3533 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3534 PARM_BOUNDARY bits of alignment, but will be given anything up
3535 to STACK_BOUNDARY bits if the type requires it. */
3538 function_arg_boundary (enum machine_mode mode, tree type)
3540 unsigned int alignment;
3542 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3543 if (alignment < PARM_BOUNDARY)
3544 alignment = PARM_BOUNDARY;
3545 if (alignment > STACK_BOUNDARY)
3546 alignment = STACK_BOUNDARY;
3547 return alignment;
3550 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3551 upward rather than downward. In other words, return true if the
3552 first byte of the stack slot has useful data, false if the last
3553 byte does. */
3555 bool
3556 mips_pad_arg_upward (enum machine_mode mode, tree type)
3558 /* On little-endian targets, the first byte of every stack argument
3559 is passed in the first byte of the stack slot. */
3560 if (!BYTES_BIG_ENDIAN)
3561 return true;
3563 /* Otherwise, integral types are padded downward: the last byte of a
3564 stack argument is passed in the last byte of the stack slot. */
3565 if (type != 0
3566 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3567 : GET_MODE_CLASS (mode) == MODE_INT)
3568 return false;
3570 /* Big-endian o64 pads floating-point arguments downward. */
3571 if (mips_abi == ABI_O64)
3572 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3573 return false;
3575 /* Other types are padded upward for o32, o64, n32 and n64. */
3576 if (mips_abi != ABI_EABI)
3577 return true;
3579 /* Arguments smaller than a stack slot are padded downward. */
3580 if (mode != BLKmode)
3581 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3582 else
3583 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3587 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3588 if the least significant byte of the register has useful data. Return
3589 the opposite if the most significant byte does. */
3591 bool
3592 mips_pad_reg_upward (enum machine_mode mode, tree type)
3594 /* No shifting is required for floating-point arguments. */
3595 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3596 return !BYTES_BIG_ENDIAN;
3598 /* Otherwise, apply the same padding to register arguments as we do
3599 to stack arguments. */
3600 return mips_pad_arg_upward (mode, type);
3603 static void
3604 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3605 tree type, int *pretend_size, int no_rtl)
3607 CUMULATIVE_ARGS local_cum;
3608 int gp_saved, fp_saved;
3610 /* The caller has advanced CUM up to, but not beyond, the last named
3611 argument. Advance a local copy of CUM past the last "real" named
3612 argument, to find out how many registers are left over. */
3614 local_cum = *cum;
3615 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3617 /* Find out how many registers we need to save. */
3618 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3619 fp_saved = (EABI_FLOAT_VARARGS_P
3620 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
3621 : 0);
3623 if (!no_rtl)
3625 if (gp_saved > 0)
3627 rtx ptr, mem;
3629 ptr = virtual_incoming_args_rtx;
3630 switch (mips_abi)
3632 case ABI_32:
3633 case ABI_O64:
3634 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3635 break;
3637 case ABI_EABI:
3638 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
3639 break;
3641 mem = gen_rtx_MEM (BLKmode, ptr);
3642 set_mem_alias_set (mem, get_varargs_alias_set ());
3644 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3645 mem, gp_saved);
3647 if (fp_saved > 0)
3649 /* We can't use move_block_from_reg, because it will use
3650 the wrong mode. */
3651 enum machine_mode mode;
3652 int off, i;
3654 /* Set OFF to the offset from virtual_incoming_args_rtx of
3655 the first float register. The FP save area lies below
3656 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3657 off = -gp_saved * UNITS_PER_WORD;
3658 off &= ~(UNITS_PER_FPVALUE - 1);
3659 off -= fp_saved * UNITS_PER_FPREG;
3661 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
3663 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3665 rtx ptr, mem;
3667 ptr = plus_constant (virtual_incoming_args_rtx, off);
3668 mem = gen_rtx_MEM (mode, ptr);
3669 set_mem_alias_set (mem, get_varargs_alias_set ());
3670 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3671 off += UNITS_PER_HWFPVALUE;
3675 if (TARGET_OLDABI)
3677 /* No need for pretend arguments: the register parameter area was
3678 allocated by the caller. */
3679 *pretend_size = 0;
3680 return;
3682 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3685 /* Create the va_list data type.
3686 We keep three pointers and two offsets.
3687 Two pointers are to the overflow area, which starts at the CFA.
3688 One of these is constant, for addressing into the GPR save area below it.
3689 The other is advanced up the stack through the overflow region.
3690 The third pointer is to the GPR save area. Since the FPR save area
3691 is just below it, we can address FPR slots off this pointer.
3692 We also keep two one-byte offsets, which are to be subtracted from the
3693 constant pointers to yield addresses in the GPR and FPR save areas.
3694 These are downcounted as float or non-float arguments are used,
3695 and when they get to zero, the argument must be obtained from the
3696 overflow region.
3697 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3698 pointer is enough. It's started at the GPR save area, and is
3699 advanced, period.
3700 Note that the GPR save area is not constant size, due to optimization
3701 in the prologue. Hence, we can't use a design with two pointers
3702 and two offsets, although we could have designed this with two pointers
3703 and three offsets. */
3705 static tree
3706 mips_build_builtin_va_list (void)
3708 if (EABI_FLOAT_VARARGS_P)
3710 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3711 tree array, index;
3713 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3715 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3716 ptr_type_node);
3717 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3718 ptr_type_node);
3719 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3720 ptr_type_node);
3721 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3722 unsigned_char_type_node);
3723 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3724 unsigned_char_type_node);
3725 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3726 warn on every user file. */
3727 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
3728 array = build_array_type (unsigned_char_type_node,
3729 build_index_type (index));
3730 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
3732 DECL_FIELD_CONTEXT (f_ovfl) = record;
3733 DECL_FIELD_CONTEXT (f_gtop) = record;
3734 DECL_FIELD_CONTEXT (f_ftop) = record;
3735 DECL_FIELD_CONTEXT (f_goff) = record;
3736 DECL_FIELD_CONTEXT (f_foff) = record;
3737 DECL_FIELD_CONTEXT (f_res) = record;
3739 TYPE_FIELDS (record) = f_ovfl;
3740 TREE_CHAIN (f_ovfl) = f_gtop;
3741 TREE_CHAIN (f_gtop) = f_ftop;
3742 TREE_CHAIN (f_ftop) = f_goff;
3743 TREE_CHAIN (f_goff) = f_foff;
3744 TREE_CHAIN (f_foff) = f_res;
3746 layout_type (record);
3747 return record;
3749 else if (TARGET_IRIX && TARGET_IRIX6)
3750 /* On IRIX 6, this type is 'char *'. */
3751 return build_pointer_type (char_type_node);
3752 else
3753 /* Otherwise, we use 'void *'. */
3754 return ptr_type_node;
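/* Illustration only: with EABI_FLOAT_VARARGS_P, the record built above
   behaves roughly like the following C structure.  The field names match
   the FIELD_DECLs created above; the struct tag is invented for this
   sketch and the __reserved size mirrors the GET_MODE_SIZE (ptr_mode) - 2
   index computed for the padding array.

       struct eabi_va_list_sketch
       {
         void *__overflow_argptr;      // next overflow (stack) argument
         void *__gpr_top;              // top of the GPR save area
         void *__fpr_top;              // top of the FPR save area
         unsigned char __gpr_offset;   // bytes still unused in the GPR area
         unsigned char __fpr_offset;   // bytes still unused in the FPR area
         unsigned char __reserved[GET_MODE_SIZE (ptr_mode) - 2];
       };  */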
3757 /* Implement va_start. */
3759 void
3760 mips_va_start (tree valist, rtx nextarg)
3762 const CUMULATIVE_ARGS *cum = &current_function_args_info;
3764 /* REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) is initialized to STACK_BOUNDARY, but
3765 since the stack is aligned for a pair of argument-passing slots,
3766 and the beginning of a variable argument list may be an odd slot,
3767 we have to decrease its alignment. */
3768 if (cfun && cfun->emit->regno_pointer_align)
3769 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3770 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3771 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3773 if (mips_abi == ABI_EABI)
3775 int gpr_save_area_size;
3777 gpr_save_area_size
3778 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
3780 if (EABI_FLOAT_VARARGS_P)
3782 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3783 tree ovfl, gtop, ftop, goff, foff;
3784 tree t;
3785 int fpr_offset;
3786 int fpr_save_area_size;
3788 f_ovfl = TYPE_FIELDS (va_list_type_node);
3789 f_gtop = TREE_CHAIN (f_ovfl);
3790 f_ftop = TREE_CHAIN (f_gtop);
3791 f_goff = TREE_CHAIN (f_ftop);
3792 f_foff = TREE_CHAIN (f_goff);
3794 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3795 NULL_TREE);
3796 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3797 NULL_TREE);
3798 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3799 NULL_TREE);
3800 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3801 NULL_TREE);
3802 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3803 NULL_TREE);
3805 /* Emit code to initialize OVFL, which points to the next varargs
3806 stack argument. CUM->STACK_WORDS gives the number of stack
3807 words used by named arguments. */
3808 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3809 if (cum->stack_words > 0)
3810 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3811 build_int_cst (NULL_TREE,
3812 cum->stack_words * UNITS_PER_WORD));
3813 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3814 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3816 /* Emit code to initialize GTOP, the top of the GPR save area. */
3817 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3818 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3819 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3821 /* Emit code to initialize FTOP, the top of the FPR save area.
3822 This address is gpr_save_area_bytes below GTOP, rounded
3823 down to the next fp-aligned boundary. */
3824 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3825 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3826 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3827 if (fpr_offset)
3828 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3829 build_int_cst (NULL_TREE, -fpr_offset));
3830 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3831 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3833 /* Emit code to initialize GOFF, the offset from GTOP of the
3834 next GPR argument. */
3835 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3836 build_int_cst (NULL_TREE, gpr_save_area_size));
3837 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3839 /* Likewise emit code to initialize FOFF, the offset from FTOP
3840 of the next FPR argument. */
3841 fpr_save_area_size
3842 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3843 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3844 build_int_cst (NULL_TREE, fpr_save_area_size));
3845 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3847 else
3849 /* Everything is in the GPR save area, or in the overflow
3850 area which is contiguous with it. */
3851 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3852 std_expand_builtin_va_start (valist, nextarg);
3855 else
3856 std_expand_builtin_va_start (valist, nextarg);
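/* Illustration only: in the EABI_FLOAT_VARARGS_P case the trees expanded
   above amount to the following C-level initialization, where
   "incoming_args" stands for virtual_incoming_args_rtx:

       ovfl = incoming_args + cum->stack_words * UNITS_PER_WORD;
       gtop = incoming_args;
       ftop = incoming_args - fpr_offset;  // GPR save size rounded up
                                           // to UNITS_PER_FPVALUE
       goff = gpr_save_area_size;          // bytes left in the GPR area
       foff = fpr_save_area_size;          // bytes left in the FPR area  */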
3859 /* Implement va_arg. */
3861 static tree
3862 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3864 HOST_WIDE_INT size, rsize;
3865 tree addr;
3866 bool indirect;
3868 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3870 if (indirect)
3871 type = build_pointer_type (type);
3873 size = int_size_in_bytes (type);
3874 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
3876 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3877 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3878 else
3880 /* Not a simple merged stack. */
3882 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3883 tree ovfl, top, off, align;
3884 HOST_WIDE_INT osize;
3885 tree t, u;
3887 f_ovfl = TYPE_FIELDS (va_list_type_node);
3888 f_gtop = TREE_CHAIN (f_ovfl);
3889 f_ftop = TREE_CHAIN (f_gtop);
3890 f_goff = TREE_CHAIN (f_ftop);
3891 f_foff = TREE_CHAIN (f_goff);
3893 /* We maintain separate pointers and offsets for floating-point
3894 and integer arguments, but we need similar code in both cases.
3895 Let:
3897 TOP be the top of the register save area;
3898 OFF be the offset from TOP of the next register;
3899 ADDR_RTX be the address of the argument;
3900 RSIZE be the number of bytes used to store the argument
3901 when it's in the register save area;
3902 OSIZE be the number of bytes used to store it when it's
3903 in the stack overflow area; and
3904 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3906 The code we want is:
3908 1: off &= -rsize; // round down
3909 2: if (off != 0)
3910 3: {
3911 4: addr_rtx = top - off;
3912 5: off -= rsize;
3913 6: }
3914 7: else
3915 8: {
3916 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
3917 10: addr_rtx = ovfl + PADDING;
3918 11: ovfl += osize;
3919 14: }
3921 [1] and [9] can sometimes be optimized away. */
3923 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3924 NULL_TREE);
3926 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3927 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3929 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3930 NULL_TREE);
3931 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3932 NULL_TREE);
3934 /* When floating-point registers are saved to the stack,
3935 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3936 of the float's precision. */
3937 rsize = UNITS_PER_HWFPVALUE;
3939 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3940 (= PARM_BOUNDARY bits). This can be different from RSIZE
3941 in two cases:
3943 (1) On 32-bit targets when TYPE is a structure such as:
3945 struct s { float f; };
3947 Such structures are passed in paired FPRs, so RSIZE
3948 will be 8 bytes. However, the structure only takes
3949 up 4 bytes of memory, so OSIZE will only be 4.
3951 (2) In combinations such as -mgp64 -msingle-float
3952 -fshort-double. Doubles passed in registers
3953 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3954 but those passed on the stack take up
3955 UNITS_PER_WORD bytes. */
3956 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3958 else
3960 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3961 NULL_TREE);
3962 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3963 NULL_TREE);
3964 if (rsize > UNITS_PER_WORD)
3966 /* [1] Emit code for: off &= -rsize. */
3967 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3968 build_int_cst (NULL_TREE, -rsize));
3969 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3970 gimplify_and_add (t, pre_p);
3972 osize = rsize;
3975 /* [2] Emit code to branch if off == 0. */
3976 t = build (NE_EXPR, boolean_type_node, off,
3977 build_int_cst (TREE_TYPE (off), 0));
3978 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3980 /* [5] Emit code for: off -= rsize. We do this as a form of
3981 post-increment not available to C. Also widen for the
3982 coming pointer arithmetic. */
3983 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
3984 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3985 t = fold_convert (sizetype, t);
3986 t = fold_convert (TREE_TYPE (top), t);
3988 /* [4] Emit code for: addr_rtx = top - off. On big-endian machines,
3989 the argument has RSIZE - SIZE bytes of leading padding. */
3990 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3991 if (BYTES_BIG_ENDIAN && rsize > size)
3993 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3994 rsize - size));
3995 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3997 COND_EXPR_THEN (addr) = t;
3999 if (osize > UNITS_PER_WORD)
4001 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
4002 u = fold_convert (TREE_TYPE (ovfl),
4003 build_int_cst (NULL_TREE, osize - 1));
4004 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4005 u = fold_convert (TREE_TYPE (ovfl),
4006 build_int_cst (NULL_TREE, -osize));
4007 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4008 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4010 else
4011 align = NULL;
4013 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4014 post-increment ovfl by osize. On big-endian machines,
4015 the argument has OSIZE - SIZE bytes of leading padding. */
4016 u = fold_convert (TREE_TYPE (ovfl),
4017 build_int_cst (NULL_TREE, osize));
4018 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4019 if (BYTES_BIG_ENDIAN && osize > size)
4021 u = fold_convert (TREE_TYPE (t),
4022 build_int_cst (NULL_TREE, osize - size));
4023 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
4026 /* String [9] and [10,11] together. */
4027 if (align)
4028 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4029 COND_EXPR_ELSE (addr) = t;
4031 addr = fold_convert (build_pointer_type (type), addr);
4032 addr = build_fold_indirect_ref (addr);
4035 if (indirect)
4036 addr = build_fold_indirect_ref (addr);
4038 return addr;
4041 /* Return true if it is possible to use left/right accesses for a
4042 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4043 returning true, update *OP, *LEFT and *RIGHT as follows:
4045 *OP is a BLKmode reference to the whole field.
4047 *LEFT is a QImode reference to the first byte if big endian or
4048 the last byte if little endian. This address can be used in the
4049 left-side instructions (lwl, swl, ldl, sdl).
4051 *RIGHT is a QImode reference to the opposite end of the field and
4052 can be used in the partnering right-side instruction. */
4054 static bool
4055 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4056 rtx *left, rtx *right)
4058 rtx first, last;
4060 /* Check that the operand really is a MEM. Not all the extv and
4061 extzv predicates are checked. */
4062 if (!MEM_P (*op))
4063 return false;
4065 /* Check that the size is valid. */
4066 if (width != 32 && (!TARGET_64BIT || width != 64))
4067 return false;
4069 /* We can only access byte-aligned values. Since we are always passed
4070 a reference to the first byte of the field, it is not necessary to
4071 do anything with BITPOS after this check. */
4072 if (bitpos % BITS_PER_UNIT != 0)
4073 return false;
4075 /* Reject aligned bitfields: we want to use a normal load or store
4076 instead of a left/right pair. */
4077 if (MEM_ALIGN (*op) >= width)
4078 return false;
4080 /* Adjust *OP to refer to the whole field. This also has the effect
4081 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4082 *op = adjust_address (*op, BLKmode, 0);
4083 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4085 /* Get references to both ends of the field. We deliberately don't
4086 use the original QImode *OP for FIRST since the new BLKmode one
4087 might have a simpler address. */
4088 first = adjust_address (*op, QImode, 0);
4089 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4091 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4092 be the upper word and RIGHT the lower word. */
4093 if (TARGET_BIG_ENDIAN)
4094 *left = first, *right = last;
4095 else
4096 *left = last, *right = first;
4098 return true;
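/* Illustration only: for a 32-bit unaligned access at address A, the
   *LEFT and *RIGHT references chosen above correspond to the usual
   pairing of the left/right instructions, roughly:

       # big-endian                    # little-endian
       lwl  $dest, 0(A)                lwl  $dest, 3(A)
       lwr  $dest, 3(A)                lwr  $dest, 0(A)

   with ldl/ldr and sdl/sdr playing the same roles for 64-bit loads
   and stores.  */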
4102 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4103 Return true on success. We only handle cases where zero_extract is
4104 equivalent to sign_extract. */
4106 bool
4107 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4109 rtx left, right, temp;
4111 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4112 paradoxical word_mode subreg. This is the only case in which
4113 we allow the destination to be larger than the source. */
4114 if (GET_CODE (dest) == SUBREG
4115 && GET_MODE (dest) == DImode
4116 && SUBREG_BYTE (dest) == 0
4117 && GET_MODE (SUBREG_REG (dest)) == SImode)
4118 dest = SUBREG_REG (dest);
4120 /* After the above adjustment, the destination must be the same
4121 width as the source. */
4122 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4123 return false;
4125 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4126 return false;
4128 temp = gen_reg_rtx (GET_MODE (dest));
4129 if (GET_MODE (dest) == DImode)
4131 emit_insn (gen_mov_ldl (temp, src, left));
4132 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4134 else
4136 emit_insn (gen_mov_lwl (temp, src, left));
4137 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4139 return true;
4143 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4144 true on success. */
4146 bool
4147 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4149 rtx left, right;
4151 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4152 return false;
4154 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
4156 if (GET_MODE (src) == DImode)
4158 emit_insn (gen_mov_sdl (dest, src, left));
4159 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4161 else
4163 emit_insn (gen_mov_swl (dest, src, left));
4164 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4166 return true;
4169 /* Set up globals to generate code for the ISA or processor
4170 described by INFO. */
4172 static void
4173 mips_set_architecture (const struct mips_cpu_info *info)
4175 if (info != 0)
4177 mips_arch_info = info;
4178 mips_arch = info->cpu;
4179 mips_isa = info->isa;
4184 /* Likewise for tuning. */
4186 static void
4187 mips_set_tune (const struct mips_cpu_info *info)
4189 if (info != 0)
4191 mips_tune_info = info;
4192 mips_tune = info->cpu;
4196 /* Implement TARGET_HANDLE_OPTION. */
4198 static bool
4199 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4201 switch (code)
4203 case OPT_mabi_:
4204 if (strcmp (arg, "32") == 0)
4205 mips_abi = ABI_32;
4206 else if (strcmp (arg, "o64") == 0)
4207 mips_abi = ABI_O64;
4208 else if (strcmp (arg, "n32") == 0)
4209 mips_abi = ABI_N32;
4210 else if (strcmp (arg, "64") == 0)
4211 mips_abi = ABI_64;
4212 else if (strcmp (arg, "eabi") == 0)
4213 mips_abi = ABI_EABI;
4214 else
4215 return false;
4216 return true;
4218 case OPT_march_:
4219 mips_arch_string = arg;
4220 return mips_parse_cpu (arg) != 0;
4222 case OPT_mtune_:
4223 mips_tune_string = arg;
4224 return mips_parse_cpu (arg) != 0;
4226 case OPT_mips:
4227 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4228 return mips_isa_info != 0;
4230 case OPT_mflush_func_:
4231 mips_cache_flush_func = arg;
4232 return true;
4234 case OPT_mno_flush_func:
4235 mips_cache_flush_func = NULL;
4236 return true;
4238 default:
4239 return true;
4243 /* Set up the threshold for data to go into the small data area, instead
4244 of the normal data area, and detect any conflicts in the switches. */
4246 void
4247 override_options (void)
4249 int i, start, regno;
4250 enum machine_mode mode;
4252 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4254 /* The following code determines the architecture and register size.
4255 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4256 The GAS and GCC code should be kept in sync as much as possible. */
4258 if (mips_arch_string != 0)
4259 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4261 if (mips_isa_info != 0)
4263 if (mips_arch_info == 0)
4264 mips_set_architecture (mips_isa_info);
4265 else if (mips_arch_info->isa != mips_isa_info->isa)
4266 error ("-%s conflicts with the other architecture options, "
4267 "which specify a %s processor",
4268 mips_isa_info->name,
4269 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4272 if (mips_arch_info == 0)
4274 #ifdef MIPS_CPU_STRING_DEFAULT
4275 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4276 #else
4277 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4278 #endif
4281 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4282 error ("-march=%s is not compatible with the selected ABI",
4283 mips_arch_info->name);
4285 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4286 if (mips_tune_string != 0)
4287 mips_set_tune (mips_parse_cpu (mips_tune_string));
4289 if (mips_tune_info == 0)
4290 mips_set_tune (mips_arch_info);
4292 if ((target_flags_explicit & MASK_64BIT) != 0)
4294 /* The user specified the size of the integer registers. Make sure
4295 it agrees with the ABI and ISA. */
4296 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4297 error ("-mgp64 used with a 32-bit processor");
4298 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4299 error ("-mgp32 used with a 64-bit ABI");
4300 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4301 error ("-mgp64 used with a 32-bit ABI");
4303 else
4305 /* Infer the integer register size from the ABI and processor.
4306 Restrict ourselves to 32-bit registers if that's all the
4307 processor has, or if the ABI cannot handle 64-bit registers. */
4308 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4309 target_flags &= ~MASK_64BIT;
4310 else
4311 target_flags |= MASK_64BIT;
4314 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4316 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4317 only one right answer here. */
4318 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4319 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4320 else if (!TARGET_64BIT && TARGET_FLOAT64)
4321 error ("unsupported combination: %s", "-mgp32 -mfp64");
4322 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4323 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4325 else
4327 /* -msingle-float selects 32-bit float registers. Otherwise the
4328 float registers should be the same size as the integer ones. */
4329 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4330 target_flags |= MASK_FLOAT64;
4331 else
4332 target_flags &= ~MASK_FLOAT64;
4335 /* End of code shared with GAS. */
4337 if ((target_flags_explicit & MASK_LONG64) == 0)
4339 if (TARGET_INT64)
4340 target_flags |= MASK_LONG64;
4341 /* If no type size setting options (-mlong64,-mint64,-mlong32)
4342 were used, then set the type sizes. In the EABI in 64 bit mode,
4343 longs and pointers are 64 bits. Likewise for the SGI Irix6 N64
4344 ABI. */
4345 else if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4346 target_flags |= MASK_LONG64;
4347 else
4348 target_flags &= ~MASK_LONG64;
4351 /* Deprecate -mint64. Remove after 4.0 branches. */
4352 if (TARGET_INT64)
4353 warning ("-mint64 is a deprecated option");
4355 if (TARGET_INT64 && !TARGET_LONG64)
4356 error ("unsupported combination: %s", "-mint64 -mlong32");
4358 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4359 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4361 /* For some configurations, it is useful to have -march control
4362 the default setting of MASK_SOFT_FLOAT. */
4363 switch ((int) mips_arch)
4365 case PROCESSOR_R4100:
4366 case PROCESSOR_R4111:
4367 case PROCESSOR_R4120:
4368 case PROCESSOR_R4130:
4369 target_flags |= MASK_SOFT_FLOAT;
4370 break;
4372 default:
4373 target_flags &= ~MASK_SOFT_FLOAT;
4374 break;
4378 if (!TARGET_OLDABI)
4379 flag_pcc_struct_return = 0;
4381 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4383 /* If neither -mbranch-likely nor -mno-branch-likely was given
4384 on the command line, set MASK_BRANCHLIKELY based on the target
4385 architecture.
4387 By default, we enable use of Branch Likely instructions on
4388 all architectures which support them with the following
4389 exceptions: when creating MIPS32 or MIPS64 code, and when
4390 tuning for architectures where their use tends to hurt
4391 performance.
4393 The MIPS32 and MIPS64 architecture specifications say "Software
4394 is strongly encouraged to avoid use of Branch Likely
4395 instructions, as they will be removed from a future revision
4396 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4397 issue those instructions unless instructed to do so by
4398 -mbranch-likely. */
4399 if (ISA_HAS_BRANCHLIKELY
4400 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4401 && !(TUNE_MIPS5500 || TUNE_SB1))
4402 target_flags |= MASK_BRANCHLIKELY;
4403 else
4404 target_flags &= ~MASK_BRANCHLIKELY;
4406 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4407 warning ("generation of Branch Likely instructions enabled, but not supported by architecture");
4409 /* The effect of -mabicalls isn't defined for the EABI. */
4410 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4412 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4413 target_flags &= ~MASK_ABICALLS;
4416 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4417 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4418 /* ??? -non_shared turns off pic code generation, but this is not
4419 implemented. */
4420 if (TARGET_ABICALLS)
4422 flag_pic = 1;
4423 if (mips_section_threshold > 0)
4424 warning ("-G is incompatible with PIC code which is the default");
4427 /* mips_split_addresses is a half-way house between explicit
4428 relocations and the traditional assembler macros. It can
4429 split absolute 32-bit symbolic constants into a high/lo_sum
4430 pair but uses macros for other sorts of access.
4432 Like explicit relocation support for REL targets, it relies
4433 on GNU extensions in the assembler and the linker.
4435 Although this code should work for -O0, it has traditionally
4436 been treated as an optimization. */
4437 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4438 && optimize && !flag_pic
4439 && !ABI_HAS_64BIT_SYMBOLS)
4440 mips_split_addresses = 1;
4441 else
4442 mips_split_addresses = 0;
4444 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4445 faster code, but at the expense of more nops. Enable it at -O3 and
4446 above. */
4447 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4448 target_flags |= MASK_VR4130_ALIGN;
4450 /* When compiling for the mips16, we cannot use floating point. We
4451 record the original hard float value in mips16_hard_float. */
4452 if (TARGET_MIPS16)
4454 if (TARGET_SOFT_FLOAT)
4455 mips16_hard_float = 0;
4456 else
4457 mips16_hard_float = 1;
4458 target_flags |= MASK_SOFT_FLOAT;
4460 /* Don't run the scheduler before reload, since it tends to
4461 increase register pressure. */
4462 flag_schedule_insns = 0;
4464 /* Don't do hot/cold partitioning. The constant layout code expects
4465 the whole function to be in a single section. */
4466 flag_reorder_blocks_and_partition = 0;
4468 /* Silently disable -mexplicit-relocs since it doesn't apply
4469 to mips16 code. Even so, it would be overly pedantic to warn
4470 about "-mips16 -mexplicit-relocs", especially given that
4471 we use a %gprel() operator. */
4472 target_flags &= ~MASK_EXPLICIT_RELOCS;
4475 /* When using explicit relocs, we call dbr_schedule from within
4476 mips_reorg. */
4477 if (TARGET_EXPLICIT_RELOCS)
4479 mips_flag_delayed_branch = flag_delayed_branch;
4480 flag_delayed_branch = 0;
4483 #ifdef MIPS_TFMODE_FORMAT
4484 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4485 #endif
4487 /* Make sure that the user didn't turn off paired single support when
4488 MIPS-3D support is requested. */
4489 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4490 && !TARGET_PAIRED_SINGLE_FLOAT)
4491 error ("-mips3d requires -mpaired-single");
4493 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4494 if (TARGET_MIPS3D)
4495 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4497 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4498 and TARGET_HARD_FLOAT are both true. */
4499 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4500 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4502 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4503 enabled. */
4504 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4505 error ("-mips3d/-mpaired-single must be used with -mips64");
4507 mips_print_operand_punct['?'] = 1;
4508 mips_print_operand_punct['#'] = 1;
4509 mips_print_operand_punct['/'] = 1;
4510 mips_print_operand_punct['&'] = 1;
4511 mips_print_operand_punct['!'] = 1;
4512 mips_print_operand_punct['*'] = 1;
4513 mips_print_operand_punct['@'] = 1;
4514 mips_print_operand_punct['.'] = 1;
4515 mips_print_operand_punct['('] = 1;
4516 mips_print_operand_punct[')'] = 1;
4517 mips_print_operand_punct['['] = 1;
4518 mips_print_operand_punct[']'] = 1;
4519 mips_print_operand_punct['<'] = 1;
4520 mips_print_operand_punct['>'] = 1;
4521 mips_print_operand_punct['{'] = 1;
4522 mips_print_operand_punct['}'] = 1;
4523 mips_print_operand_punct['^'] = 1;
4524 mips_print_operand_punct['$'] = 1;
4525 mips_print_operand_punct['+'] = 1;
4526 mips_print_operand_punct['~'] = 1;
4528 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4529 mips_char_to_class['t'] = T_REG;
4530 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4531 mips_char_to_class['h'] = HI_REG;
4532 mips_char_to_class['l'] = LO_REG;
4533 mips_char_to_class['x'] = MD_REGS;
4534 mips_char_to_class['b'] = ALL_REGS;
4535 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4536 TARGET_MIPS16 ? M16_NA_REGS :
4537 GR_REGS);
4538 mips_char_to_class['e'] = LEA_REGS;
4539 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4540 mips_char_to_class['v'] = V1_REG;
4541 mips_char_to_class['y'] = GR_REGS;
4542 mips_char_to_class['z'] = ST_REGS;
4543 mips_char_to_class['B'] = COP0_REGS;
4544 mips_char_to_class['C'] = COP2_REGS;
4545 mips_char_to_class['D'] = COP3_REGS;
4547 /* Set up array to map GCC register number to debug register number.
4548 Ignore the special purpose register numbers. */
4550 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4551 mips_dbx_regno[i] = -1;
4553 start = GP_DBX_FIRST - GP_REG_FIRST;
4554 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4555 mips_dbx_regno[i] = i + start;
4557 start = FP_DBX_FIRST - FP_REG_FIRST;
4558 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4559 mips_dbx_regno[i] = i + start;
4561 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4562 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4564 /* Set up array giving whether a given register can hold a given mode. */
4566 for (mode = VOIDmode;
4567 mode != MAX_MACHINE_MODE;
4568 mode = (enum machine_mode) ((int)mode + 1))
4570 register int size = GET_MODE_SIZE (mode);
4571 register enum mode_class class = GET_MODE_CLASS (mode);
4573 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4575 register int temp;
4577 if (mode == CCV2mode)
4578 temp = (ISA_HAS_8CC
4579 && ST_REG_P (regno)
4580 && (regno - ST_REG_FIRST) % 2 == 0);
4582 else if (mode == CCV4mode)
4583 temp = (ISA_HAS_8CC
4584 && ST_REG_P (regno)
4585 && (regno - ST_REG_FIRST) % 4 == 0);
4587 else if (mode == CCmode)
4589 if (! ISA_HAS_8CC)
4590 temp = (regno == FPSW_REGNUM);
4591 else
4592 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4593 || FP_REG_P (regno));
4596 else if (GP_REG_P (regno))
4597 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4599 else if (FP_REG_P (regno))
4600 temp = ((regno % FP_INC) == 0)
4601 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4602 || class == MODE_VECTOR_FLOAT)
4603 && size <= UNITS_PER_FPVALUE)
4604 /* Allow integer modes that fit into a single
4605 register. We need to put integers into FPRs
4606 when using instructions like cvt and trunc. */
4607 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4608 /* Allow TFmode for CCmode reloads. */
4609 || (ISA_HAS_8CC && mode == TFmode));
4611 else if (MD_REG_P (regno))
4612 temp = (INTEGRAL_MODE_P (mode)
4613 && (size <= UNITS_PER_WORD
4614 || (regno == MD_REG_FIRST
4615 && size == 2 * UNITS_PER_WORD)));
4617 else if (ALL_COP_REG_P (regno))
4618 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4619 else
4620 temp = 0;
4622 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4626 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4627 initialized yet, so we can't use that here. */
4628 gpr_mode = TARGET_64BIT ? DImode : SImode;
4630 /* Provide default values for align_* for 64-bit targets. */
4631 if (TARGET_64BIT && !TARGET_MIPS16)
4633 if (align_loops == 0)
4634 align_loops = 8;
4635 if (align_jumps == 0)
4636 align_jumps = 8;
4637 if (align_functions == 0)
4638 align_functions = 8;
4641 /* Function to allocate machine-dependent function status. */
4642 init_machine_status = &mips_init_machine_status;
4644 if (ABI_HAS_64BIT_SYMBOLS)
4646 if (TARGET_EXPLICIT_RELOCS)
4648 mips_split_p[SYMBOL_64_HIGH] = true;
4649 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4650 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4652 mips_split_p[SYMBOL_64_MID] = true;
4653 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4654 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4656 mips_split_p[SYMBOL_64_LOW] = true;
4657 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4658 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4660 mips_split_p[SYMBOL_GENERAL] = true;
4661 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4664 else
4666 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4668 mips_split_p[SYMBOL_GENERAL] = true;
4669 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4670 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
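/* Illustration only: once mips_split_p[SYMBOL_GENERAL] is set, a
   symbolic access is split into a HIGH/LO_SUM pair, which reaches the
   assembler as something like

       lui   $2, %hi(sym)
       lw    $3, %lo(sym)($2)

   In the ABI_HAS_64BIT_SYMBOLS case above, the %highest/%higher/%hi/%lo
   operators build the full 64-bit address in pieces instead.  */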
4674 if (TARGET_MIPS16)
4676 /* The high part is provided by a pseudo copy of $gp. */
4677 mips_split_p[SYMBOL_SMALL_DATA] = true;
4678 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4681 if (TARGET_EXPLICIT_RELOCS)
4683 /* Small data constants are kept whole until after reload,
4684 then lowered by mips_rewrite_small_data. */
4685 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4687 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4688 if (TARGET_NEWABI)
4690 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4691 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4693 else
4695 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4696 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4699 if (TARGET_XGOT)
4701 /* The HIGH and LO_SUM are matched by special .md patterns. */
4702 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4704 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4705 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4706 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4708 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4709 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4710 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4712 else
4714 if (TARGET_NEWABI)
4715 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4716 else
4717 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4718 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4722 if (TARGET_NEWABI)
4724 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4725 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4726 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4729 /* Thread-local relocation operators. */
4730 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
4731 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
4732 mips_split_p[SYMBOL_DTPREL] = 1;
4733 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
4734 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
4735 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
4736 mips_split_p[SYMBOL_TPREL] = 1;
4737 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
4738 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
4740 /* We don't have a thread pointer access instruction on MIPS16, or
4741 appropriate TLS relocations. */
4742 if (TARGET_MIPS16)
4743 targetm.have_tls = false;
4745 /* Default to working around R4000 errata only if the processor
4746 was selected explicitly. */
4747 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4748 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4749 target_flags |= MASK_FIX_R4000;
4751 /* Default to working around R4400 errata only if the processor
4752 was selected explicitly. */
4753 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4754 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4755 target_flags |= MASK_FIX_R4400;
4758 /* Implement CONDITIONAL_REGISTER_USAGE. */
4760 void
4761 mips_conditional_register_usage (void)
4763 if (!TARGET_HARD_FLOAT)
4765 int regno;
4767 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4768 fixed_regs[regno] = call_used_regs[regno] = 1;
4769 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4770 fixed_regs[regno] = call_used_regs[regno] = 1;
4772 else if (! ISA_HAS_8CC)
4774 int regno;
4776 /* We only have a single condition code register. We
4777 implement this by hiding all the condition code registers,
4778 and generating RTL that refers directly to ST_REG_FIRST. */
4779 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4780 fixed_regs[regno] = call_used_regs[regno] = 1;
4782 /* In mips16 mode, we permit the $t temporary registers to be used
4783 for reload. We prohibit the unused $s registers, since they
4784 are call-saved, and saving them via a mips16 register would
4785 probably waste more time than just reloading the value. */
4786 if (TARGET_MIPS16)
4788 fixed_regs[18] = call_used_regs[18] = 1;
4789 fixed_regs[19] = call_used_regs[19] = 1;
4790 fixed_regs[20] = call_used_regs[20] = 1;
4791 fixed_regs[21] = call_used_regs[21] = 1;
4792 fixed_regs[22] = call_used_regs[22] = 1;
4793 fixed_regs[23] = call_used_regs[23] = 1;
4794 fixed_regs[26] = call_used_regs[26] = 1;
4795 fixed_regs[27] = call_used_regs[27] = 1;
4796 fixed_regs[30] = call_used_regs[30] = 1;
4798 /* fp20-23 are now caller saved. */
4799 if (mips_abi == ABI_64)
4801 int regno;
4802 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4803 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4805 /* Odd registers from fp21 to fp31 are now caller saved. */
4806 if (mips_abi == ABI_N32)
4808 int regno;
4809 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4810 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4814 /* Allocate a chunk of memory for per-function machine-dependent data. */
4815 static struct machine_function *
4816 mips_init_machine_status (void)
4818 return ((struct machine_function *)
4819 ggc_alloc_cleared (sizeof (struct machine_function)));
4822 /* On the mips16, we want to allocate $24 (T_REG) before other
4823 registers for instructions for which it is possible. This helps
4824 avoid shuffling registers around in order to set up for an xor,
4825 encouraging the compiler to use a cmp instead. */
4827 void
4828 mips_order_regs_for_local_alloc (void)
4830 register int i;
4832 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4833 reg_alloc_order[i] = i;
4835 if (TARGET_MIPS16)
4837 /* It really doesn't matter where we put register 0, since it is
4838 a fixed register anyhow. */
4839 reg_alloc_order[0] = 24;
4840 reg_alloc_order[24] = 0;
4845 /* The MIPS debug format wants all automatic variables and arguments
4846 to be in terms of the virtual frame pointer (stack pointer before
4847 any adjustment in the function), while the MIPS 3.0 linker wants
4848 the frame pointer to be the stack pointer after the initial
4849 adjustment. So, we do the adjustment here. The arg pointer (which
4850 is eliminated) points to the virtual frame pointer, while the frame
4851 pointer (which may be eliminated) points to the stack pointer after
4852 the initial adjustments. */
4854 HOST_WIDE_INT
4855 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
4857 rtx offset2 = const0_rtx;
4858 rtx reg = eliminate_constant_term (addr, &offset2);
4860 if (offset == 0)
4861 offset = INTVAL (offset2);
4863 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4864 || reg == hard_frame_pointer_rtx)
4866 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4867 ? compute_frame_size (get_frame_size ())
4868 : cfun->machine->frame.total_size;
4870 /* The MIPS16 frame is smaller. */
4871 if (frame_pointer_needed && TARGET_MIPS16)
4872 frame_size -= cfun->machine->frame.args_size;
4874 offset = offset - frame_size;
4877 /* sdbout_parms does not want this to crash for unrecognized cases. */
4878 #if 0
4879 else if (reg != arg_pointer_rtx)
4880 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4881 addr);
4882 #endif
4884 return offset;
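/* Illustration only: if an automatic variable's address is sp + 16
   after the prologue has dropped the stack pointer by a 64-byte frame,
   the code above reports 16 - 64 = -48, i.e. the variable's offset from
   the stack pointer value on entry to the function (the virtual frame
   pointer).  */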
4887 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4889 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4890 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4891 'h' OP is HIGH, prints %hi(X),
4892 'd' output integer constant in decimal,
4893 'z' if the operand is 0, use $0 instead of normal operand.
4894 'D' print second part of double-word register or memory operand.
4895 'L' print low-order register of double-word register operand.
4896 'M' print high-order register of double-word register operand.
4897 'C' print part of opcode for a branch condition.
4898 'F' print part of opcode for a floating-point branch condition.
4899 'N' print part of opcode for a branch condition, inverted.
4900 'W' print part of opcode for a floating-point branch condition, inverted.
4901 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4902 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4903 't' like 'T', but with the EQ/NE cases reversed
4904 'Y' for a CONST_INT X, print mips_fp_conditions[X]
4905 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
4906 'R' print the reloc associated with LO_SUM
4908 The punctuation characters are:
4910 '(' Turn on .set noreorder
4911 ')' Turn on .set reorder
4912 '[' Turn on .set noat
4913 ']' Turn on .set at
4914 '<' Turn on .set nomacro
4915 '>' Turn on .set macro
4916 '{' Turn on .set volatile (not GAS)
4917 '}' Turn on .set novolatile (not GAS)
4918 '&' Turn on .set noreorder if filling delay slots
4919 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4920 '!' Turn on .set nomacro if filling delay slots
4921 '#' Print nop if in a .set noreorder section.
4922 '/' Like '#', but does nothing within a delayed branch sequence
4923 '?' Print 'l' if we are to use a branch likely instead of normal branch.
4924 '@' Print the name of the assembler temporary register (at or $1).
4925 '.' Print the name of the register with a hard-wired zero (zero or $0).
4926 '^' Print the name of the pic call-through register (t9 or $25).
4927 '$' Print the name of the stack pointer register (sp or $29).
4928 '+' Print the name of the gp register (usually gp or $28).
4929 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
4931 void
4932 print_operand (FILE *file, rtx op, int letter)
4934 register enum rtx_code code;
4936 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4938 switch (letter)
4940 case '?':
4941 if (mips_branch_likely)
4942 putc ('l', file);
4943 break;
4945 case '@':
4946 fputs (reg_names [GP_REG_FIRST + 1], file);
4947 break;
4949 case '^':
4950 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4951 break;
4953 case '.':
4954 fputs (reg_names [GP_REG_FIRST + 0], file);
4955 break;
4957 case '$':
4958 fputs (reg_names[STACK_POINTER_REGNUM], file);
4959 break;
4961 case '+':
4962 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
4963 break;
4965 case '&':
4966 if (final_sequence != 0 && set_noreorder++ == 0)
4967 fputs (".set\tnoreorder\n\t", file);
4968 break;
4970 case '*':
4971 if (final_sequence != 0)
4973 if (set_noreorder++ == 0)
4974 fputs (".set\tnoreorder\n\t", file);
4976 if (set_nomacro++ == 0)
4977 fputs (".set\tnomacro\n\t", file);
4979 break;
4981 case '!':
4982 if (final_sequence != 0 && set_nomacro++ == 0)
4983 fputs ("\n\t.set\tnomacro", file);
4984 break;
4986 case '#':
4987 if (set_noreorder != 0)
4988 fputs ("\n\tnop", file);
4989 break;
4991 case '/':
4992 /* Print an extra newline so that the delayed insn is separated
4993 from the following ones. This looks neater and is consistent
4994 with non-nop delayed sequences. */
4995 if (set_noreorder != 0 && final_sequence == 0)
4996 fputs ("\n\tnop\n", file);
4997 break;
4999 case '(':
5000 if (set_noreorder++ == 0)
5001 fputs (".set\tnoreorder\n\t", file);
5002 break;
5004 case ')':
5005 if (set_noreorder == 0)
5006 error ("internal error: %%) found without a %%( in assembler pattern");
5008 else if (--set_noreorder == 0)
5009 fputs ("\n\t.set\treorder", file);
5011 break;
5013 case '[':
5014 if (set_noat++ == 0)
5015 fputs (".set\tnoat\n\t", file);
5016 break;
5018 case ']':
5019 if (set_noat == 0)
5020 error ("internal error: %%] found without a %%[ in assembler pattern");
5021 else if (--set_noat == 0)
5022 fputs ("\n\t.set\tat", file);
5024 break;
5026 case '<':
5027 if (set_nomacro++ == 0)
5028 fputs (".set\tnomacro\n\t", file);
5029 break;
5031 case '>':
5032 if (set_nomacro == 0)
5033 error ("internal error: %%> found without a %%< in assembler pattern");
5034 else if (--set_nomacro == 0)
5035 fputs ("\n\t.set\tmacro", file);
5037 break;
5039 case '{':
5040 if (set_volatile++ == 0)
5041 fputs ("#.set\tvolatile\n\t", file);
5042 break;
5044 case '}':
5045 if (set_volatile == 0)
5046 error ("internal error: %%} found without a %%{ in assembler pattern");
5047 else if (--set_volatile == 0)
5048 fputs ("\n\t#.set\tnovolatile", file);
5050 break;
5052 case '~':
5054 if (align_labels_log > 0)
5055 ASM_OUTPUT_ALIGN (file, align_labels_log);
5057 break;
5059 default:
5060 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5061 break;
5064 return;
5067 if (! op)
5069 error ("PRINT_OPERAND null pointer");
5070 return;
5073 code = GET_CODE (op);
5075 if (letter == 'C')
5076 switch (code)
5078 case EQ: fputs ("eq", file); break;
5079 case NE: fputs ("ne", file); break;
5080 case GT: fputs ("gt", file); break;
5081 case GE: fputs ("ge", file); break;
5082 case LT: fputs ("lt", file); break;
5083 case LE: fputs ("le", file); break;
5084 case GTU: fputs ("gtu", file); break;
5085 case GEU: fputs ("geu", file); break;
5086 case LTU: fputs ("ltu", file); break;
5087 case LEU: fputs ("leu", file); break;
5088 default:
5089 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5092 else if (letter == 'N')
5093 switch (code)
5095 case EQ: fputs ("ne", file); break;
5096 case NE: fputs ("eq", file); break;
5097 case GT: fputs ("le", file); break;
5098 case GE: fputs ("lt", file); break;
5099 case LT: fputs ("ge", file); break;
5100 case LE: fputs ("gt", file); break;
5101 case GTU: fputs ("leu", file); break;
5102 case GEU: fputs ("ltu", file); break;
5103 case LTU: fputs ("geu", file); break;
5104 case LEU: fputs ("gtu", file); break;
5105 default:
5106 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5109 else if (letter == 'F')
5110 switch (code)
5112 case EQ: fputs ("c1f", file); break;
5113 case NE: fputs ("c1t", file); break;
5114 default:
5115 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5118 else if (letter == 'W')
5119 switch (code)
5121 case EQ: fputs ("c1t", file); break;
5122 case NE: fputs ("c1f", file); break;
5123 default:
5124 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5127 else if (letter == 'h')
5129 if (GET_CODE (op) == HIGH)
5130 op = XEXP (op, 0);
5132 print_operand_reloc (file, op, mips_hi_relocs);
5135 else if (letter == 'R')
5136 print_operand_reloc (file, op, mips_lo_relocs);
5138 else if (letter == 'Y')
5140 if (GET_CODE (op) == CONST_INT
5141 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5142 < ARRAY_SIZE (mips_fp_conditions)))
5143 fputs (mips_fp_conditions[INTVAL (op)], file);
5144 else
5145 output_operand_lossage ("invalid %%Y value");
5148 else if (letter == 'Z')
5150 if (ISA_HAS_8CC)
5152 print_operand (file, op, 0);
5153 fputc (',', file);
5157 else if (code == REG || code == SUBREG)
5159 register int regnum;
5161 if (code == REG)
5162 regnum = REGNO (op);
5163 else
5164 regnum = true_regnum (op);
5166 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5167 || (letter == 'L' && WORDS_BIG_ENDIAN)
5168 || letter == 'D')
5169 regnum++;
5171 fprintf (file, "%s", reg_names[regnum]);
5174 else if (code == MEM)
5176 if (letter == 'D')
5177 output_address (plus_constant (XEXP (op, 0), 4));
5178 else
5179 output_address (XEXP (op, 0));
5182 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5183 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5185 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5186 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5188 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5189 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5191 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5192 fputs (reg_names[GP_REG_FIRST], file);
5194 else if (letter == 'd' || letter == 'x' || letter == 'X')
5195 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5197 else if (letter == 'T' || letter == 't')
5199 int truth = (code == NE) == (letter == 'T');
5200 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5203 else if (CONST_GP_P (op))
5204 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5206 else
5207 output_addr_const (file, op);
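/* Illustration only: in a hypothetical mips.md output template such as

       "%(bne%?\t%z0,%.,%1%)"

   '%(' and '%)' bracket the branch in .set noreorder/.set reorder,
   '%?' appends 'l' when a branch-likely form is wanted, '%z0' prints
   $0 if operand 0 is a constant zero, '%.' prints the hard-wired zero
   register, and '%1' is printed as an ordinary operand.  */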
5211 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5212 RELOCS is the array of relocations to use. */
5214 static void
5215 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5217 enum mips_symbol_type symbol_type;
5218 const char *p;
5219 rtx base;
5220 HOST_WIDE_INT offset;
5222 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5223 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5225 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5226 mips_split_const (op, &base, &offset);
5227 if (UNSPEC_ADDRESS_P (base))
5228 op = plus_constant (UNSPEC_ADDRESS (base), offset);
5230 fputs (relocs[symbol_type], file);
5231 output_addr_const (file, op);
5232 for (p = relocs[symbol_type]; *p != 0; p++)
5233 if (*p == '(')
5234 fputc (')', file);
5237 /* Output address operand X to FILE. */
5239 void
5240 print_operand_address (FILE *file, rtx x)
5242 struct mips_address_info addr;
5244 if (mips_classify_address (&addr, x, word_mode, true))
5245 switch (addr.type)
5247 case ADDRESS_REG:
5248 print_operand (file, addr.offset, 0);
5249 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5250 return;
5252 case ADDRESS_LO_SUM:
5253 print_operand (file, addr.offset, 'R');
5254 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5255 return;
5257 case ADDRESS_CONST_INT:
5258 output_addr_const (file, x);
5259 fprintf (file, "(%s)", reg_names[0]);
5260 return;
5262 case ADDRESS_SYMBOLIC:
5263 output_addr_const (file, x);
5264 return;
5266 gcc_unreachable ();
5269 /* When using assembler macros, keep track of all of the small-data externs
5270 so that mips_file_end can emit the appropriate declarations for them.
5272 In most cases it would be safe (though pointless) to emit .externs
5273 for other symbols too. One exception is when an object is within
5274 the -G limit but declared by the user to be in a section other
5275 than .sbss or .sdata. */
5278 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5280 register struct extern_list *p;
5282 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5284 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5285 p->next = extern_head;
5286 p->name = name;
5287 p->size = int_size_in_bytes (TREE_TYPE (decl));
5288 extern_head = p;
5291 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5293 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5294 p->next = extern_head;
5295 p->name = name;
5296 p->size = -1;
5297 extern_head = p;
5300 return 0;
5303 #if TARGET_IRIX
5304 static void
5305 irix_output_external_libcall (rtx fun)
5307 register struct extern_list *p;
5309 if (mips_abi == ABI_32)
5311 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5312 p->next = extern_head;
5313 p->name = XSTR (fun, 0);
5314 p->size = -1;
5315 extern_head = p;
5318 #endif
5320 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5321 put out a MIPS ECOFF file and a stab. */
5323 void
5324 mips_output_filename (FILE *stream, const char *name)
5327 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5328 directives. */
5329 if (write_symbols == DWARF2_DEBUG)
5330 return;
5331 else if (mips_output_filename_first_time)
5333 mips_output_filename_first_time = 0;
5334 num_source_filenames += 1;
5335 current_function_file = name;
5336 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5337 output_quoted_string (stream, name);
5338 putc ('\n', stream);
5341 /* If we are emitting stabs, let dbxout.c handle this (except for
5342 the mips_output_filename_first_time case). */
5343 else if (write_symbols == DBX_DEBUG)
5344 return;
5346 else if (name != current_function_file
5347 && strcmp (name, current_function_file) != 0)
5349 num_source_filenames += 1;
5350 current_function_file = name;
5351 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5352 output_quoted_string (stream, name);
5353 putc ('\n', stream);
5357 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5358 that should be written before the opening quote, such as "\t.ascii\t"
5359 for real string data or "\t# " for a comment. */
5361 void
5362 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5363 const char *prefix)
5365 size_t i;
5366 int cur_pos = 17;
5367 register const unsigned char *string =
5368 (const unsigned char *)string_param;
5370 fprintf (stream, "%s\"", prefix);
5371 for (i = 0; i < len; i++)
5373 register int c = string[i];
5375 if (ISPRINT (c))
5377 if (c == '\\' || c == '\"')
5379 putc ('\\', stream);
5380 cur_pos++;
5382 putc (c, stream);
5383 cur_pos++;
5385 else
5387 fprintf (stream, "\\%03o", c);
5388 cur_pos += 4;
5391 if (cur_pos > 72 && i+1 < len)
5393 cur_pos = 17;
5394 fprintf (stream, "\"\n%s\"", prefix);
5397 fprintf (stream, "\"\n");
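/* Illustration only: mips_output_ascii (stream, "a\"b\n", 4, "\t.ascii\t")
   would emit

       .ascii	"a\"b\012"

   printable characters are copied with '\' and '"' escaped, and
   everything else becomes a three-digit octal escape.  */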
5400 /* Implement TARGET_ASM_FILE_START. */
5402 static void
5403 mips_file_start (void)
5405 default_file_start ();
5407 if (!TARGET_IRIX)
5409 /* Generate a special section to describe the ABI switches used to
5410 produce the resultant binary. This used to be done by the assembler
5411 setting bits in the ELF header's flags field, but we have run out of
5412 bits. GDB needs this information in order to be able to correctly
5413 debug these binaries. See the function mips_gdbarch_init() in
5414 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5415 causes unnecessary IRIX 6 ld warnings. */
5416 const char * abi_string = NULL;
5418 switch (mips_abi)
5420 case ABI_32: abi_string = "abi32"; break;
5421 case ABI_N32: abi_string = "abiN32"; break;
5422 case ABI_64: abi_string = "abi64"; break;
5423 case ABI_O64: abi_string = "abiO64"; break;
5424 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5425 default:
5426 gcc_unreachable ();
5428 /* Note - we use fprintf directly rather than calling named_section()
5429 because in this way we can avoid creating an allocated section. We
5430 do not want this section to take up any space in the running
5431 executable. */
5432 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5434 /* There is no ELF header flag to distinguish long32 forms of the
5435 EABI from long64 forms. Emit a special section to help tools
5436 such as GDB. */
5437 if (mips_abi == ABI_EABI)
5438 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5439 TARGET_LONG64 ? 64 : 32);
5441 /* Restore the default section. */
5442 fprintf (asm_out_file, "\t.previous\n");
5445 /* Generate the pseudo ops that System V.4 wants. */
5446 if (TARGET_ABICALLS)
5447 /* ??? but do not want this (or want pic0) if -non-shared? */
5448 fprintf (asm_out_file, "\t.abicalls\n");
5450 if (TARGET_MIPS16)
5451 fprintf (asm_out_file, "\t.set\tmips16\n");
5453 if (flag_verbose_asm)
5454 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5455 ASM_COMMENT_START,
5456 mips_section_threshold, mips_arch_info->name, mips_isa);
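/* Illustration only: for a non-IRIX -mabi=32 compilation the code above
   emits something like

       .section .mdebug.abi32
       .previous
       .abicalls               # only when TARGET_ABICALLS

   with an extra .gcc_compiled_long32 or .gcc_compiled_long64 section for
   the EABI, ".set mips16" for TARGET_MIPS16, and an optional
   -fverbose-asm comment giving the -G value, architecture and ISA.  */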
5459 #ifdef BSS_SECTION_ASM_OP
5460 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5461 in the use of sbss. */
5463 void
5464 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5465 unsigned HOST_WIDE_INT size, int align)
5467 extern tree last_assemble_variable_decl;
5469 if (mips_in_small_data_p (decl))
5470 named_section (0, ".sbss", 0);
5471 else
5472 bss_section ();
5473 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5474 last_assemble_variable_decl = decl;
5475 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5476 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5478 #endif
5480 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5481 .externs for any small-data variables that turned out to be external. */
5483 static void
5484 mips_file_end (void)
5486 tree name_tree;
5487 struct extern_list *p;
5489 if (extern_head)
5491 fputs ("\n", asm_out_file);
5493 for (p = extern_head; p != 0; p = p->next)
5495 name_tree = get_identifier (p->name);
5497 /* Positively ensure only one .extern for any given symbol. */
5498 if (!TREE_ASM_WRITTEN (name_tree)
5499 && TREE_SYMBOL_REFERENCED (name_tree))
5501 TREE_ASM_WRITTEN (name_tree) = 1;
5502 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5503 `.globl name .text' directive for every used but
5504 undefined function. If we don't, the linker may perform
5505 an optimization (skipping over the insns that set $gp)
5506 when it is unsafe. */
5507 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5509 fputs ("\t.globl ", asm_out_file);
5510 assemble_name (asm_out_file, p->name);
5511 fputs (" .text\n", asm_out_file);
5513 else
5515 fputs ("\t.extern\t", asm_out_file);
5516 assemble_name (asm_out_file, p->name);
5517 fprintf (asm_out_file, ", %d\n", p->size);
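/* Illustration only: the loop above produces directives such as

       .extern	small_var, 4
       .globl undefined_func .text

   where "small_var" stands for a small-data external of known size and
   "undefined_func" for a function that IRIX o32 code references but does
   not define in this translation unit (p->size == -1).  */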
5524 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5525 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5527 void
5528 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5529 unsigned HOST_WIDE_INT size,
5530 unsigned int align)
5532 /* If the target wants uninitialized const declarations in
5533 .rdata then don't put them in .comm. */
5534 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5535 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5536 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5538 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5539 targetm.asm_out.globalize_label (stream, name);
5541 readonly_data_section ();
5542 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5543 mips_declare_object (stream, name, "",
5544 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5545 size);
5547 else
5548 mips_declare_common_object (stream, name, "\n\t.comm\t",
5549 size, align, true);
5552 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5553 NAME is the name of the object and ALIGN is the required alignment
5554 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5555 alignment argument. */
5557 void
5558 mips_declare_common_object (FILE *stream, const char *name,
5559 const char *init_string,
5560 unsigned HOST_WIDE_INT size,
5561 unsigned int align, bool takes_alignment_p)
5563 if (!takes_alignment_p)
5565 size += (align / BITS_PER_UNIT) - 1;
5566 size -= size % (align / BITS_PER_UNIT);
5567 mips_declare_object (stream, name, init_string,
5568 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5570 else
5571 mips_declare_object (stream, name, init_string,
5572 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5573 size, align / BITS_PER_UNIT);
5576 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5577 macros, mark the symbol as written so that mips_file_end won't emit an
5578 .extern for it. STREAM is the output file, NAME is the name of the
5579 symbol, INIT_STRING is the string that should be written before the
5580 symbol and FINAL_STRING is the string that should be written after it.
5581 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5583 void
5584 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5585 const char *final_string, ...)
5587 va_list ap;
5589 fputs (init_string, stream);
5590 assemble_name (stream, name);
5591 va_start (ap, final_string);
5592 vfprintf (stream, final_string, ap);
5593 va_end (ap);
5595 if (!TARGET_EXPLICIT_RELOCS)
5597 tree name_tree = get_identifier (name);
5598 TREE_ASM_WRITTEN (name_tree) = 1;
5602 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5603 extern int size_directive_output;
5605 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5606 definitions except that it uses mips_declare_object() to emit the label. */
5608 void
5609 mips_declare_object_name (FILE *stream, const char *name,
5610 tree decl ATTRIBUTE_UNUSED)
5612 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5613 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
5614 #endif
5616 size_directive_output = 0;
5617 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5619 HOST_WIDE_INT size;
5621 size_directive_output = 1;
5622 size = int_size_in_bytes (TREE_TYPE (decl));
5623 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5626 mips_declare_object (stream, name, "", ":\n", 0);
5629 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5631 void
5632 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5634 const char *name;
5636 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5637 if (!flag_inhibit_size_directive
5638 && DECL_SIZE (decl) != 0
5639 && !at_end && top_level
5640 && DECL_INITIAL (decl) == error_mark_node
5641 && !size_directive_output)
5643 HOST_WIDE_INT size;
5645 size_directive_output = 1;
5646 size = int_size_in_bytes (TREE_TYPE (decl));
5647 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5650 #endif
5652 /* Return true if X is a small data address that can be rewritten
5653 as a LO_SUM. */
5655 static bool
5656 mips_rewrite_small_data_p (rtx x)
5658 enum mips_symbol_type symbol_type;
5660 return (TARGET_EXPLICIT_RELOCS
5661 && mips_symbolic_constant_p (x, &symbol_type)
5662 && symbol_type == SYMBOL_SMALL_DATA);
5666 /* A for_each_rtx callback for mips_small_data_pattern_p. */
5668 static int
5669 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5671 if (GET_CODE (*loc) == LO_SUM)
5672 return -1;
5674 return mips_rewrite_small_data_p (*loc);
5677 /* Return true if OP refers to small data symbols directly, not through
5678 a LO_SUM. */
5680 bool
5681 mips_small_data_pattern_p (rtx op)
5683 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5686 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
5688 static int
5689 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5691 if (mips_rewrite_small_data_p (*loc))
5692 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
5694 if (GET_CODE (*loc) == LO_SUM)
5695 return -1;
5697 return 0;
5700 /* If possible, rewrite OP so that it refers to small data using
5701 explicit relocations. */
5704 mips_rewrite_small_data (rtx op)
5706 op = copy_insn (op);
5707 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5708 return op;
5711 /* Return true if the current function has an insn that implicitly
5712 refers to $gp. */
5714 static bool
5715 mips_function_has_gp_insn (void)
5717 /* Don't bother rechecking if we found one last time. */
5718 if (!cfun->machine->has_gp_insn_p)
5720 rtx insn;
5722 push_topmost_sequence ();
5723 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5724 if (INSN_P (insn)
5725 && GET_CODE (PATTERN (insn)) != USE
5726 && GET_CODE (PATTERN (insn)) != CLOBBER
5727 && (get_attr_got (insn) != GOT_UNSET
5728 || small_data_pattern (PATTERN (insn), VOIDmode)))
5729 break;
5730 pop_topmost_sequence ();
5732 cfun->machine->has_gp_insn_p = (insn != 0);
5734 return cfun->machine->has_gp_insn_p;
5738 /* Return the register that should be used as the global pointer
5739 within this function. Return 0 if the function doesn't need
5740 a global pointer. */
5742 static unsigned int
5743 mips_global_pointer (void)
5745 unsigned int regno;
5747 /* $gp is always available in non-abicalls code. */
5748 if (!TARGET_ABICALLS)
5749 return GLOBAL_POINTER_REGNUM;
5751 /* We must always provide $gp when it is used implicitly. */
5752 if (!TARGET_EXPLICIT_RELOCS)
5753 return GLOBAL_POINTER_REGNUM;
5755 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5756 a valid gp. */
5757 if (current_function_profile)
5758 return GLOBAL_POINTER_REGNUM;
5760 /* If the function has a nonlocal goto, $gp must hold the correct
5761 global pointer for the target function. */
5762 if (current_function_has_nonlocal_goto)
5763 return GLOBAL_POINTER_REGNUM;
5765 /* If the gp is never referenced, there's no need to initialize it.
5766 Note that reload can sometimes introduce constant pool references
5767 into a function that otherwise didn't need them. For example,
5768 suppose we have an instruction like:
5770 (set (reg:DF R1) (float:DF (reg:SI R2)))
5772 If R2 turns out to be a constant such as 1, the instruction may have a
5773 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5774 using this constant if R2 doesn't get allocated to a register.
5776 In cases like these, reload will have added the constant to the pool
5777 but no instruction will yet refer to it. */
5778 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5779 && !current_function_uses_const_pool
5780 && !mips_function_has_gp_insn ())
5781 return 0;
5783 /* We need a global pointer, but perhaps we can use a call-clobbered
5784 register instead of $gp. */
5785 if (TARGET_NEWABI && current_function_is_leaf)
5786 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5787 if (!regs_ever_live[regno]
5788 && call_used_regs[regno]
5789 && !fixed_regs[regno]
5790 && regno != PIC_FUNCTION_ADDR_REGNUM)
5791 return regno;
5793 return GLOBAL_POINTER_REGNUM;
5797 /* Return true if the current function must save REGNO. */
5799 static bool
5800 mips_save_reg_p (unsigned int regno)
5802 /* We only need to save $gp for NewABI PIC. */
5803 if (regno == GLOBAL_POINTER_REGNUM)
5804 return (TARGET_ABICALLS && TARGET_NEWABI
5805 && cfun->machine->global_pointer == regno);
5807 /* Check call-saved registers. */
5808 if (regs_ever_live[regno] && !call_used_regs[regno])
5809 return true;
5811 /* We need to save the old frame pointer before setting up a new one. */
5812 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5813 return true;
5815 /* We need to save the incoming return address if it is ever clobbered
5816 within the function. */
5817 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
5818 return true;
5820 if (TARGET_MIPS16)
5822 tree return_type;
5824 return_type = DECL_RESULT (current_function_decl);
5826 /* $18 is a special case in mips16 code. It may be used to call
5827 a function which returns a floating point value, but it is
5828 marked in call_used_regs. */
5829 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5830 return true;
5832 /* $31 is also a special case. It will be used to copy a return
5833 value into the floating point registers if the return value is
5834 floating point. */
5835 if (regno == GP_REG_FIRST + 31
5836 && mips16_hard_float
5837 && !aggregate_value_p (return_type, current_function_decl)
5838 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5839 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5840 return true;
5843 return false;
5847 /* Return the bytes needed to compute the frame pointer from the current
5848 stack pointer. SIZE is the size (in bytes) of the local variables.
5850 Mips stack frames look like:
5852 Before call After call
5853 +-----------------------+ +-----------------------+
5854 high | | | |
5855 mem. | | | |
5856 | caller's temps. | | caller's temps. |
5857 | | | |
5858 +-----------------------+ +-----------------------+
5859 | | | |
5860 | arguments on stack. | | arguments on stack. |
5861 | | | |
5862 +-----------------------+ +-----------------------+
5863 | 4 words to save | | 4 words to save |
5864 | arguments passed | | arguments passed |
5865 | in registers, even | | in registers, even |
5866 SP->| if not passed. | VFP->| if not passed. |
5867 +-----------------------+ +-----------------------+
5869 | fp register save |
5871 +-----------------------+
5873 | gp register save |
5875 +-----------------------+
5877 | local variables |
5879 +-----------------------+
5881 | alloca allocations |
5883 +-----------------------+
5885 | GP save for V.4 abi |
5887 +-----------------------+
5889 | arguments on stack |
5891 +-----------------------+
5892 | 4 words to save |
5893 | arguments passed |
5894 | in registers, even |
5895 low SP->| if not passed. |
5896 memory +-----------------------+
5900 HOST_WIDE_INT
5901 compute_frame_size (HOST_WIDE_INT size)
5903 unsigned int regno;
5904 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5905 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5906 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5907 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5908 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5909 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5910 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5911 unsigned int mask; /* mask of saved gp registers */
5912 unsigned int fmask; /* mask of saved fp registers */
5914 cfun->machine->global_pointer = mips_global_pointer ();
5916 gp_reg_size = 0;
5917 fp_reg_size = 0;
5918 mask = 0;
5919 fmask = 0;
5920 var_size = MIPS_STACK_ALIGN (size);
5921 args_size = current_function_outgoing_args_size;
5922 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5924 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5925 functions. If the function has local variables, we're committed
5926 to allocating it anyway. Otherwise reclaim it here. */
5927 if (var_size == 0 && current_function_is_leaf)
5928 cprestore_size = args_size = 0;
5930 /* The MIPS 3.0 linker does not like functions that dynamically
5931 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5932 looks as though we are trying to create a second frame pointer to
5933 the function. Allocate some stack space to keep the linker happy. */
5935 if (args_size == 0 && current_function_calls_alloca)
5936 args_size = 4 * UNITS_PER_WORD;
5938 total_size = var_size + args_size + cprestore_size;
5940 /* Calculate space needed for gp registers. */
5941 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5942 if (mips_save_reg_p (regno))
5944 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5945 mask |= 1 << (regno - GP_REG_FIRST);
5948 /* We need to restore these for the handler. */
5949 if (current_function_calls_eh_return)
5951 unsigned int i;
5952 for (i = 0; ; ++i)
5954 regno = EH_RETURN_DATA_REGNO (i);
5955 if (regno == INVALID_REGNUM)
5956 break;
5957 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5958 mask |= 1 << (regno - GP_REG_FIRST);
5962 /* This loop must iterate over the same space as its companion in
5963 save_restore_insns. */
5964 for (regno = (FP_REG_LAST - FP_INC + 1);
5965 regno >= FP_REG_FIRST;
5966 regno -= FP_INC)
5968 if (mips_save_reg_p (regno))
5970 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5971 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5975 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5976 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5978 /* Add in space reserved on the stack by the callee for storing arguments
5979 passed in registers. */
5980 if (!TARGET_OLDABI)
5981 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5983 /* Save other computed information. */
5984 cfun->machine->frame.total_size = total_size;
5985 cfun->machine->frame.var_size = var_size;
5986 cfun->machine->frame.args_size = args_size;
5987 cfun->machine->frame.cprestore_size = cprestore_size;
5988 cfun->machine->frame.gp_reg_size = gp_reg_size;
5989 cfun->machine->frame.fp_reg_size = fp_reg_size;
5990 cfun->machine->frame.mask = mask;
5991 cfun->machine->frame.fmask = fmask;
5992 cfun->machine->frame.initialized = reload_completed;
5993 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5994 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
5996 if (mask)
5998 HOST_WIDE_INT offset;
6000 offset = (args_size + cprestore_size + var_size
6001 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6002 cfun->machine->frame.gp_sp_offset = offset;
6003 cfun->machine->frame.gp_save_offset = offset - total_size;
6005 else
6007 cfun->machine->frame.gp_sp_offset = 0;
6008 cfun->machine->frame.gp_save_offset = 0;
6011 if (fmask)
6013 HOST_WIDE_INT offset;
6015 offset = (args_size + cprestore_size + var_size
6016 + gp_reg_rounded + fp_reg_size
6017 - FP_INC * UNITS_PER_FPREG);
6018 cfun->machine->frame.fp_sp_offset = offset;
6019 cfun->machine->frame.fp_save_offset = offset - total_size;
6021 else
6023 cfun->machine->frame.fp_sp_offset = 0;
6024 cfun->machine->frame.fp_save_offset = 0;
6027 /* Ok, we're done. */
6028 return total_size;
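/* A rough worked example (illustrative only, o32, 32-bit registers):
   with 16 bytes of locals, 16 bytes of outgoing arguments and
   $31 + $16 saved, the pieces above would be approximately

	var_size    = MIPS_STACK_ALIGN (16)
	gp_reg_size = 2 * 4
	total_size  = args_size + cprestore_size + var_size
		      + MIPS_STACK_ALIGN (gp_reg_size)

   with gp_sp_offset pointing at the save slot of the highest-numbered
   saved GPR and gp_save_offset = gp_sp_offset - total_size.  The exact
   numbers depend on STARTING_FRAME_OFFSET and the stack alignment.  */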
6031 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6032 pointer or argument pointer. TO is either the stack pointer or
6033 hard frame pointer. */
6035 HOST_WIDE_INT
6036 mips_initial_elimination_offset (int from, int to)
6038 HOST_WIDE_INT offset;
6040 compute_frame_size (get_frame_size ());
6042 /* Set OFFSET to the offset from the stack pointer. */
6043 switch (from)
6045 case FRAME_POINTER_REGNUM:
6046 offset = 0;
6047 break;
6049 case ARG_POINTER_REGNUM:
6050 offset = cfun->machine->frame.total_size;
6051 if (TARGET_NEWABI)
6052 offset -= current_function_pretend_args_size;
6053 break;
6055 default:
6056 gcc_unreachable ();
6059 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6060 offset -= cfun->machine->frame.args_size;
6062 return offset;
6065 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6066 back to a previous frame. */
6068 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6070 if (count != 0)
6071 return const0_rtx;
6073 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6076 /* Use FN to save or restore register REGNO. MODE is the register's
6077 mode and OFFSET is the offset of its save slot from the current
6078 stack pointer. */
6080 static void
6081 mips_save_restore_reg (enum machine_mode mode, int regno,
6082 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6084 rtx mem;
6086 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
6088 fn (gen_rtx_REG (mode, regno), mem);
6092 /* Call FN for each register that is saved by the current function.
6093 SP_OFFSET is the offset of the current stack pointer from the start
6094 of the frame. */
6096 static void
6097 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6099 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6101 enum machine_mode fpr_mode;
6102 HOST_WIDE_INT offset;
6103 int regno;
6105 /* Save registers starting from high to low. Debuggers prefer at least
6106 the return register to be stored at func+4, and saving it first also
6107 means that we do not need a nop in the epilogue if at least one other
6108 register is restored in addition to the return address. */
6109 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6110 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6111 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6113 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6114 offset -= GET_MODE_SIZE (gpr_mode);
6117 /* This loop must iterate over the same space as its companion in
6118 compute_frame_size. */
6119 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6120 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6121 for (regno = (FP_REG_LAST - FP_INC + 1);
6122 regno >= FP_REG_FIRST;
6123 regno -= FP_INC)
6124 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6126 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6127 offset -= GET_MODE_SIZE (fpr_mode);
6129 #undef BITSET_P
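/* For example (illustrative): if $31, $17 and $16 are saved and
   gpr_mode is SImode, FN is called for them in that order, at
   gp_sp_offset - sp_offset, then 4 and then 8 bytes below that,
   matching the top-down layout computed in compute_frame_size.  */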
6132 /* If we're generating n32 or n64 abicalls, and the current function
6133 does not use $28 as its global pointer, emit a cplocal directive.
6134 Use pic_offset_table_rtx as the argument to the directive. */
6136 static void
6137 mips_output_cplocal (void)
6139 if (!TARGET_EXPLICIT_RELOCS
6140 && cfun->machine->global_pointer > 0
6141 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6142 output_asm_insn (".cplocal %+", 0);
6145 /* If we're generating n32 or n64 abicalls, emit instructions
6146 to set up the global pointer. */
6148 static void
6149 mips_emit_loadgp (void)
6151 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
6153 rtx addr, offset, incoming_address;
6155 addr = XEXP (DECL_RTL (current_function_decl), 0);
6156 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6157 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6158 emit_insn (gen_loadgp (offset, incoming_address));
6159 if (!TARGET_EXPLICIT_RELOCS)
6160 emit_insn (gen_loadgp_blockage ());
6164 /* Set up the stack and frame (if desired) for the function. */
6166 static void
6167 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6169 const char *fnname;
6170 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6172 #ifdef SDB_DEBUGGING_INFO
6173 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6174 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6175 #endif
6177 /* In mips16 mode, we may need to generate a 32-bit stub to handle
6178 floating-point arguments. The linker will arrange for any 32-bit
6179 functions to call this stub, which will then jump to the 16-bit
6180 function proper. */
6181 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6182 && current_function_args_info.fp_code != 0)
6183 build_mips16_function_stub (file);
6185 if (!FUNCTION_NAME_ALREADY_DECLARED)
6187 /* Get the function name the same way that toplev.c does before calling
6188 assemble_start_function. This is needed so that the name used here
6189 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6190 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6192 if (!flag_inhibit_size_directive)
6194 fputs ("\t.ent\t", file);
6195 assemble_name (file, fnname);
6196 fputs ("\n", file);
6199 assemble_name (file, fnname);
6200 fputs (":\n", file);
6203 /* Stop mips_file_end from treating this function as external. */
6204 if (TARGET_IRIX && mips_abi == ABI_32)
6205 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6207 if (!flag_inhibit_size_directive)
6209 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6210 fprintf (file,
6211 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6212 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6213 ", args= " HOST_WIDE_INT_PRINT_DEC
6214 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6215 (reg_names[(frame_pointer_needed)
6216 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6217 ((frame_pointer_needed && TARGET_MIPS16)
6218 ? tsize - cfun->machine->frame.args_size
6219 : tsize),
6220 reg_names[GP_REG_FIRST + 31],
6221 cfun->machine->frame.var_size,
6222 cfun->machine->frame.num_gp,
6223 cfun->machine->frame.num_fp,
6224 cfun->machine->frame.args_size,
6225 cfun->machine->frame.cprestore_size);
6227 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6228 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6229 cfun->machine->frame.mask,
6230 cfun->machine->frame.gp_save_offset);
6231 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6232 cfun->machine->frame.fmask,
6233 cfun->machine->frame.fp_save_offset);
6235 /* Require:
6236 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6237 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6240 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
6242 /* Handle the initialization of $gp for SVR4 PIC. */
6243 if (!cfun->machine->all_noreorder_p)
6244 output_asm_insn ("%(.cpload\t%^%)", 0);
6245 else
6246 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6248 else if (cfun->machine->all_noreorder_p)
6249 output_asm_insn ("%(%<", 0);
6251 /* Tell the assembler which register we're using as the global
6252 pointer. This is needed for thunks, since they can use either
6253 explicit relocs or assembler macros. */
6254 mips_output_cplocal ();
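/* Illustrative only (hypothetical function "foo", o32, no abicalls):
   for a 32-byte frame with 16 bytes of outgoing arguments, 8 bytes of
   locals and $31 + $16 saved, the directives above would resemble

	.ent	foo
   foo:
	.frame	$sp,32,$31	# vars= 8, regs= 2/0, args= 16, gp= 0
	.mask	0x80010000,-4
	.fmask	0x00000000,0

   0x80010000 is bit 31 ($31) plus bit 16 ($16); -4 is gp_save_offset,
   i.e. gp_sp_offset (28) minus total_size (32).  */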
6257 /* Make the last instruction frame related and note that it performs
6258 the operation described by FRAME_PATTERN. */
6260 static void
6261 mips_set_frame_expr (rtx frame_pattern)
6263 rtx insn;
6265 insn = get_last_insn ();
6266 RTX_FRAME_RELATED_P (insn) = 1;
6267 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6268 frame_pattern,
6269 REG_NOTES (insn));
6273 /* Return a frame-related rtx that stores REG at MEM.
6274 REG must be a single register. */
6276 static rtx
6277 mips_frame_set (rtx mem, rtx reg)
6279 rtx set;
6281 /* If we're saving the return address register and the dwarf return
6282 address column differs from the hard register number, adjust the
6283 note reg to refer to the former. */
6284 if (REGNO (reg) == GP_REG_FIRST + 31
6285 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6286 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6288 set = gen_rtx_SET (VOIDmode, mem, reg);
6289 RTX_FRAME_RELATED_P (set) = 1;
6291 return set;
6295 /* Save register REG to MEM. Make the instruction frame-related. */
6297 static void
6298 mips_save_reg (rtx reg, rtx mem)
6300 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6302 rtx x1, x2;
6304 if (mips_split_64bit_move_p (mem, reg))
6305 mips_split_64bit_move (mem, reg);
6306 else
6307 emit_move_insn (mem, reg);
6309 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6310 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6311 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6313 else
6315 if (TARGET_MIPS16
6316 && REGNO (reg) != GP_REG_FIRST + 31
6317 && !M16_REG_P (REGNO (reg)))
6319 /* Save a non-mips16 register by moving it through a temporary.
6320 We don't need to do this for $31 since there's a special
6321 instruction for it. */
6322 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6323 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6325 else
6326 emit_move_insn (mem, reg);
6328 mips_set_frame_expr (mips_frame_set (mem, reg));
6333 /* Expand the prologue into a bunch of separate insns. */
6335 void
6336 mips_expand_prologue (void)
6338 HOST_WIDE_INT size;
6340 if (cfun->machine->global_pointer > 0)
6341 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6343 size = compute_frame_size (get_frame_size ());
6345 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6346 bytes beforehand; this is enough to cover the register save area
6347 without going out of range. */
6348 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6350 HOST_WIDE_INT step1;
6352 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6353 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6354 stack_pointer_rtx,
6355 GEN_INT (-step1)))) = 1;
6356 size -= step1;
6357 mips_for_each_saved_reg (size, mips_save_reg);
6360 /* Allocate the rest of the frame. */
6361 if (size > 0)
6363 if (SMALL_OPERAND (-size))
6364 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6365 stack_pointer_rtx,
6366 GEN_INT (-size)))) = 1;
6367 else
6369 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6370 if (TARGET_MIPS16)
6372 /* There are no instructions to add or subtract registers
6373 from the stack pointer, so use the frame pointer as a
6374 temporary. We should always be using a frame pointer
6375 in this case anyway. */
6376 gcc_assert (frame_pointer_needed);
6377 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6378 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6379 hard_frame_pointer_rtx,
6380 MIPS_PROLOGUE_TEMP (Pmode)));
6381 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6383 else
6384 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6385 stack_pointer_rtx,
6386 MIPS_PROLOGUE_TEMP (Pmode)));
6388 /* Describe the combined effect of the previous instructions. */
6389 mips_set_frame_expr
6390 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6391 plus_constant (stack_pointer_rtx, -size)));
6395 /* Set up the frame pointer, if we're using one. In mips16 code,
6396 we point the frame pointer ahead of the outgoing argument area.
6397 This should allow more variables & incoming arguments to be
6398 accessed with unextended instructions. */
6399 if (frame_pointer_needed)
6401 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6403 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6404 RTX_FRAME_RELATED_P
6405 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6406 stack_pointer_rtx,
6407 offset))) = 1;
6409 else
6410 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6411 stack_pointer_rtx)) = 1;
6414 /* If generating o32/o64 abicalls, save $gp on the stack. */
6415 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6416 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6418 mips_emit_loadgp ();
6420 /* If we are profiling, make sure no instructions are scheduled before
6421 the call to mcount. */
6423 if (current_function_profile)
6424 emit_insn (gen_blockage ());
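/* Rough shape of what the code above emits for a frame larger than
   MIPS_MAX_FIRST_STACK_STEP (illustrative, not literal output):

	sp := sp - step1		step1 <= MIPS_MAX_FIRST_STACK_STEP
	save GPRs/FPRs at offsets from the new sp
	sp := sp - (frame - step1)	via one addition if the remainder
					is a SMALL_OPERAND, otherwise
					through MIPS_PROLOGUE_TEMP (the
					frame pointer is the temporary in
					mips16 mode)

   followed, where needed, by the frame-pointer copy, the o32/o64
   .cprestore store and the loadgp sequence.  */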
6427 /* Do any necessary cleanup after a function to restore stack, frame,
6428 and regs. */
6430 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6432 static void
6433 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6434 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6436 /* Reinstate the normal $gp. */
6437 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6438 mips_output_cplocal ();
6440 if (cfun->machine->all_noreorder_p)
6442 /* Avoid using %>%) since it adds excess whitespace. */
6443 output_asm_insn (".set\tmacro", 0);
6444 output_asm_insn (".set\treorder", 0);
6445 set_noreorder = set_nomacro = 0;
6448 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6450 const char *fnname;
6452 /* Get the function name the same way that toplev.c does before calling
6453 assemble_start_function. This is needed so that the name used here
6454 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6455 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6456 fputs ("\t.end\t", file);
6457 assemble_name (file, fnname);
6458 fputs ("\n", file);
6462 /* Emit instructions to restore register REG from slot MEM. */
6464 static void
6465 mips_restore_reg (rtx reg, rtx mem)
6467 /* There's no mips16 instruction to load $31 directly. Load into
6468 $7 instead and adjust the return insn appropriately. */
6469 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6470 reg = gen_rtx_REG (GET_MODE (reg), 7);
6472 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6474 /* Can't restore directly; move through a temporary. */
6475 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6476 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6478 else
6479 emit_move_insn (reg, mem);
6483 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6484 if this epilogue precedes a sibling call, false if it is for a normal
6485 "epilogue" pattern. */
6487 void
6488 mips_expand_epilogue (int sibcall_p)
6490 HOST_WIDE_INT step1, step2;
6491 rtx base, target;
6493 if (!sibcall_p && mips_can_use_return_insn ())
6495 emit_jump_insn (gen_return ());
6496 return;
6499 /* Split the frame into two. STEP1 is the amount of stack we should
6500 deallocate before restoring the registers. STEP2 is the amount we
6501 should deallocate afterwards.
6503 Start off by assuming that no registers need to be restored. */
6504 step1 = cfun->machine->frame.total_size;
6505 step2 = 0;
6507 /* Work out which register holds the frame address. Account for the
6508 frame pointer offset used by mips16 code. */
6509 if (!frame_pointer_needed)
6510 base = stack_pointer_rtx;
6511 else
6513 base = hard_frame_pointer_rtx;
6514 if (TARGET_MIPS16)
6515 step1 -= cfun->machine->frame.args_size;
6518 /* If we need to restore registers, deallocate as much stack as
6519 possible in the second step without going out of range. */
6520 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6522 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6523 step1 -= step2;
6526 /* Set TARGET to BASE + STEP1. */
6527 target = base;
6528 if (step1 > 0)
6530 rtx adjust;
6532 /* Get an rtx for STEP1 that we can add to BASE. */
6533 adjust = GEN_INT (step1);
6534 if (!SMALL_OPERAND (step1))
6536 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6537 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6540 /* Normal mode code can copy the result straight into $sp. */
6541 if (!TARGET_MIPS16)
6542 target = stack_pointer_rtx;
6544 emit_insn (gen_add3_insn (target, base, adjust));
6547 /* Copy TARGET into the stack pointer. */
6548 if (target != stack_pointer_rtx)
6549 emit_move_insn (stack_pointer_rtx, target);
6551 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6552 implicitly used by all SYMBOL_REFs. We must emit a blockage
6553 insn before restoring it. */
6554 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6555 emit_insn (gen_blockage ());
6557 /* Restore the registers. */
6558 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6559 mips_restore_reg);
6561 /* Deallocate the final bit of the frame. */
6562 if (step2 > 0)
6563 emit_insn (gen_add3_insn (stack_pointer_rtx,
6564 stack_pointer_rtx,
6565 GEN_INT (step2)));
6567 /* Add in the __builtin_eh_return stack adjustment. We need to
6568 use a temporary in mips16 code. */
6569 if (current_function_calls_eh_return)
6571 if (TARGET_MIPS16)
6573 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6574 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6575 MIPS_EPILOGUE_TEMP (Pmode),
6576 EH_RETURN_STACKADJ_RTX));
6577 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6579 else
6580 emit_insn (gen_add3_insn (stack_pointer_rtx,
6581 stack_pointer_rtx,
6582 EH_RETURN_STACKADJ_RTX));
6585 if (!sibcall_p)
6587 /* In mips16 mode, the return address is restored into $7, not $31. */
6588 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6589 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6590 GP_REG_FIRST + 7)));
6591 else
6592 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6593 GP_REG_FIRST + 31)));
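/* Illustrative example of the resulting epilogue for the 32-byte o32
   frame used in the prologue comments above ($31 and $16 saved,
   before any delay-slot scheduling):

	lw	$31,28($sp)
	lw	$16,24($sp)
	addiu	$sp,$sp,32
	jr	$31

   step1 is 0 here because the whole frame fits within
   MIPS_MAX_FIRST_STACK_STEP, so everything is deallocated as step2.  */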
6597 /* Return nonzero if this function is known to have a null epilogue.
6598 This allows the optimizer to omit jumps to jumps if no stack
6599 was created. */
6602 mips_can_use_return_insn (void)
6604 tree return_type;
6606 if (! reload_completed)
6607 return 0;
6609 if (regs_ever_live[31] || current_function_profile)
6610 return 0;
6612 return_type = DECL_RESULT (current_function_decl);
6614 /* In mips16 mode, a function which returns a floating point value
6615 needs to arrange to copy the return value into the floating point
6616 registers. */
6617 if (TARGET_MIPS16
6618 && mips16_hard_float
6619 && ! aggregate_value_p (return_type, current_function_decl)
6620 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6621 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6622 return 0;
6624 if (cfun->machine->frame.initialized)
6625 return cfun->machine->frame.total_size == 0;
6627 return compute_frame_size (get_frame_size ()) == 0;
6630 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6631 in order to avoid duplicating too much logic from elsewhere. */
6633 static void
6634 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6635 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6636 tree function)
6638 rtx this, temp1, temp2, insn, fnaddr;
6640 /* Pretend to be a post-reload pass while generating rtl. */
6641 no_new_pseudos = 1;
6642 reload_completed = 1;
6643 reset_block_changes ();
6645 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6646 for TARGET_NEWABI since the latter is a call-saved register. */
6647 if (TARGET_ABICALLS)
6648 cfun->machine->global_pointer
6649 = REGNO (pic_offset_table_rtx)
6650 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6652 /* Set up the global pointer for n32 or n64 abicalls. */
6653 mips_emit_loadgp ();
6655 /* We need two temporary registers in some cases. */
6656 temp1 = gen_rtx_REG (Pmode, 2);
6657 temp2 = gen_rtx_REG (Pmode, 3);
6659 /* Find out which register contains the "this" pointer. */
6660 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6661 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
6662 else
6663 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6665 /* Add DELTA to THIS. */
6666 if (delta != 0)
6668 rtx offset = GEN_INT (delta);
6669 if (!SMALL_OPERAND (delta))
6671 emit_move_insn (temp1, offset);
6672 offset = temp1;
6674 emit_insn (gen_add3_insn (this, this, offset));
6677 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6678 if (vcall_offset != 0)
6680 rtx addr;
6682 /* Set TEMP1 to *THIS. */
6683 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6685 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6686 addr = mips_add_offset (temp2, temp1, vcall_offset);
6688 /* Load the offset and add it to THIS. */
6689 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6690 emit_insn (gen_add3_insn (this, this, temp1));
6693 /* Jump to the target function. Use a sibcall if direct jumps are
6694 allowed, otherwise load the address into a register first. */
6695 fnaddr = XEXP (DECL_RTL (function), 0);
6696 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6698 /* This is messy. gas treats "la $25,foo" as part of a call
6699 sequence and may allow a global "foo" to be lazily bound.
6700 The general move patterns therefore reject this combination.
6702 In this context, lazy binding would actually be OK for o32 and o64,
6703 but it's still wrong for n32 and n64; see mips_load_call_address.
6704 We must therefore load the address via a temporary register if
6705 mips_dangerous_for_la25_p.
6707 If we jump to the temporary register rather than $25, the assembler
6708 can use the move insn to fill the jump's delay slot. */
6709 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6710 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6711 mips_load_call_address (temp1, fnaddr, true);
6713 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6714 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6715 emit_jump_insn (gen_indirect_jump (temp1));
6717 else
6719 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6720 SIBLING_CALL_P (insn) = 1;
6723 /* Run just enough of rest_of_compilation. This sequence was
6724 "borrowed" from alpha.c. */
6725 insn = get_insns ();
6726 insn_locators_initialize ();
6727 split_all_insns_noflow ();
6728 if (TARGET_MIPS16)
6729 mips16_lay_out_constants ();
6730 shorten_branches (insn);
6731 final_start_function (insn, file, 1);
6732 final (insn, file, 1);
6733 final_end_function ();
6735 /* Clean up the vars set above. Note that final_end_function resets
6736 the global pointer for us. */
6737 reload_completed = 0;
6738 no_new_pseudos = 0;
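/* In outline, the thunk built above performs, in pseudocode:

	this += delta;
	if (vcall_offset != 0)
	  this += *(*this + vcall_offset);
	tail-call FUNCTION;	(sibcall, or indirect jump through $25 or
				 a temporary when direct jumps are not
				 allowed)

   where "this" lives in $4, or in $5 when the result is returned via
   an invisible reference.  */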
6741 /* Returns nonzero if X contains a SYMBOL_REF. */
6743 static int
6744 symbolic_expression_p (rtx x)
6746 if (GET_CODE (x) == SYMBOL_REF)
6747 return 1;
6749 if (GET_CODE (x) == CONST)
6750 return symbolic_expression_p (XEXP (x, 0));
6752 if (UNARY_P (x))
6753 return symbolic_expression_p (XEXP (x, 0));
6755 if (ARITHMETIC_P (x))
6756 return (symbolic_expression_p (XEXP (x, 0))
6757 || symbolic_expression_p (XEXP (x, 1)));
6759 return 0;
6762 /* Choose the section to use for the constant rtx expression X that has
6763 mode MODE. */
6765 static void
6766 mips_select_rtx_section (enum machine_mode mode, rtx x,
6767 unsigned HOST_WIDE_INT align)
6769 if (TARGET_MIPS16)
6771 /* In mips16 mode, the constant table always goes in the same section
6772 as the function, so that constants can be loaded using PC relative
6773 addressing. */
6774 function_section (current_function_decl);
6776 else if (TARGET_EMBEDDED_DATA)
6778 /* For embedded applications, always put constants in read-only data,
6779 in order to reduce RAM usage. */
6780 mergeable_constant_section (mode, align, 0);
6782 else
6784 /* For hosted applications, always put constants in small data if
6785 possible, as this gives the best performance. */
6786 /* ??? Consider using mergeable small data sections. */
6788 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6789 && mips_section_threshold > 0)
6790 named_section (0, ".sdata", 0);
6791 else if (flag_pic && symbolic_expression_p (x))
6792 named_section (0, ".data.rel.ro", 3);
6793 else
6794 mergeable_constant_section (mode, align, 0);
6798 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6800 The complication here is that, with the combination TARGET_ABICALLS
6801 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6802 therefore not be included in the read-only part of a DSO. Handle such
6803 cases by selecting a normal data section instead of a read-only one.
6804 The logic apes that in default_function_rodata_section. */
6806 static void
6807 mips_function_rodata_section (tree decl)
6809 if (!TARGET_ABICALLS || TARGET_GPWORD)
6810 default_function_rodata_section (decl);
6811 else if (decl && DECL_SECTION_NAME (decl))
6813 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6814 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6816 char *rname = ASTRDUP (name);
6817 rname[14] = 'd';
6818 named_section_real (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6820 else if (flag_function_sections && flag_data_sections
6821 && strncmp (name, ".text.", 6) == 0)
6823 char *rname = ASTRDUP (name);
6824 memcpy (rname + 1, "data", 4);
6825 named_section_flags (rname, SECTION_WRITE);
6827 else
6828 data_section ();
6830 else
6831 data_section ();
6834 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6835 access DECL using %gp_rel(...)($gp). */
6837 static bool
6838 mips_in_small_data_p (tree decl)
6840 HOST_WIDE_INT size;
6842 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6843 return false;
6845 /* We don't yet generate small-data references for -mabicalls. See related
6846 -G handling in override_options. */
6847 if (TARGET_ABICALLS)
6848 return false;
6850 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6852 const char *name;
6854 /* Reject anything that isn't in a known small-data section. */
6855 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6856 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6857 return false;
6859 /* If a symbol is defined externally, the assembler will use the
6860 usual -G rules when deciding how to implement macros. */
6861 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6862 return true;
6864 else if (TARGET_EMBEDDED_DATA)
6866 /* Don't put constants into the small data section: we want them
6867 to be in ROM rather than RAM. */
6868 if (TREE_CODE (decl) != VAR_DECL)
6869 return false;
6871 if (TREE_READONLY (decl)
6872 && !TREE_SIDE_EFFECTS (decl)
6873 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6874 return false;
6877 size = int_size_in_bytes (TREE_TYPE (decl));
6878 return (size > 0 && size <= mips_section_threshold);
6881 /* See whether VALTYPE is a record whose fields should be returned in
6882 floating-point registers. If so, return the number of fields and
6883 list them in FIELDS (which should have two elements). Return 0
6884 otherwise.
6886 For n32 & n64, a structure with one or two fields is returned in
6887 floating-point registers as long as every field has a floating-point
6888 type. */
6890 static int
6891 mips_fpr_return_fields (tree valtype, tree *fields)
6893 tree field;
6894 int i;
6896 if (!TARGET_NEWABI)
6897 return 0;
6899 if (TREE_CODE (valtype) != RECORD_TYPE)
6900 return 0;
6902 i = 0;
6903 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6905 if (TREE_CODE (field) != FIELD_DECL)
6906 continue;
6908 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
6909 return 0;
6911 if (i == 2)
6912 return 0;
6914 fields[i++] = field;
6916 return i;
6920 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6921 a value in the most significant part of $2/$3 if:
6923 - the target is big-endian;
6925 - the value has a structure or union type (we generalize this to
6926 cover aggregates from other languages too); and
6928 - the structure is not returned in floating-point registers. */
6930 static bool
6931 mips_return_in_msb (tree valtype)
6933 tree fields[2];
6935 return (TARGET_NEWABI
6936 && TARGET_BIG_ENDIAN
6937 && AGGREGATE_TYPE_P (valtype)
6938 && mips_fpr_return_fields (valtype, fields) == 0);
6942 /* Return a composite value in a pair of floating-point registers.
6943 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6944 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6945 complete value.
6947 For n32 & n64, $f0 always holds the first value and $f2 the second.
6948 Otherwise the values are packed together as closely as possible. */
6950 static rtx
6951 mips_return_fpr_pair (enum machine_mode mode,
6952 enum machine_mode mode1, HOST_WIDE_INT offset1,
6953 enum machine_mode mode2, HOST_WIDE_INT offset2)
6955 int inc;
6957 inc = (TARGET_NEWABI ? 2 : FP_INC);
6958 return gen_rtx_PARALLEL
6959 (mode,
6960 gen_rtvec (2,
6961 gen_rtx_EXPR_LIST (VOIDmode,
6962 gen_rtx_REG (mode1, FP_RETURN),
6963 GEN_INT (offset1)),
6964 gen_rtx_EXPR_LIST (VOIDmode,
6965 gen_rtx_REG (mode2, FP_RETURN + inc),
6966 GEN_INT (offset2))));
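/* For example (n32/n64, illustrative): a complex double return value
   would be described as a PARALLEL containing (reg:DF FP_RETURN) at
   byte offset 0 and (reg:DF FP_RETURN + 2) at byte offset 8, i.e.
   normally $f0 and $f2.  */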
6971 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6972 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6973 VALTYPE is null and MODE is the mode of the return value. */
6976 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6977 enum machine_mode mode)
6979 if (valtype)
6981 tree fields[2];
6982 int unsignedp;
6984 mode = TYPE_MODE (valtype);
6985 unsignedp = TYPE_UNSIGNED (valtype);
6987 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6988 true, we must promote the mode just as PROMOTE_MODE does. */
6989 mode = promote_mode (valtype, mode, &unsignedp, 1);
6991 /* Handle structures whose fields are returned in $f0/$f2. */
6992 switch (mips_fpr_return_fields (valtype, fields))
6994 case 1:
6995 return gen_rtx_REG (mode, FP_RETURN);
6997 case 2:
6998 return mips_return_fpr_pair (mode,
6999 TYPE_MODE (TREE_TYPE (fields[0])),
7000 int_byte_position (fields[0]),
7001 TYPE_MODE (TREE_TYPE (fields[1])),
7002 int_byte_position (fields[1]));
7005 /* If a value is passed in the most significant part of a register, see
7006 whether we have to round the mode up to a whole number of words. */
7007 if (mips_return_in_msb (valtype))
7009 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7010 if (size % UNITS_PER_WORD != 0)
7012 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7013 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7017 /* For EABI, the class of return register depends entirely on MODE.
7018 For example, "struct { some_type x; }" and "union { some_type x; }"
7019 are returned in the same way as a bare "some_type" would be.
7020 Other ABIs only use FPRs for scalar, complex or vector types. */
7021 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7022 return gen_rtx_REG (mode, GP_RETURN);
7025 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7026 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7027 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7028 return gen_rtx_REG (mode, FP_RETURN);
7030 /* Handle long doubles for n32 & n64. */
7031 if (mode == TFmode)
7032 return mips_return_fpr_pair (mode,
7033 DImode, 0,
7034 DImode, GET_MODE_SIZE (mode) / 2);
7036 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7037 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7038 return mips_return_fpr_pair (mode,
7039 GET_MODE_INNER (mode), 0,
7040 GET_MODE_INNER (mode),
7041 GET_MODE_SIZE (mode) / 2);
7043 return gen_rtx_REG (mode, GP_RETURN);
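/* Two illustrative cases for the code above (n32/n64, hard float):
   a struct containing two doubles hits the "case 2" path and is
   returned in $f0/$f2 via mips_return_fpr_pair, while a plain int is
   promoted and returned in GP_RETURN ($2).  */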
7046 /* Return nonzero when an argument must be passed by reference. */
7048 static bool
7049 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7050 enum machine_mode mode, tree type,
7051 bool named ATTRIBUTE_UNUSED)
7053 if (mips_abi == ABI_EABI)
7055 int size;
7057 /* ??? How should SCmode be handled? */
7058 if (type == NULL_TREE || mode == DImode || mode == DFmode)
7059 return 0;
7061 size = int_size_in_bytes (type);
7062 return size == -1 || size > UNITS_PER_WORD;
7064 else
7066 /* If we have a variable-sized parameter, we have no choice. */
7067 return targetm.calls.must_pass_in_stack (mode, type);
7071 static bool
7072 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7073 enum machine_mode mode ATTRIBUTE_UNUSED,
7074 tree type ATTRIBUTE_UNUSED, bool named)
7076 return mips_abi == ABI_EABI && named;
7079 /* Return true if registers of class CLASS cannot change from mode FROM
7080 to mode TO. */
7082 bool
7083 mips_cannot_change_mode_class (enum machine_mode from,
7084 enum machine_mode to, enum reg_class class)
7086 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7087 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7089 if (TARGET_BIG_ENDIAN)
7091 /* When a multi-word value is stored in paired floating-point
7092 registers, the first register always holds the low word.
7093 We therefore can't allow FPRs to change between single-word
7094 and multi-word modes. */
7095 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7096 return true;
7098 else
7100 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7101 in LO and HI, the high word always comes first. We therefore
7102 can't allow values stored in HI to change between single-word
7103 and multi-word modes. */
7104 if (reg_classes_intersect_p (HI_REG, class))
7105 return true;
7108 /* Loading a 32-bit value into a 64-bit floating-point register
7109 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7110 We can't allow 64-bit float registers to change from SImode
7111 to a wider mode. */
7112 if (TARGET_FLOAT64
7113 && from == SImode
7114 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7115 && reg_classes_intersect_p (FP_REGS, class))
7116 return true;
7117 return false;
7120 /* Return true if X should not be moved directly into register $25.
7121 We need this because many versions of GAS will treat "la $25,foo" as
7122 part of a call sequence and so allow a global "foo" to be lazily bound. */
7124 bool
7125 mips_dangerous_for_la25_p (rtx x)
7127 HOST_WIDE_INT offset;
7129 if (TARGET_EXPLICIT_RELOCS)
7130 return false;
7132 mips_split_const (x, &x, &offset);
7133 return global_got_operand (x, VOIDmode);
7136 /* Implement PREFERRED_RELOAD_CLASS. */
7138 enum reg_class
7139 mips_preferred_reload_class (rtx x, enum reg_class class)
7141 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7142 return LEA_REGS;
7144 if (TARGET_HARD_FLOAT
7145 && FLOAT_MODE_P (GET_MODE (x))
7146 && reg_class_subset_p (FP_REGS, class))
7147 return FP_REGS;
7149 if (reg_class_subset_p (GR_REGS, class))
7150 class = GR_REGS;
7152 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7153 class = M16_REGS;
7155 return class;
7158 /* This function returns the register class required for a secondary
7159 register when copying between one of the registers in CLASS, and X,
7160 using MODE. If IN_P is nonzero, the copy is going from X to the
7161 register, otherwise the register is the source. A return value of
7162 NO_REGS means that no secondary register is required. */
7164 enum reg_class
7165 mips_secondary_reload_class (enum reg_class class,
7166 enum machine_mode mode, rtx x, int in_p)
7168 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7169 int regno = -1;
7170 int gp_reg_p;
7172 if (REG_P (x)|| GET_CODE (x) == SUBREG)
7173 regno = true_regnum (x);
7175 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7177 if (mips_dangerous_for_la25_p (x))
7179 gr_regs = LEA_REGS;
7180 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7181 return gr_regs;
7184 /* Copying from HI or LO to anywhere other than a general register
7185 requires a general register. */
7186 if (class == HI_REG || class == LO_REG || class == MD_REGS)
7188 if (TARGET_MIPS16 && in_p)
7190 /* We can't really copy to HI or LO at all in mips16 mode. */
7191 return M16_REGS;
7193 return gp_reg_p ? NO_REGS : gr_regs;
7195 if (MD_REG_P (regno))
7197 if (TARGET_MIPS16 && ! in_p)
7199 /* We can't really copy to HI or LO at all in mips16 mode. */
7200 return M16_REGS;
7202 return class == gr_regs ? NO_REGS : gr_regs;
7205 /* We can only copy a value to a condition code register from a
7206 floating point register, and even then we require a scratch
7207 floating point register. We can only copy a value out of a
7208 condition code register into a general register. */
7209 if (class == ST_REGS)
7211 if (in_p)
7212 return FP_REGS;
7213 return gp_reg_p ? NO_REGS : gr_regs;
7215 if (ST_REG_P (regno))
7217 if (! in_p)
7218 return FP_REGS;
7219 return class == gr_regs ? NO_REGS : gr_regs;
7222 if (class == FP_REGS)
7224 if (MEM_P (x))
7226 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7227 return NO_REGS;
7229 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7231 /* We can use the l.s and l.d macros to load floating-point
7232 constants. ??? For l.s, we could probably get better
7233 code by returning GR_REGS here. */
7234 return NO_REGS;
7236 else if (gp_reg_p || x == CONST0_RTX (mode))
7238 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7239 return NO_REGS;
7241 else if (FP_REG_P (regno))
7243 /* In this case we can use mov.s or mov.d. */
7244 return NO_REGS;
7246 else
7248 /* Otherwise, we need to reload through an integer register. */
7249 return gr_regs;
7253 /* In mips16 mode, going between memory and anything but M16_REGS
7254 requires an M16_REG. */
7255 if (TARGET_MIPS16)
7257 if (class != M16_REGS && class != M16_NA_REGS)
7259 if (gp_reg_p)
7260 return NO_REGS;
7261 return M16_REGS;
7263 if (! gp_reg_p)
7265 if (class == M16_REGS || class == M16_NA_REGS)
7266 return NO_REGS;
7267 return M16_REGS;
7271 return NO_REGS;
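/* An illustrative case for the rules above: copying an FPR such as
   $f0 into HI or LO cannot be done directly, so when CLASS is MD_REGS
   and X is a floating-point register this function asks for a GR_REGS
   (or M16_REGS) scratch and the copy goes FPR -> GPR -> HI/LO.  */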
7274 /* Implement CLASS_MAX_NREGS.
7276 Usually all registers are word-sized. The only supported exception
7277 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7278 registers. A word-based calculation is correct even in that case,
7279 since -msingle-float disallows multi-FPR values.
7281 The FP status registers are an exception to this rule. They are always
7282 4 bytes wide as they only hold condition code modes, and CCmode is always
7283 considered to be 4 bytes wide. */
7286 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7287 enum machine_mode mode)
7289 if (class == ST_REGS)
7290 return (GET_MODE_SIZE (mode) + 3) / 4;
7291 else
7292 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
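/* Example values (illustrative): CCmode in ST_REGS needs
   (4 + 3) / 4 = 1 register; DImode in GR_REGS needs (8 + 3) / 4 = 2
   registers with 32-bit words but (8 + 7) / 8 = 1 with 64-bit words.  */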
7295 static bool
7296 mips_valid_pointer_mode (enum machine_mode mode)
7298 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7301 /* Define this so that we can deal with a testcase like:
7303 char foo __attribute__ ((mode (SI)));
7305 compiled with -mabi=64 and -mint64. We have no
7306 32-bit type in that configuration, so the default case
7307 always fails. */
7309 static bool
7310 mips_scalar_mode_supported_p (enum machine_mode mode)
7312 switch (mode)
7314 case QImode:
7315 case HImode:
7316 case SImode:
7317 case DImode:
7318 return true;
7320 /* Handled via optabs.c. */
7321 case TImode:
7322 return TARGET_64BIT;
7324 case SFmode:
7325 case DFmode:
7326 return true;
7328 /* LONG_DOUBLE_TYPE_SIZE is 128 for TARGET_NEWABI only. */
7329 case TFmode:
7330 return TARGET_NEWABI;
7332 default:
7333 return false;
7338 /* Target hook for vector_mode_supported_p. */
7339 static bool
7340 mips_vector_mode_supported_p (enum machine_mode mode)
7342 if (mode == V2SFmode && TARGET_PAIRED_SINGLE_FLOAT)
7343 return true;
7344 else
7345 return false;
7348 /* If we can access small data directly (using gp-relative relocation
7349 operators) return the small data pointer, otherwise return null.
7351 For each mips16 function which refers to GP relative symbols, we
7352 use a pseudo register, initialized at the start of the function, to
7353 hold the $gp value. */
7355 static rtx
7356 mips16_gp_pseudo_reg (void)
7358 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7360 rtx unspec;
7361 rtx insn, scan;
7363 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7365 /* We want to initialize this to a value which gcc will believe
7366 is constant. */
7367 start_sequence ();
7368 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7369 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7370 gen_rtx_CONST (Pmode, unspec));
7371 insn = get_insns ();
7372 end_sequence ();
7374 push_topmost_sequence ();
7375 /* We need to emit the initialization after the FUNCTION_BEG
7376 note, so that it will be integrated. */
7377 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7378 if (NOTE_P (scan)
7379 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7380 break;
7381 if (scan == NULL_RTX)
7382 scan = get_insns ();
7383 insn = emit_insn_after (insn, scan);
7384 pop_topmost_sequence ();
7387 return cfun->machine->mips16_gp_pseudo_rtx;
7390 /* Write out code to move floating point arguments in or out of
7391 general registers. Output the instructions to FILE. FP_CODE is
7392 the code describing which arguments are present (see the comment at
7393 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7394 we are copying from the floating point registers. */
7396 static void
7397 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7399 const char *s;
7400 int gparg, fparg;
7401 unsigned int f;
7403 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7404 gcc_assert (TARGET_OLDABI);
7406 if (from_fp_p)
7407 s = "mfc1";
7408 else
7409 s = "mtc1";
7410 gparg = GP_ARG_FIRST;
7411 fparg = FP_ARG_FIRST;
7412 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7414 if ((f & 3) == 1)
7416 if ((fparg & 1) != 0)
7417 ++fparg;
7418 fprintf (file, "\t%s\t%s,%s\n", s,
7419 reg_names[gparg], reg_names[fparg]);
7421 else if ((f & 3) == 2)
7423 if (TARGET_64BIT)
7424 fprintf (file, "\td%s\t%s,%s\n", s,
7425 reg_names[gparg], reg_names[fparg]);
7426 else
7428 if ((fparg & 1) != 0)
7429 ++fparg;
7430 if (TARGET_BIG_ENDIAN)
7431 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7432 reg_names[gparg], reg_names[fparg + 1], s,
7433 reg_names[gparg + 1], reg_names[fparg]);
7434 else
7435 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7436 reg_names[gparg], reg_names[fparg], s,
7437 reg_names[gparg + 1], reg_names[fparg + 1]);
7438 ++gparg;
7439 ++fparg;
7442 else
7443 gcc_unreachable ();
7445 ++gparg;
7446 ++fparg;
7450 /* Build a mips16 function stub. This is used for functions which
7451 take arguments in the floating point registers. It is 32 bit code
7452 that moves the floating point args into the general registers, and
7453 then jumps to the 16 bit code. */
7455 static void
7456 build_mips16_function_stub (FILE *file)
7458 const char *fnname;
7459 char *secname, *stubname;
7460 tree stubid, stubdecl;
7461 int need_comma;
7462 unsigned int f;
7464 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7465 secname = (char *) alloca (strlen (fnname) + 20);
7466 sprintf (secname, ".mips16.fn.%s", fnname);
7467 stubname = (char *) alloca (strlen (fnname) + 20);
7468 sprintf (stubname, "__fn_stub_%s", fnname);
7469 stubid = get_identifier (stubname);
7470 stubdecl = build_decl (FUNCTION_DECL, stubid,
7471 build_function_type (void_type_node, NULL_TREE));
7472 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7474 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7475 need_comma = 0;
7476 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7478 fprintf (file, "%s%s",
7479 need_comma ? ", " : "",
7480 (f & 3) == 1 ? "float" : "double");
7481 need_comma = 1;
7483 fprintf (file, ")\n");
7485 fprintf (file, "\t.set\tnomips16\n");
7486 function_section (stubdecl);
7487 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7489 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7490 within a .ent, and we cannot emit another .ent. */
7491 if (!FUNCTION_NAME_ALREADY_DECLARED)
7493 fputs ("\t.ent\t", file);
7494 assemble_name (file, stubname);
7495 fputs ("\n", file);
7498 assemble_name (file, stubname);
7499 fputs (":\n", file);
7501 /* We don't want the assembler to insert any nops here. */
7502 fprintf (file, "\t.set\tnoreorder\n");
7504 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7506 fprintf (asm_out_file, "\t.set\tnoat\n");
7507 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7508 assemble_name (file, fnname);
7509 fprintf (file, "\n");
7510 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7511 fprintf (asm_out_file, "\t.set\tat\n");
7513 /* Unfortunately, we can't fill the jump delay slot. We can't fill it
7514 with one of the mfc1 instructions, because the result is not
7515 available for one instruction, so if the very first instruction
7516 in the function refers to the register, it will see the wrong
7517 value. */
7518 fprintf (file, "\tnop\n");
7520 fprintf (file, "\t.set\treorder\n");
7522 if (!FUNCTION_NAME_ALREADY_DECLARED)
7524 fputs ("\t.end\t", file);
7525 assemble_name (file, stubname);
7526 fputs ("\n", file);
7529 fprintf (file, "\t.set\tmips16\n");
7531 function_section (current_function_decl);
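/* Editorial sketch, not part of the build: the stub emitted above is
   placed in a section named ".mips16.fn.<function>" and labelled
   "__fn_stub_<function>", matching the sprintf calls earlier in this
   function.  The fragment below shows just that naming scheme, using
   snprintf and hypothetical buffer parameters.  */

#include <stdio.h>

/* Build the section and symbol names used for a mips16 function stub.
   SECNAME and STUBNAME must have room for the prefix plus FNNAME.  */
static void
demo_mips16_fn_stub_names (const char *fnname,
                           char *secname, size_t seclen,
                           char *stubname, size_t stublen)
{
  snprintf (secname, seclen, ".mips16.fn.%s", fnname);
  snprintf (stubname, stublen, "__fn_stub_%s", fnname);
}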
7534 /* We keep a list of functions for which we have already built stubs
7535 in build_mips16_call_stub. */
7537 struct mips16_stub
7539 struct mips16_stub *next;
7540 char *name;
7541 int fpret;
7544 static struct mips16_stub *mips16_stubs;
7546 /* Build a call stub for a mips16 call. A stub is needed if we are
7547 passing any floating point values which should go into the floating
7548 point registers. If we are, and the call turns out to be to a 32
7549 bit function, the stub will be used to move the values into the
7550 floating point registers before calling the 32 bit function. The
7551 linker will magically adjust the function call to either the 16 bit
7552 function or the 32 bit stub, depending upon how the called function
7553 is actually defined.
7555 Similarly, we need a stub if the return value might come back in a
7556 floating point register.
7558 RETVAL is the location of the return value, or null if this is
7559 a call rather than a call_value. FN is the address of the
7560 function and ARG_SIZE is the size of the arguments. FP_CODE
7561 is the code built by function_arg. This function returns a nonzero
7562 value if it builds the call instruction itself. */
7564 int
7565 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7567 int fpret;
7568 const char *fnname;
7569 char *secname, *stubname;
7570 struct mips16_stub *l;
7571 tree stubid, stubdecl;
7572 int need_comma;
7573 unsigned int f;
7575 /* We don't need to do anything if we aren't in mips16 mode, or if
7576 we were invoked with the -msoft-float option. */
7577 if (! TARGET_MIPS16 || ! mips16_hard_float)
7578 return 0;
7580 /* Figure out whether the value might come back in a floating point
7581 register. */
7582 fpret = (retval != 0
7583 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7584 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7586 /* We don't need to do anything if there were no floating point
7587 arguments and the value will not be returned in a floating point
7588 register. */
7589 if (fp_code == 0 && ! fpret)
7590 return 0;
7592 /* We don't need to do anything if this is a call to a special
7593 mips16 support function. */
7594 if (GET_CODE (fn) == SYMBOL_REF
7595 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7596 return 0;
7598 /* This code will only work for the o32 and o64 ABIs. The other ABIs
7599 require more sophisticated support. */
7600 gcc_assert (TARGET_OLDABI);
7602 /* We can only handle SFmode and DFmode floating point return
7603 values. */
7604 if (fpret)
7605 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
7607 /* If we're calling via a function pointer, then we must always call
7608 via a stub. There are magic stubs provided in libgcc.a for each
7609 of the required cases. Each of them expects the function address
7610 to arrive in register $2. */
7612 if (GET_CODE (fn) != SYMBOL_REF)
7614 char buf[30];
7615 tree id;
7616 rtx stub_fn, insn;
7618 /* ??? If this code is modified to support other ABI's, we need
7619 to handle PARALLEL return values here. */
7621 sprintf (buf, "__mips16_call_stub_%s%d",
7622 (fpret
7623 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7624 : ""),
7625 fp_code);
7626 id = get_identifier (buf);
7627 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7629 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7631 if (retval == NULL_RTX)
7632 insn = gen_call_internal (stub_fn, arg_size);
7633 else
7634 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7635 insn = emit_call_insn (insn);
7637 /* Put the register usage information on the CALL. */
7638 CALL_INSN_FUNCTION_USAGE (insn) =
7639 gen_rtx_EXPR_LIST (VOIDmode,
7640 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7641 CALL_INSN_FUNCTION_USAGE (insn));
7643 /* If we are handling a floating point return value, we need to
7644 save $18 in the function prologue. Putting a note on the
7645 call will mean that regs_ever_live[$18] will be true if the
7646 call is not eliminated, and we can check that in the prologue
7647 code. */
7648 if (fpret)
7649 CALL_INSN_FUNCTION_USAGE (insn) =
7650 gen_rtx_EXPR_LIST (VOIDmode,
7651 gen_rtx_USE (VOIDmode,
7652 gen_rtx_REG (word_mode, 18)),
7653 CALL_INSN_FUNCTION_USAGE (insn));
7655 /* Return 1 to tell the caller that we've generated the call
7656 insn. */
7657 return 1;
7660 /* We know the function we are going to call. If we have already
7661 built a stub, we don't need to do anything further. */
7663 fnname = XSTR (fn, 0);
7664 for (l = mips16_stubs; l != NULL; l = l->next)
7665 if (strcmp (l->name, fnname) == 0)
7666 break;
7668 if (l == NULL)
7670 /* Build a special purpose stub. When the linker sees a
7671 function call in mips16 code, it will check where the target
7672 is defined. If the target is 32 bit code, the linker will
7673 search for the section defined here. It can tell which
7674 symbol this section is associated with by looking at the
7675 relocation information (the name is unreliable, since this
7676 might be a static function). If such a section is found, the
7677 linker will redirect the call to the start of the magic
7678 section.
7680 If the function does not return a floating point value, the
7681 special stub section is named
7682 .mips16.call.FNNAME
7684 If the function does return a floating point value, the stub
7685 section is named
7686 .mips16.call.fp.FNNAME
7689 secname = (char *) alloca (strlen (fnname) + 40);
7690 sprintf (secname, ".mips16.call.%s%s",
7691 fpret ? "fp." : "",
7692 fnname);
7693 stubname = (char *) alloca (strlen (fnname) + 20);
7694 sprintf (stubname, "__call_stub_%s%s",
7695 fpret ? "fp_" : "",
7696 fnname);
7697 stubid = get_identifier (stubname);
7698 stubdecl = build_decl (FUNCTION_DECL, stubid,
7699 build_function_type (void_type_node, NULL_TREE));
7700 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7702 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7703 (fpret
7704 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7705 : ""),
7706 fnname);
7707 need_comma = 0;
7708 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7710 fprintf (asm_out_file, "%s%s",
7711 need_comma ? ", " : "",
7712 (f & 3) == 1 ? "float" : "double");
7713 need_comma = 1;
7715 fprintf (asm_out_file, ")\n");
7717 fprintf (asm_out_file, "\t.set\tnomips16\n");
7718 assemble_start_function (stubdecl, stubname);
7720 if (!FUNCTION_NAME_ALREADY_DECLARED)
7722 fputs ("\t.ent\t", asm_out_file);
7723 assemble_name (asm_out_file, stubname);
7724 fputs ("\n", asm_out_file);
7726 assemble_name (asm_out_file, stubname);
7727 fputs (":\n", asm_out_file);
7730 /* We build the stub code by hand. That's the only way we can
7731 do it, since we can't generate 32 bit code during a 16 bit
7732 compilation. */
7734 /* We don't want the assembler to insert any nops here. */
7735 fprintf (asm_out_file, "\t.set\tnoreorder\n");
7737 mips16_fp_args (asm_out_file, fp_code, 0);
7739 if (! fpret)
7741 fprintf (asm_out_file, "\t.set\tnoat\n");
7742 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7743 fnname);
7744 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7745 fprintf (asm_out_file, "\t.set\tat\n");
7746 /* Unfortunately, we can't fill the jump delay slot. We
7747 can't fill it with one of the mtc1 instructions, because the
7748 result is not available for one instruction, so if the
7749 very first instruction in the function refers to the
7750 register, it will see the wrong value. */
7751 fprintf (asm_out_file, "\tnop\n");
7753 else
7755 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7756 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7757 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7758 /* As above, we can't fill the delay slot. */
7759 fprintf (asm_out_file, "\tnop\n");
7760 if (GET_MODE (retval) == SFmode)
7761 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7762 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
7763 else
7765 if (TARGET_BIG_ENDIAN)
7767 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7768 reg_names[GP_REG_FIRST + 2],
7769 reg_names[FP_REG_FIRST + 1]);
7770 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7771 reg_names[GP_REG_FIRST + 3],
7772 reg_names[FP_REG_FIRST + 0]);
7774 else
7776 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7777 reg_names[GP_REG_FIRST + 2],
7778 reg_names[FP_REG_FIRST + 0]);
7779 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7780 reg_names[GP_REG_FIRST + 3],
7781 reg_names[FP_REG_FIRST + 1]);
7784 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7785 /* As above, we can't fill the delay slot. */
7786 fprintf (asm_out_file, "\tnop\n");
7789 fprintf (asm_out_file, "\t.set\treorder\n");
7791 #ifdef ASM_DECLARE_FUNCTION_SIZE
7792 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7793 #endif
7795 if (!FUNCTION_NAME_ALREADY_DECLARED)
7797 fputs ("\t.end\t", asm_out_file);
7798 assemble_name (asm_out_file, stubname);
7799 fputs ("\n", asm_out_file);
7802 fprintf (asm_out_file, "\t.set\tmips16\n");
7804 /* Record this stub. */
7805 l = (struct mips16_stub *) xmalloc (sizeof *l);
7806 l->name = xstrdup (fnname);
7807 l->fpret = fpret;
7808 l->next = mips16_stubs;
7809 mips16_stubs = l;
7812 /* If we expect a floating point return value, but we've built a
7813 stub which does not expect one, then we're in trouble. We can't
7814 use the existing stub, because it won't handle the floating point
7815 value. We can't build a new stub, because the linker won't know
7816 which stub to use for the various calls in this object file.
7817 Fortunately, this case is illegal, since it means that a function
7818 was declared in two different ways in a single compilation. */
7819 if (fpret && ! l->fpret)
7820 error ("cannot handle inconsistent calls to %qs", fnname);
7822 /* If we are calling a stub which handles a floating point return
7823 value, we need to arrange to save $18 in the prologue. We do
7824 this by marking the function call as using the register. The
7825 prologue will later see that it is used, and emit code to save
7826 it. */
7828 if (l->fpret)
7830 rtx insn;
7832 if (retval == NULL_RTX)
7833 insn = gen_call_internal (fn, arg_size);
7834 else
7835 insn = gen_call_value_internal (retval, fn, arg_size);
7836 insn = emit_call_insn (insn);
7838 CALL_INSN_FUNCTION_USAGE (insn) =
7839 gen_rtx_EXPR_LIST (VOIDmode,
7840 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7841 CALL_INSN_FUNCTION_USAGE (insn));
7843 /* Return 1 to tell the caller that we've generated the call
7844 insn. */
7845 return 1;
7848 /* Return 0 to let the caller generate the call insn. */
7849 return 0;
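/* Editorial sketch, not part of the build: build_mips16_call_stub
   records each stub it emits in the mips16_stubs list so that a later
   call to the same function reuses the existing stub and so that an
   inconsistent fpret can be diagnosed.  The fragment below shows the
   same look-up-then-push-front caching idiom with plain libc calls
   (strdup is POSIX; error handling is omitted); all demo_* names are
   hypothetical.  */

#include <stdlib.h>
#include <string.h>

struct demo_stub
{
  struct demo_stub *next;
  char *name;
  int fpret;
};

/* Return the cache entry for NAME, creating one with FPRET if it is
   not already present.  STUBS points to the list head.  */
static struct demo_stub *
demo_find_or_add_stub (struct demo_stub **stubs, const char *name, int fpret)
{
  struct demo_stub *l;

  for (l = *stubs; l != NULL; l = l->next)
    if (strcmp (l->name, name) == 0)
      return l;

  l = malloc (sizeof *l);
  l->name = strdup (name);
  l->fpret = fpret;
  l->next = *stubs;
  *stubs = l;
  return l;
}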
7852 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7853 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
7855 struct mips16_constant {
7856 struct mips16_constant *next;
7857 rtx value;
7858 rtx label;
7859 enum machine_mode mode;
7862 /* Information about an incomplete mips16 constant pool. FIRST is the
7863 first constant, HIGHEST_ADDRESS is the highest address that the first
7864 byte of the pool can have, and INSN_ADDRESS is the current instruction
7865 address. */
7867 struct mips16_constant_pool {
7868 struct mips16_constant *first;
7869 int highest_address;
7870 int insn_address;
7873 /* Add constant VALUE to POOL and return its label. MODE is the
7874 value's mode (used for CONST_INTs, etc.). */
7876 static rtx
7877 add_constant (struct mips16_constant_pool *pool,
7878 rtx value, enum machine_mode mode)
7880 struct mips16_constant **p, *c;
7881 bool first_of_size_p;
7883 /* See whether the constant is already in the pool. If so, return the
7884 existing label, otherwise leave P pointing to the place where the
7885 constant should be added.
7887 Keep the pool sorted in increasing order of mode size so that we can
7888 reduce the number of alignments needed. */
7889 first_of_size_p = true;
7890 for (p = &pool->first; *p != 0; p = &(*p)->next)
7892 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
7893 return (*p)->label;
7894 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7895 break;
7896 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7897 first_of_size_p = false;
7900 /* In the worst case, the constant needed by the earliest instruction
7901 will end up at the end of the pool. The entire pool must then be
7902 accessible from that instruction.
7904 When adding the first constant, set the pool's highest address to
7905 the address of the first out-of-range byte. Adjust this address
7906 downwards each time a new constant is added. */
7907 if (pool->first == 0)
7908 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7909 is the address of the instruction with the lowest two bits clear.
7910 The base PC value for ld has the lowest three bits clear. Assume
7911 the worst case here. */
7912 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7913 pool->highest_address -= GET_MODE_SIZE (mode);
7914 if (first_of_size_p)
7915 /* Take into account the worst possible padding due to alignment. */
7916 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
7918 /* Create a new entry. */
7919 c = (struct mips16_constant *) xmalloc (sizeof *c);
7920 c->value = value;
7921 c->mode = mode;
7922 c->label = gen_label_rtx ();
7923 c->next = *p;
7924 *p = c;
7926 return c->label;
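/* Editorial sketch, not part of the build: add_constant keeps the pool
   sorted by increasing mode size and uses a pointer-to-pointer walk so
   that inserting at the head needs no special case.  The same idiom on
   a list of plain integers, with hypothetical demo_* names:  */

#include <stdlib.h>

struct demo_entry
{
  struct demo_entry *next;
  int size;
};

/* Insert SIZE into the list at *HEAD, keeping the list sorted in
   increasing order of size, and return the new entry.  */
static struct demo_entry *
demo_sorted_insert (struct demo_entry **head, int size)
{
  struct demo_entry **p, *c;

  for (p = head; *p != NULL; p = &(*p)->next)
    if (size < (*p)->size)
      break;

  c = malloc (sizeof *c);
  c->size = size;
  c->next = *p;
  *p = c;
  return c;
}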
7929 /* Output constant VALUE after instruction INSN and return the last
7930 instruction emitted. MODE is the mode of the constant. */
7932 static rtx
7933 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7935 switch (GET_MODE_CLASS (mode))
7937 case MODE_INT:
7939 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7940 return emit_insn_after (gen_consttable_int (value, size), insn);
7943 case MODE_FLOAT:
7944 return emit_insn_after (gen_consttable_float (value), insn);
7946 case MODE_VECTOR_FLOAT:
7947 case MODE_VECTOR_INT:
7949 int i;
7950 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7951 insn = dump_constants_1 (GET_MODE_INNER (mode),
7952 CONST_VECTOR_ELT (value, i), insn);
7953 return insn;
7956 default:
7957 gcc_unreachable ();
7962 /* Dump out the constants in CONSTANTS after INSN. */
7964 static void
7965 dump_constants (struct mips16_constant *constants, rtx insn)
7967 struct mips16_constant *c, *next;
7968 int align;
7970 align = 0;
7971 for (c = constants; c != NULL; c = next)
7973 /* If necessary, increase the alignment of PC. */
7974 if (align < GET_MODE_SIZE (c->mode))
7976 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7977 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7979 align = GET_MODE_SIZE (c->mode);
7981 insn = emit_label_after (c->label, insn);
7982 insn = dump_constants_1 (c->mode, c->value, insn);
7984 next = c->next;
7985 free (c);
7988 emit_barrier_after (insn);
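/* Editorial sketch, not part of the build: because the pool is sorted
   by size, the loop above only raises the output alignment when it
   reaches the first constant of a larger mode.  The fragment below
   reproduces that bookkeeping for an array of byte sizes, printing the
   power-of-two alignment that would be requested; floor_log2 is
   open-coded so the sketch stays standalone.  */

#include <stdio.h>

/* Return floor(log2(X)) for X > 0.  */
static int
demo_floor_log2 (unsigned int x)
{
  int log = -1;

  while (x != 0)
    {
      x >>= 1;
      log++;
    }
  return log;
}

/* SIZES holds the constants' sizes in (sorted) pool order.  */
static void
demo_dump_alignments (const unsigned int *sizes, int n)
{
  unsigned int align = 0;
  int i;

  for (i = 0; i < n; i++)
    {
      if (align < sizes[i])
        {
          printf ("\t.align\t%d\n", demo_floor_log2 (sizes[i]));
          align = sizes[i];
        }
      printf ("\t# constant of %u bytes\n", sizes[i]);
    }
}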
7991 /* Return the length of instruction INSN.
7993 ??? MIPS16 switch tables go in .text, but we don't define
7994 JUMP_TABLES_IN_TEXT_SECTION, so get_attr_length will not
7995 compute their lengths correctly. */
7997 static int
7998 mips16_insn_length (rtx insn)
8000 if (JUMP_P (insn))
8002 rtx body = PATTERN (insn);
8003 if (GET_CODE (body) == ADDR_VEC)
8004 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8005 if (GET_CODE (body) == ADDR_DIFF_VEC)
8006 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8008 return get_attr_length (insn);
8011 /* Rewrite *X so that constant pool references refer to the constant's
8012 label instead. DATA points to the constant pool structure. */
8014 static int
8015 mips16_rewrite_pool_refs (rtx *x, void *data)
8017 struct mips16_constant_pool *pool = data;
8018 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8019 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8020 get_pool_constant (*x),
8021 get_pool_mode (*x)));
8022 return 0;
8025 /* Build MIPS16 constant pools. */
8027 static void
8028 mips16_lay_out_constants (void)
8030 struct mips16_constant_pool pool;
8031 rtx insn, barrier;
8033 barrier = 0;
8034 memset (&pool, 0, sizeof (pool));
8035 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8037 /* Rewrite constant pool references in INSN. */
8038 if (INSN_P (insn))
8039 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8041 pool.insn_address += mips16_insn_length (insn);
8043 if (pool.first != NULL)
8045 /* If there are no natural barriers between the first user of
8046 the pool and the highest acceptable address, we'll need to
8047 create a new instruction to jump around the constant pool.
8048 In the worst case, this instruction will be 4 bytes long.
8050 If it's too late to do this transformation after INSN,
8051 do it immediately before INSN. */
8052 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8054 rtx label, jump;
8056 label = gen_label_rtx ();
8058 jump = emit_jump_insn_before (gen_jump (label), insn);
8059 JUMP_LABEL (jump) = label;
8060 LABEL_NUSES (label) = 1;
8061 barrier = emit_barrier_after (jump);
8063 emit_label_after (label, barrier);
8064 pool.insn_address += 4;
8067 /* See whether the constant pool is now out of range of the first
8068 user. If so, output the constants after the previous barrier.
8069 Note that any instructions between BARRIER and INSN (inclusive)
8070 will use negative offsets to refer to the pool. */
8071 if (pool.insn_address > pool.highest_address)
8073 dump_constants (pool.first, barrier);
8074 pool.first = NULL;
8075 barrier = 0;
8077 else if (BARRIER_P (insn))
8078 barrier = insn;
8081 dump_constants (pool.first, get_last_insn ());
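/* Editorial sketch, not part of the build: the driver above tracks two
   byte counters, the running instruction address and the highest
   address at which the first byte of the pool may still be placed, and
   when no natural barrier exists it must also budget 4 bytes for a
   jump around the pool.  The decision in isolation, with hypothetical
   names:  */

#include <stdbool.h>

/* Return true if the pool must be dumped once the instruction ending
   at INSN_ADDRESS has been laid out.  HIGHEST_ADDRESS is the last
   acceptable address for the pool's first byte.  HAVE_BARRIER says
   whether an existing barrier can host the pool; if not, a 4-byte
   jump around the pool has to be emitted first.  */
static bool
demo_pool_needs_dump (int insn_address, int highest_address,
                      bool have_barrier)
{
  if (!have_barrier)
    insn_address += 4;   /* Room for the branch over the pool.  */
  return insn_address > highest_address;
}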
8084 /* A temporary variable used by for_each_rtx callbacks, etc. */
8085 static rtx mips_sim_insn;
8087 /* A structure representing the state of the processor pipeline.
8088 Used by the mips_sim_* family of functions. */
8089 struct mips_sim {
8090 /* The maximum number of instructions that can be issued in a cycle.
8091 (Caches mips_issue_rate.) */
8092 unsigned int issue_rate;
8094 /* The current simulation time. */
8095 unsigned int time;
8097 /* How many more instructions can be issued in the current cycle. */
8098 unsigned int insns_left;
8100 /* LAST_SET[X].INSN is the last instruction to set register X.
8101 LAST_SET[X].TIME is the time at which that instruction was issued.
8102 INSN is null if no instruction has yet set register X. */
8103 struct {
8104 rtx insn;
8105 unsigned int time;
8106 } last_set[FIRST_PSEUDO_REGISTER];
8108 /* The pipeline's current DFA state. */
8109 state_t dfa_state;
8112 /* Reset STATE to the initial simulation state. */
8114 static void
8115 mips_sim_reset (struct mips_sim *state)
8117 state->time = 0;
8118 state->insns_left = state->issue_rate;
8119 memset (&state->last_set, 0, sizeof (state->last_set));
8120 state_reset (state->dfa_state);
8123 /* Initialize STATE before its first use. DFA_STATE points to an
8124 allocated but uninitialized DFA state. */
8126 static void
8127 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8129 state->issue_rate = mips_issue_rate ();
8130 state->dfa_state = dfa_state;
8131 mips_sim_reset (state);
8134 /* Advance STATE by one clock cycle. */
8136 static void
8137 mips_sim_next_cycle (struct mips_sim *state)
8139 state->time++;
8140 state->insns_left = state->issue_rate;
8141 state_transition (state->dfa_state, 0);
8144 /* Advance simulation state STATE until instruction INSN can read
8145 register REG. */
8147 static void
8148 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8150 unsigned int i;
8152 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8153 if (state->last_set[REGNO (reg) + i].insn != 0)
8155 unsigned int t;
8157 t = state->last_set[REGNO (reg) + i].time;
8158 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8159 while (state->time < t)
8160 mips_sim_next_cycle (state);
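/* Editorial sketch, not part of the build: mips_sim_wait_reg advances
   the simulated clock until, for every hard register the operand
   occupies, the cycle at which that register was last written plus the
   producer's latency has passed.  The fragment below computes that
   earliest issue cycle over plain arrays; using a single per-register
   latency is a simplification (the real code asks insn_latency for the
   specific producer/consumer pair), and all names are hypothetical.  */

/* LAST_SET_TIME[R] is the cycle at which register R was last written,
   or -1 if it has not been written.  LATENCY[R] is the number of
   cycles before that result may be read.  Return the first cycle, not
   earlier than NOW, at which an instruction reading registers
   FIRST_REG .. FIRST_REG + NREGS - 1 may issue.  */
static int
demo_earliest_issue_cycle (const int *last_set_time, const int *latency,
                           int first_reg, int nregs, int now)
{
  int r, ready = now;

  for (r = first_reg; r < first_reg + nregs; r++)
    if (last_set_time[r] >= 0 && last_set_time[r] + latency[r] > ready)
      ready = last_set_time[r] + latency[r];

  return ready;
}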
8164 /* A for_each_rtx callback. If *X is a register, advance simulation state
8165 DATA until mips_sim_insn can read the register's value. */
8167 static int
8168 mips_sim_wait_regs_2 (rtx *x, void *data)
8170 if (REG_P (*x))
8171 mips_sim_wait_reg (data, mips_sim_insn, *x);
8172 return 0;
8175 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8177 static void
8178 mips_sim_wait_regs_1 (rtx *x, void *data)
8180 for_each_rtx (x, mips_sim_wait_regs_2, data);
8183 /* Advance simulation state STATE until all of INSN's register
8184 dependencies are satisfied. */
8186 static void
8187 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8189 mips_sim_insn = insn;
8190 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8193 /* Advance simulation state STATE until the units required by
8194 instruction INSN are available. */
8196 static void
8197 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8199 state_t tmp_state;
8201 tmp_state = alloca (state_size ());
8202 while (state->insns_left == 0
8203 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8204 state_transition (tmp_state, insn) >= 0))
8205 mips_sim_next_cycle (state);
8208 /* Advance simulation state STATE until INSN is ready to issue. */
8210 static void
8211 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8213 mips_sim_wait_regs (state, insn);
8214 mips_sim_wait_units (state, insn);
8217 /* mips_sim_insn has just set X. Update the LAST_SET array
8218 in simulation state DATA. */
8220 static void
8221 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8223 struct mips_sim *state;
8224 unsigned int i;
8226 state = data;
8227 if (REG_P (x))
8228 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8230 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8231 state->last_set[REGNO (x) + i].time = state->time;
8235 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8236 can issue immediately (i.e., that mips_sim_wait_insn has already
8237 been called). */
8239 static void
8240 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8242 state_transition (state->dfa_state, insn);
8243 state->insns_left--;
8245 mips_sim_insn = insn;
8246 note_stores (PATTERN (insn), mips_sim_record_set, state);
8249 /* Simulate issuing a NOP in state STATE. */
8251 static void
8252 mips_sim_issue_nop (struct mips_sim *state)
8254 if (state->insns_left == 0)
8255 mips_sim_next_cycle (state);
8256 state->insns_left--;
8259 /* Update simulation state STATE so that it's ready to accept the instruction
8260 after INSN. INSN should be part of the main rtl chain, not a member of a
8261 SEQUENCE. */
8263 static void
8264 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8266 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8267 if (JUMP_P (insn))
8268 mips_sim_issue_nop (state);
8270 switch (GET_CODE (SEQ_BEGIN (insn)))
8272 case CODE_LABEL:
8273 case CALL_INSN:
8274 /* We can't predict the processor state after a call or label. */
8275 mips_sim_reset (state);
8276 break;
8278 case JUMP_INSN:
8279 /* The delay slots of branch likely instructions are only executed
8280 when the branch is taken. Therefore, if the caller has simulated
8281 the delay slot instruction, STATE does not really reflect the state
8282 of the pipeline for the instruction after the delay slot. Also,
8283 branch likely instructions tend to incur a penalty when not taken,
8284 so there will probably be an extra delay between the branch and
8285 the instruction after the delay slot. */
8286 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8287 mips_sim_reset (state);
8288 break;
8290 default:
8291 break;
8295 /* The VR4130 pipeline issues aligned pairs of instructions together,
8296 but it stalls the second instruction if it depends on the first.
8297 In order to cut down the amount of logic required, this dependence
8298 check is not based on a full instruction decode. Instead, any non-SPECIAL
8299 instruction is assumed to modify the register specified by bits 20-16
8300 (which is usually the "rt" field).
8302 In beq, beql, bne and bnel instructions, the rt field is actually an
8303 input, so we can end up with a false dependence between the branch
8304 and its delay slot. If this situation occurs in instruction INSN,
8305 try to avoid it by swapping rs and rt. */
8307 static void
8308 vr4130_avoid_branch_rt_conflict (rtx insn)
8310 rtx first, second;
8312 first = SEQ_BEGIN (insn);
8313 second = SEQ_END (insn);
8314 if (JUMP_P (first)
8315 && NONJUMP_INSN_P (second)
8316 && GET_CODE (PATTERN (first)) == SET
8317 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8318 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8320 /* Check for the right kind of condition. */
8321 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8322 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8323 && REG_P (XEXP (cond, 0))
8324 && REG_P (XEXP (cond, 1))
8325 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8326 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8328 /* SECOND mentions the rt register but not the rs register. */
8329 rtx tmp = XEXP (cond, 0);
8330 XEXP (cond, 0) = XEXP (cond, 1);
8331 XEXP (cond, 1) = tmp;
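/* Editorial sketch, not part of the build: for an eq/ne branch the two
   compared registers are interchangeable, so the pass above simply
   exchanges them when the delay-slot instruction reads the register in
   the rt position but not the one in rs.  The fragment below makes the
   same decision on a simplified branch description; the struct layout
   and the READS_REG callback are hypothetical.  */

#include <stdbool.h>

enum demo_cond { DEMO_EQ, DEMO_NE, DEMO_OTHER };

struct demo_branch
{
  enum demo_cond cond;
  int rs;   /* Register compared in the rs slot.  */
  int rt;   /* Register compared in the rt slot.  */
};

/* Swap rs and rt when the comparison is commutative and the delay-slot
   instruction (described only by which registers it reads) would
   otherwise appear to depend on rt.  */
static void
demo_avoid_branch_rt_conflict (struct demo_branch *br,
                               bool (*reads_reg) (int regno))
{
  if ((br->cond == DEMO_EQ || br->cond == DEMO_NE)
      && reads_reg (br->rt)
      && !reads_reg (br->rs))
    {
      int tmp = br->rs;
      br->rs = br->rt;
      br->rt = tmp;
    }
}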
8336 /* Implement -mvr4130-align. Go through each basic block and simulate the
8337 processor pipeline. If we find that a pair of instructions could execute
8338 in parallel, and the first of those instructions is not 8-byte aligned,
8339 insert a nop to make it aligned. */
8341 static void
8342 vr4130_align_insns (void)
8344 struct mips_sim state;
8345 rtx insn, subinsn, last, last2, next;
8346 bool aligned_p;
8348 dfa_start ();
8350 /* LAST is the last instruction before INSN to have a nonzero length.
8351 LAST2 is the last such instruction before LAST. */
8352 last = 0;
8353 last2 = 0;
8355 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8356 aligned_p = true;
8358 mips_sim_init (&state, alloca (state_size ()));
8359 for (insn = get_insns (); insn != 0; insn = next)
8361 unsigned int length;
8363 next = NEXT_INSN (insn);
8365 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8366 This isn't really related to the alignment pass, but we do it on
8367 the fly to avoid a separate instruction walk. */
8368 vr4130_avoid_branch_rt_conflict (insn);
8370 if (USEFUL_INSN_P (insn))
8371 FOR_EACH_SUBINSN (subinsn, insn)
8373 mips_sim_wait_insn (&state, subinsn);
8375 /* If we want this instruction to issue in parallel with the
8376 previous one, make sure that the previous instruction is
8377 aligned. There are several reasons why this isn't worthwhile
8378 when the second instruction is a call:
8380 - Calls are less likely to be performance critical,
8381 - There's a good chance that the delay slot can execute
8382 in parallel with the call.
8383 - The return address would then be unaligned.
8385 In general, if we're going to insert a nop between instructions
8386 X and Y, it's better to insert it immediately after X. That
8387 way, if the nop makes Y aligned, it will also align any labels
8388 between X and Y. */
8389 if (state.insns_left != state.issue_rate
8390 && !CALL_P (subinsn))
8392 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8394 /* SUBINSN is the first instruction in INSN and INSN is
8395 aligned. We want to align the previous instruction
8396 instead, so insert a nop between LAST2 and LAST.
8398 Note that LAST could be either a single instruction
8399 or a branch with a delay slot. In the latter case,
8400 LAST, like INSN, is already aligned, but the delay
8401 slot must have some extra delay that stops it from
8402 issuing at the same time as the branch. We therefore
8403 insert a nop before the branch in order to align its
8404 delay slot. */
8405 emit_insn_after (gen_nop (), last2);
8406 aligned_p = false;
8408 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8410 /* SUBINSN is the delay slot of INSN, but INSN is
8411 currently unaligned. Insert a nop between
8412 LAST and INSN to align it. */
8413 emit_insn_after (gen_nop (), last);
8414 aligned_p = true;
8417 mips_sim_issue_insn (&state, subinsn);
8419 mips_sim_finish_insn (&state, insn);
8421 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8422 length = get_attr_length (insn);
8423 if (length > 0)
8425 /* If the instruction is an asm statement or multi-instruction
8426 mips.md pattern, the length is only an estimate. Insert an
8427 8 byte alignment after it so that the following instructions
8428 can be handled correctly. */
8429 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8430 && (recog_memoized (insn) < 0 || length >= 8))
8432 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8433 next = NEXT_INSN (next);
8434 mips_sim_next_cycle (&state);
8435 aligned_p = true;
8437 else if (length & 4)
8438 aligned_p = !aligned_p;
8439 last2 = last;
8440 last = insn;
8443 /* See whether INSN is an aligned label. */
8444 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8445 aligned_p = true;
8447 dfa_finish ();
8450 /* Subroutine of mips_reorg. If there is a hazard between INSN
8451 and a previous instruction, avoid it by inserting nops after
8452 instruction AFTER.
8454 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8455 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8456 before using the value of that register. *HILO_DELAY counts the
8457 number of instructions since the last hilo hazard (that is,
8458 the number of instructions since the last mflo or mfhi).
8460 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8461 for the next instruction.
8463 LO_REG is an rtx for the LO register, used in dependence checking. */
8465 static void
8466 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8467 rtx *delayed_reg, rtx lo_reg)
8469 rtx pattern, set;
8470 int nops, ninsns;
8472 if (!INSN_P (insn))
8473 return;
8475 pattern = PATTERN (insn);
8477 /* Do not put the whole function in .set noreorder if it contains
8478 an asm statement. We don't know whether there will be hazards
8479 between the asm statement and the gcc-generated code. */
8480 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8481 cfun->machine->all_noreorder_p = false;
8483 /* Ignore zero-length instructions (barriers and the like). */
8484 ninsns = get_attr_length (insn) / 4;
8485 if (ninsns == 0)
8486 return;
8488 /* Work out how many nops are needed. Note that we only care about
8489 registers that are explicitly mentioned in the instruction's pattern.
8490 It doesn't matter that calls use the argument registers or that they
8491 clobber hi and lo. */
8492 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8493 nops = 2 - *hilo_delay;
8494 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8495 nops = 1;
8496 else
8497 nops = 0;
8499 /* Insert the nops between this instruction and the previous one.
8500 Each new nop takes us further from the last hilo hazard. */
8501 *hilo_delay += nops;
8502 while (nops-- > 0)
8503 emit_insn_after (gen_hazard_nop (), after);
8505 /* Set up the state for the next instruction. */
8506 *hilo_delay += ninsns;
8507 *delayed_reg = 0;
8508 if (INSN_CODE (insn) >= 0)
8509 switch (get_attr_hazard (insn))
8511 case HAZARD_NONE:
8512 break;
8514 case HAZARD_HILO:
8515 *hilo_delay = 0;
8516 break;
8518 case HAZARD_DELAY:
8519 set = single_set (insn);
8520 gcc_assert (set != 0);
8521 *delayed_reg = SET_DEST (set);
8522 break;
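/* Editorial sketch, not part of the build: the rules above amount to
   two clear instructions after an mflo/mfhi before LO may be written
   again, and one clear instruction after an instruction whose
   destination is delayed (the HAZARD_DELAY attribute; exactly which
   instructions carry it is defined in mips.md and is not restated
   here).  The nop count in isolation, with hypothetical names:  */

#include <stdbool.h>

/* HILO_DELAY is the number of instructions issued since the last
   mflo/mfhi.  Return how many nops must be inserted before an
   instruction that writes LO (SETS_LO, mirroring the reg_set_p check
   above) and/or reads the delayed destination of the previous
   instruction (READS_DELAYED_REG).  */
static int
demo_hazard_nops (int hilo_delay, bool sets_lo, bool reads_delayed_reg)
{
  if (hilo_delay < 2 && sets_lo)
    return 2 - hilo_delay;
  if (reads_delayed_reg)
    return 1;
  return 0;
}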
8527 /* Go through the instruction stream and insert nops where necessary.
8528 See if the whole function can then be put into .set noreorder &
8529 .set nomacro. */
8531 static void
8532 mips_avoid_hazards (void)
8534 rtx insn, last_insn, lo_reg, delayed_reg;
8535 int hilo_delay, i;
8537 /* Force all instructions to be split into their final form. */
8538 split_all_insns_noflow ();
8540 /* Recalculate instruction lengths without taking nops into account. */
8541 cfun->machine->ignore_hazard_length_p = true;
8542 shorten_branches (get_insns ());
8544 cfun->machine->all_noreorder_p = true;
8546 /* Profiled functions can't be all noreorder because the profiler
8547 support uses assembler macros. */
8548 if (current_function_profile)
8549 cfun->machine->all_noreorder_p = false;
8551 /* Code compiled with -mfix-vr4120 can't be all noreorder because
8552 we rely on the assembler to work around some errata. */
8553 if (TARGET_FIX_VR4120)
8554 cfun->machine->all_noreorder_p = false;
8556 /* The same is true for -mfix-vr4130 if we might generate mflo or
8557 mfhi instructions. Note that we avoid using mflo and mfhi if
8558 the VR4130 macc and dmacc instructions are available instead;
8559 see the *mfhilo_{si,di}_macc patterns. */
8560 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
8561 cfun->machine->all_noreorder_p = false;
8563 last_insn = 0;
8564 hilo_delay = 2;
8565 delayed_reg = 0;
8566 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8568 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
8569 if (INSN_P (insn))
8571 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8572 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8573 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8574 &hilo_delay, &delayed_reg, lo_reg);
8575 else
8576 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8577 &delayed_reg, lo_reg);
8579 last_insn = insn;
8584 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
8586 static void
8587 mips_reorg (void)
8589 if (TARGET_MIPS16)
8590 mips16_lay_out_constants ();
8591 else if (TARGET_EXPLICIT_RELOCS)
8593 if (mips_flag_delayed_branch)
8594 dbr_schedule (get_insns (), dump_file);
8595 mips_avoid_hazards ();
8596 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8597 vr4130_align_insns ();
8601 /* This function does three things:
8603 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8604 - Register the mips16 hardware floating point stubs.
8605 - Register the gofast functions if selected using --enable-gofast. */
8607 #include "config/gofast.h"
8609 static void
8610 mips_init_libfuncs (void)
8612 if (TARGET_FIX_VR4120)
8614 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8615 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8618 if (TARGET_MIPS16 && mips16_hard_float)
8620 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8621 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8622 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8623 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8625 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8626 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8627 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8628 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8629 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8630 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8632 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8633 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8635 if (TARGET_DOUBLE_FLOAT)
8637 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8638 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8639 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8640 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8642 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8643 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8644 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8645 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8646 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8647 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8649 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8650 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8652 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8653 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8656 else
8657 gofast_maybe_init_libfuncs ();
8660 /* Return a number assessing the cost of moving a register in class
8661 FROM to class TO. The classes are expressed using the enumeration
8662 values such as `GENERAL_REGS'. A value of 2 is the default; other
8663 values are interpreted relative to that.
8665 It is not required that the cost always equal 2 when FROM is the
8666 same as TO; on some machines it is expensive to move between
8667 registers if they are not general registers.
8669 If reload sees an insn consisting of a single `set' between two
8670 hard registers, and if `REGISTER_MOVE_COST' applied to their
8671 classes returns a value of 2, reload does not check to ensure that
8672 the constraints of the insn are met. Setting a cost of other than
8673 2 will allow reload to verify that the constraints are met. You
8674 should do this if the `movM' pattern's constraints do not allow
8675 such copying.
8677 ??? We make the cost of moving from HI/LO into general
8678 registers the same as that of moving general registers to
8679 HI/LO for TARGET_MIPS16, in order to prevent allocating a
8680 pseudo to HI/LO. This might hurt optimizations, though; it
8681 isn't clear whether it is wise. And it might not work in all cases. We
8682 could solve the DImode LO reg problem by using a multiply, just
8683 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8684 problem by using divide instructions. divu puts the remainder in
8685 the HI reg, so doing a divide by -1 will move the value in the HI
8686 reg for all values except -1. We could handle that case by using a
8687 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8688 a compare/branch to test the input value to see which instruction
8689 we need to use. This gets pretty messy, but it is feasible. */
8691 int
8692 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8693 enum reg_class to, enum reg_class from)
8695 if (from == M16_REGS && GR_REG_CLASS_P (to))
8696 return 2;
8697 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
8698 return 2;
8699 else if (GR_REG_CLASS_P (from))
8701 if (to == M16_REGS)
8702 return 2;
8703 else if (to == M16_NA_REGS)
8704 return 2;
8705 else if (GR_REG_CLASS_P (to))
8707 if (TARGET_MIPS16)
8708 return 4;
8709 else
8710 return 2;
8712 else if (to == FP_REGS)
8713 return 4;
8714 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8716 if (TARGET_MIPS16)
8717 return 12;
8718 else
8719 return 6;
8721 else if (COP_REG_CLASS_P (to))
8723 return 5;
8725 } /* GR_REG_CLASS_P (from) */
8726 else if (from == FP_REGS)
8728 if (GR_REG_CLASS_P (to))
8729 return 4;
8730 else if (to == FP_REGS)
8731 return 2;
8732 else if (to == ST_REGS)
8733 return 8;
8734 } /* from == FP_REGS */
8735 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8737 if (GR_REG_CLASS_P (to))
8739 if (TARGET_MIPS16)
8740 return 12;
8741 else
8742 return 6;
8744 } /* from == HI_REG, etc. */
8745 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8746 return 4;
8747 else if (COP_REG_CLASS_P (from))
8749 return 5;
8750 } /* COP_REG_CLASS_P (from) */
8752 /* Fall through. */
8754 return 12;
8757 /* Return the length of INSN. LENGTH is the initial length computed by
8758 attributes in the machine-description file. */
8760 int
8761 mips_adjust_insn_length (rtx insn, int length)
8763 /* An unconditional jump has an unfilled delay slot if it is not part
8764 of a sequence. A conditional jump normally has a delay slot, but
8765 does not on MIPS16. */
8766 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
8767 length += 4;
8769 /* See how many nops might be needed to avoid hardware hazards. */
8770 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8771 switch (get_attr_hazard (insn))
8773 case HAZARD_NONE:
8774 break;
8776 case HAZARD_DELAY:
8777 length += 4;
8778 break;
8780 case HAZARD_HILO:
8781 length += 8;
8782 break;
8785 /* All MIPS16 instructions are a measly two bytes. */
8786 if (TARGET_MIPS16)
8787 length /= 2;
8789 return length;
8793 /* Return an asm sequence to start a noat block and load the address
8794 of a label into $1. */
8796 const char *
8797 mips_output_load_label (void)
8799 if (TARGET_EXPLICIT_RELOCS)
8800 switch (mips_abi)
8802 case ABI_N32:
8803 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8805 case ABI_64:
8806 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
8808 default:
8809 if (ISA_HAS_LOAD_DELAY)
8810 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8811 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
8813 else
8815 if (Pmode == DImode)
8816 return "%[dla\t%@,%0";
8817 else
8818 return "%[la\t%@,%0";
8823 /* Output assembly instructions to perform a conditional branch.
8825 INSN is the branch instruction. OPERANDS[0] is the condition.
8826 OPERANDS[1] is the target of the branch. OPERANDS[2] is the target
8827 of the first operand to the condition. If TWO_OPERANDS_P is
8828 nonzero the comparison takes two operands; OPERANDS[3] will be the
8829 second operand.
8831 If INVERTED_P is nonzero we are to branch if the condition does
8832 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8834 LENGTH is the length (in bytes) of the sequence we are to generate.
8835 That tells us whether to generate a simple conditional branch, or a
8836 reversed conditional branch around a `jr' instruction. */
8837 const char *
8838 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8839 int float_p, int inverted_p, int length)
8841 static char buffer[200];
8842 /* The kind of comparison we are doing. */
8843 enum rtx_code code = GET_CODE (operands[0]);
8844 /* Nonzero if the opcode for the comparison needs a `z' indicating
8845 that it is a comparison against zero. */
8846 int need_z_p;
8847 /* A string to use in the assembly output to represent the first
8848 operand. */
8849 const char *op1 = "%z2";
8850 /* A string to use in the assembly output to represent the second
8851 operand. Use the hard-wired zero register if there's no second
8852 operand. */
8853 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8854 /* The operand-printing string for the comparison. */
8855 const char *const comp = (float_p ? "%F0" : "%C0");
8856 /* The operand-printing string for the inverted comparison. */
8857 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8859 /* The MIPS processors (for ISA levels of at least two) have
8860 "likely" variants of each branch instruction. These instructions
8861 annul the instruction in the delay slot if the branch is not
8862 taken. */
8863 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8865 if (!two_operands_p)
8867 /* To compute whether A > B, for example, we normally
8868 subtract B from A and then look at the sign bit. But, if we
8869 are doing an unsigned comparison, and B is zero, we don't
8870 have to do the subtraction. Instead, we can just check to
8871 see if A is nonzero. Thus, we change the CODE here to
8872 reflect the simpler comparison operation. */
8873 switch (code)
8875 case GTU:
8876 code = NE;
8877 break;
8879 case LEU:
8880 code = EQ;
8881 break;
8883 case GEU:
8884 /* A condition which will always be true. */
8885 code = EQ;
8886 op1 = "%.";
8887 break;
8889 case LTU:
8890 /* A condition which will always be false. */
8891 code = NE;
8892 op1 = "%.";
8893 break;
8895 default:
8896 /* Not a special case. */
8897 break;
8901 /* Relative comparisons are always done against zero. But
8902 equality comparisons are done between two operands, and therefore
8903 do not require a `z' in the assembly language output. */
8904 need_z_p = (!float_p && code != EQ && code != NE);
8905 /* For comparisons against zero, the zero is not provided
8906 explicitly. */
8907 if (need_z_p)
8908 op2 = "";
8910 /* Begin by terminating the buffer. That way we can always use
8911 strcat to add to it. */
8912 buffer[0] = '\0';
8914 switch (length)
8916 case 4:
8917 case 8:
8918 /* Just a simple conditional branch. */
8919 if (float_p)
8920 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8921 inverted_p ? inverted_comp : comp);
8922 else
8923 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8924 inverted_p ? inverted_comp : comp,
8925 need_z_p ? "z" : "",
8926 op1,
8927 op2);
8928 return buffer;
8930 case 12:
8931 case 16:
8932 case 24:
8933 case 28:
8935 /* Generate a reversed conditional branch around a `j'
8936 instruction:
8938 .set noreorder
8939 .set nomacro
8940 bc l
8941 delay_slot or #nop
8942 j target
8943 #nop
8945 .set macro
8946 .set reorder
8948 If the original branch was a likely branch, the delay slot
8949 must be executed only if the branch is taken, so generate:
8951 .set noreorder
8952 .set nomacro
8953 bc l
8954 #nop
8955 j target
8956 delay slot or #nop
8958 .set macro
8959 .set reorder
8961 When generating PIC, instead of:
8963 j target
8965 we emit:
8967 .set noat
8968 la $at, target
8969 jr $at
8970 .set at
8973 rtx orig_target;
8974 rtx target = gen_label_rtx ();
8976 orig_target = operands[1];
8977 operands[1] = target;
8978 /* Generate the reversed comparison. This takes four
8979 bytes. */
8980 if (float_p)
8981 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8982 inverted_p ? comp : inverted_comp);
8983 else
8984 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8985 inverted_p ? comp : inverted_comp,
8986 need_z_p ? "z" : "",
8987 op1,
8988 op2);
8989 output_asm_insn (buffer, operands);
8991 if (length != 16 && length != 28 && ! mips_branch_likely)
8993 /* Output delay slot instruction. */
8994 rtx insn = final_sequence;
8995 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8996 optimize, 1, NULL);
8997 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8999 else
9000 output_asm_insn ("%#", 0);
9002 if (length <= 16)
9003 output_asm_insn ("j\t%0", &orig_target);
9004 else
9006 output_asm_insn (mips_output_load_label (), &orig_target);
9007 output_asm_insn ("jr\t%@%]", 0);
9010 if (length != 16 && length != 28 && mips_branch_likely)
9012 /* Output delay slot instruction. */
9013 rtx insn = final_sequence;
9014 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
9015 optimize, 1, NULL);
9016 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
9018 else
9019 output_asm_insn ("%#", 0);
9021 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9022 CODE_LABEL_NUMBER (target));
9024 return "";
9027 default:
9028 gcc_unreachable ();
9031 /* NOTREACHED */
9032 return 0;
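/* Editorial sketch, not part of the build: when the target is out of
   range of a single conditional branch, the code above emits the
   inverted branch around an absolute jump, as shown in the comment.
   The fragment below prints the two shapes for a non-PIC, non-likely
   branch; the "1f"/"1:" local label and the nop fills stand in for the
   label generation and delay-slot handling done by the real code, and
   all names are hypothetical.  */

#include <stdio.h>

/* COND and INV_COND are assembler condition suffixes such as "eq" and
   "ne"; OPS is the operand string and TARGET the branch target.  */
static void
demo_output_branch (FILE *f, const char *cond, const char *inv_cond,
                    const char *ops, const char *target, int long_branch_p)
{
  if (!long_branch_p)
    fprintf (f, "\tb%s\t%s,%s\n", cond, ops, target);
  else
    {
      fprintf (f, "\tb%s\t%s,1f\n", inv_cond, ops);
      fprintf (f, "\tnop\n");
      fprintf (f, "\tj\t%s\n", target);
      fprintf (f, "\tnop\n");
      fprintf (f, "1:\n");
    }
}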
9035 /* Used to output div or ddiv instruction DIVISION, which has the operands
9036 given by OPERANDS. Add in a divide-by-zero check if needed.
9038 When working around R4000 and R4400 errata, we need to make sure that
9039 the division is not immediately followed by a shift[1][2]. We also
9040 need to stop the division from being put into a branch delay slot[3].
9041 The easiest way to avoid both problems is to add a nop after the
9042 division. When a divide-by-zero check is needed, this nop can be
9043 used to fill the branch delay slot.
9045 [1] If a double-word or a variable shift executes immediately
9046 after starting an integer division, the shift may give an
9047 incorrect result. See quotations of errata #16 and #28 from
9048 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9049 in mips.md for details.
9051 [2] A similar bug to [1] exists for all revisions of the
9052 R4000 and the R4400 when run in an MC configuration.
9053 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9055 "19. In this following sequence:
9057 ddiv (or ddivu or div or divu)
9058 dsll32 (or dsrl32, dsra32)
9060 if an MPT stall occurs, while the divide is slipping the cpu
9061 pipeline, then the following double shift would end up with an
9062 incorrect result.
9064 Workaround: The compiler needs to avoid generating any
9065 sequence with divide followed by extended double shift."
9067 This erratum is also present in "MIPS R4400MC Errata, Processor
9068 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9069 & 3.0" as errata #10 and #4, respectively.
9071 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9072 (also valid for MIPS R4000MC processors):
9074 "52. R4000SC: This bug does not apply for the R4000PC.
9076 There are two flavors of this bug:
9078 1) If the instruction just after divide takes an RF exception
9079 (tlb-refill, tlb-invalid) and gets an instruction cache
9080 miss (both primary and secondary) and the line which is
9081 currently in secondary cache at this index had the first
9082 data word, where the bits 5..2 are set, then R4000 would
9083 get a wrong result for the div.
9087 div r8, r9
9088 ------------------- # end-of page. -tlb-refill
9092 div r8, r9
9093 ------------------- # end-of page. -tlb-invalid
9096 2) If the divide is in the taken branch delay slot, where the
9097 target takes RF exception and gets an I-cache miss for the
9098 exception vector or where I-cache miss occurs for the
9099 target address, under the above mentioned scenarios, the
9100 div would get wrong results.
9103 j r2 # to next page mapped or unmapped
9104 div r8,r9 # this bug would be there as long
9105 # as there is an ICache miss and
9106 nop # the "data pattern" is present
9109 beq r0, r0, NextPage # to Next page
9110 div r8,r9
9113 This bug is present for div, divu, ddiv, and ddivu
9114 instructions.
9116 Workaround: For item 1), OS could make sure that the next page
9117 after the divide instruction is also mapped. For item 2), the
9118 compiler could make sure that the divide instruction is not in
9119 the branch delay slot."
9121 These processors have PRId values of 0x00004220 and 0x00004300 for
9122 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9124 const char *
9125 mips_output_division (const char *division, rtx *operands)
9127 const char *s;
9129 s = division;
9130 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9132 output_asm_insn (s, operands);
9133 s = "nop";
9135 if (TARGET_CHECK_ZERO_DIV)
9137 if (TARGET_MIPS16)
9139 output_asm_insn (s, operands);
9140 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9142 else if (GENERATE_DIVIDE_TRAPS)
9144 output_asm_insn (s, operands);
9145 s = "teq\t%2,%.,7";
9147 else
9149 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9150 output_asm_insn (s, operands);
9151 s = "break\t7%)\n1:";
9154 return s;
9157 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9158 with a final "000" replaced by "k". Ignore case.
9160 Note: this function is shared between GCC and GAS. */
9162 static bool
9163 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9165 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9166 given++, canonical++;
9168 return ((*given == 0 && *canonical == 0)
9169 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9173 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9174 CPU name. We've traditionally allowed a lot of variation here.
9176 Note: this function is shared between GCC and GAS. */
9178 static bool
9179 mips_matching_cpu_name_p (const char *canonical, const char *given)
9181 /* First see if the name matches exactly, or with a final "000"
9182 turned into "k". */
9183 if (mips_strict_matching_cpu_name_p (canonical, given))
9184 return true;
9186 /* If not, try comparing based on numerical designation alone.
9187 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9188 if (TOLOWER (*given) == 'r')
9189 given++;
9190 if (!ISDIGIT (*given))
9191 return false;
9193 /* Skip over some well-known prefixes in the canonical name,
9194 hoping to find a number there too. */
9195 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9196 canonical += 2;
9197 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9198 canonical += 2;
9199 else if (TOLOWER (canonical[0]) == 'r')
9200 canonical += 1;
9202 return mips_strict_matching_cpu_name_p (canonical, given);
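/* Editorial sketch, not part of the build: the two functions above let
   "r4k", "4000" or "r4000" all select the canonical name "r4000", and
   "4130" select "vr4130".  Below is a standalone reimplementation of
   the strict rule only (caseless match, with a trailing "000"
   optionally written as "k"), using standard headers;
   demo_strict_cpu_match is hypothetical.  */

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* Return true if GIVEN equals CANONICAL ignoring case, or equals
   CANONICAL with a final "000" written as "k", so that "r4k" matches
   "r4000".  */
static bool
demo_strict_cpu_match (const char *canonical, const char *given)
{
  while (*given != 0
         && tolower ((unsigned char) *given)
            == tolower ((unsigned char) *canonical))
    given++, canonical++;

  if (*given == 0 && *canonical == 0)
    return true;
  return (strcmp (canonical, "000") == 0
          && tolower ((unsigned char) given[0]) == 'k'
          && given[1] == 0);
}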
9206 /* Return the mips_cpu_info entry for the processor or ISA given
9207 by CPU_STRING. Return null if the string isn't recognized.
9209 A similar function exists in GAS. */
9211 static const struct mips_cpu_info *
9212 mips_parse_cpu (const char *cpu_string)
9214 const struct mips_cpu_info *p;
9215 const char *s;
9217 /* In the past, we allowed upper-case CPU names, but it doesn't
9218 work well with the multilib machinery. */
9219 for (s = cpu_string; *s != 0; s++)
9220 if (ISUPPER (*s))
9222 warning ("the cpu name must be lower case");
9223 break;
9226 /* 'from-abi' selects the most compatible architecture for the given
9227 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9228 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9229 version. Look first at the -mgp options, if given, otherwise base
9230 the choice on MASK_64BIT in TARGET_DEFAULT. */
9231 if (strcasecmp (cpu_string, "from-abi") == 0)
9232 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9233 : ABI_NEEDS_64BIT_REGS ? 3
9234 : (TARGET_64BIT ? 3 : 1));
9236 /* 'default' has traditionally been a no-op. Probably not very useful. */
9237 if (strcasecmp (cpu_string, "default") == 0)
9238 return 0;
9240 for (p = mips_cpu_info_table; p->name != 0; p++)
9241 if (mips_matching_cpu_name_p (p->name, cpu_string))
9242 return p;
9244 return 0;
9248 /* Return the processor associated with the given ISA level, or null
9249 if the ISA isn't valid. */
9251 static const struct mips_cpu_info *
9252 mips_cpu_info_from_isa (int isa)
9254 const struct mips_cpu_info *p;
9256 for (p = mips_cpu_info_table; p->name != 0; p++)
9257 if (p->isa == isa)
9258 return p;
9260 return 0;
9263 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9264 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9265 they only hold condition code modes, and CCmode is always considered to
9266 be 4 bytes wide. All other registers are word sized. */
9268 unsigned int
9269 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9271 if (ST_REG_P (regno))
9272 return ((GET_MODE_SIZE (mode) + 3) / 4);
9273 else if (! FP_REG_P (regno))
9274 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9275 else
9276 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9279 /* Implement TARGET_RETURN_IN_MEMORY. Under the old ABIs (i.e., 32 and O64),
9280 all BLKmode objects are returned in memory. Under the new ABIs (N32 and
9281 64-bit), small structures are returned in a register.
9282 Objects with varying size must still be returned in memory, of
9283 course. */
9285 static bool
9286 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9288 if (TARGET_OLDABI)
9289 return (TYPE_MODE (type) == BLKmode);
9290 else
9291 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9292 || (int_size_in_bytes (type) == -1));
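/* Editorial example (not in the original source): under n32 or n64,
   UNITS_PER_WORD is 8, so a struct of two "int" fields (8 bytes) fits the
   2 * UNITS_PER_WORD = 16-byte limit and is returned in registers, while a
   20-byte struct, or any variable-sized object (int_size_in_bytes == -1),
   goes in memory.  Under o32/o64 every BLKmode aggregate goes in memory.  */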
9295 static bool
9296 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9298 return !TARGET_OLDABI;
9301 /* Return true if INSN is a multiply-add or multiply-subtract
9302 instruction and PREV assigns to the accumulator operand. */
9304 bool
9305 mips_linked_madd_p (rtx prev, rtx insn)
9307 rtx x;
9309 x = single_set (insn);
9310 if (x == 0)
9311 return false;
9313 x = SET_SRC (x);
9315 if (GET_CODE (x) == PLUS
9316 && GET_CODE (XEXP (x, 0)) == MULT
9317 && reg_set_p (XEXP (x, 1), prev))
9318 return true;
9320 if (GET_CODE (x) == MINUS
9321 && GET_CODE (XEXP (x, 1)) == MULT
9322 && reg_set_p (XEXP (x, 0), prev))
9323 return true;
9325 return false;
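/* Editorial example (not in the original source, register names and modes
   schematic only): PREV and INSN link into a macc chain when, e.g.,

     PREV:  (set (reg LO) (mult:SI (reg A) (reg B)))
     INSN:  (set (reg LO) (plus:SI (mult:SI (reg C) (reg D)) (reg LO)))

   because INSN's accumulator operand (the non-MULT arm of the PLUS) is set
   by PREV, which is exactly what the checks above test.  */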
9328 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9329 that may clobber hi or lo. */
9331 static rtx mips_macc_chains_last_hilo;
9333 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9334 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9336 static void
9337 mips_macc_chains_record (rtx insn)
9339 if (get_attr_may_clobber_hilo (insn))
9340 mips_macc_chains_last_hilo = insn;
9343 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9344 has NREADY elements, looking for a multiply-add or multiply-subtract
9345 instruction whose accumulator operand is set by mips_macc_chains_last_hilo.
9346 If there is one, promote it ahead of anything else that might
9347 clobber hi or lo. */
9349 static void
9350 mips_macc_chains_reorder (rtx *ready, int nready)
9352 int i, j;
9354 if (mips_macc_chains_last_hilo != 0)
9355 for (i = nready - 1; i >= 0; i--)
9356 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9358 for (j = nready - 1; j > i; j--)
9359 if (recog_memoized (ready[j]) >= 0
9360 && get_attr_may_clobber_hilo (ready[j]))
9362 mips_promote_ready (ready, i, j);
9363 break;
9365 break;
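/* Editorial note (not in the original source): the scheduler's ready queue
   keeps its highest-priority candidate at index NREADY - 1.  If the queue
   holds, say, an integer multiply near the top and, further down, a madd
   whose accumulator was written by mips_macc_chains_last_hilo, the loops
   above find that madd and call mips_promote_ready to move it into the slot
   of the highest-placed instruction that may clobber hi or lo, so the chain
   is continued before anything can break it.  */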
9369 /* The most recently scheduled instruction. */
9371 static rtx vr4130_last_insn;
9373 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9374 points to an rtx that is initially an instruction. Nullify the rtx
9375 if the instruction uses the value of register X. */
9377 static void
9378 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9380 rtx *insn_ptr = data;
9381 if (REG_P (x)
9382 && *insn_ptr != 0
9383 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9384 *insn_ptr = 0;
9387 /* Return true if there is true register dependence between vr4130_last_insn
9388 and INSN. */
9390 static bool
9391 vr4130_true_reg_dependence_p (rtx insn)
9393 note_stores (PATTERN (vr4130_last_insn),
9394 vr4130_true_reg_dependence_p_1, &insn);
9395 return insn == 0;
9398 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9399 the ready queue and that INSN2 is the instruction after it, return
9400 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9401 in which INSN1 and INSN2 can probably issue in parallel, but for
9402 which (INSN2, INSN1) should be less sensitive to instruction
9403 alignment than (INSN1, INSN2). See 4130.md for more details. */
9405 static bool
9406 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9408 rtx dep;
9410 /* Check for the following case:
9412 1) there is some other instruction X with an anti dependence on INSN1;
9413 2) X has a higher priority than INSN2; and
9414 3) X is an arithmetic instruction (and thus has no unit restrictions).
9416 If INSN1 is the last instruction blocking X, it would be better to
9417 choose (INSN1, X) over (INSN2, INSN1). */
9418 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9419 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9420 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9421 && recog_memoized (XEXP (dep, 0)) >= 0
9422 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9423 return false;
9425 if (vr4130_last_insn != 0
9426 && recog_memoized (insn1) >= 0
9427 && recog_memoized (insn2) >= 0)
9429 /* See whether INSN1 and INSN2 use different execution units,
9430 or if they are both ALU-type instructions. If so, they can
9431 probably execute in parallel. */
9432 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9433 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9434 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9436 /* If only one of the instructions has a dependence on
9437 vr4130_last_insn, prefer to schedule the other one first. */
9438 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9439 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9440 if (dep1 != dep2)
9441 return dep1;
9443 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9444 is not an ALU-type instruction and if INSN1 uses the same
9445 execution unit. (Note that if this condition holds, we already
9446 know that INSN2 uses a different execution unit.) */
9447 if (class1 != VR4130_CLASS_ALU
9448 && recog_memoized (vr4130_last_insn) >= 0
9449 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9450 return true;
9453 return false;
9456 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9457 queue with at least two instructions. Swap the first two if
9458 vr4130_swap_insns_p says that it could be worthwhile. */
9460 static void
9461 vr4130_reorder (rtx *ready, int nready)
9463 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9464 mips_promote_ready (ready, nready - 2, nready - 1);
9467 /* Remove the instruction at index LOWER from ready queue READY and
9468 reinsert it in front of the instruction at index HIGHER. LOWER must
9469 be <= HIGHER. */
9471 static void
9472 mips_promote_ready (rtx *ready, int lower, int higher)
9474 rtx new_head;
9475 int i;
9477 new_head = ready[lower];
9478 for (i = lower; i < higher; i++)
9479 ready[i] = ready[i + 1];
9480 ready[i] = new_head;
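/* Editorial worked example (not in the original source): with
   READY = { A, B, C, D }, LOWER = 1 and HIGHER = 3, the loop above copies C
   and D down one slot and then stores B at index 3, giving { A, C, D, B }.
   Since the scheduler issues from the high end of the queue, B is now
   considered before C and D.  */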
9483 /* Implement TARGET_SCHED_REORDER. */
9485 static int
9486 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9487 rtx *ready, int *nreadyp, int cycle)
9489 if (!reload_completed && TUNE_MACC_CHAINS)
9491 if (cycle == 0)
9492 mips_macc_chains_last_hilo = 0;
9493 if (*nreadyp > 0)
9494 mips_macc_chains_reorder (ready, *nreadyp);
9496 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9498 if (cycle == 0)
9499 vr4130_last_insn = 0;
9500 if (*nreadyp > 1)
9501 vr4130_reorder (ready, *nreadyp);
9503 return mips_issue_rate ();
9506 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9508 static int
9509 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9510 rtx insn, int more)
9512 switch (GET_CODE (PATTERN (insn)))
9514 case USE:
9515 case CLOBBER:
9516 /* Don't count USEs and CLOBBERs against the issue rate. */
9517 break;
9519 default:
9520 more--;
9521 if (!reload_completed && TUNE_MACC_CHAINS)
9522 mips_macc_chains_record (insn);
9523 vr4130_last_insn = insn;
9524 break;
9526 return more;
9529 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9530 dependencies have no cost. */
9532 static int
9533 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9534 rtx dep ATTRIBUTE_UNUSED, int cost)
9536 if (REG_NOTE_KIND (link) != 0)
9537 return 0;
9538 return cost;
9541 /* Return the number of instructions that can be issued per cycle. */
9543 static int
9544 mips_issue_rate (void)
9546 switch (mips_tune)
9548 case PROCESSOR_R4130:
9549 case PROCESSOR_R5400:
9550 case PROCESSOR_R5500:
9551 case PROCESSOR_R7000:
9552 case PROCESSOR_R9000:
9553 return 2;
9555 case PROCESSOR_SB1:
9556 /* This is actually 4, but we get better performance if we claim 3.
9557 This is partly because of unwanted speculative code motion with the
9558 larger number, and partly because in most common cases we can't
9559 reach the theoretical max of 4. */
9560 return 3;
9562 default:
9563 return 1;
9567 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
9568 be as wide as the scheduling freedom in the DFA. */
9570 static int
9571 mips_multipass_dfa_lookahead (void)
9573 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9574 if (mips_tune == PROCESSOR_SB1)
9575 return 4;
9577 return 0;
9580 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9581 return the first operand of the associated "pref" or "prefx" insn. */
9583 rtx
9584 mips_prefetch_cookie (rtx write, rtx locality)
9586 /* store_streamed / load_streamed. */
9587 if (INTVAL (locality) <= 0)
9588 return GEN_INT (INTVAL (write) + 4);
9590 /* store / load. */
9591 if (INTVAL (locality) <= 2)
9592 return write;
9594 /* store_retained / load_retained. */
9595 return GEN_INT (INTVAL (write) + 6);
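/* Editorial summary (not in the original source): WRITE is the prefetch
   rtx's write flag (0 = load, 1 = store) and the returned constant is the
   hint operand of the "pref"/"prefx" instruction, so the mapping
   implemented above is roughly:

     locality 0       -> hint 4/5  (load_streamed / store_streamed)
     locality 1 or 2  -> hint 0/1  (load / store)
     locality 3       -> hint 6/7  (load_retained / store_retained)  */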
9598 /* MIPS builtin function support. */
9600 struct builtin_description
9602 /* The code of the main .md file instruction. See mips_builtin_type
9603 for more information. */
9604 enum insn_code icode;
9606 /* The floating-point comparison code to use with ICODE, if any. */
9607 enum mips_fp_condition cond;
9609 /* The name of the builtin function. */
9610 const char *name;
9612 /* Specifies how the function should be expanded. */
9613 enum mips_builtin_type builtin_type;
9615 /* The function's prototype. */
9616 enum mips_function_type function_type;
9618 /* The target flags required for this function. */
9619 int target_flags;
9622 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9623 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9624 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9625 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9626 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
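/* Editorial example (not in the original source): an entry such as

     DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT)

   expands to the initializer

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }

   so each direct builtin is tied to one mips.md instruction pattern.  */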
9628 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9629 TARGET_FLAGS. */
9630 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9631 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9632 "__builtin_mips_" #INSN "_" #COND "_s", \
9633 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9634 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9635 "__builtin_mips_" #INSN "_" #COND "_d", \
9636 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9638 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9639 The lower and upper forms require TARGET_FLAGS while the any and all
9640 forms require MASK_MIPS3D. */
9641 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9642 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9643 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9644 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9645 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9646 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9647 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9648 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9649 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9650 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9651 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9652 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9653 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9655 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9656 require MASK_MIPS3D. */
9657 #define CMP_4S_BUILTINS(INSN, COND) \
9658 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9659 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9660 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9661 MASK_MIPS3D }, \
9662 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9663 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9664 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9665 MASK_MIPS3D }
9667 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9668 instruction requires TARGET_FLAGS. */
9669 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9670 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9671 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9672 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9673 TARGET_FLAGS }, \
9674 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9675 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9676 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9677 TARGET_FLAGS }
9679 /* Define all the builtins related to c.cond.fmt condition COND. */
9680 #define CMP_BUILTINS(COND) \
9681 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9682 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9683 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9684 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9685 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9686 CMP_4S_BUILTINS (c, COND), \
9687 CMP_4S_BUILTINS (cabs, COND)
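/* Editorial note (not in the original source): CMP_BUILTINS is itself
   expanded once per condition by MIPS_FP_CONDITIONS below, so a single
   condition such as "eq" contributes entries for, among others,
   __builtin_mips_movt_c_eq_ps, __builtin_mips_any_c_eq_ps,
   __builtin_mips_all_c_eq_4s and __builtin_mips_cabs_eq_s.  */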
9689 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9690 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
9692 static const struct builtin_description mips_bdesc[] =
9694 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9695 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9696 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9697 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9698 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
9699 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9700 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9701 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9703 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
9704 MASK_PAIRED_SINGLE_FLOAT),
9705 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9706 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9707 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9708 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9710 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9711 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9712 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9713 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9714 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9715 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9717 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9718 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9719 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9720 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9721 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9722 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9724 MIPS_FP_CONDITIONS (CMP_BUILTINS)
9727 /* Builtin functions for the SB-1 processor. */
9729 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
9731 static const struct builtin_description sb1_bdesc[] =
9733 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
9736 /* This helps provide a mapping from builtin function codes to bdesc
9737 arrays. */
9739 struct bdesc_map
9741 /* The builtin function table that this entry describes. */
9742 const struct builtin_description *bdesc;
9744 /* The number of entries in the builtin function table. */
9745 unsigned int size;
9747 /* The target processor that supports these builtin functions.
9748 PROCESSOR_DEFAULT means we enable them for all processors. */
9749 enum processor_type proc;
9752 static const struct bdesc_map bdesc_arrays[] =
9754 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_DEFAULT },
9755 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 }
9758 /* Take the head of argument list *ARGLIST and convert it into a form
9759 suitable for input operand OP of instruction ICODE. Return the value
9760 and point *ARGLIST at the next element of the list. */
9762 static rtx
9763 mips_prepare_builtin_arg (enum insn_code icode,
9764 unsigned int op, tree *arglist)
9766 rtx value;
9767 enum machine_mode mode;
9769 value = expand_expr (TREE_VALUE (*arglist), NULL_RTX, VOIDmode, 0);
9770 mode = insn_data[icode].operand[op].mode;
9771 if (!insn_data[icode].operand[op].predicate (value, mode))
9772 value = copy_to_mode_reg (mode, value);
9774 *arglist = TREE_CHAIN (*arglist);
9775 return value;
9778 /* Return an rtx suitable for output operand OP of instruction ICODE.
9779 If TARGET is non-null, try to use it where possible. */
9781 static rtx
9782 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
9784 enum machine_mode mode;
9786 mode = insn_data[icode].operand[op].mode;
9787 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
9788 target = gen_reg_rtx (mode);
9790 return target;
9793 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
9795 rtx
9796 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9797 enum machine_mode mode ATTRIBUTE_UNUSED,
9798 int ignore ATTRIBUTE_UNUSED)
9800 enum insn_code icode;
9801 enum mips_builtin_type type;
9802 tree fndecl, arglist;
9803 unsigned int fcode;
9804 const struct builtin_description *bdesc;
9805 const struct bdesc_map *m;
9807 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9808 arglist = TREE_OPERAND (exp, 1);
9809 fcode = DECL_FUNCTION_CODE (fndecl);
9811 bdesc = NULL;
9812 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9814 if (fcode < m->size)
9816 bdesc = m->bdesc;
9817 icode = bdesc[fcode].icode;
9818 type = bdesc[fcode].builtin_type;
9819 break;
9821 fcode -= m->size;
9823 if (bdesc == NULL)
9824 return 0;
9826 switch (type)
9828 case MIPS_BUILTIN_DIRECT:
9829 return mips_expand_builtin_direct (icode, target, arglist);
9831 case MIPS_BUILTIN_MOVT:
9832 case MIPS_BUILTIN_MOVF:
9833 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
9834 target, arglist);
9836 case MIPS_BUILTIN_CMP_ANY:
9837 case MIPS_BUILTIN_CMP_ALL:
9838 case MIPS_BUILTIN_CMP_UPPER:
9839 case MIPS_BUILTIN_CMP_LOWER:
9840 case MIPS_BUILTIN_CMP_SINGLE:
9841 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
9842 target, arglist);
9844 default:
9845 return 0;
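/* Editorial example (not in the original source): DECL_FUNCTION_CODEs are
   assigned sequentially across the bdesc arrays in mips_init_builtins, and
   the loop above decodes them the same way: if mips_bdesc has N entries,
   codes 0 through N - 1 index mips_bdesc directly, while a code of N refers
   to the first entry of sb1_bdesc.  */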
9849 /* Init builtin functions.  This is called from TARGET_INIT_BUILTINS.  */
9851 void
9852 mips_init_builtins (void)
9854 const struct builtin_description *d;
9855 const struct bdesc_map *m;
9856 tree types[(int) MIPS_MAX_FTYPE_MAX];
9857 tree V2SF_type_node;
9858 unsigned int offset;
9860 /* We have builtins only for -mpaired-single and -mips3d.  */
9861 if (!TARGET_PAIRED_SINGLE_FLOAT)
9862 return;
9864 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
9866 types[MIPS_V2SF_FTYPE_V2SF]
9867 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
9869 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
9870 = build_function_type_list (V2SF_type_node,
9871 V2SF_type_node, V2SF_type_node, NULL_TREE);
9873 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
9874 = build_function_type_list (V2SF_type_node,
9875 V2SF_type_node, V2SF_type_node,
9876 integer_type_node, NULL_TREE);
9878 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
9879 = build_function_type_list (V2SF_type_node,
9880 V2SF_type_node, V2SF_type_node,
9881 V2SF_type_node, V2SF_type_node, NULL_TREE);
9883 types[MIPS_V2SF_FTYPE_SF_SF]
9884 = build_function_type_list (V2SF_type_node,
9885 float_type_node, float_type_node, NULL_TREE);
9887 types[MIPS_INT_FTYPE_V2SF_V2SF]
9888 = build_function_type_list (integer_type_node,
9889 V2SF_type_node, V2SF_type_node, NULL_TREE);
9891 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
9892 = build_function_type_list (integer_type_node,
9893 V2SF_type_node, V2SF_type_node,
9894 V2SF_type_node, V2SF_type_node, NULL_TREE);
9896 types[MIPS_INT_FTYPE_SF_SF]
9897 = build_function_type_list (integer_type_node,
9898 float_type_node, float_type_node, NULL_TREE);
9900 types[MIPS_INT_FTYPE_DF_DF]
9901 = build_function_type_list (integer_type_node,
9902 double_type_node, double_type_node, NULL_TREE);
9904 types[MIPS_SF_FTYPE_V2SF]
9905 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
9907 types[MIPS_SF_FTYPE_SF]
9908 = build_function_type_list (float_type_node,
9909 float_type_node, NULL_TREE);
9911 types[MIPS_SF_FTYPE_SF_SF]
9912 = build_function_type_list (float_type_node,
9913 float_type_node, float_type_node, NULL_TREE);
9915 types[MIPS_DF_FTYPE_DF]
9916 = build_function_type_list (double_type_node,
9917 double_type_node, NULL_TREE);
9919 types[MIPS_DF_FTYPE_DF_DF]
9920 = build_function_type_list (double_type_node,
9921 double_type_node, double_type_node, NULL_TREE);
9923 /* Iterate through all of the bdesc arrays, initializing all of the
9924 builtin functions. */
9926 offset = 0;
9927 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9929 if (m->proc == PROCESSOR_DEFAULT || (m->proc == mips_arch))
9930 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
9931 if ((d->target_flags & target_flags) == d->target_flags)
9932 lang_hooks.builtin_function (d->name, types[d->function_type],
9933 d - m->bdesc + offset,
9934 BUILT_IN_MD, NULL, NULL);
9935 offset += m->size;
9939 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
9940 .md pattern and ARGLIST is the list of function arguments. TARGET,
9941 if nonnull, suggests a good place to put the result. */
9943 static rtx
9944 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist)
9946 rtx ops[MAX_RECOG_OPERANDS];
9947 int i;
9949 target = mips_prepare_builtin_target (icode, 0, target);
9950 for (i = 1; i < insn_data[icode].n_operands; i++)
9951 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9953 switch (insn_data[icode].n_operands)
9955 case 2:
9956 emit_insn (GEN_FCN (icode) (target, ops[1]));
9957 break;
9959 case 3:
9960 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2]));
9961 break;
9963 case 4:
9964 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2], ops[3]));
9965 break;
9967 default:
9968 gcc_unreachable ();
9970 return target;
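/* Editorial example (not in the original source): for a two-argument
   builtin such as __builtin_mips_pll_ps, the pattern has three operands
   (one output, two inputs), so the n_operands == 3 case above calls the
   generator for that pattern (gen_mips_pll_ps, assuming the .md pattern is
   named mips_pll_ps) through GEN_FCN as (target, ops[1], ops[2]).  */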
9973 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
9974 function (TYPE says which). ARGLIST is the list of arguments to the
9975 function, ICODE is the instruction that should be used to compare
9976 the first two arguments, and COND is the condition it should test.
9977 TARGET, if nonnull, suggests a good place to put the result. */
9979 static rtx
9980 mips_expand_builtin_movtf (enum mips_builtin_type type,
9981 enum insn_code icode, enum mips_fp_condition cond,
9982 rtx target, tree arglist)
9984 rtx cmp_result, op0, op1;
9986 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9987 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9988 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9989 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
9991 icode = CODE_FOR_mips_cond_move_tf_ps;
9992 target = mips_prepare_builtin_target (icode, 0, target);
9993 if (type == MIPS_BUILTIN_MOVT)
9995 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9996 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9998 else
10000 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10001 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10003 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10004 return target;
10007 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10008 of the comparison instruction and COND is the condition it should test.
10009 ARGLIST is the list of function arguments and TARGET, if nonnull,
10010 suggests a good place to put the boolean result. */
10012 static rtx
10013 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10014 enum insn_code icode, enum mips_fp_condition cond,
10015 rtx target, tree arglist)
10017 rtx label1, label2, if_then_else;
10018 rtx pat, cmp_result, ops[MAX_RECOG_OPERANDS];
10019 rtx target_if_equal, target_if_unequal;
10020 int cmp_value, i;
10022 if (target == 0 || GET_MODE (target) != SImode)
10023 target = gen_reg_rtx (SImode);
10025 /* Prepare the operands to the comparison. */
10026 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10027 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10028 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10030 switch (insn_data[icode].n_operands)
10032 case 4:
10033 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond));
10034 break;
10036 case 6:
10037 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10038 ops[3], ops[4], GEN_INT (cond));
10039 break;
10041 default:
10042 gcc_unreachable ();
10045 /* If the comparison sets more than one register, we define the result
10046 to be 0 if all registers are false and -1 if all registers are true.
10047 The value of the complete result is indeterminate otherwise. It is
10048 possible to test individual registers using SUBREGs.
10050 Set up CMP_RESULT, CMP_VALUE, TARGET_IF_EQUAL and TARGET_IF_UNEQUAL so
10051 that the result should be TARGET_IF_EQUAL if (EQ CMP_RESULT CMP_VALUE)
10052 and TARGET_IF_UNEQUAL otherwise. */
10053 if (builtin_type == MIPS_BUILTIN_CMP_ALL)
10055 cmp_value = -1;
10056 target_if_equal = const1_rtx;
10057 target_if_unequal = const0_rtx;
10059 else
10061 cmp_value = 0;
10062 target_if_equal = const0_rtx;
10063 target_if_unequal = const1_rtx;
10064 if (builtin_type == MIPS_BUILTIN_CMP_UPPER)
10065 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 4);
10066 else if (builtin_type == MIPS_BUILTIN_CMP_LOWER)
10067 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 0);
10070 /* First assume that CMP_RESULT == CMP_VALUE. */
10071 emit_move_insn (target, target_if_equal);
10073 /* Branch to LABEL1 if CMP_RESULT != CMP_VALUE. */
10074 emit_insn (pat);
10075 label1 = gen_label_rtx ();
10076 label2 = gen_label_rtx ();
10077 if_then_else
10078 = gen_rtx_IF_THEN_ELSE (VOIDmode,
10079 gen_rtx_fmt_ee (NE, GET_MODE (cmp_result),
10080 cmp_result, GEN_INT (cmp_value)),
10081 gen_rtx_LABEL_REF (VOIDmode, label1), pc_rtx);
10082 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
10083 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
10084 gen_rtx_LABEL_REF (VOIDmode, label2)));
10085 emit_barrier ();
10086 emit_label (label1);
10088 /* Fix TARGET for CMP_RESULT != CMP_VALUE. */
10089 emit_move_insn (target, target_if_unequal);
10090 emit_label (label2);
10092 return target;
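/* Editorial example (not in the original source): for an "any"-style
   builtin such as __builtin_mips_any_c_eq_4s, CMP_VALUE is 0 and
   TARGET_IF_EQUAL is const0_rtx, so the branch sequence above leaves TARGET
   at 0 when the combined comparison result equals 0 (the "all false" case)
   and sets it to 1 otherwise, which is how the "any" semantics are
   obtained.  */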
10095 #include "gt-mips.h"