1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
61 #define UNSPEC_ADDRESS_P(X) \
62 (GET_CODE (X) == UNSPEC \
63 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
64 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
68 XVECEXP (X, 0, 0)
70 /* Extract the symbol type from UNSPEC wrapper X. */
71 #define UNSPEC_ADDRESS_TYPE(X) \
72 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
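/* (These wrappers are built by mips_unspec_address below: the wrapped
   symbol or label is operand 0 of the UNSPEC and the mips_symbol_type
   is encoded in the unspec number, offset by UNSPEC_ADDRESS_FIRST.)  */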
74 /* The maximum distance between the top of the stack frame and the
75 value $sp has when we save & restore registers.
77 Use a maximum gap of 0x100 in the mips16 case. We can then use
78 unextended instructions to save and restore registers, and to
79 allocate and deallocate the top part of the frame.
81 The value in the !mips16 case must be a SMALL_OPERAND and must
82 preserve the maximum stack alignment. */
83 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
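/* (0x7ff0 is the largest multiple of 16, the maximum stack alignment,
   that still fits in a signed 16-bit immediate, so it meets both of the
   requirements described above.)  */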
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
87 (INSN_P (INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
98 : (INSN))
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
104 : (INSN))
106 /* Execute the following loop body with SUBINSN set to each instruction
107 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
108 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
109 for ((SUBINSN) = SEQ_BEGIN (INSN); \
110 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
111 (SUBINSN) = NEXT_INSN (SUBINSN))
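/* A typical use, as a sketch: walk the whole instruction stream and
   visit the contents of any delayed-branch SEQUENCEs as well:

	for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	  FOR_EACH_SUBINSN (subinsn, insn)
	    if (USEFUL_INSN_P (subinsn))
	      ...examine SUBINSN...  */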
113 /* Classifies an address.
115 ADDRESS_REG
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
119 ADDRESS_LO_SUM
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
123 ADDRESS_CONST_INT
124 A signed 16-bit constant address.
 126    ADDRESS_SYMBOLIC
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
129 ADDRESS_REG,
130 ADDRESS_LO_SUM,
131 ADDRESS_CONST_INT,
132 ADDRESS_SYMBOLIC
135 /* Classifies the prototype of a builtin function. */
136 enum mips_function_type
138 MIPS_V2SF_FTYPE_V2SF,
139 MIPS_V2SF_FTYPE_V2SF_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
142 MIPS_V2SF_FTYPE_SF_SF,
143 MIPS_INT_FTYPE_V2SF_V2SF,
144 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
145 MIPS_INT_FTYPE_SF_SF,
146 MIPS_INT_FTYPE_DF_DF,
147 MIPS_SF_FTYPE_V2SF,
148 MIPS_SF_FTYPE_SF,
149 MIPS_SF_FTYPE_SF_SF,
150 MIPS_DF_FTYPE_DF,
151 MIPS_DF_FTYPE_DF_DF,
153 /* The last type. */
154 MIPS_MAX_FTYPE_MAX
157 /* Specifies how a builtin function should be converted into rtl. */
158 enum mips_builtin_type
160 /* The builtin corresponds directly to an .md pattern. The return
161 value is mapped to operand 0 and the arguments are mapped to
162 operands 1 and above. */
163 MIPS_BUILTIN_DIRECT,
165 /* The builtin corresponds to a comparison instruction followed by
166 a mips_cond_move_tf_ps pattern. The first two arguments are the
167 values to compare and the second two arguments are the vector
168 operands for the movt.ps or movf.ps instruction (in assembly order). */
169 MIPS_BUILTIN_MOVF,
170 MIPS_BUILTIN_MOVT,
172 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
173 of this instruction is the result of the comparison, which has mode
174 CCV2 or CCV4. The function arguments are mapped to operands 1 and
175 above. The function's return value is an SImode boolean that is
176 true under the following conditions:
178 MIPS_BUILTIN_CMP_ANY: one of the registers is true
179 MIPS_BUILTIN_CMP_ALL: all of the registers are true
180 MIPS_BUILTIN_CMP_LOWER: the first register is true
181 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
182 MIPS_BUILTIN_CMP_ANY,
183 MIPS_BUILTIN_CMP_ALL,
184 MIPS_BUILTIN_CMP_UPPER,
185 MIPS_BUILTIN_CMP_LOWER,
187 /* As above, but the instruction only sets a single $fcc register. */
188 MIPS_BUILTIN_CMP_SINGLE
191 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
192 #define MIPS_FP_CONDITIONS(MACRO) \
193 MACRO (f), \
194 MACRO (un), \
195 MACRO (eq), \
196 MACRO (ueq), \
197 MACRO (olt), \
198 MACRO (ult), \
199 MACRO (ole), \
200 MACRO (ule), \
201 MACRO (sf), \
202 MACRO (ngle), \
203 MACRO (seq), \
204 MACRO (ngl), \
205 MACRO (lt), \
206 MACRO (nge), \
207 MACRO (le), \
208 MACRO (ngt)
210 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
211 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
212 enum mips_fp_condition {
213 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
216 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
217 #define STRINGIFY(X) #X
218 static const char *const mips_fp_conditions[] = {
219 MIPS_FP_CONDITIONS (STRINGIFY)
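/* So, for example, the enum value MIPS_FP_COND_eq indexes the string
   "eq", which names the condition used in "c.eq.fmt".  */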
 222 /* A function to save or restore a register.  The first argument is the
223 register and the second is the stack slot. */
224 typedef void (*mips_save_restore_fn) (rtx, rtx);
226 struct mips16_constant;
227 struct mips_arg_info;
228 struct mips_address_info;
229 struct mips_integer_op;
230 struct mips_sim;
232 static enum mips_symbol_type mips_classify_symbol (rtx);
233 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
234 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
235 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
236 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
237 static bool mips_classify_address (struct mips_address_info *, rtx,
238 enum machine_mode, int);
239 static int mips_symbol_insns (enum mips_symbol_type);
240 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
241 static rtx mips_force_temporary (rtx, rtx);
242 static rtx mips_split_symbol (rtx, rtx);
243 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
244 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
245 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
246 static unsigned int mips_build_lower (struct mips_integer_op *,
247 unsigned HOST_WIDE_INT);
248 static unsigned int mips_build_integer (struct mips_integer_op *,
249 unsigned HOST_WIDE_INT);
250 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
251 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
252 static int m16_check_op (rtx, int, int, int);
253 static bool mips_rtx_costs (rtx, int, int, int *);
254 static int mips_address_cost (rtx);
255 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
256 static void mips_load_call_address (rtx, rtx, int);
257 static bool mips_function_ok_for_sibcall (tree, tree);
258 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
259 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
260 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
261 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
262 tree, int, struct mips_arg_info *);
263 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
264 static void mips_set_architecture (const struct mips_cpu_info *);
265 static void mips_set_tune (const struct mips_cpu_info *);
266 static struct machine_function *mips_init_machine_status (void);
267 static void print_operand_reloc (FILE *, rtx, const char **);
268 #if TARGET_IRIX
269 static void irix_output_external_libcall (rtx);
270 #endif
271 static void mips_file_start (void);
272 static void mips_file_end (void);
273 static bool mips_rewrite_small_data_p (rtx);
274 static int mips_small_data_pattern_1 (rtx *, void *);
275 static int mips_rewrite_small_data_1 (rtx *, void *);
276 static bool mips_function_has_gp_insn (void);
277 static unsigned int mips_global_pointer (void);
278 static bool mips_save_reg_p (unsigned int);
279 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
280 mips_save_restore_fn);
281 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
282 static void mips_output_cplocal (void);
283 static void mips_emit_loadgp (void);
284 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
285 static void mips_set_frame_expr (rtx);
286 static rtx mips_frame_set (rtx, rtx);
287 static void mips_save_reg (rtx, rtx);
288 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
289 static void mips_restore_reg (rtx, rtx);
290 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
291 HOST_WIDE_INT, tree);
292 static int symbolic_expression_p (rtx);
293 static void mips_select_rtx_section (enum machine_mode, rtx,
294 unsigned HOST_WIDE_INT);
295 static void mips_function_rodata_section (tree);
296 static bool mips_in_small_data_p (tree);
297 static int mips_fpr_return_fields (tree, tree *);
298 static bool mips_return_in_msb (tree);
299 static rtx mips_return_fpr_pair (enum machine_mode mode,
300 enum machine_mode mode1, HOST_WIDE_INT,
301 enum machine_mode mode2, HOST_WIDE_INT);
302 static rtx mips16_gp_pseudo_reg (void);
303 static void mips16_fp_args (FILE *, int, int);
304 static void build_mips16_function_stub (FILE *);
305 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
306 static void dump_constants (struct mips16_constant *, rtx);
307 static int mips16_insn_length (rtx);
308 static int mips16_rewrite_pool_refs (rtx *, void *);
309 static void mips16_lay_out_constants (void);
310 static void mips_sim_reset (struct mips_sim *);
311 static void mips_sim_init (struct mips_sim *, state_t);
312 static void mips_sim_next_cycle (struct mips_sim *);
313 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
314 static int mips_sim_wait_regs_2 (rtx *, void *);
315 static void mips_sim_wait_regs_1 (rtx *, void *);
316 static void mips_sim_wait_regs (struct mips_sim *, rtx);
317 static void mips_sim_wait_units (struct mips_sim *, rtx);
318 static void mips_sim_wait_insn (struct mips_sim *, rtx);
319 static void mips_sim_record_set (rtx, rtx, void *);
320 static void mips_sim_issue_insn (struct mips_sim *, rtx);
321 static void mips_sim_issue_nop (struct mips_sim *);
322 static void mips_sim_finish_insn (struct mips_sim *, rtx);
323 static void vr4130_avoid_branch_rt_conflict (rtx);
324 static void vr4130_align_insns (void);
325 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
326 static void mips_avoid_hazards (void);
327 static void mips_reorg (void);
328 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
329 static bool mips_matching_cpu_name_p (const char *, const char *);
330 static const struct mips_cpu_info *mips_parse_cpu (const char *, const char *);
331 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
332 static bool mips_return_in_memory (tree, tree);
333 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
334 static void mips_macc_chains_record (rtx);
335 static void mips_macc_chains_reorder (rtx *, int);
336 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
337 static bool vr4130_true_reg_dependence_p (rtx);
338 static bool vr4130_swap_insns_p (rtx, rtx);
339 static void vr4130_reorder (rtx *, int);
340 static void mips_promote_ready (rtx *, int, int);
341 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
342 static int mips_variable_issue (FILE *, int, rtx, int);
343 static int mips_adjust_cost (rtx, rtx, rtx, int);
344 static int mips_issue_rate (void);
345 static int mips_multipass_dfa_lookahead (void);
346 static void mips_init_libfuncs (void);
347 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
348 tree, int *, int);
349 static tree mips_build_builtin_va_list (void);
350 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
351 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
352 tree, bool);
353 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
354 tree, bool);
355 static bool mips_valid_pointer_mode (enum machine_mode);
356 static bool mips_scalar_mode_supported_p (enum machine_mode);
357 static bool mips_vector_mode_supported_p (enum machine_mode);
358 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
359 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
360 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
361 static void mips_init_builtins (void);
362 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree);
363 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
364 enum insn_code, enum mips_fp_condition,
365 rtx, tree);
366 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
367 enum insn_code, enum mips_fp_condition,
368 rtx, tree);
370 /* Structure to be filled in by compute_frame_size with register
371 save masks, and offsets for the current function. */
373 struct mips_frame_info GTY(())
375 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
376 HOST_WIDE_INT var_size; /* # bytes that variables take up */
377 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
378 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
379 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
380 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
381 unsigned int mask; /* mask of saved gp registers */
382 unsigned int fmask; /* mask of saved fp registers */
383 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
384 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
385 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
386 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
387 bool initialized; /* true if frame size already calculated */
388 int num_gp; /* number of gp registers saved */
389 int num_fp; /* number of fp registers saved */
392 struct machine_function GTY(()) {
393 /* Pseudo-reg holding the value of $28 in a mips16 function which
394 refers to GP relative global variables. */
395 rtx mips16_gp_pseudo_rtx;
397 /* Current frame information, calculated by compute_frame_size. */
398 struct mips_frame_info frame;
400 /* The register to use as the global pointer within this function. */
401 unsigned int global_pointer;
403 /* True if mips_adjust_insn_length should ignore an instruction's
404 hazard attribute. */
405 bool ignore_hazard_length_p;
407 /* True if the whole function is suitable for .set noreorder and
408 .set nomacro. */
409 bool all_noreorder_p;
411 /* True if the function is known to have an instruction that needs $gp. */
412 bool has_gp_insn_p;
415 /* Information about a single argument. */
416 struct mips_arg_info
418 /* True if the argument is passed in a floating-point register, or
419 would have been if we hadn't run out of registers. */
420 bool fpr_p;
422 /* The number of words passed in registers, rounded up. */
423 unsigned int reg_words;
425 /* For EABI, the offset of the first register from GP_ARG_FIRST or
426 FP_ARG_FIRST. For other ABIs, the offset of the first register from
427 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
428 comment for details).
430 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
431 on the stack. */
432 unsigned int reg_offset;
434 /* The number of words that must be passed on the stack, rounded up. */
435 unsigned int stack_words;
437 /* The offset from the start of the stack overflow area of the argument's
438 first stack word. Only meaningful when STACK_WORDS is nonzero. */
439 unsigned int stack_offset;
443 /* Information about an address described by mips_address_type.
445 ADDRESS_CONST_INT
446 No fields are used.
448 ADDRESS_REG
449 REG is the base register and OFFSET is the constant offset.
451 ADDRESS_LO_SUM
452 REG is the register that contains the high part of the address,
453 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
454 is the type of OFFSET's symbol.
456 ADDRESS_SYMBOLIC
457 SYMBOL_TYPE is the type of symbol being referenced. */
459 struct mips_address_info
461 enum mips_address_type type;
462 rtx reg;
463 rtx offset;
464 enum mips_symbol_type symbol_type;
468 /* One stage in a constant building sequence. These sequences have
469 the form:
471 A = VALUE[0]
472 A = A CODE[1] VALUE[1]
473 A = A CODE[2] VALUE[2]
476 where A is an accumulator, each CODE[i] is a binary rtl operation
477 and each VALUE[i] is a constant integer. */
478 struct mips_integer_op {
479 enum rtx_code code;
480 unsigned HOST_WIDE_INT value;
484 /* The largest number of operations needed to load an integer constant.
485 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
486 When the lowest bit is clear, we can try, but reject a sequence with
487 an extra SLL at the end. */
488 #define MIPS_MAX_INTEGER_OPS 7
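/* As a simple example, the constant 0x12345678 needs two operations:

	A = 0x12340000		(lui)
	A = A IOR 0x5678	(ori)

   while a worst-case 64-bit constant uses the six-operation
   LUI,ORI,SLL,ORI,SLL,ORI sequence described above.  */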
491 /* Global variables for machine-dependent things. */
493 /* Threshold for data being put into the small data/bss area, instead
494 of the normal data area. */
495 int mips_section_threshold = -1;
497 /* Count the number of .file directives, so that .loc is up to date. */
498 int num_source_filenames = 0;
 500 /* Count the number of sdb-related labels generated (to find block
 501    start and end boundaries).  */
502 int sdb_label_count = 0;
504 /* Next label # for each statement for Silicon Graphics IRIS systems. */
505 int sym_lineno = 0;
507 /* Linked list of all externals that are to be emitted when optimizing
508 for the global pointer if they haven't been declared by the end of
509 the program with an appropriate .comm or initialization. */
511 struct extern_list GTY (())
513 struct extern_list *next; /* next external */
514 const char *name; /* name of the external */
515 int size; /* size in bytes */
518 static GTY (()) struct extern_list *extern_head = 0;
520 /* Name of the file containing the current function. */
521 const char *current_function_file = "";
523 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
524 int set_noreorder;
525 int set_noat;
526 int set_nomacro;
527 int set_volatile;
529 /* The next branch instruction is a branch likely, not branch normal. */
530 int mips_branch_likely;
532 /* The operands passed to the last cmpMM expander. */
533 rtx cmp_operands[2];
535 /* The target cpu for code generation. */
536 enum processor_type mips_arch;
537 const struct mips_cpu_info *mips_arch_info;
539 /* The target cpu for optimization and scheduling. */
540 enum processor_type mips_tune;
541 const struct mips_cpu_info *mips_tune_info;
543 /* Which instruction set architecture to use. */
544 int mips_isa;
546 /* Which ABI to use. */
547 int mips_abi;
549 /* Strings to hold which cpu and instruction set architecture to use. */
550 const char *mips_arch_string; /* for -march=<xxx> */
551 const char *mips_tune_string; /* for -mtune=<xxx> */
552 const char *mips_isa_string; /* for -mips{1,2,3,4} */
553 const char *mips_abi_string; /* for -mabi={32,n32,64,eabi} */
555 /* Whether we are generating mips16 hard float code. In mips16 mode
556 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
557 -msoft-float was not specified by the user, which means that we
558 should arrange to call mips32 hard floating point code. */
559 int mips16_hard_float;
561 const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;
563 /* If TRUE, we split addresses into their high and low parts in the RTL. */
564 int mips_split_addresses;
566 /* Mode used for saving/restoring general purpose registers. */
567 static enum machine_mode gpr_mode;
 569 /* Array recording whether a given hard register can support a given
 570    mode.  */
571 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
573 /* List of all MIPS punctuation characters used by print_operand. */
574 char mips_print_operand_punct[256];
576 /* Map GCC register number to debugger register number. */
577 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
579 /* A copy of the original flag_delayed_branch: see override_options. */
580 static int mips_flag_delayed_branch;
582 static GTY (()) int mips_output_filename_first_time = 1;
584 /* mips_split_p[X] is true if symbols of type X can be split by
585 mips_split_symbol(). */
586 static bool mips_split_p[NUM_SYMBOL_TYPES];
588 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
589 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
590 if they are matched by a special .md file pattern. */
591 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
593 /* Likewise for HIGHs. */
594 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
596 /* Map hard register number to register class */
597 const enum reg_class mips_regno_to_class[] =
599 LEA_REGS, LEA_REGS, M16_NA_REGS, M16_NA_REGS,
600 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
601 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
602 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
603 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
604 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
605 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
606 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
607 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
608 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
609 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
610 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
611 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
612 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
613 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
614 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
615 HI_REG, LO_REG, NO_REGS, ST_REGS,
616 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
617 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
618 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
619 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
620 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
621 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
622 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
623 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
624 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
625 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
626 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
627 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
628 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
629 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
630 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
631 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
632 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
633 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
634 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
635 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
636 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
637 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
638 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
639 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
640 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
641 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
642 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
645 /* Map register constraint character to register class. */
646 enum reg_class mips_char_to_class[256];
648 /* A table describing all the processors gcc knows about. Names are
649 matched in the order listed. The first mention of an ISA level is
650 taken as the canonical name for that ISA.
652 To ease comparison, please keep this table in the same order as
653 gas's mips_cpu_info_table[]. */
654 const struct mips_cpu_info mips_cpu_info_table[] = {
655 /* Entries for generic ISAs */
656 { "mips1", PROCESSOR_R3000, 1 },
657 { "mips2", PROCESSOR_R6000, 2 },
658 { "mips3", PROCESSOR_R4000, 3 },
659 { "mips4", PROCESSOR_R8000, 4 },
660 { "mips32", PROCESSOR_4KC, 32 },
661 { "mips32r2", PROCESSOR_M4K, 33 },
662 { "mips64", PROCESSOR_5KC, 64 },
664 /* MIPS I */
665 { "r3000", PROCESSOR_R3000, 1 },
666 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
667 { "r3900", PROCESSOR_R3900, 1 },
669 /* MIPS II */
670 { "r6000", PROCESSOR_R6000, 2 },
672 /* MIPS III */
673 { "r4000", PROCESSOR_R4000, 3 },
674 { "vr4100", PROCESSOR_R4100, 3 },
675 { "vr4111", PROCESSOR_R4111, 3 },
676 { "vr4120", PROCESSOR_R4120, 3 },
677 { "vr4130", PROCESSOR_R4130, 3 },
678 { "vr4300", PROCESSOR_R4300, 3 },
679 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
680 { "r4600", PROCESSOR_R4600, 3 },
681 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
682 { "r4650", PROCESSOR_R4650, 3 },
684 /* MIPS IV */
685 { "r8000", PROCESSOR_R8000, 4 },
686 { "vr5000", PROCESSOR_R5000, 4 },
687 { "vr5400", PROCESSOR_R5400, 4 },
688 { "vr5500", PROCESSOR_R5500, 4 },
689 { "rm7000", PROCESSOR_R7000, 4 },
690 { "rm9000", PROCESSOR_R9000, 4 },
692 /* MIPS32 */
693 { "4kc", PROCESSOR_4KC, 32 },
694 { "4kp", PROCESSOR_4KC, 32 }, /* = 4kc */
696 /* MIPS32 Release 2 */
697 { "m4k", PROCESSOR_M4K, 33 },
699 /* MIPS64 */
700 { "5kc", PROCESSOR_5KC, 64 },
701 { "20kc", PROCESSOR_20KC, 64 },
702 { "sb1", PROCESSOR_SB1, 64 },
703 { "sr71000", PROCESSOR_SR71000, 64 },
705 /* End marker */
706 { 0, 0, 0 }
709 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
710 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
711 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
712 #endif
714 /* Initialize the GCC target structure. */
715 #undef TARGET_ASM_ALIGNED_HI_OP
716 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
717 #undef TARGET_ASM_ALIGNED_SI_OP
718 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
719 #undef TARGET_ASM_ALIGNED_DI_OP
720 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
722 #undef TARGET_ASM_FUNCTION_PROLOGUE
723 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
724 #undef TARGET_ASM_FUNCTION_EPILOGUE
725 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
726 #undef TARGET_ASM_SELECT_RTX_SECTION
727 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
728 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
729 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
731 #undef TARGET_SCHED_REORDER
732 #define TARGET_SCHED_REORDER mips_sched_reorder
733 #undef TARGET_SCHED_VARIABLE_ISSUE
734 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
735 #undef TARGET_SCHED_ADJUST_COST
736 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
737 #undef TARGET_SCHED_ISSUE_RATE
738 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
739 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
740 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
741 mips_multipass_dfa_lookahead
743 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
744 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
746 #undef TARGET_VALID_POINTER_MODE
747 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
748 #undef TARGET_RTX_COSTS
749 #define TARGET_RTX_COSTS mips_rtx_costs
750 #undef TARGET_ADDRESS_COST
751 #define TARGET_ADDRESS_COST mips_address_cost
753 #undef TARGET_IN_SMALL_DATA_P
754 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
756 #undef TARGET_MACHINE_DEPENDENT_REORG
757 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
759 #undef TARGET_ASM_FILE_START
760 #undef TARGET_ASM_FILE_END
761 #define TARGET_ASM_FILE_START mips_file_start
762 #define TARGET_ASM_FILE_END mips_file_end
763 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
764 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
766 #undef TARGET_INIT_LIBFUNCS
767 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
769 #undef TARGET_BUILD_BUILTIN_VA_LIST
770 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
771 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
772 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
774 #undef TARGET_PROMOTE_FUNCTION_ARGS
775 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
776 #undef TARGET_PROMOTE_FUNCTION_RETURN
777 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
778 #undef TARGET_PROMOTE_PROTOTYPES
779 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
781 #undef TARGET_RETURN_IN_MEMORY
782 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
783 #undef TARGET_RETURN_IN_MSB
784 #define TARGET_RETURN_IN_MSB mips_return_in_msb
786 #undef TARGET_ASM_OUTPUT_MI_THUNK
787 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
788 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
789 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
791 #undef TARGET_SETUP_INCOMING_VARARGS
792 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
793 #undef TARGET_STRICT_ARGUMENT_NAMING
794 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
795 #undef TARGET_MUST_PASS_IN_STACK
796 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
797 #undef TARGET_PASS_BY_REFERENCE
798 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
799 #undef TARGET_CALLEE_COPIES
800 #define TARGET_CALLEE_COPIES mips_callee_copies
802 #undef TARGET_VECTOR_MODE_SUPPORTED_P
803 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
805 #undef TARGET_SCALAR_MODE_SUPPORTED_P
806 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
808 #undef TARGET_INIT_BUILTINS
809 #define TARGET_INIT_BUILTINS mips_init_builtins
810 #undef TARGET_EXPAND_BUILTIN
811 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
813 struct gcc_target targetm = TARGET_INITIALIZER;
815 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
817 static enum mips_symbol_type
818 mips_classify_symbol (rtx x)
820 if (GET_CODE (x) == LABEL_REF)
822 if (TARGET_MIPS16)
823 return SYMBOL_CONSTANT_POOL;
824 if (TARGET_ABICALLS)
825 return SYMBOL_GOT_LOCAL;
826 return SYMBOL_GENERAL;
829 gcc_assert (GET_CODE (x) == SYMBOL_REF);
831 if (CONSTANT_POOL_ADDRESS_P (x))
833 if (TARGET_MIPS16)
834 return SYMBOL_CONSTANT_POOL;
836 if (TARGET_ABICALLS)
837 return SYMBOL_GOT_LOCAL;
839 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
840 return SYMBOL_SMALL_DATA;
842 return SYMBOL_GENERAL;
845 if (SYMBOL_REF_SMALL_P (x))
846 return SYMBOL_SMALL_DATA;
848 if (TARGET_ABICALLS)
850 if (SYMBOL_REF_DECL (x) == 0)
851 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
853 /* There are three cases to consider:
855 - o32 PIC (either with or without explicit relocs)
856 - n32/n64 PIC without explicit relocs
857 - n32/n64 PIC with explicit relocs
859 In the first case, both local and global accesses will use an
860 R_MIPS_GOT16 relocation. We must correctly predict which of
861 the two semantics (local or global) the assembler and linker
862 will apply. The choice doesn't depend on the symbol's
863 visibility, so we deliberately ignore decl_visibility and
864 binds_local_p here.
866 In the second case, the assembler will not use R_MIPS_GOT16
867 relocations, but it chooses between local and global accesses
868 in the same way as for o32 PIC.
870 In the third case we have more freedom since both forms of
871 access will work for any kind of symbol. However, there seems
872 little point in doing things differently. */
873 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
874 return SYMBOL_GOT_GLOBAL;
876 return SYMBOL_GOT_LOCAL;
879 return SYMBOL_GENERAL;
883 /* Split X into a base and a constant offset, storing them in *BASE
884 and *OFFSET respectively. */
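/* For example, if X is (const (plus (symbol_ref "foo") (const_int 4))),
   *BASE becomes (symbol_ref "foo") and *OFFSET becomes 4.  A bare
   SYMBOL_REF or CONST_INT comes back unchanged with a zero offset.  */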
886 static void
887 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
889 *offset = 0;
891 if (GET_CODE (x) == CONST)
892 x = XEXP (x, 0);
894 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
896 *offset += INTVAL (XEXP (x, 1));
897 x = XEXP (x, 0);
899 *base = x;
903 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
904 to the same object as SYMBOL. */
906 static bool
907 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
909 if (GET_CODE (symbol) != SYMBOL_REF)
910 return false;
912 if (CONSTANT_POOL_ADDRESS_P (symbol)
913 && offset >= 0
914 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
915 return true;
917 if (SYMBOL_REF_DECL (symbol) != 0
918 && offset >= 0
919 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
920 return true;
922 return false;
926 /* Return true if X is a symbolic constant that can be calculated in
927 the same way as a bare symbol. If it is, store the type of the
928 symbol in *SYMBOL_TYPE. */
930 bool
931 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
933 HOST_WIDE_INT offset;
935 mips_split_const (x, &x, &offset);
936 if (UNSPEC_ADDRESS_P (x))
937 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
938 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
939 *symbol_type = mips_classify_symbol (x);
940 else
941 return false;
943 if (offset == 0)
944 return true;
946 /* Check whether a nonzero offset is valid for the underlying
947 relocations. */
948 switch (*symbol_type)
950 case SYMBOL_GENERAL:
951 case SYMBOL_64_HIGH:
952 case SYMBOL_64_MID:
953 case SYMBOL_64_LOW:
954 /* If the target has 64-bit pointers and the object file only
955 supports 32-bit symbols, the values of those symbols will be
956 sign-extended. In this case we can't allow an arbitrary offset
957 in case the 32-bit value X + OFFSET has a different sign from X. */
958 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
959 return mips_offset_within_object_p (x, offset);
961 /* In other cases the relocations can handle any offset. */
962 return true;
964 case SYMBOL_CONSTANT_POOL:
965 /* Allow constant pool references to be converted to LABEL+CONSTANT.
966 In this case, we no longer have access to the underlying constant,
967 but the original symbol-based access was known to be valid. */
968 if (GET_CODE (x) == LABEL_REF)
969 return true;
971 /* Fall through. */
973 case SYMBOL_SMALL_DATA:
974 /* Make sure that the offset refers to something within the
975 underlying object. This should guarantee that the final
976 PC- or GP-relative offset is within the 16-bit limit. */
977 return mips_offset_within_object_p (x, offset);
979 case SYMBOL_GOT_LOCAL:
980 case SYMBOL_GOTOFF_PAGE:
981 /* The linker should provide enough local GOT entries for a
982 16-bit offset. Larger offsets may lead to GOT overflow. */
983 return SMALL_OPERAND (offset);
985 case SYMBOL_GOT_GLOBAL:
986 case SYMBOL_GOTOFF_GLOBAL:
987 case SYMBOL_GOTOFF_CALL:
988 case SYMBOL_GOTOFF_LOADGP:
989 return false;
991 gcc_unreachable ();
995 /* Return true if X is a symbolic constant whose value is not split
996 into separate relocations. */
998 bool
999 mips_atomic_symbolic_constant_p (rtx x)
1001 enum mips_symbol_type type;
1002 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
1006 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1009 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1011 if (regno >= FIRST_PSEUDO_REGISTER)
1013 if (!strict)
1014 return true;
1015 regno = reg_renumber[regno];
1018 /* These fake registers will be eliminated to either the stack or
1019 hard frame pointer, both of which are usually valid base registers.
1020 Reload deals with the cases where the eliminated form isn't valid. */
1021 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1022 return true;
1024 /* In mips16 mode, the stack pointer can only address word and doubleword
1025 values, nothing smaller. There are two problems here:
1027 (a) Instantiating virtual registers can introduce new uses of the
1028 stack pointer. If these virtual registers are valid addresses,
1029 the stack pointer should be too.
1031 (b) Most uses of the stack pointer are not made explicit until
1032 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1033 We don't know until that stage whether we'll be eliminating to the
1034 stack pointer (which needs the restriction) or the hard frame
1035 pointer (which doesn't).
1037 All in all, it seems more consistent to only enforce this restriction
1038 during and after reload. */
1039 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1040 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1042 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1046 /* Return true if X is a valid base register for the given mode.
1047 Allow only hard registers if STRICT. */
1049 static bool
1050 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1052 if (!strict && GET_CODE (x) == SUBREG)
1053 x = SUBREG_REG (x);
1055 return (REG_P (x)
1056 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1060 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1061 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1063 static bool
1064 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1065 enum machine_mode mode)
1067 switch (symbol_type)
1069 case SYMBOL_GENERAL:
1070 return !TARGET_MIPS16;
1072 case SYMBOL_SMALL_DATA:
1073 return true;
1075 case SYMBOL_CONSTANT_POOL:
1076 /* PC-relative addressing is only available for lw and ld. */
1077 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1079 case SYMBOL_GOT_LOCAL:
1080 return true;
1082 case SYMBOL_GOT_GLOBAL:
1083 /* The address will have to be loaded from the GOT first. */
1084 return false;
1086 case SYMBOL_GOTOFF_PAGE:
1087 case SYMBOL_GOTOFF_GLOBAL:
1088 case SYMBOL_GOTOFF_CALL:
1089 case SYMBOL_GOTOFF_LOADGP:
1090 case SYMBOL_64_HIGH:
1091 case SYMBOL_64_MID:
1092 case SYMBOL_64_LOW:
1093 return true;
1095 gcc_unreachable ();
1099 /* Return true if X is a valid address for machine mode MODE. If it is,
1100 fill in INFO appropriately. STRICT is true if we should only accept
1101 hard base registers. */
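/* For example, (plus (reg $sp) (const_int 16)) classifies as ADDRESS_REG
   with REG = $sp and OFFSET = 16, while a LO_SUM of a valid base register
   and a %lo-compatible symbolic constant classifies as ADDRESS_LO_SUM.  */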
1103 static bool
1104 mips_classify_address (struct mips_address_info *info, rtx x,
1105 enum machine_mode mode, int strict)
1107 switch (GET_CODE (x))
1109 case REG:
1110 case SUBREG:
1111 info->type = ADDRESS_REG;
1112 info->reg = x;
1113 info->offset = const0_rtx;
1114 return mips_valid_base_register_p (info->reg, mode, strict);
1116 case PLUS:
1117 info->type = ADDRESS_REG;
1118 info->reg = XEXP (x, 0);
1119 info->offset = XEXP (x, 1);
1120 return (mips_valid_base_register_p (info->reg, mode, strict)
1121 && const_arith_operand (info->offset, VOIDmode));
1123 case LO_SUM:
1124 info->type = ADDRESS_LO_SUM;
1125 info->reg = XEXP (x, 0);
1126 info->offset = XEXP (x, 1);
1127 return (mips_valid_base_register_p (info->reg, mode, strict)
1128 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1129 && mips_symbolic_address_p (info->symbol_type, mode)
1130 && mips_lo_relocs[info->symbol_type] != 0);
1132 case CONST_INT:
1133 /* Small-integer addresses don't occur very often, but they
1134 are legitimate if $0 is a valid base register. */
1135 info->type = ADDRESS_CONST_INT;
1136 return !TARGET_MIPS16 && SMALL_INT (x);
1138 case CONST:
1139 case LABEL_REF:
1140 case SYMBOL_REF:
1141 info->type = ADDRESS_SYMBOLIC;
1142 return (mips_symbolic_constant_p (x, &info->symbol_type)
1143 && mips_symbolic_address_p (info->symbol_type, mode)
1144 && !mips_split_p[info->symbol_type]);
1146 default:
1147 return false;
1151 /* Return the number of instructions needed to load a symbol of the
1152 given type into a register. If valid in an address, the same number
1153 of instructions are needed for loads and stores. Treat extended
1154 mips16 instructions as two instructions. */
1156 static int
1157 mips_symbol_insns (enum mips_symbol_type type)
1159 switch (type)
1161 case SYMBOL_GENERAL:
1162 /* In mips16 code, general symbols must be fetched from the
1163 constant pool. */
1164 if (TARGET_MIPS16)
1165 return 0;
1167 /* When using 64-bit symbols, we need 5 preparatory instructions,
1168 such as:
1170 lui $at,%highest(symbol)
1171 daddiu $at,$at,%higher(symbol)
1172 dsll $at,$at,16
1173 daddiu $at,$at,%hi(symbol)
1174 dsll $at,$at,16
1176 The final address is then $at + %lo(symbol). With 32-bit
1177 symbols we just need a preparatory lui. */
1178 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1180 case SYMBOL_SMALL_DATA:
1181 return 1;
1183 case SYMBOL_CONSTANT_POOL:
1184 /* This case is for mips16 only. Assume we'll need an
1185 extended instruction. */
1186 return 2;
1188 case SYMBOL_GOT_LOCAL:
1189 case SYMBOL_GOT_GLOBAL:
1190 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1191 the local/global classification is accurate. See override_options
1192 for details.
1194 The worst cases are:
1196 (1) For local symbols when generating o32 or o64 code. The assembler
1197 will use:
1199 lw $at,%got(symbol)
1202 ...and the final address will be $at + %lo(symbol).
1204 (2) For global symbols when -mxgot. The assembler will use:
1206 lui $at,%got_hi(symbol)
1207 (d)addu $at,$at,$gp
1209 ...and the final address will be $at + %got_lo(symbol). */
1210 return 3;
1212 case SYMBOL_GOTOFF_PAGE:
1213 case SYMBOL_GOTOFF_GLOBAL:
1214 case SYMBOL_GOTOFF_CALL:
1215 case SYMBOL_GOTOFF_LOADGP:
1216 case SYMBOL_64_HIGH:
1217 case SYMBOL_64_MID:
1218 case SYMBOL_64_LOW:
1219 /* Check whether the offset is a 16- or 32-bit value. */
1220 return mips_split_p[type] ? 2 : 1;
1222 gcc_unreachable ();
 1225 /* Return true if X is a legitimate $sp-based address for mode MODE.  */
1227 bool
1228 mips_stack_address_p (rtx x, enum machine_mode mode)
1230 struct mips_address_info addr;
1232 return (mips_classify_address (&addr, x, mode, false)
1233 && addr.type == ADDRESS_REG
1234 && addr.reg == stack_pointer_rtx);
1237 /* Return true if a value at OFFSET bytes from BASE can be accessed
1238 using an unextended mips16 instruction. MODE is the mode of the
1239 value.
1241 Usually the offset in an unextended instruction is a 5-bit field.
1242 The offset is unsigned and shifted left once for HIs, twice
1243 for SIs, and so on. An exception is SImode accesses off the
1244 stack pointer, which have an 8-bit immediate field. */
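/* For SImode that means an unextended offset range of 0 to 124 in steps
   of 4 from an ordinary base register, or 0 to 1020 in steps of 4 when
   the base is the stack pointer.  */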
1246 static bool
1247 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1249 if (TARGET_MIPS16
1250 && GET_CODE (offset) == CONST_INT
1251 && INTVAL (offset) >= 0
1252 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1254 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1255 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1256 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1258 return false;
1262 /* Return the number of instructions needed to load or store a value
1263 of mode MODE at X. Return 0 if X isn't valid for MODE.
1265 For mips16 code, count extended instructions as two instructions. */
1268 mips_address_insns (rtx x, enum machine_mode mode)
1270 struct mips_address_info addr;
1271 int factor;
1273 if (mode == BLKmode)
1274 /* BLKmode is used for single unaligned loads and stores. */
1275 factor = 1;
1276 else
1277 /* Each word of a multi-word value will be accessed individually. */
1278 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1280 if (mips_classify_address (&addr, x, mode, false))
1281 switch (addr.type)
1283 case ADDRESS_REG:
1284 if (TARGET_MIPS16
1285 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1286 return factor * 2;
1287 return factor;
1289 case ADDRESS_LO_SUM:
1290 return (TARGET_MIPS16 ? factor * 2 : factor);
1292 case ADDRESS_CONST_INT:
1293 return factor;
1295 case ADDRESS_SYMBOLIC:
1296 return factor * mips_symbol_insns (addr.symbol_type);
1298 return 0;
1302 /* Likewise for constant X. */
1305 mips_const_insns (rtx x)
1307 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1308 enum mips_symbol_type symbol_type;
1309 HOST_WIDE_INT offset;
1311 switch (GET_CODE (x))
1313 case HIGH:
1314 if (TARGET_MIPS16
1315 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1316 || !mips_split_p[symbol_type])
1317 return 0;
1319 return 1;
1321 case CONST_INT:
1322 if (TARGET_MIPS16)
1323 /* Unsigned 8-bit constants can be loaded using an unextended
1324 LI instruction. Unsigned 16-bit constants can be loaded
1325 using an extended LI. Negative constants must be loaded
1326 using LI and then negated. */
1327 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1328 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1329 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1330 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1331 : 0);
1333 return mips_build_integer (codes, INTVAL (x));
1335 case CONST_DOUBLE:
1336 case CONST_VECTOR:
1337 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1339 case CONST:
1340 if (CONST_GP_P (x))
1341 return 1;
1343 /* See if we can refer to X directly. */
1344 if (mips_symbolic_constant_p (x, &symbol_type))
1345 return mips_symbol_insns (symbol_type);
1347 /* Otherwise try splitting the constant into a base and offset.
1348 16-bit offsets can be added using an extra addiu. Larger offsets
1349 must be calculated separately and then added to the base. */
1350 mips_split_const (x, &x, &offset);
1351 if (offset != 0)
1353 int n = mips_const_insns (x);
1354 if (n != 0)
1356 if (SMALL_OPERAND (offset))
1357 return n + 1;
1358 else
1359 return n + 1 + mips_build_integer (codes, offset);
1362 return 0;
1364 case SYMBOL_REF:
1365 case LABEL_REF:
1366 return mips_symbol_insns (mips_classify_symbol (x));
1368 default:
1369 return 0;
1374 /* Return the number of instructions needed for memory reference X.
1375 Count extended mips16 instructions as two instructions. */
1378 mips_fetch_insns (rtx x)
1380 gcc_assert (MEM_P (x));
1381 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1385 /* Return the number of instructions needed for an integer division. */
1388 mips_idiv_insns (void)
1390 int count;
1392 count = 1;
1393 if (TARGET_CHECK_ZERO_DIV)
1395 if (GENERATE_DIVIDE_TRAPS)
1396 count++;
1397 else
1398 count += 2;
1401 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1402 count++;
1403 return count;
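/* So a division with -mcheck-zero-division costs two instructions when
   divide traps are available and three when the zero check needs an
   explicit branch, plus one more on targets that need the R4000/R4400
   errata workarounds.  */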
1406 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1407 returns a nonzero value if X is a legitimate address for a memory
1408 operand of the indicated MODE. STRICT is nonzero if this function
1409 is called during reload. */
1411 bool
1412 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1414 struct mips_address_info addr;
1416 return mips_classify_address (&addr, x, mode, strict);
 1420 /* Copy VALUE to a register and return that register.  If new pseudos
1421 are allowed, copy it into a new register, otherwise use DEST. */
1423 static rtx
1424 mips_force_temporary (rtx dest, rtx value)
1426 if (!no_new_pseudos)
1427 return force_reg (Pmode, value);
1428 else
1430 emit_move_insn (copy_rtx (dest), value);
1431 return dest;
1436 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1437 and is used to load the high part into a register. */
1439 static rtx
1440 mips_split_symbol (rtx temp, rtx addr)
1442 rtx high;
1444 if (TARGET_MIPS16)
1445 high = mips16_gp_pseudo_reg ();
1446 else
1447 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1448 return gen_rtx_LO_SUM (Pmode, high, addr);
1452 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1453 type SYMBOL_TYPE. */
1456 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1458 rtx base;
1459 HOST_WIDE_INT offset;
1461 mips_split_const (address, &base, &offset);
1462 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1463 UNSPEC_ADDRESS_FIRST + symbol_type);
1464 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1468 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1469 high part to BASE and return the result. Just return BASE otherwise.
1470 TEMP is available as a temporary register if needed.
1472 The returned expression can be used as the first operand to a LO_SUM. */
1474 static rtx
1475 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1476 enum mips_symbol_type symbol_type)
1478 if (mips_split_p[symbol_type])
1480 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1481 addr = mips_force_temporary (temp, addr);
1482 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1484 return base;
1488 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1489 mips_force_temporary; it is only needed when OFFSET is not a
1490 SMALL_OPERAND. */
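/* For example, in non-mips16 code an offset of 0x12348 is too big for a
   single signed 16-bit immediate, so 0x10000 (the rounded high part) is
   added to REG via TEMP and the remaining 0x2348 is kept as the
   immediate offset.  */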
1492 static rtx
1493 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1495 if (!SMALL_OPERAND (offset))
1497 rtx high;
1498 if (TARGET_MIPS16)
1500 /* Load the full offset into a register so that we can use
1501 an unextended instruction for the address itself. */
1502 high = GEN_INT (offset);
1503 offset = 0;
1505 else
1507 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1508 high = GEN_INT (CONST_HIGH_PART (offset));
1509 offset = CONST_LOW_PART (offset);
1511 high = mips_force_temporary (temp, high);
1512 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1514 return plus_constant (reg, offset);
1518 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1519 be legitimized in a way that the generic machinery might not expect,
1520 put the new address in *XLOC and return true. MODE is the mode of
1521 the memory being accessed. */
1523 bool
1524 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1526 enum mips_symbol_type symbol_type;
 1528 /* See if the address can be split into a high part and a LO_SUM.  */
1529 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1530 && mips_symbolic_address_p (symbol_type, mode)
1531 && mips_split_p[symbol_type])
1533 *xloc = mips_split_symbol (0, *xloc);
1534 return true;
1537 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1539 /* Handle REG + CONSTANT using mips_add_offset. */
1540 rtx reg;
1542 reg = XEXP (*xloc, 0);
1543 if (!mips_valid_base_register_p (reg, mode, 0))
1544 reg = copy_to_mode_reg (Pmode, reg);
1545 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1546 return true;
1549 return false;
1553 /* Subroutine of mips_build_integer (with the same interface).
1554 Assume that the final action in the sequence should be a left shift. */
1556 static unsigned int
1557 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1559 unsigned int i, shift;
1561 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1562 since signed numbers are easier to load than unsigned ones. */
1563 shift = 0;
1564 while ((value & 1) == 0)
1565 value /= 2, shift++;
1567 i = mips_build_integer (codes, value);
1568 codes[i].code = ASHIFT;
1569 codes[i].value = shift;
1570 return i + 1;
1574 /* As for mips_build_shift, but assume that the final action will be
1575 an IOR or PLUS operation. */
1577 static unsigned int
1578 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1580 unsigned HOST_WIDE_INT high;
1581 unsigned int i;
1583 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1584 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1586 /* The constant is too complex to load with a simple lui/ori pair
1587 so our goal is to clear as many trailing zeros as possible.
1588 In this case, we know bit 16 is set and that the low 16 bits
1589 form a negative number. If we subtract that number from VALUE,
1590 we will clear at least the lowest 17 bits, maybe more. */
1591 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1592 codes[i].code = PLUS;
1593 codes[i].value = CONST_LOW_PART (value);
1595 else
1597 i = mips_build_integer (codes, high);
1598 codes[i].code = IOR;
1599 codes[i].value = value & 0xffff;
1601 return i + 1;
1605 /* Fill CODES with a sequence of rtl operations to load VALUE.
1606 Return the number of operations needed. */
1608 static unsigned int
1609 mips_build_integer (struct mips_integer_op *codes,
1610 unsigned HOST_WIDE_INT value)
1612 if (SMALL_OPERAND (value)
1613 || SMALL_OPERAND_UNSIGNED (value)
1614 || LUI_OPERAND (value))
1616 /* The value can be loaded with a single instruction. */
1617 codes[0].code = UNKNOWN;
1618 codes[0].value = value;
1619 return 1;
1621 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1623 /* Either the constant is a simple LUI/ORI combination or its
1624 lowest bit is set. We don't want to shift in this case. */
1625 return mips_build_lower (codes, value);
1627 else if ((value & 0xffff) == 0)
1629 /* The constant will need at least three actions. The lowest
1630 16 bits are clear, so the final action will be a shift. */
1631 return mips_build_shift (codes, value);
1633 else
1635 /* The final action could be a shift, add or inclusive OR.
1636 Rather than use a complex condition to select the best
1637 approach, try both mips_build_shift and mips_build_lower
1638 and pick the one that gives the shortest sequence.
1639 Note that this case is only used once per constant. */
1640 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1641 unsigned int cost, alt_cost;
1643 cost = mips_build_shift (codes, value);
1644 alt_cost = mips_build_lower (alt_codes, value);
1645 if (alt_cost < cost)
1647 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1648 cost = alt_cost;
1650 return cost;
1655 /* Move VALUE into register DEST. */
1657 static void
1658 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1660 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1661 enum machine_mode mode;
1662 unsigned int i, cost;
1663 rtx x;
1665 mode = GET_MODE (dest);
1666 cost = mips_build_integer (codes, value);
1668 /* Apply each binary operation to X. Invariant: X is a legitimate
1669 source operand for a SET pattern. */
1670 x = GEN_INT (codes[0].value);
1671 for (i = 1; i < cost; i++)
1673 if (no_new_pseudos)
1674 emit_move_insn (dest, x), x = dest;
1675 else
1676 x = force_reg (mode, x);
1677 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1680 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1684 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1685 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1686 move_operand. */
1688 static void
1689 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1691 rtx base;
1692 HOST_WIDE_INT offset;
1693 enum mips_symbol_type symbol_type;
1695 /* Split moves of big integers into smaller pieces. In mips16 code,
1696 it's better to force the constant into memory instead. */
1697 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1699 mips_move_integer (dest, INTVAL (src));
1700 return;
1703 /* See if the symbol can be split. For mips16, this is often worse than
1704 forcing it in the constant pool since it needs the single-register form
1705 of addiu or daddiu. */
1706 if (!TARGET_MIPS16
1707 && mips_symbolic_constant_p (src, &symbol_type)
1708 && mips_split_p[symbol_type])
1710 emit_move_insn (dest, mips_split_symbol (dest, src));
1711 return;
1714 /* If we have (const (plus symbol offset)), load the symbol first
1715 and then add in the offset. This is usually better than forcing
1716 the constant into memory, at least in non-mips16 code. */
1717 mips_split_const (src, &base, &offset);
1718 if (!TARGET_MIPS16
1719 && offset != 0
1720 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1722 base = mips_force_temporary (dest, base);
1723 emit_move_insn (dest, mips_add_offset (0, base, offset));
1724 return;
1727 src = force_const_mem (mode, src);
1729 /* When using explicit relocs, constant pool references are sometimes
1730 not legitimate addresses. */
1731 if (!memory_operand (src, VOIDmode))
1732 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1733 emit_move_insn (dest, src);
1737 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1738 sequence that is valid. */
1740 bool
1741 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
1743 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1745 emit_move_insn (dest, force_reg (mode, src));
1746 return true;
1749 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1750 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1751 && REG_P (src) && MD_REG_P (REGNO (src))
1752 && REG_P (dest) && GP_REG_P (REGNO (dest)))
1754 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1755 if (GET_MODE_SIZE (mode) <= 4)
1756 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1757 gen_rtx_REG (SImode, REGNO (src)),
1758 gen_rtx_REG (SImode, other_regno)));
1759 else
1760 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1761 gen_rtx_REG (DImode, REGNO (src)),
1762 gen_rtx_REG (DImode, other_regno)));
1763 return true;
1766 /* We need to deal with constants that would be legitimate
1767 immediate_operands but not legitimate move_operands. */
1768 if (CONSTANT_P (src) && !move_operand (src, mode))
1770 mips_legitimize_const_move (mode, dest, src);
1771 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1772 return true;
1774 return false;
1777 /* We need a lot of little routines to check constant values on the
1778 mips16. These are used to figure out how long the instruction will
1779 be. It would be much better to do this using constraints, but
1780 there aren't nearly enough letters available. */
1782 static int
1783 m16_check_op (rtx op, int low, int high, int mask)
1785 return (GET_CODE (op) == CONST_INT
1786 && INTVAL (op) >= low
1787 && INTVAL (op) <= high
1788 && (INTVAL (op) & mask) == 0);
1792 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1794 return m16_check_op (op, 0x1, 0x8, 0);
1798 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1800 return m16_check_op (op, - 0x8, 0x7, 0);
1804 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1806 return m16_check_op (op, - 0x7, 0x8, 0);
1810 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1812 return m16_check_op (op, - 0x10, 0xf, 0);
1816 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1818 return m16_check_op (op, - 0xf, 0x10, 0);
1822 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1824 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
1828 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1830 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
1834 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1836 return m16_check_op (op, - 0x80, 0x7f, 0);
1840 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1842 return m16_check_op (op, - 0x7f, 0x80, 0);
1846 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1848 return m16_check_op (op, 0x0, 0xff, 0);
1852 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1854 return m16_check_op (op, - 0xff, 0x0, 0);
1858 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1860 return m16_check_op (op, - 0x1, 0xfe, 0);
1864 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1866 return m16_check_op (op, 0x0, 0xff << 2, 3);
1870 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1872 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
1876 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1878 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
1882 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1884 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
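/* Editor's note: illustrative example, not part of the original file.
   m16_simm8_8 above accepts CONST_INTs in the range [(-0x80) << 3,
   0x7f << 3] = [-1024, 1016] whose low three bits are clear, i.e.
   values that fit a signed 8-bit field scaled by 8.  For instance 1016
   and -1024 are accepted, while 1020 (not a multiple of 8) and 1024
   (out of range) are rejected and would need the longer encoding that
   the comment above these routines alludes to.  */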
1887 static bool
1888 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
1890 enum machine_mode mode = GET_MODE (x);
1892 switch (code)
1894 case CONST_INT:
1895 if (!TARGET_MIPS16)
1897 /* Always return 0: non-mips16 instructions are all the same
1898 size, and hence all have the same cost, according to Richard
1899 Kenner. */
1900 *total = 0;
1901 return true;
1904 /* A number between 1 and 8 inclusive is efficient for a shift.
1905 Otherwise, we will need an extended instruction. */
1906 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
1907 || (outer_code) == LSHIFTRT)
1909 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
1910 *total = 0;
1911 else
1912 *total = COSTS_N_INSNS (1);
1913 return true;
1916 /* We can use cmpi for an xor with an unsigned 16 bit value. */
1917 if ((outer_code) == XOR
1918 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
1920 *total = 0;
1921 return true;
1924 /* We may be able to use slt or sltu for a comparison with a
1925 signed 16 bit value. (The boundary conditions aren't quite
1926 right, but this is just a heuristic anyhow.) */
1927 if (((outer_code) == LT || (outer_code) == LE
1928 || (outer_code) == GE || (outer_code) == GT
1929 || (outer_code) == LTU || (outer_code) == LEU
1930 || (outer_code) == GEU || (outer_code) == GTU)
1931 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
1933 *total = 0;
1934 return true;
1937 /* Equality comparisons with 0 are cheap. */
1938 if (((outer_code) == EQ || (outer_code) == NE)
1939 && INTVAL (x) == 0)
1941 *total = 0;
1942 return true;
1945 /* Constants in the range 0...255 can be loaded with an unextended
1946 instruction. They are therefore as cheap as a register move.
1948 Given the choice between "li R1,0...255" and "move R1,R2"
1949 (where R2 is a known constant), it is usually better to use "li",
1950 since we do not want to unnecessarily extend the lifetime of R2. */
1951 if (outer_code == SET
1952 && INTVAL (x) >= 0
1953 && INTVAL (x) < 256)
1955 *total = 0;
1956 return true;
1959 /* Otherwise fall through to the handling below. */
1961 case CONST:
1962 case SYMBOL_REF:
1963 case LABEL_REF:
1964 case CONST_DOUBLE:
1965 if (LEGITIMATE_CONSTANT_P (x))
1967 *total = COSTS_N_INSNS (1);
1968 return true;
1970 else
1972 /* The value will need to be fetched from the constant pool. */
1973 *total = CONSTANT_POOL_COST;
1974 return true;
1977 case MEM:
1979 /* If the address is legitimate, return the number of
1980 instructions it needs, otherwise use the default handling. */
1981 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
1982 if (n > 0)
1984 *total = COSTS_N_INSNS (1 + n);
1985 return true;
1987 return false;
1990 case FFS:
1991 *total = COSTS_N_INSNS (6);
1992 return true;
1994 case NOT:
1995 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
1996 return true;
1998 case AND:
1999 case IOR:
2000 case XOR:
2001 if (mode == DImode && !TARGET_64BIT)
2003 *total = COSTS_N_INSNS (2);
2004 return true;
2006 return false;
2008 case ASHIFT:
2009 case ASHIFTRT:
2010 case LSHIFTRT:
2011 if (mode == DImode && !TARGET_64BIT)
2013 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2014 ? 4 : 12);
2015 return true;
2017 return false;
2019 case ABS:
2020 if (mode == SFmode || mode == DFmode)
2021 *total = COSTS_N_INSNS (1);
2022 else
2023 *total = COSTS_N_INSNS (4);
2024 return true;
2026 case LO_SUM:
2027 *total = COSTS_N_INSNS (1);
2028 return true;
2030 case PLUS:
2031 case MINUS:
2032 if (mode == SFmode || mode == DFmode)
2034 if (TUNE_MIPS3000 || TUNE_MIPS3900)
2035 *total = COSTS_N_INSNS (2);
2036 else if (TUNE_MIPS6000)
2037 *total = COSTS_N_INSNS (3);
2038 else if (TUNE_SB1)
2039 *total = COSTS_N_INSNS (4);
2040 else
2041 *total = COSTS_N_INSNS (6);
2042 return true;
2044 if (mode == DImode && !TARGET_64BIT)
2046 *total = COSTS_N_INSNS (4);
2047 return true;
2049 return false;
2051 case NEG:
2052 if (mode == DImode && !TARGET_64BIT)
2054 *total = 4;
2055 return true;
2057 return false;
2059 case MULT:
2060 if (mode == SFmode)
2062 if (TUNE_MIPS3000
2063 || TUNE_MIPS3900
2064 || TUNE_MIPS5000
2065 || TUNE_SB1)
2066 *total = COSTS_N_INSNS (4);
2067 else if (TUNE_MIPS6000
2068 || TUNE_MIPS5400
2069 || TUNE_MIPS5500)
2070 *total = COSTS_N_INSNS (5);
2071 else
2072 *total = COSTS_N_INSNS (7);
2073 return true;
2076 if (mode == DFmode)
2078 if (TUNE_SB1)
2079 *total = COSTS_N_INSNS (4);
2080 else if (TUNE_MIPS3000
2081 || TUNE_MIPS3900
2082 || TUNE_MIPS5000)
2083 *total = COSTS_N_INSNS (5);
2084 else if (TUNE_MIPS6000
2085 || TUNE_MIPS5400
2086 || TUNE_MIPS5500)
2087 *total = COSTS_N_INSNS (6);
2088 else
2089 *total = COSTS_N_INSNS (8);
2090 return true;
2093 if (TUNE_MIPS3000)
2094 *total = COSTS_N_INSNS (12);
2095 else if (TUNE_MIPS3900)
2096 *total = COSTS_N_INSNS (2);
2097 else if (TUNE_MIPS4130)
2098 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
2099 else if (TUNE_MIPS5400 || TUNE_SB1)
2100 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
2101 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
2102 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
2103 else if (TUNE_MIPS9000)
2104 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
2105 else if (TUNE_MIPS6000)
2106 *total = COSTS_N_INSNS (17);
2107 else if (TUNE_MIPS5000)
2108 *total = COSTS_N_INSNS (5);
2109 else
2110 *total = COSTS_N_INSNS (10);
2111 return true;
2113 case DIV:
2114 case MOD:
2115 if (mode == SFmode)
2117 if (TUNE_MIPS3000
2118 || TUNE_MIPS3900)
2119 *total = COSTS_N_INSNS (12);
2120 else if (TUNE_MIPS6000)
2121 *total = COSTS_N_INSNS (15);
2122 else if (TUNE_SB1)
2123 *total = COSTS_N_INSNS (24);
2124 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2125 *total = COSTS_N_INSNS (30);
2126 else
2127 *total = COSTS_N_INSNS (23);
2128 return true;
2131 if (mode == DFmode)
2133 if (TUNE_MIPS3000
2134 || TUNE_MIPS3900)
2135 *total = COSTS_N_INSNS (19);
2136 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2137 *total = COSTS_N_INSNS (59);
2138 else if (TUNE_MIPS6000)
2139 *total = COSTS_N_INSNS (16);
2140 else if (TUNE_SB1)
2141 *total = COSTS_N_INSNS (32);
2142 else
2143 *total = COSTS_N_INSNS (36);
2144 return true;
2146 /* Fall through. */
2148 case UDIV:
2149 case UMOD:
2150 if (TUNE_MIPS3000
2151 || TUNE_MIPS3900)
2152 *total = COSTS_N_INSNS (35);
2153 else if (TUNE_MIPS6000)
2154 *total = COSTS_N_INSNS (38);
2155 else if (TUNE_MIPS5000)
2156 *total = COSTS_N_INSNS (36);
2157 else if (TUNE_SB1)
2158 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2159 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2160 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2161 else
2162 *total = COSTS_N_INSNS (69);
2163 return true;
2165 case SIGN_EXTEND:
2166 /* A sign extend from SImode to DImode in 64 bit mode is often
2167 zero instructions, because the result can often be used
2168 directly by another instruction; we'll call it one. */
2169 if (TARGET_64BIT && mode == DImode
2170 && GET_MODE (XEXP (x, 0)) == SImode)
2171 *total = COSTS_N_INSNS (1);
2172 else
2173 *total = COSTS_N_INSNS (2);
2174 return true;
2176 case ZERO_EXTEND:
2177 if (TARGET_64BIT && mode == DImode
2178 && GET_MODE (XEXP (x, 0)) == SImode)
2179 *total = COSTS_N_INSNS (2);
2180 else
2181 *total = COSTS_N_INSNS (1);
2182 return true;
2184 default:
2185 return false;
2189 /* Provide the costs of an addressing mode that contains ADDR.
2190 If ADDR is not a valid address, its cost is irrelevant. */
2192 static int
2193 mips_address_cost (rtx addr)
2195 return mips_address_insns (addr, SImode);
2198 /* Return one word of double-word value OP, taking into account the fixed
2199 endianness of certain registers. HIGH_P is true to select the high part,
2200 false to select the low part. */
2203 mips_subword (rtx op, int high_p)
2205 unsigned int byte;
2206 enum machine_mode mode;
2208 mode = GET_MODE (op);
2209 if (mode == VOIDmode)
2210 mode = DImode;
2212 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2213 byte = UNITS_PER_WORD;
2214 else
2215 byte = 0;
2217 if (REG_P (op))
2219 if (FP_REG_P (REGNO (op)))
2220 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2221 if (REGNO (op) == HI_REGNUM)
2222 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
2225 if (MEM_P (op))
2226 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2228 return simplify_gen_subreg (word_mode, op, mode, byte);
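/* Editor's note: illustrative example, not part of the original file.
   Assuming a 32-bit target (UNITS_PER_WORD == 4), mips_subword of a
   memory operand selects bytes 0-3 for the low part and bytes 4-7 for
   the high part on a little-endian target; a big-endian target swaps
   the two offsets.  FPRs and HI/LO are special because their word
   order is fixed: the high part of an FPR pair is always REGNO + 1 and
   the high part of the accumulator is always HI_REGNUM, regardless of
   endianness.  */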
2232 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2234 bool
2235 mips_split_64bit_move_p (rtx dest, rtx src)
2237 if (TARGET_64BIT)
2238 return false;
2240 /* FP->FP moves can be done in a single instruction. */
2241 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2242 return false;
2244 /* Check for floating-point loads and stores. They can be done using
2245 ldc1 and sdc1 on MIPS II and above. */
2246 if (mips_isa > 1)
2248 if (FP_REG_RTX_P (dest) && MEM_P (src))
2249 return false;
2250 if (FP_REG_RTX_P (src) && MEM_P (dest))
2251 return false;
2253 return true;
2257 /* Split a 64-bit move from SRC to DEST assuming that
2258 mips_split_64bit_move_p holds.
2260 Moves into and out of FPRs cause some difficulty here. Such moves
2261 will always be DFmode, since paired FPRs are not allowed to store
2262 DImode values. The most natural representation would be two separate
2263 32-bit moves, such as:
2265 (set (reg:SI $f0) (mem:SI ...))
2266 (set (reg:SI $f1) (mem:SI ...))
2268 However, the second insn is invalid because odd-numbered FPRs are
2269 not allowed to store independent values. Use the patterns load_df_low,
2270 load_df_high and store_df_high instead. */
2272 void
2273 mips_split_64bit_move (rtx dest, rtx src)
2275 if (FP_REG_RTX_P (dest))
2277 /* Loading an FPR from memory or from GPRs. */
2278 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2279 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2280 copy_rtx (dest)));
2282 else if (FP_REG_RTX_P (src))
2284 /* Storing an FPR into memory or GPRs. */
2285 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2286 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2288 else
2290 /* The operation can be split into two normal moves. Decide in
2291 which order to do them. */
2292 rtx low_dest;
2294 low_dest = mips_subword (dest, 0);
2295 if (REG_P (low_dest)
2296 && reg_overlap_mentioned_p (low_dest, src))
2298 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2299 emit_move_insn (low_dest, mips_subword (src, 0));
2301 else
2303 emit_move_insn (low_dest, mips_subword (src, 0));
2304 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2309 /* Return the appropriate instructions to move SRC into DEST. Assume
2310 that SRC is operand 1 and DEST is operand 0. */
2312 const char *
2313 mips_output_move (rtx dest, rtx src)
2315 enum rtx_code dest_code, src_code;
2316 bool dbl_p;
2318 dest_code = GET_CODE (dest);
2319 src_code = GET_CODE (src);
2320 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2322 if (dbl_p && mips_split_64bit_move_p (dest, src))
2323 return "#";
2325 if ((src_code == REG && GP_REG_P (REGNO (src)))
2326 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2328 if (dest_code == REG)
2330 if (GP_REG_P (REGNO (dest)))
2331 return "move\t%0,%z1";
2333 if (MD_REG_P (REGNO (dest)))
2334 return "mt%0\t%z1";
2336 if (FP_REG_P (REGNO (dest)))
2337 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2339 if (ALL_COP_REG_P (REGNO (dest)))
2341 static char retval[] = "dmtc_\t%z1,%0";
2343 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2344 return (dbl_p ? retval : retval + 1);
2347 if (dest_code == MEM)
2348 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2350 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2352 if (src_code == REG)
2354 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2355 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2357 if (FP_REG_P (REGNO (src)))
2358 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2360 if (ALL_COP_REG_P (REGNO (src)))
2362 static char retval[] = "dmfc_\t%0,%1";
2364 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2365 return (dbl_p ? retval : retval + 1);
2369 if (src_code == MEM)
2370 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2372 if (src_code == CONST_INT)
2374 /* Don't use the X format, because that will give out of
2375 range numbers for 64 bit hosts and 32 bit targets. */
2376 if (!TARGET_MIPS16)
2377 return "li\t%0,%1\t\t\t# %X1";
2379 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2380 return "li\t%0,%1";
2382 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2383 return "#";
2386 if (src_code == HIGH)
2387 return "lui\t%0,%h1";
2389 if (CONST_GP_P (src))
2390 return "move\t%0,%1";
2392 if (symbolic_operand (src, VOIDmode))
2393 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2395 if (src_code == REG && FP_REG_P (REGNO (src)))
2397 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2399 if (GET_MODE (dest) == V2SFmode)
2400 return "mov.ps\t%0,%1";
2401 else
2402 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2405 if (dest_code == MEM)
2406 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2408 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2410 if (src_code == MEM)
2411 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2413 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2415 static char retval[] = "l_c_\t%0,%1";
2417 retval[1] = (dbl_p ? 'd' : 'w');
2418 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2419 return retval;
2421 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2423 static char retval[] = "s_c_\t%1,%0";
2425 retval[1] = (dbl_p ? 'd' : 'w');
2426 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2427 return retval;
2429 gcc_unreachable ();
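/* Editor's note: illustrative examples, not part of the original file.
   Given the templates above, a word load from memory into a GPR
   returns "lw\t%0,%1", a doubleword FPR-to-FPR copy returns
   "mov.d\t%0,%1", and any 64-bit move for which
   mips_split_64bit_move_p holds returns "#" so that it is split into
   word-sized moves later.  */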
2432 /* Restore $gp from its save slot. Valid only when using o32 or
2433 o64 abicalls. */
2435 void
2436 mips_restore_gp (void)
2438 rtx address, slot;
2440 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2442 address = mips_add_offset (pic_offset_table_rtx,
2443 frame_pointer_needed
2444 ? hard_frame_pointer_rtx
2445 : stack_pointer_rtx,
2446 current_function_outgoing_args_size);
2447 slot = gen_rtx_MEM (Pmode, address);
2449 emit_move_insn (pic_offset_table_rtx, slot);
2450 if (!TARGET_EXPLICIT_RELOCS)
2451 emit_insn (gen_blockage ());
2454 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2456 static void
2457 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2459 emit_insn (gen_rtx_SET (VOIDmode, target,
2460 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2463 /* Return true if CMP1 is a suitable second operand for relational
2464 operator CODE. See also the *sCC patterns in mips.md. */
2466 static bool
2467 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2469 switch (code)
2471 case GT:
2472 case GTU:
2473 return reg_or_0_operand (cmp1, VOIDmode);
2475 case GE:
2476 case GEU:
2477 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2479 case LT:
2480 case LTU:
2481 return arith_operand (cmp1, VOIDmode);
2483 case LE:
2484 return sle_operand (cmp1, VOIDmode);
2486 case LEU:
2487 return sleu_operand (cmp1, VOIDmode);
2489 default:
2490 gcc_unreachable ();
2494 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2495 result in TARGET. CMP0 and TARGET are register_operands that have
2496 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2497 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
2499 static void
2500 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2501 rtx target, rtx cmp0, rtx cmp1)
2503 /* First see if there is a MIPS instruction that can do this operation
2504 with CMP1 in its current form. If not, try doing the same for the
2505 inverse operation. If that also fails, force CMP1 into a register
2506 and try again. */
2507 if (mips_relational_operand_ok_p (code, cmp1))
2508 mips_emit_binary (code, target, cmp0, cmp1);
2509 else
2511 enum rtx_code inv_code = reverse_condition (code);
2512 if (!mips_relational_operand_ok_p (inv_code, cmp1))
2514 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2515 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
2517 else if (invert_ptr == 0)
2519 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2520 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2521 mips_emit_binary (XOR, target, inv_target, const1_rtx);
2523 else
2525 *invert_ptr = !*invert_ptr;
2526 mips_emit_binary (inv_code, target, cmp0, cmp1);
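/* Editor's note: illustrative example, not part of the original file.
   For (ge TARGET CMP0 (const_int 10)), GE only accepts const1_rtx as
   its second operand, so the code above tries the reversed condition
   LT instead; 10 is a valid arith_operand, so roughly
   "slti TARGET,CMP0,10" is emitted and the result is then either
   accounted for by flipping *INVERT_PTR or xor'ed with 1, depending on
   whether the caller supplied INVERT_PTR.  */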
2531 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2532 The register will have the same mode as CMP0. */
2534 static rtx
2535 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2537 if (cmp1 == const0_rtx)
2538 return cmp0;
2540 if (uns_arith_operand (cmp1, VOIDmode))
2541 return expand_binop (GET_MODE (cmp0), xor_optab,
2542 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2544 return expand_binop (GET_MODE (cmp0), sub_optab,
2545 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2548 /* Convert a comparison into something that can be used in a branch or
2549 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2550 being compared and *CODE is the code used to compare them.
2552 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2553 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2554 otherwise any standard branch condition can be used. The standard branch
2555 conditions are:
2557 - EQ/NE between two registers.
2558 - any comparison between a register and zero. */
2560 static void
2561 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
2563 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
2565 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2567 *op0 = cmp_operands[0];
2568 *op1 = cmp_operands[1];
2570 else if (*code == EQ || *code == NE)
2572 if (need_eq_ne_p)
2574 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2575 *op1 = const0_rtx;
2577 else
2579 *op0 = cmp_operands[0];
2580 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2583 else
2585 /* The comparison needs a separate scc instruction. Store the
2586 result of the scc in *OP0 and compare it against zero. */
2587 bool invert = false;
2588 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2589 *op1 = const0_rtx;
2590 mips_emit_int_relational (*code, &invert, *op0,
2591 cmp_operands[0], cmp_operands[1]);
2592 *code = (invert ? EQ : NE);
2595 else
2597 enum rtx_code cmp_code;
2599 /* Floating-point tests use a separate c.cond.fmt comparison to
2600 set a condition code register. The branch or conditional move
2601 will then compare that register against zero.
2603 Set CMP_CODE to the code of the comparison instruction and
2604 *CODE to the code that the branch or move should use. */
2605 switch (*code)
2607 case NE:
2608 case UNGE:
2609 case UNGT:
2610 case LTGT:
2611 case ORDERED:
2612 cmp_code = reverse_condition_maybe_unordered (*code);
2613 *code = EQ;
2614 break;
2616 default:
2617 cmp_code = *code;
2618 *code = NE;
2619 break;
2621 *op0 = (ISA_HAS_8CC
2622 ? gen_reg_rtx (CCmode)
2623 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2624 *op1 = const0_rtx;
2625 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
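/* Editor's note: illustrative example, not part of the original file.
   For a floating-point UNGE comparison, the switch above sets CMP_CODE
   to its maybe-unordered reverse, LT, and *CODE to EQ: a c.lt.fmt
   instruction sets the condition register and the branch or move then
   tests that register against zero for equality.  */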
2629 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2630 Store the result in TARGET and return true if successful.
2632 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
2634 bool
2635 mips_emit_scc (enum rtx_code code, rtx target)
2637 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
2638 return false;
2640 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
2641 if (code == EQ || code == NE)
2643 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2644 mips_emit_binary (code, target, zie, const0_rtx);
2646 else
2647 mips_emit_int_relational (code, 0, target,
2648 cmp_operands[0], cmp_operands[1]);
2649 return true;
2652 /* Emit the common code for doing conditional branches.
2653 operand[0] is the label to jump to.
2654 The comparison operands are saved away by cmp{si,di,sf,df}. */
2656 void
2657 gen_conditional_branch (rtx *operands, enum rtx_code code)
2659 rtx op0, op1, target;
2661 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
2662 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2663 gen_rtx_fmt_ee (code, GET_MODE (op0),
2664 op0, op1),
2665 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2666 pc_rtx);
2667 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2670 /* Emit the common code for conditional moves. OPERANDS is the array
2671 of operands passed to the conditional move define_expand. */
2673 void
2674 gen_conditional_move (rtx *operands)
2676 enum rtx_code code;
2677 rtx op0, op1;
2679 code = GET_CODE (operands[1]);
2680 mips_emit_compare (&code, &op0, &op1, true);
2681 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2682 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2683 gen_rtx_fmt_ee (code,
2684 GET_MODE (op0),
2685 op0, op1),
2686 operands[2], operands[3])));
2689 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2690 the conditional_trap expander. */
2692 void
2693 mips_gen_conditional_trap (rtx *operands)
2695 rtx op0, op1;
2696 enum rtx_code cmp_code = GET_CODE (operands[0]);
2697 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2699 /* MIPS conditional trap machine instructions don't have GT or LE
2700 flavors, so we must invert the comparison and convert to LT and
2701 GE, respectively. */
2702 switch (cmp_code)
2704 case GT: cmp_code = LT; break;
2705 case LE: cmp_code = GE; break;
2706 case GTU: cmp_code = LTU; break;
2707 case LEU: cmp_code = GEU; break;
2708 default: break;
2710 if (cmp_code == GET_CODE (operands[0]))
2712 op0 = cmp_operands[0];
2713 op1 = cmp_operands[1];
2715 else
2717 op0 = cmp_operands[1];
2718 op1 = cmp_operands[0];
2720 op0 = force_reg (mode, op0);
2721 if (!arith_operand (op1, mode))
2722 op1 = force_reg (mode, op1);
2724 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2725 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2726 operands[1]));
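/* Editor's note: illustrative example, not part of the original file.
   For a GT trap the code above rewrites the condition as LT and swaps
   the operands, so "trap if A > B" becomes "trap if B < A", which maps
   directly onto the hardware's tlt/tlti flavors.  */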
2729 /* Load function address ADDR into register DEST. SIBCALL_P is true
2730 if the address is needed for a sibling call. */
2732 static void
2733 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2735 /* If we're generating PIC, and this call is to a global function,
2736 try to allow its address to be resolved lazily. This isn't
2737 possible for NewABI sibcalls since the value of $gp on entry
2738 to the stub would be our caller's gp, not ours. */
2739 if (TARGET_EXPLICIT_RELOCS
2740 && !(sibcall_p && TARGET_NEWABI)
2741 && global_got_operand (addr, VOIDmode))
2743 rtx high, lo_sum_symbol;
2745 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2746 addr, SYMBOL_GOTOFF_CALL);
2747 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
2748 if (Pmode == SImode)
2749 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2750 else
2751 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
2753 else
2754 emit_move_insn (dest, addr);
2758 /* Expand a call or call_value instruction. RESULT is where the
2759 result will go (null for calls), ADDR is the address of the
2760 function, ARGS_SIZE is the size of the arguments and AUX is
2761 the value passed to us by mips_function_arg. SIBCALL_P is true
2762 if we are expanding a sibling call, false if we're expanding
2763 a normal call. */
2765 void
2766 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2768 rtx orig_addr, pattern, insn;
2770 orig_addr = addr;
2771 if (!call_insn_operand (addr, VOIDmode))
2773 addr = gen_reg_rtx (Pmode);
2774 mips_load_call_address (addr, orig_addr, sibcall_p);
2777 if (TARGET_MIPS16
2778 && mips16_hard_float
2779 && build_mips16_call_stub (result, addr, args_size,
2780 aux == 0 ? 0 : (int) GET_MODE (aux)))
2781 return;
2783 if (result == 0)
2784 pattern = (sibcall_p
2785 ? gen_sibcall_internal (addr, args_size)
2786 : gen_call_internal (addr, args_size));
2787 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
2789 rtx reg1, reg2;
2791 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2792 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2793 pattern =
2794 (sibcall_p
2795 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2796 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2798 else
2799 pattern = (sibcall_p
2800 ? gen_sibcall_value_internal (result, addr, args_size)
2801 : gen_call_value_internal (result, addr, args_size));
2803 insn = emit_call_insn (pattern);
2805 /* Lazy-binding stubs require $gp to be valid on entry. */
2806 if (global_got_operand (orig_addr, VOIDmode))
2807 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2811 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
2813 static bool
2814 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
2815 tree exp ATTRIBUTE_UNUSED)
2817 return TARGET_SIBCALLS;
2820 /* Emit code to move general operand SRC into condition-code
2821 register DEST. SCRATCH is a scratch TFmode float register.
2822 The sequence is:
2824 FP1 = SRC
2825 FP2 = 0.0f
2826 DEST = FP2 < FP1
2828 where FP1 and FP2 are single-precision float registers
2829 taken from SCRATCH. */
2831 void
2832 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
2834 rtx fp1, fp2;
2836 /* Change the source to SFmode. */
2837 if (MEM_P (src))
2838 src = adjust_address (src, SFmode, 0);
2839 else if (REG_P (src) || GET_CODE (src) == SUBREG)
2840 src = gen_rtx_REG (SFmode, true_regnum (src));
2842 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
2843 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
2845 emit_move_insn (copy_rtx (fp1), src);
2846 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
2847 emit_insn (gen_slt_sf (dest, fp2, fp1));
2850 /* Emit code to change the current function's return address to
2851 ADDRESS. SCRATCH is available as a scratch register, if needed.
2852 ADDRESS and SCRATCH are both word-mode GPRs. */
2854 void
2855 mips_set_return_address (rtx address, rtx scratch)
2857 rtx slot_address;
2859 compute_frame_size (get_frame_size ());
2860 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
2861 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
2862 cfun->machine->frame.gp_sp_offset);
2864 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
2867 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2868 Assume that the areas do not overlap. */
2870 static void
2871 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2873 HOST_WIDE_INT offset, delta;
2874 unsigned HOST_WIDE_INT bits;
2875 int i;
2876 enum machine_mode mode;
2877 rtx *regs;
2879 /* Work out how many bits to move at a time. If both operands have
2880 half-word alignment, it is usually better to move in half words.
2881 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
2882 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
2883 Otherwise move word-sized chunks. */
2884 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
2885 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
2886 bits = BITS_PER_WORD / 2;
2887 else
2888 bits = BITS_PER_WORD;
2890 mode = mode_for_size (bits, MODE_INT, 0);
2891 delta = bits / BITS_PER_UNIT;
2893 /* Allocate a buffer for the temporary registers. */
2894 regs = alloca (sizeof (rtx) * length / delta);
2896 /* Load as many BITS-sized chunks as possible. Use a normal load if
2897 the source has enough alignment, otherwise use left/right pairs. */
2898 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2900 regs[i] = gen_reg_rtx (mode);
2901 if (MEM_ALIGN (src) >= bits)
2902 emit_move_insn (regs[i], adjust_address (src, mode, offset));
2903 else
2905 rtx part = adjust_address (src, BLKmode, offset);
2906 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
2907 gcc_unreachable ();
2911 /* Copy the chunks to the destination. */
2912 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2913 if (MEM_ALIGN (dest) >= bits)
2914 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
2915 else
2917 rtx part = adjust_address (dest, BLKmode, offset);
2918 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
2919 gcc_unreachable ();
2922 /* Mop up any left-over bytes. */
2923 if (offset < length)
2925 src = adjust_address (src, BLKmode, offset);
2926 dest = adjust_address (dest, BLKmode, offset);
2927 move_by_pieces (dest, src, length - offset,
2928 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
2932 #define MAX_MOVE_REGS 4
2933 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
2936 /* Helper function for doing a loop-based block operation on memory
2937 reference MEM. Each iteration of the loop will operate on LENGTH
2938 bytes of MEM.
2940 Create a new base register for use within the loop and point it to
2941 the start of MEM. Create a new memory reference that uses this
2942 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2944 static void
2945 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2946 rtx *loop_reg, rtx *loop_mem)
2948 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2950 /* Although the new mem does not refer to a known location,
2951 it does keep up to LENGTH bytes of alignment. */
2952 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2953 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2957 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
2958 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
2959 memory regions do not overlap. */
2961 static void
2962 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
2964 rtx label, src_reg, dest_reg, final_src;
2965 HOST_WIDE_INT leftover;
2967 leftover = length % MAX_MOVE_BYTES;
2968 length -= leftover;
2970 /* Create registers and memory references for use within the loop. */
2971 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
2972 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
2974 /* Calculate the value that SRC_REG should have after the last iteration
2975 of the loop. */
2976 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2977 0, 0, OPTAB_WIDEN);
2979 /* Emit the start of the loop. */
2980 label = gen_label_rtx ();
2981 emit_label (label);
2983 /* Emit the loop body. */
2984 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
2986 /* Move on to the next block. */
2987 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
2988 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
2990 /* Emit the loop condition. */
2991 if (Pmode == DImode)
2992 emit_insn (gen_cmpdi (src_reg, final_src));
2993 else
2994 emit_insn (gen_cmpsi (src_reg, final_src));
2995 emit_jump_insn (gen_bne (label));
2997 /* Mop up any left-over bytes. */
2998 if (leftover)
2999 mips_block_move_straight (dest, src, leftover);
3002 /* Expand a movmemsi instruction. */
3004 bool
3005 mips_expand_block_move (rtx dest, rtx src, rtx length)
3007 if (GET_CODE (length) == CONST_INT)
3009 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3011 mips_block_move_straight (dest, src, INTVAL (length));
3012 return true;
3014 else if (optimize)
3016 mips_block_move_loop (dest, src, INTVAL (length));
3017 return true;
3020 return false;
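/* Editor's note: the example and sketch below are illustrative
   additions, not part of the original file.  Assuming a 32-bit target
   (UNITS_PER_WORD == 4, hence MAX_MOVE_BYTES == 16), a constant
   100-byte copy is larger than 2 * MAX_MOVE_BYTES, so when optimizing
   it becomes a loop of 6 iterations moving 16 bytes each, followed by
   a straight-line copy of the 4 leftover bytes.  The hypothetical
   helper below just reproduces that partitioning arithmetic.  */
#if 0
static void
show_block_move_partition (long length, long max_move_bytes)
{
  long leftover = length % max_move_bytes;	/* copied straight-line */
  long iterations = (length - leftover) / max_move_bytes;
  printf ("%ld iterations of %ld bytes, %ld leftover bytes\n",
	  iterations, max_move_bytes, leftover);
}
#endif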
3023 /* Argument support functions. */
3025 /* Initialize CUMULATIVE_ARGS for a function. */
3027 void
3028 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3029 rtx libname ATTRIBUTE_UNUSED)
3031 static CUMULATIVE_ARGS zero_cum;
3032 tree param, next_param;
3034 *cum = zero_cum;
3035 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3037 /* Determine if this function has variable arguments. This is
3038 indicated by the last argument being 'void_type_node' if there
3039 are no variable arguments. The standard MIPS calling sequence
3040 passes all arguments in the general purpose registers in this case. */
3042 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3043 param != 0; param = next_param)
3045 next_param = TREE_CHAIN (param);
3046 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3047 cum->gp_reg_found = 1;
3052 /* Fill INFO with information about a single argument. CUM is the
3053 cumulative state for earlier arguments. MODE is the mode of this
3054 argument and TYPE is its type (if known). NAMED is true if this
3055 is a named (fixed) argument rather than a variable one. */
3057 static void
3058 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3059 tree type, int named, struct mips_arg_info *info)
3061 bool doubleword_aligned_p;
3062 unsigned int num_bytes, num_words, max_regs;
3064 /* Work out the size of the argument. */
3065 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3066 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3068 /* Decide whether it should go in a floating-point register, assuming
3069 one is free. Later code checks for availability.
3071 The checks against UNITS_PER_FPVALUE handle the soft-float and
3072 single-float cases. */
3073 switch (mips_abi)
3075 case ABI_EABI:
3076 /* The EABI conventions have traditionally been defined in terms
3077 of TYPE_MODE, regardless of the actual type. */
3078 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3079 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3080 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3081 break;
3083 case ABI_32:
3084 case ABI_O64:
3085 /* Only leading floating-point scalars are passed in
3086 floating-point registers. We also handle vector floats the same
3087 way, which is OK because they are not covered by the standard ABI. */
3088 info->fpr_p = (!cum->gp_reg_found
3089 && cum->arg_number < 2
3090 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3091 || VECTOR_FLOAT_TYPE_P (type))
3092 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3093 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3094 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3095 break;
3097 case ABI_N32:
3098 case ABI_64:
3099 /* Scalar and complex floating-point types are passed in
3100 floating-point registers. */
3101 info->fpr_p = (named
3102 && (type == 0 || FLOAT_TYPE_P (type))
3103 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3104 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3105 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3106 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3108 /* ??? According to the ABI documentation, the real and imaginary
3109 parts of complex floats should be passed in individual registers.
3110 The real and imaginary parts of stack arguments are supposed
3111 to be contiguous and there should be an extra word of padding
3112 at the end.
3114 This has two problems. First, it makes it impossible to use a
3115 single "void *" va_list type, since register and stack arguments
3116 are passed differently. (At the time of writing, MIPSpro cannot
3117 handle complex float varargs correctly.) Second, it's unclear
3118 what should happen when there is only one register free.
3120 For now, we assume that named complex floats should go into FPRs
3121 if there are two FPRs free, otherwise they should be passed in the
3122 same way as a struct containing two floats. */
3123 if (info->fpr_p
3124 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3125 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3127 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3128 info->fpr_p = false;
3129 else
3130 num_words = 2;
3132 break;
3134 default:
3135 gcc_unreachable ();
3138 /* See whether the argument has doubleword alignment. */
3139 doubleword_aligned_p = (type
3140 ? TYPE_ALIGN (type) > BITS_PER_WORD
3141 : GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD);
3143 /* Set REG_OFFSET to the register count we're interested in.
3144 The EABI allocates the floating-point registers separately,
3145 but the other ABIs allocate them like integer registers. */
3146 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3147 ? cum->num_fprs
3148 : cum->num_gprs);
3150 /* Advance to an even register if the argument is doubleword-aligned. */
3151 if (doubleword_aligned_p)
3152 info->reg_offset += info->reg_offset & 1;
3154 /* Work out the offset of a stack argument. */
3155 info->stack_offset = cum->stack_words;
3156 if (doubleword_aligned_p)
3157 info->stack_offset += info->stack_offset & 1;
3159 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3161 /* Partition the argument between registers and stack. */
3162 info->reg_words = MIN (num_words, max_regs);
3163 info->stack_words = num_words - info->reg_words;
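/* Editor's note: illustrative example, not part of the original file.
   For a 20-byte structure on a 32-bit target, NUM_WORDS above is 5; if
   only three argument registers remain (MAX_REGS == 3), REG_WORDS
   becomes 3 and STACK_WORDS becomes 2, so the argument is split
   between the last three registers and the first two stack slots.  */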
3167 /* Implement FUNCTION_ARG_ADVANCE. */
3169 void
3170 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3171 tree type, int named)
3173 struct mips_arg_info info;
3175 mips_arg_info (cum, mode, type, named, &info);
3177 if (!info.fpr_p)
3178 cum->gp_reg_found = true;
3180 /* See the comment above the cumulative args structure in mips.h
3181 for an explanation of what this code does. It assumes the O32
3182 ABI, which passes at most 2 arguments in float registers. */
3183 if (cum->arg_number < 2 && info.fpr_p)
3184 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
3186 if (mips_abi != ABI_EABI || !info.fpr_p)
3187 cum->num_gprs = info.reg_offset + info.reg_words;
3188 else if (info.reg_words > 0)
3189 cum->num_fprs += FP_INC;
3191 if (info.stack_words > 0)
3192 cum->stack_words = info.stack_offset + info.stack_words;
3194 cum->arg_number++;
3197 /* Implement FUNCTION_ARG. */
3199 struct rtx_def *
3200 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3201 tree type, int named)
3203 struct mips_arg_info info;
3205 /* We will be called with a mode of VOIDmode after the last argument
3206 has been seen. Whatever we return will be passed to the call
3207 insn. If we need a mips16 fp_code, return a REG with the code
3208 stored as the mode. */
3209 if (mode == VOIDmode)
3211 if (TARGET_MIPS16 && cum->fp_code != 0)
3212 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3214 else
3215 return 0;
3218 mips_arg_info (cum, mode, type, named, &info);
3220 /* Return straight away if the whole argument is passed on the stack. */
3221 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3222 return 0;
3224 if (type != 0
3225 && TREE_CODE (type) == RECORD_TYPE
3226 && TARGET_NEWABI
3227 && TYPE_SIZE_UNIT (type)
3228 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3229 && named)
3231 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3232 structure contains a double in its entirety, then that 64 bit
3233 chunk is passed in a floating point register. */
3234 tree field;
3236 /* First check to see if there is any such field. */
3237 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3238 if (TREE_CODE (field) == FIELD_DECL
3239 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3240 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3241 && host_integerp (bit_position (field), 0)
3242 && int_bit_position (field) % BITS_PER_WORD == 0)
3243 break;
3245 if (field != 0)
3247 /* Now handle the special case by returning a PARALLEL
3248 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3249 chunks are passed in registers. */
3250 unsigned int i;
3251 HOST_WIDE_INT bitpos;
3252 rtx ret;
3254 /* assign_parms checks the mode of ENTRY_PARM, so we must
3255 use the actual mode here. */
3256 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3258 bitpos = 0;
3259 field = TYPE_FIELDS (type);
3260 for (i = 0; i < info.reg_words; i++)
3262 rtx reg;
3264 for (; field; field = TREE_CHAIN (field))
3265 if (TREE_CODE (field) == FIELD_DECL
3266 && int_bit_position (field) >= bitpos)
3267 break;
3269 if (field
3270 && int_bit_position (field) == bitpos
3271 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3272 && !TARGET_SOFT_FLOAT
3273 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3274 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3275 else
3276 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3278 XVECEXP (ret, 0, i)
3279 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3280 GEN_INT (bitpos / BITS_PER_UNIT));
3282 bitpos += BITS_PER_WORD;
3284 return ret;
3288 /* Handle the n32/n64 conventions for passing complex floating-point
3289 arguments in FPR pairs. The real part goes in the lower register
3290 and the imaginary part goes in the upper register. */
3291 if (TARGET_NEWABI
3292 && info.fpr_p
3293 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3295 rtx real, imag;
3296 enum machine_mode inner;
3297 int reg;
3299 inner = GET_MODE_INNER (mode);
3300 reg = FP_ARG_FIRST + info.reg_offset;
3301 real = gen_rtx_EXPR_LIST (VOIDmode,
3302 gen_rtx_REG (inner, reg),
3303 const0_rtx);
3304 imag = gen_rtx_EXPR_LIST (VOIDmode,
3305 gen_rtx_REG (inner, reg + info.reg_words / 2),
3306 GEN_INT (GET_MODE_SIZE (inner)));
3307 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3310 if (!info.fpr_p)
3311 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3312 else if (info.reg_offset == 1)
3313 /* This code handles the special o32 case in which the second word
3314 of the argument structure is passed in floating-point registers. */
3315 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3316 else
3317 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3321 /* Implement FUNCTION_ARG_PARTIAL_NREGS. */
3324 function_arg_partial_nregs (const CUMULATIVE_ARGS *cum,
3325 enum machine_mode mode, tree type, int named)
3327 struct mips_arg_info info;
3329 mips_arg_info (cum, mode, type, named, &info);
3330 return info.stack_words > 0 ? info.reg_words : 0;
3334 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3335 PARM_BOUNDARY bits of alignment, but will be given anything up
3336 to STACK_BOUNDARY bits if the type requires it. */
3339 function_arg_boundary (enum machine_mode mode, tree type)
3341 unsigned int alignment;
3343 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3344 if (alignment < PARM_BOUNDARY)
3345 alignment = PARM_BOUNDARY;
3346 if (alignment > STACK_BOUNDARY)
3347 alignment = STACK_BOUNDARY;
3348 return alignment;
3351 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3352 upward rather than downward. In other words, return true if the
3353 first byte of the stack slot has useful data, false if the last
3354 byte does. */
3356 bool
3357 mips_pad_arg_upward (enum machine_mode mode, tree type)
3359 /* On little-endian targets, the first byte of every stack argument
3360 is passed in the first byte of the stack slot. */
3361 if (!BYTES_BIG_ENDIAN)
3362 return true;
3364 /* Otherwise, integral types are padded downward: the last byte of a
3365 stack argument is passed in the last byte of the stack slot. */
3366 if (type != 0
3367 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3368 : GET_MODE_CLASS (mode) == MODE_INT)
3369 return false;
3371 /* Big-endian o64 pads floating-point arguments downward. */
3372 if (mips_abi == ABI_O64)
3373 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3374 return false;
3376 /* Other types are padded upward for o32, o64, n32 and n64. */
3377 if (mips_abi != ABI_EABI)
3378 return true;
3380 /* Arguments smaller than a stack slot are padded downward. */
3381 if (mode != BLKmode)
3382 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3383 else
3384 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
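/* Editor's note: illustrative example, not part of the original file.
   On a little-endian target every stack argument starts at the first
   byte of its slot, so the function above returns true.  On a
   big-endian o32 target a char argument is integral and is therefore
   padded downward: it occupies the last byte of its word-sized slot
   and the function returns false.  */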
3388 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3389 if the least significant byte of the register has useful data. Return
3390 the opposite if the most significant byte does. */
3392 bool
3393 mips_pad_reg_upward (enum machine_mode mode, tree type)
3395 /* No shifting is required for floating-point arguments. */
3396 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3397 return !BYTES_BIG_ENDIAN;
3399 /* Otherwise, apply the same padding to register arguments as we do
3400 to stack arguments. */
3401 return mips_pad_arg_upward (mode, type);
3404 static void
3405 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3406 tree type, int *pretend_size, int no_rtl)
3408 CUMULATIVE_ARGS local_cum;
3409 int gp_saved, fp_saved;
3411 /* The caller has advanced CUM up to, but not beyond, the last named
3412 argument. Advance a local copy of CUM past the last "real" named
3413 argument, to find out how many registers are left over. */
3415 local_cum = *cum;
3416 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3418 /* Find out how many registers we need to save. */
3419 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3420 fp_saved = (EABI_FLOAT_VARARGS_P
3421 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
3422 : 0);
3424 if (!no_rtl)
3426 if (gp_saved > 0)
3428 rtx ptr, mem;
3430 ptr = virtual_incoming_args_rtx;
3431 switch (mips_abi)
3433 case ABI_32:
3434 case ABI_O64:
3435 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3436 break;
3438 case ABI_EABI:
3439 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
3440 break;
3442 mem = gen_rtx_MEM (BLKmode, ptr);
3443 set_mem_alias_set (mem, get_varargs_alias_set ());
3445 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3446 mem, gp_saved);
3448 if (fp_saved > 0)
3450 /* We can't use move_block_from_reg, because it will use
3451 the wrong mode. */
3452 enum machine_mode mode;
3453 int off, i;
3455 /* Set OFF to the offset from virtual_incoming_args_rtx of
3456 the first float register. The FP save area lies below
3457 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3458 off = -gp_saved * UNITS_PER_WORD;
3459 off &= ~(UNITS_PER_FPVALUE - 1);
3460 off -= fp_saved * UNITS_PER_FPREG;
3462 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
3464 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3466 rtx ptr, mem;
3468 ptr = plus_constant (virtual_incoming_args_rtx, off);
3469 mem = gen_rtx_MEM (mode, ptr);
3470 set_mem_alias_set (mem, get_varargs_alias_set ());
3471 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3472 off += UNITS_PER_HWFPVALUE;
3476 if (TARGET_OLDABI)
3478 /* No need for pretend arguments: the register parameter area was
3479 allocated by the caller. */
3480 *pretend_size = 0;
3481 return;
3483 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3486 /* Create the va_list data type.
3487 We keep 3 pointers, and two offsets.
3488 Two pointers are to the overflow area, which starts at the CFA.
3489 One of these is constant, for addressing into the GPR save area below it.
3490 The other is advanced up the stack through the overflow region.
3491 The third pointer is to the GPR save area. Since the FPR save area
3492 is just below it, we can address FPR slots off this pointer.
3493 We also keep two one-byte offsets, which are to be subtracted from the
3494 constant pointers to yield addresses in the GPR and FPR save areas.
3495 These are downcounted as float or non-float arguments are used,
3496 and when they get to zero, the argument must be obtained from the
3497 overflow region.
3498 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3499 pointer is enough. It's started at the GPR save area, and is
3500 advanced, period.
3501 Note that the GPR save area is not constant size, due to optimization
3502 in the prologue. Hence, we can't use a design with two pointers
3503 and two offsets, although we could have designed this with two pointers
3504 and three offsets. */
3506 static tree
3507 mips_build_builtin_va_list (void)
3509 if (EABI_FLOAT_VARARGS_P)
3511 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3512 tree array, index;
3514 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3516 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3517 ptr_type_node);
3518 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3519 ptr_type_node);
3520 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3521 ptr_type_node);
3522 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3523 unsigned_char_type_node);
3524 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3525 unsigned_char_type_node);
3526 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3527 warn on every user file. */
3528 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
3529 array = build_array_type (unsigned_char_type_node,
3530 build_index_type (index));
3531 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
3533 DECL_FIELD_CONTEXT (f_ovfl) = record;
3534 DECL_FIELD_CONTEXT (f_gtop) = record;
3535 DECL_FIELD_CONTEXT (f_ftop) = record;
3536 DECL_FIELD_CONTEXT (f_goff) = record;
3537 DECL_FIELD_CONTEXT (f_foff) = record;
3538 DECL_FIELD_CONTEXT (f_res) = record;
3540 TYPE_FIELDS (record) = f_ovfl;
3541 TREE_CHAIN (f_ovfl) = f_gtop;
3542 TREE_CHAIN (f_gtop) = f_ftop;
3543 TREE_CHAIN (f_ftop) = f_goff;
3544 TREE_CHAIN (f_goff) = f_foff;
3545 TREE_CHAIN (f_foff) = f_res;
3547 layout_type (record);
3548 return record;
3550 else if (TARGET_IRIX && TARGET_IRIX6)
3551 /* On IRIX 6, this type is 'char *'. */
3552 return build_pointer_type (char_type_node);
3553 else
3554 /* Otherwise, we use 'void *'. */
3555 return ptr_type_node;
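/* Editor's note: illustrative sketch, not part of the original file.
   The EABI record built above corresponds roughly to the following C
   declaration (field names as created above; the __reserved array pads
   the record to a multiple of the pointer size):

     typedef struct {
       void *__overflow_argptr;		// next stack (overflow) argument
       void *__gpr_top;			// top of the GPR save area
       void *__fpr_top;			// top of the FPR save area
       unsigned char __gpr_offset;	// offset back from __gpr_top
       unsigned char __fpr_offset;	// offset back from __fpr_top
       unsigned char __reserved[sizeof (void *) - 2];	// padding
     } mips_eabi_va_list;

   When !EABI_FLOAT_VARARGS_P no record is needed and the last two
   cases above return a plain pointer type instead.  */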
3558 /* Implement va_start. */
3560 void
3561 mips_va_start (tree valist, rtx nextarg)
3563 const CUMULATIVE_ARGS *cum = &current_function_args_info;
3565 /* The alignment of ARG_POINTER_REGNUM is initialized to STACK_BOUNDARY, but
3566 since the stack is aligned for a pair of argument-passing slots,
3567 and the beginning of a variable argument list may be an odd slot,
3568 we have to decrease its alignment. */
3569 if (cfun && cfun->emit->regno_pointer_align)
3570 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3571 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3572 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3574 if (mips_abi == ABI_EABI)
3576 int gpr_save_area_size;
3578 gpr_save_area_size
3579 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
3581 if (EABI_FLOAT_VARARGS_P)
3583 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3584 tree ovfl, gtop, ftop, goff, foff;
3585 tree t;
3586 int fpr_offset;
3587 int fpr_save_area_size;
3589 f_ovfl = TYPE_FIELDS (va_list_type_node);
3590 f_gtop = TREE_CHAIN (f_ovfl);
3591 f_ftop = TREE_CHAIN (f_gtop);
3592 f_goff = TREE_CHAIN (f_ftop);
3593 f_foff = TREE_CHAIN (f_goff);
3595 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3596 NULL_TREE);
3597 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3598 NULL_TREE);
3599 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3600 NULL_TREE);
3601 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3602 NULL_TREE);
3603 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3604 NULL_TREE);
3606 /* Emit code to initialize OVFL, which points to the next varargs
3607 stack argument. CUM->STACK_WORDS gives the number of stack
3608 words used by named arguments. */
3609 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3610 if (cum->stack_words > 0)
3611 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3612 build_int_cst (NULL_TREE,
3613 cum->stack_words * UNITS_PER_WORD));
3614 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3615 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3617 /* Emit code to initialize GTOP, the top of the GPR save area. */
3618 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3619 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3620 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3622 /* Emit code to initialize FTOP, the top of the FPR save area.
3623 This address is gpr_save_area_bytes below GTOP, rounded
3624 down to the next fp-aligned boundary. */
3625 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3626 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3627 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3628 if (fpr_offset)
3629 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3630 build_int_cst (NULL_TREE, -fpr_offset));
3631 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3632 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3634 /* Emit code to initialize GOFF, the offset from GTOP of the
3635 next GPR argument. */
3636 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3637 build_int_cst (NULL_TREE, gpr_save_area_size));
3638 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3640 /* Likewise emit code to initialize FOFF, the offset from FTOP
3641 of the next FPR argument. */
3642 fpr_save_area_size
3643 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3644 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3645 build_int_cst (NULL_TREE, fpr_save_area_size));
3646 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
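	  /* A rough numeric sketch, assuming MAX_ARGS_IN_REGISTERS is 8
	     and UNITS_PER_WORD is 4 here: for a prototype like
	     "int f (int a, ...)" with no named stack arguments,
	     CUM->NUM_GPRS is 1, so GOFF starts at 7 * 4 = 28 and both
	     OVFL and GTOP point at virtual_incoming_args_rtx.  FTOP is
	     then that address minus 28, rounded down to an FP-aligned
	     boundary, and FOFF covers the whole FPR save area since no
	     FPRs were used for named arguments.  */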
3648 else
3650 /* Everything is in the GPR save area, or in the overflow
3651 area which is contiguous with it. */
3652 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3653 std_expand_builtin_va_start (valist, nextarg);
3656 else
3657 std_expand_builtin_va_start (valist, nextarg);
3660 /* Implement va_arg. */
3662 static tree
3663 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3665 HOST_WIDE_INT size, rsize;
3666 tree addr;
3667 bool indirect;
3669 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3671 if (indirect)
3672 type = build_pointer_type (type);
3674 size = int_size_in_bytes (type);
3675 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
3677 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3678 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3679 else
3681 /* Not a simple merged stack. */
3683 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3684 tree ovfl, top, off, align;
3685 HOST_WIDE_INT osize;
3686 tree t, u;
3688 f_ovfl = TYPE_FIELDS (va_list_type_node);
3689 f_gtop = TREE_CHAIN (f_ovfl);
3690 f_ftop = TREE_CHAIN (f_gtop);
3691 f_goff = TREE_CHAIN (f_ftop);
3692 f_foff = TREE_CHAIN (f_goff);
3694 /* We maintain separate pointers and offsets for floating-point
3695 and integer arguments, but we need similar code in both cases.
3696 Let:
3698 TOP be the top of the register save area;
3699 OFF be the offset from TOP of the next register;
3700 ADDR_RTX be the address of the argument;
3701 RSIZE be the number of bytes used to store the argument
3702 when it's in the register save area;
3703 OSIZE be the number of bytes used to store it when it's
3704 in the stack overflow area; and
3705 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3707 The code we want is:
3709 1: off &= -rsize; // round down
3710 2: if (off != 0)
3711 3: {
3712 4: addr_rtx = top - off;
3713 5: off -= rsize;
3714 6: }
3715 7: else
3716 8: {
3717 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
3718 10: addr_rtx = ovfl + PADDING;
3719 11: ovfl += osize;
3720 12: }
3722 [1] and [9] can sometimes be optimized away. */
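      /* For example, for a plain "int" argument on a 32-bit target,
	 RSIZE and OSIZE both equal UNITS_PER_WORD, so steps [1] and [9]
	 above are not emitted at all and the expansion reduces to
	 choosing between TOP - OFF and the next overflow slot.  */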
3724 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3725 NULL_TREE);
3727 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3728 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3730 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3731 NULL_TREE);
3732 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3733 NULL_TREE);
3735 /* When floating-point registers are saved to the stack,
3736 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3737 of the float's precision. */
3738 rsize = UNITS_PER_HWFPVALUE;
3740 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3741 (= PARM_BOUNDARY bits). This can be different from RSIZE
3742 in two cases:
3744 (1) On 32-bit targets when TYPE is a structure such as:
3746 struct s { float f; };
3748 Such structures are passed in paired FPRs, so RSIZE
3749 will be 8 bytes. However, the structure only takes
3750 up 4 bytes of memory, so OSIZE will only be 4.
3752 (2) In combinations such as -mgp64 -msingle-float
3753 -fshort-double. Doubles passed in registers
3754 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3755 but those passed on the stack take up
3756 UNITS_PER_WORD bytes. */
3757 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3759 else
3761 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3762 NULL_TREE);
3763 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3764 NULL_TREE);
3765 if (rsize > UNITS_PER_WORD)
3767 /* [1] Emit code for: off &= -rsize. */
3768 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3769 build_int_cst (NULL_TREE, -rsize));
3770 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3771 gimplify_and_add (t, pre_p);
3773 osize = rsize;
3776 /* [2] Emit code to branch if off == 0. */
3777 t = lang_hooks.truthvalue_conversion (off);
3778 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3780 /* [5] Emit code for: off -= rsize. We do this as a form of
3781 post-increment not available to C. Also widen for the
3782 coming pointer arithmetic. */
3783 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
3784 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3785 t = fold_convert (sizetype, t);
3786 t = fold_convert (TREE_TYPE (top), t);
3788 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
3789 the argument has RSIZE - SIZE bytes of leading padding. */
3790 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3791 if (BYTES_BIG_ENDIAN && rsize > size)
3793 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3794 rsize - size));
3795 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3797 COND_EXPR_THEN (addr) = t;
3799 if (osize > UNITS_PER_WORD)
3801 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
3802 u = fold_convert (TREE_TYPE (ovfl),
3803 build_int_cst (NULL_TREE, osize - 1));
3804 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
3805 u = fold_convert (TREE_TYPE (ovfl),
3806 build_int_cst (NULL_TREE, -osize));
3807 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
3808 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3810 else
3811 align = NULL;
3813 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
3814 post-increment ovfl by osize. On big-endian machines,
3815 the argument has OSIZE - SIZE bytes of leading padding. */
3816 u = fold_convert (TREE_TYPE (ovfl),
3817 build_int_cst (NULL_TREE, osize));
3818 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
3819 if (BYTES_BIG_ENDIAN && osize > size)
3821 u = fold_convert (TREE_TYPE (t),
3822 build_int_cst (NULL_TREE, osize - size));
3823 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3826 /* String [9] and [10,11] together. */
3827 if (align)
3828 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
3829 COND_EXPR_ELSE (addr) = t;
3831 addr = fold_convert (build_pointer_type (type), addr);
3832 addr = build_fold_indirect_ref (addr);
3835 if (indirect)
3836 addr = build_fold_indirect_ref (addr);
3838 return addr;
3841 /* Return true if it is possible to use left/right accesses for a
3842 bitfield of WIDTH bits starting BITPOS bits into *OP. When
3843 returning true, update *OP, *LEFT and *RIGHT as follows:
3845 *OP is a BLKmode reference to the whole field.
3847 *LEFT is a QImode reference to the first byte if big endian or
3848 the last byte if little endian. This address can be used in the
3849 left-side instructions (lwl, swl, ldl, sdl).
3851 *RIGHT is a QImode reference to the opposite end of the field and
3852 can be used in the partnering right-side instruction. */
3854 static bool
3855 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
3856 rtx *left, rtx *right)
3858 rtx first, last;
3860 /* Check that the operand really is a MEM. Not all the extv and
3861 extzv predicates are checked. */
3862 if (!MEM_P (*op))
3863 return false;
3865 /* Check that the size is valid. */
3866 if (width != 32 && (!TARGET_64BIT || width != 64))
3867 return false;
3869 /* We can only access byte-aligned values. Since we are always passed
3870 a reference to the first byte of the field, it is not necessary to
3871 do anything with BITPOS after this check. */
3872 if (bitpos % BITS_PER_UNIT != 0)
3873 return false;
3875 /* Reject aligned bitfields: we want to use a normal load or store
3876 instead of a left/right pair. */
3877 if (MEM_ALIGN (*op) >= width)
3878 return false;
3880 /* Adjust *OP to refer to the whole field. This also has the effect
3881 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
3882 *op = adjust_address (*op, BLKmode, 0);
3883 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
3885 /* Get references to both ends of the field. We deliberately don't
3886 use the original QImode *OP for FIRST since the new BLKmode one
3887 might have a simpler address. */
3888 first = adjust_address (*op, QImode, 0);
3889 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
3891 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
3892 be the upper word and RIGHT the lower word. */
3893 if (TARGET_BIG_ENDIAN)
3894 *left = first, *right = last;
3895 else
3896 *left = last, *right = first;
3898 return true;
3902 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
3903 Return true on success. We only handle cases where zero_extract is
3904 equivalent to sign_extract. */
3906 bool
3907 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
3909 rtx left, right, temp;
3911 /* If TARGET_64BIT, the destination of a 32-bit load will be a
3912 paradoxical word_mode subreg. This is the only case in which
3913 we allow the destination to be larger than the source. */
3914 if (GET_CODE (dest) == SUBREG
3915 && GET_MODE (dest) == DImode
3916 && SUBREG_BYTE (dest) == 0
3917 && GET_MODE (SUBREG_REG (dest)) == SImode)
3918 dest = SUBREG_REG (dest);
3920 /* After the above adjustment, the destination must be the same
3921 width as the source. */
3922 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
3923 return false;
3925 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
3926 return false;
3928 temp = gen_reg_rtx (GET_MODE (dest));
3929 if (GET_MODE (dest) == DImode)
3931 emit_insn (gen_mov_ldl (temp, src, left));
3932 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
3934 else
3936 emit_insn (gen_mov_lwl (temp, src, left));
3937 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
3939 return true;
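/* As a rough illustration of the expansion above: on a big-endian
   32-bit target, loading an unaligned word at address $a0 typically
   becomes something like

	lwl	$t0, 0($a0)
	lwr	$t0, 3($a0)

   with the two byte references supplied by mips_get_unaligned_mem;
   little-endian targets swap which end each instruction addresses.  */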
3943 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
3944 true on success. */
3946 bool
3947 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
3949 rtx left, right;
3951 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
3952 return false;
3954 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
3956 if (GET_MODE (src) == DImode)
3958 emit_insn (gen_mov_sdl (dest, src, left));
3959 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
3961 else
3963 emit_insn (gen_mov_swl (dest, src, left));
3964 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
3966 return true;
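/* Likewise, a rough sketch of the store expansion: an unaligned word
   store of $t0 to address $a0 on a big-endian target comes out roughly
   as

	swl	$t0, 0($a0)
	swr	$t0, 3($a0)

   with sdl/sdr used instead for DImode values.  */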
3969 /* Set up globals to generate code for the ISA or processor
3970 described by INFO. */
3972 static void
3973 mips_set_architecture (const struct mips_cpu_info *info)
3975 if (info != 0)
3977 mips_arch_info = info;
3978 mips_arch = info->cpu;
3979 mips_isa = info->isa;
3984 /* Likewise for tuning. */
3986 static void
3987 mips_set_tune (const struct mips_cpu_info *info)
3989 if (info != 0)
3991 mips_tune_info = info;
3992 mips_tune = info->cpu;
3997 /* Set up the threshold for data to go into the small data area, instead
3998 of the normal data area, and detect any conflicts in the switches. */
4000 void
4001 override_options (void)
4003 int i, start, regno;
4004 enum machine_mode mode;
4006 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4008 /* Interpret -mabi. */
4009 mips_abi = MIPS_ABI_DEFAULT;
4010 if (mips_abi_string != 0)
4012 if (strcmp (mips_abi_string, "32") == 0)
4013 mips_abi = ABI_32;
4014 else if (strcmp (mips_abi_string, "o64") == 0)
4015 mips_abi = ABI_O64;
4016 else if (strcmp (mips_abi_string, "n32") == 0)
4017 mips_abi = ABI_N32;
4018 else if (strcmp (mips_abi_string, "64") == 0)
4019 mips_abi = ABI_64;
4020 else if (strcmp (mips_abi_string, "eabi") == 0)
4021 mips_abi = ABI_EABI;
4022 else
4023 fatal_error ("bad value (%s) for -mabi= switch", mips_abi_string);
4026 /* The following code determines the architecture and register size.
4027 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4028 The GAS and GCC code should be kept in sync as much as possible. */
4030 if (mips_arch_string != 0)
4031 mips_set_architecture (mips_parse_cpu ("-march", mips_arch_string));
4033 if (mips_isa_string != 0)
4035 /* Handle -mipsN. */
4036 char *whole_isa_str = concat ("mips", mips_isa_string, NULL);
4037 const struct mips_cpu_info *isa_info;
4039 isa_info = mips_parse_cpu ("-mips option", whole_isa_str);
4040 free (whole_isa_str);
4042 /* -march takes precedence over -mipsN, since it is more descriptive.
4043 There's no harm in specifying both as long as the ISA levels
4044 are the same. */
4045 if (mips_arch_info != 0 && mips_isa != isa_info->isa)
4046 error ("-mips%s conflicts with the other architecture options, "
4047 "which specify a MIPS%d processor",
4048 mips_isa_string, mips_isa);
4050 /* Set architecture based on the given option. */
4051 mips_set_architecture (isa_info);
4054 if (mips_arch_info == 0)
4056 #ifdef MIPS_CPU_STRING_DEFAULT
4057 mips_set_architecture (mips_parse_cpu ("default CPU",
4058 MIPS_CPU_STRING_DEFAULT));
4059 #else
4060 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4061 #endif
4064 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4065 error ("-march=%s is not compatible with the selected ABI",
4066 mips_arch_info->name);
4068 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4069 if (mips_tune_string != 0)
4070 mips_set_tune (mips_parse_cpu ("-mtune", mips_tune_string));
4072 if (mips_tune_info == 0)
4073 mips_set_tune (mips_arch_info);
4075 if ((target_flags_explicit & MASK_64BIT) != 0)
4077 /* The user specified the size of the integer registers. Make sure
4078 it agrees with the ABI and ISA. */
4079 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4080 error ("-mgp64 used with a 32-bit processor");
4081 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4082 error ("-mgp32 used with a 64-bit ABI");
4083 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4084 error ("-mgp64 used with a 32-bit ABI");
4086 else
4088 /* Infer the integer register size from the ABI and processor.
4089 Restrict ourselves to 32-bit registers if that's all the
4090 processor has, or if the ABI cannot handle 64-bit registers. */
4091 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4092 target_flags &= ~MASK_64BIT;
4093 else
4094 target_flags |= MASK_64BIT;
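  /* A rough illustration of the inference above: "-march=mips64" with
     an o32 default ABI leaves MASK_64BIT clear because that ABI needs
     32-bit registers, whereas "-mabi=64" on a 64-bit ISA sets it.  The
     exact default depends on how the compiler was configured.  */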
4097 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4099 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4100 only one right answer here. */
4101 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4102 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4103 else if (!TARGET_64BIT && TARGET_FLOAT64)
4104 error ("unsupported combination: %s", "-mgp32 -mfp64");
4105 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4106 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4108 else
4110 /* -msingle-float selects 32-bit float registers. Otherwise the
4111 float registers should be the same size as the integer ones. */
4112 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4113 target_flags |= MASK_FLOAT64;
4114 else
4115 target_flags &= ~MASK_FLOAT64;
4118 /* End of code shared with GAS. */
4120 if ((target_flags_explicit & MASK_LONG64) == 0)
4122 /* If no type size setting options (-mlong64,-mint64,-mlong32)
4123 were used, then set the type sizes. In the EABI in 64 bit mode,
4124 longs and pointers are 64 bits. Likewise for the SGI Irix6 N64
4125 ABI. */
4126 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4127 target_flags |= MASK_LONG64;
4128 else
4129 target_flags &= ~MASK_LONG64;
4132 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4133 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4135 /* For some configurations, it is useful to have -march control
4136 the default setting of MASK_SOFT_FLOAT. */
4137 switch ((int) mips_arch)
4139 case PROCESSOR_R4100:
4140 case PROCESSOR_R4111:
4141 case PROCESSOR_R4120:
4142 case PROCESSOR_R4130:
4143 target_flags |= MASK_SOFT_FLOAT;
4144 break;
4146 default:
4147 target_flags &= ~MASK_SOFT_FLOAT;
4148 break;
4152 if (!TARGET_OLDABI)
4153 flag_pcc_struct_return = 0;
4155 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4157 /* If neither -mbranch-likely nor -mno-branch-likely was given
4158 on the command line, set MASK_BRANCHLIKELY based on the target
4159 architecture.
4161 By default, we enable use of Branch Likely instructions on
4162 all architectures which support them with the following
4163 exceptions: when creating MIPS32 or MIPS64 code, and when
4164 tuning for architectures where their use tends to hurt
4165 performance.
4167 The MIPS32 and MIPS64 architecture specifications say "Software
4168 is strongly encouraged to avoid use of Branch Likely
4169 instructions, as they will be removed from a future revision
4170 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4171 issue those instructions unless instructed to do so by
4172 -mbranch-likely. */
4173 if (ISA_HAS_BRANCHLIKELY
4174 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4175 && !(TUNE_MIPS5500 || TUNE_SB1))
4176 target_flags |= MASK_BRANCHLIKELY;
4177 else
4178 target_flags &= ~MASK_BRANCHLIKELY;
4180 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4181 warning ("generation of Branch Likely instructions enabled, but not supported by architecture");
4183 /* The effect of -mabicalls isn't defined for the EABI. */
4184 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4186 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4187 target_flags &= ~MASK_ABICALLS;
4190 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4191 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4192 /* ??? -non_shared turns off pic code generation, but this is not
4193 implemented. */
4194 if (TARGET_ABICALLS)
4196 flag_pic = 1;
4197 if (mips_section_threshold > 0)
4198 warning ("-G is incompatible with PIC code which is the default");
4201 /* mips_split_addresses is a half-way house between explicit
4202 relocations and the traditional assembler macros. It can
4203 split absolute 32-bit symbolic constants into a high/lo_sum
4204 pair but uses macros for other sorts of access.
4206 Like explicit relocation support for REL targets, it relies
4207 on GNU extensions in the assembler and the linker.
4209 Although this code should work for -O0, it has traditionally
4210 been treated as an optimization. */
4211 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4212 && optimize && !flag_pic
4213 && !ABI_HAS_64BIT_SYMBOLS)
4214 mips_split_addresses = 1;
4215 else
4216 mips_split_addresses = 0;
4218 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4219 faster code, but at the expense of more nops. Enable it at -O3 and
4220 above. */
4221 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4222 target_flags |= MASK_VR4130_ALIGN;
4224 /* When compiling for the mips16, we cannot use floating point. We
4225 record the original hard float value in mips16_hard_float. */
4226 if (TARGET_MIPS16)
4228 if (TARGET_SOFT_FLOAT)
4229 mips16_hard_float = 0;
4230 else
4231 mips16_hard_float = 1;
4232 target_flags |= MASK_SOFT_FLOAT;
4234 /* Don't run the scheduler before reload, since it tends to
4235 increase register pressure. */
4236 flag_schedule_insns = 0;
4238 /* Don't do hot/cold partitioning. The constant layout code expects
4239 the whole function to be in a single section. */
4240 flag_reorder_blocks_and_partition = 0;
4242 /* Silently disable -mexplicit-relocs since it doesn't apply
4243 to mips16 code. Even so, it would be overly pedantic to warn
4244 about "-mips16 -mexplicit-relocs", especially given that
4245 we use a %gprel() operator. */
4246 target_flags &= ~MASK_EXPLICIT_RELOCS;
4249 /* When using explicit relocs, we call dbr_schedule from within
4250 mips_reorg. */
4251 if (TARGET_EXPLICIT_RELOCS)
4253 mips_flag_delayed_branch = flag_delayed_branch;
4254 flag_delayed_branch = 0;
4257 #ifdef MIPS_TFMODE_FORMAT
4258 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4259 #endif
4261 /* Make sure that the user didn't turn off paired single support when
4262 MIPS-3D support is requested. */
4263 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE)
4264 && !TARGET_PAIRED_SINGLE_FLOAT)
4265 error ("-mips3d requires -mpaired-single");
4267 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE. */
4268 if (TARGET_MIPS3D)
4269 target_flags |= MASK_PAIRED_SINGLE;
4271 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4272 and TARGET_HARD_FLOAT are both true. */
4273 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4274 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4276 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4277 enabled. */
4278 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4279 error ("-mips3d/-mpaired-single must be used with -mips64");
4281 mips_print_operand_punct['?'] = 1;
4282 mips_print_operand_punct['#'] = 1;
4283 mips_print_operand_punct['/'] = 1;
4284 mips_print_operand_punct['&'] = 1;
4285 mips_print_operand_punct['!'] = 1;
4286 mips_print_operand_punct['*'] = 1;
4287 mips_print_operand_punct['@'] = 1;
4288 mips_print_operand_punct['.'] = 1;
4289 mips_print_operand_punct['('] = 1;
4290 mips_print_operand_punct[')'] = 1;
4291 mips_print_operand_punct['['] = 1;
4292 mips_print_operand_punct[']'] = 1;
4293 mips_print_operand_punct['<'] = 1;
4294 mips_print_operand_punct['>'] = 1;
4295 mips_print_operand_punct['{'] = 1;
4296 mips_print_operand_punct['}'] = 1;
4297 mips_print_operand_punct['^'] = 1;
4298 mips_print_operand_punct['$'] = 1;
4299 mips_print_operand_punct['+'] = 1;
4300 mips_print_operand_punct['~'] = 1;
4302 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4303 mips_char_to_class['t'] = T_REG;
4304 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4305 mips_char_to_class['h'] = HI_REG;
4306 mips_char_to_class['l'] = LO_REG;
4307 mips_char_to_class['x'] = MD_REGS;
4308 mips_char_to_class['b'] = ALL_REGS;
4309 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4310 TARGET_MIPS16 ? M16_NA_REGS :
4311 GR_REGS);
4312 mips_char_to_class['e'] = LEA_REGS;
4313 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4314 mips_char_to_class['y'] = GR_REGS;
4315 mips_char_to_class['z'] = ST_REGS;
4316 mips_char_to_class['B'] = COP0_REGS;
4317 mips_char_to_class['C'] = COP2_REGS;
4318 mips_char_to_class['D'] = COP3_REGS;
4320 /* Set up array to map GCC register number to debug register number.
4321 Ignore the special purpose register numbers. */
4323 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4324 mips_dbx_regno[i] = -1;
4326 start = GP_DBX_FIRST - GP_REG_FIRST;
4327 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4328 mips_dbx_regno[i] = i + start;
4330 start = FP_DBX_FIRST - FP_REG_FIRST;
4331 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4332 mips_dbx_regno[i] = i + start;
4334 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4335 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4337 /* Set up array giving whether a given register can hold a given mode. */
4339 for (mode = VOIDmode;
4340 mode != MAX_MACHINE_MODE;
4341 mode = (enum machine_mode) ((int)mode + 1))
4343 register int size = GET_MODE_SIZE (mode);
4344 register enum mode_class class = GET_MODE_CLASS (mode);
4346 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4348 register int temp;
4350 if (mode == CCV2mode)
4351 temp = (ISA_HAS_8CC
4352 && ST_REG_P (regno)
4353 && (regno - ST_REG_FIRST) % 2 == 0);
4355 else if (mode == CCV4mode)
4356 temp = (ISA_HAS_8CC
4357 && ST_REG_P (regno)
4358 && (regno - ST_REG_FIRST) % 4 == 0);
4360 else if (mode == CCmode)
4362 if (! ISA_HAS_8CC)
4363 temp = (regno == FPSW_REGNUM);
4364 else
4365 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4366 || FP_REG_P (regno));
4369 else if (GP_REG_P (regno))
4370 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4372 else if (FP_REG_P (regno))
4373 temp = ((regno % FP_INC) == 0)
4374 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4375 || class == MODE_VECTOR_FLOAT)
4376 && size <= UNITS_PER_FPVALUE)
4377 /* Allow integer modes that fit into a single
4378 register. We need to put integers into FPRs
4379 when using instructions like cvt and trunc. */
4380 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4381 /* Allow TFmode for CCmode reloads. */
4382 || (ISA_HAS_8CC && mode == TFmode));
4384 else if (MD_REG_P (regno))
4385 temp = (INTEGRAL_MODE_P (mode)
4386 && (size <= UNITS_PER_WORD
4387 || (regno == MD_REG_FIRST
4388 && size == 2 * UNITS_PER_WORD)));
4390 else if (ALL_COP_REG_P (regno))
4391 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4392 else
4393 temp = 0;
4395 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4399 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4400 initialized yet, so we can't use that here. */
4401 gpr_mode = TARGET_64BIT ? DImode : SImode;
4403 /* Provide default values for align_* for 64-bit targets. */
4404 if (TARGET_64BIT && !TARGET_MIPS16)
4406 if (align_loops == 0)
4407 align_loops = 8;
4408 if (align_jumps == 0)
4409 align_jumps = 8;
4410 if (align_functions == 0)
4411 align_functions = 8;
4414 /* Function to allocate machine-dependent function status. */
4415 init_machine_status = &mips_init_machine_status;
4417 if (ABI_HAS_64BIT_SYMBOLS)
4419 if (TARGET_EXPLICIT_RELOCS)
4421 mips_split_p[SYMBOL_64_HIGH] = true;
4422 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4423 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4425 mips_split_p[SYMBOL_64_MID] = true;
4426 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4427 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4429 mips_split_p[SYMBOL_64_LOW] = true;
4430 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4431 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4433 mips_split_p[SYMBOL_GENERAL] = true;
4434 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4437 else
4439 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4441 mips_split_p[SYMBOL_GENERAL] = true;
4442 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4443 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4447 if (TARGET_MIPS16)
4449 /* The high part is provided by a pseudo copy of $gp. */
4450 mips_split_p[SYMBOL_SMALL_DATA] = true;
4451 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4454 if (TARGET_EXPLICIT_RELOCS)
4456 /* Small data constants are kept whole until after reload,
4457 then lowered by mips_rewrite_small_data. */
4458 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4460 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4461 if (TARGET_NEWABI)
4463 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4464 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4466 else
4468 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4469 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4472 if (TARGET_XGOT)
4474 /* The HIGH and LO_SUM are matched by special .md patterns. */
4475 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4477 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4478 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4479 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4481 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4482 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4483 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4485 else
4487 if (TARGET_NEWABI)
4488 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4489 else
4490 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4491 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4495 if (TARGET_NEWABI)
4497 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4498 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4499 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4502 /* Default to working around R4000 errata only if the processor
4503 was selected explicitly. */
4504 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4505 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4506 target_flags |= MASK_FIX_R4000;
4508 /* Default to working around R4400 errata only if the processor
4509 was selected explicitly. */
4510 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4511 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4512 target_flags |= MASK_FIX_R4400;
4515 /* Implement CONDITIONAL_REGISTER_USAGE. */
4517 void
4518 mips_conditional_register_usage (void)
4520 if (!TARGET_HARD_FLOAT)
4522 int regno;
4524 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4525 fixed_regs[regno] = call_used_regs[regno] = 1;
4526 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4527 fixed_regs[regno] = call_used_regs[regno] = 1;
4529 else if (! ISA_HAS_8CC)
4531 int regno;
4533 /* We only have a single condition code register. We
4534 implement this by hiding all the condition code registers,
4535 and generating RTL that refers directly to ST_REG_FIRST. */
4536 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4537 fixed_regs[regno] = call_used_regs[regno] = 1;
4539 /* In mips16 mode, we permit the $t temporary registers to be used
4540 for reload. We prohibit the unused $s registers, since they
4541 are call-saved, and saving them via a mips16 register would
4542 probably waste more time than just reloading the value. */
4543 if (TARGET_MIPS16)
4545 fixed_regs[18] = call_used_regs[18] = 1;
4546 fixed_regs[19] = call_used_regs[19] = 1;
4547 fixed_regs[20] = call_used_regs[20] = 1;
4548 fixed_regs[21] = call_used_regs[21] = 1;
4549 fixed_regs[22] = call_used_regs[22] = 1;
4550 fixed_regs[23] = call_used_regs[23] = 1;
4551 fixed_regs[26] = call_used_regs[26] = 1;
4552 fixed_regs[27] = call_used_regs[27] = 1;
4553 fixed_regs[30] = call_used_regs[30] = 1;
4555 /* fp20-23 are now caller saved. */
4556 if (mips_abi == ABI_64)
4558 int regno;
4559 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4560 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4562 /* Odd registers from fp21 to fp31 are now caller saved. */
4563 if (mips_abi == ABI_N32)
4565 int regno;
4566 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4567 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4571 /* Allocate a chunk of memory for per-function machine-dependent data. */
4572 static struct machine_function *
4573 mips_init_machine_status (void)
4575 return ((struct machine_function *)
4576 ggc_alloc_cleared (sizeof (struct machine_function)));
4579 /* On the mips16, we want to allocate $24 (T_REG) before other
4580 registers for instructions for which it is possible. This helps
4581 avoid shuffling registers around in order to set up for an xor,
4582 encouraging the compiler to use a cmp instead. */
4584 void
4585 mips_order_regs_for_local_alloc (void)
4587 register int i;
4589 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4590 reg_alloc_order[i] = i;
4592 if (TARGET_MIPS16)
4594 /* It really doesn't matter where we put register 0, since it is
4595 a fixed register anyhow. */
4596 reg_alloc_order[0] = 24;
4597 reg_alloc_order[24] = 0;
4602 /* The MIPS debug format wants all automatic variables and arguments
4603 to be in terms of the virtual frame pointer (stack pointer before
4604 any adjustment in the function), while the MIPS 3.0 linker wants
4605 the frame pointer to be the stack pointer after the initial
4606 adjustment. So, we do the adjustment here. The arg pointer (which
4607 is eliminated) points to the virtual frame pointer, while the frame
4608 pointer (which may be eliminated) points to the stack pointer after
4609 the initial adjustments. */
4611 HOST_WIDE_INT
4612 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
4614 rtx offset2 = const0_rtx;
4615 rtx reg = eliminate_constant_term (addr, &offset2);
4617 if (offset == 0)
4618 offset = INTVAL (offset2);
4620 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4621 || reg == hard_frame_pointer_rtx)
4623 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4624 ? compute_frame_size (get_frame_size ())
4625 : cfun->machine->frame.total_size;
4627 /* MIPS16 frame is smaller */
4628 if (frame_pointer_needed && TARGET_MIPS16)
4629 frame_size -= cfun->machine->frame.args_size;
4631 offset = offset - frame_size;
4634 /* sdbout_parms does not want this to crash for unrecognized cases. */
4635 #if 0
4636 else if (reg != arg_pointer_rtx)
4637 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4638 addr);
4639 #endif
4641 return offset;
4644 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4646 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4647 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4648 'h' OP is HIGH, prints %hi(X),
4649 'd' output integer constant in decimal,
4650 'z' if the operand is 0, use $0 instead of normal operand.
4651 'D' print second part of double-word register or memory operand.
4652 'L' print low-order register of double-word register operand.
4653 'M' print high-order register of double-word register operand.
4654 'C' print part of opcode for a branch condition.
4655 'F' print part of opcode for a floating-point branch condition.
4656 'N' print part of opcode for a branch condition, inverted.
4657 'W' print part of opcode for a floating-point branch condition, inverted.
4658 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4659 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4660 't' like 'T', but with the EQ/NE cases reversed
4661 'Y' for a CONST_INT X, print mips_fp_conditions[X]
4662 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
4663 'R' print the reloc associated with LO_SUM
4665 The punctuation characters are:
4667 '(' Turn on .set noreorder
4668 ')' Turn on .set reorder
4669 '[' Turn on .set noat
4670 ']' Turn on .set at
4671 '<' Turn on .set nomacro
4672 '>' Turn on .set macro
4673 '{' Turn on .set volatile (not GAS)
4674 '}' Turn on .set novolatile (not GAS)
4675 '&' Turn on .set noreorder if filling delay slots
4676 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4677 '!' Turn on .set nomacro if filling delay slots
4678 '#' Print nop if in a .set noreorder section.
4679 '/' Like '#', but does nothing within a delayed branch sequence
4680 '?' Print 'l' if we are to use a branch likely instead of a normal branch.
4681 '@' Print the name of the assembler temporary register (at or $1).
4682 '.' Print the name of the register with a hard-wired zero (zero or $0).
4683 '^' Print the name of the pic call-through register (t9 or $25).
4684 '$' Print the name of the stack pointer register (sp or $29).
4685 '+' Print the name of the gp register (usually gp or $28).
4686 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
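/* For instance, in a mips.md output template "%z1" prints the
   hard-wired zero register when operand 1 is a zero constant and the
   register name otherwise, while a "%(" ... "%)" pair brackets the
   output with ".set noreorder" and ".set reorder".  */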
4688 void
4689 print_operand (FILE *file, rtx op, int letter)
4691 register enum rtx_code code;
4693 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4695 switch (letter)
4697 case '?':
4698 if (mips_branch_likely)
4699 putc ('l', file);
4700 break;
4702 case '@':
4703 fputs (reg_names [GP_REG_FIRST + 1], file);
4704 break;
4706 case '^':
4707 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4708 break;
4710 case '.':
4711 fputs (reg_names [GP_REG_FIRST + 0], file);
4712 break;
4714 case '$':
4715 fputs (reg_names[STACK_POINTER_REGNUM], file);
4716 break;
4718 case '+':
4719 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
4720 break;
4722 case '&':
4723 if (final_sequence != 0 && set_noreorder++ == 0)
4724 fputs (".set\tnoreorder\n\t", file);
4725 break;
4727 case '*':
4728 if (final_sequence != 0)
4730 if (set_noreorder++ == 0)
4731 fputs (".set\tnoreorder\n\t", file);
4733 if (set_nomacro++ == 0)
4734 fputs (".set\tnomacro\n\t", file);
4736 break;
4738 case '!':
4739 if (final_sequence != 0 && set_nomacro++ == 0)
4740 fputs ("\n\t.set\tnomacro", file);
4741 break;
4743 case '#':
4744 if (set_noreorder != 0)
4745 fputs ("\n\tnop", file);
4746 break;
4748 case '/':
4749 /* Print an extra newline so that the delayed insn is separated
4750 from the following ones. This looks neater and is consistent
4751 with non-nop delayed sequences. */
4752 if (set_noreorder != 0 && final_sequence == 0)
4753 fputs ("\n\tnop\n", file);
4754 break;
4756 case '(':
4757 if (set_noreorder++ == 0)
4758 fputs (".set\tnoreorder\n\t", file);
4759 break;
4761 case ')':
4762 if (set_noreorder == 0)
4763 error ("internal error: %%) found without a %%( in assembler pattern");
4765 else if (--set_noreorder == 0)
4766 fputs ("\n\t.set\treorder", file);
4768 break;
4770 case '[':
4771 if (set_noat++ == 0)
4772 fputs (".set\tnoat\n\t", file);
4773 break;
4775 case ']':
4776 if (set_noat == 0)
4777 error ("internal error: %%] found without a %%[ in assembler pattern");
4778 else if (--set_noat == 0)
4779 fputs ("\n\t.set\tat", file);
4781 break;
4783 case '<':
4784 if (set_nomacro++ == 0)
4785 fputs (".set\tnomacro\n\t", file);
4786 break;
4788 case '>':
4789 if (set_nomacro == 0)
4790 error ("internal error: %%> found without a %%< in assembler pattern");
4791 else if (--set_nomacro == 0)
4792 fputs ("\n\t.set\tmacro", file);
4794 break;
4796 case '{':
4797 if (set_volatile++ == 0)
4798 fputs ("#.set\tvolatile\n\t", file);
4799 break;
4801 case '}':
4802 if (set_volatile == 0)
4803 error ("internal error: %%} found without a %%{ in assembler pattern");
4804 else if (--set_volatile == 0)
4805 fputs ("\n\t#.set\tnovolatile", file);
4807 break;
4809 case '~':
4811 if (align_labels_log > 0)
4812 ASM_OUTPUT_ALIGN (file, align_labels_log);
4814 break;
4816 default:
4817 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
4818 break;
4821 return;
4824 if (! op)
4826 error ("PRINT_OPERAND null pointer");
4827 return;
4830 code = GET_CODE (op);
4832 if (letter == 'C')
4833 switch (code)
4835 case EQ: fputs ("eq", file); break;
4836 case NE: fputs ("ne", file); break;
4837 case GT: fputs ("gt", file); break;
4838 case GE: fputs ("ge", file); break;
4839 case LT: fputs ("lt", file); break;
4840 case LE: fputs ("le", file); break;
4841 case GTU: fputs ("gtu", file); break;
4842 case GEU: fputs ("geu", file); break;
4843 case LTU: fputs ("ltu", file); break;
4844 case LEU: fputs ("leu", file); break;
4845 default:
4846 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
4849 else if (letter == 'N')
4850 switch (code)
4852 case EQ: fputs ("ne", file); break;
4853 case NE: fputs ("eq", file); break;
4854 case GT: fputs ("le", file); break;
4855 case GE: fputs ("lt", file); break;
4856 case LT: fputs ("ge", file); break;
4857 case LE: fputs ("gt", file); break;
4858 case GTU: fputs ("leu", file); break;
4859 case GEU: fputs ("ltu", file); break;
4860 case LTU: fputs ("geu", file); break;
4861 case LEU: fputs ("gtu", file); break;
4862 default:
4863 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
4866 else if (letter == 'F')
4867 switch (code)
4869 case EQ: fputs ("c1f", file); break;
4870 case NE: fputs ("c1t", file); break;
4871 default:
4872 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
4875 else if (letter == 'W')
4876 switch (code)
4878 case EQ: fputs ("c1t", file); break;
4879 case NE: fputs ("c1f", file); break;
4880 default:
4881 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
4884 else if (letter == 'h')
4886 if (GET_CODE (op) == HIGH)
4887 op = XEXP (op, 0);
4889 print_operand_reloc (file, op, mips_hi_relocs);
4892 else if (letter == 'R')
4893 print_operand_reloc (file, op, mips_lo_relocs);
4895 else if (letter == 'Y')
4897 if (GET_CODE (op) == CONST_INT
4898 && ((unsigned HOST_WIDE_INT) INTVAL (op)
4899 < ARRAY_SIZE (mips_fp_conditions)))
4900 fputs (mips_fp_conditions[INTVAL (op)], file);
4901 else
4902 output_operand_lossage ("invalid %%Y value");
4905 else if (letter == 'Z')
4907 if (ISA_HAS_8CC)
4909 print_operand (file, op, 0);
4910 fputc (',', file);
4914 else if (code == REG || code == SUBREG)
4916 register int regnum;
4918 if (code == REG)
4919 regnum = REGNO (op);
4920 else
4921 regnum = true_regnum (op);
4923 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
4924 || (letter == 'L' && WORDS_BIG_ENDIAN)
4925 || letter == 'D')
4926 regnum++;
4928 fprintf (file, "%s", reg_names[regnum]);
4931 else if (code == MEM)
4933 if (letter == 'D')
4934 output_address (plus_constant (XEXP (op, 0), 4));
4935 else
4936 output_address (XEXP (op, 0));
4939 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
4940 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
4942 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
4943 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
4945 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
4946 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
4948 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
4949 fputs (reg_names[GP_REG_FIRST], file);
4951 else if (letter == 'd' || letter == 'x' || letter == 'X')
4952 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
4954 else if (letter == 'T' || letter == 't')
4956 int truth = (code == NE) == (letter == 'T');
4957 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
4960 else if (CONST_GP_P (op))
4961 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
4963 else
4964 output_addr_const (file, op);
4968 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
4969 RELOCS is the array of relocations to use. */
4971 static void
4972 print_operand_reloc (FILE *file, rtx op, const char **relocs)
4974 enum mips_symbol_type symbol_type;
4975 const char *p;
4976 rtx base;
4977 HOST_WIDE_INT offset;
4979 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
4980 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
4982 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
4983 mips_split_const (op, &base, &offset);
4984 if (UNSPEC_ADDRESS_P (base))
4985 op = plus_constant (UNSPEC_ADDRESS (base), offset);
4987 fputs (relocs[symbol_type], file);
4988 output_addr_const (file, op);
4989 for (p = relocs[symbol_type]; *p != 0; p++)
4990 if (*p == '(')
4991 fputc (')', file);
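/* For example, with RELOCS == mips_lo_relocs and a small-data symbol
   "foo", this prints something like "%gp_rel(foo)"; the loop above
   appends one closing ")" for each "(" in the relocation prefix, which
   matters for multi-level prefixes such as "%lo(%neg(%gp_rel(".  */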
4994 /* Output address operand X to FILE. */
4996 void
4997 print_operand_address (FILE *file, rtx x)
4999 struct mips_address_info addr;
5001 if (mips_classify_address (&addr, x, word_mode, true))
5002 switch (addr.type)
5004 case ADDRESS_REG:
5005 print_operand (file, addr.offset, 0);
5006 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5007 return;
5009 case ADDRESS_LO_SUM:
5010 print_operand (file, addr.offset, 'R');
5011 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5012 return;
5014 case ADDRESS_CONST_INT:
5015 output_addr_const (file, x);
5016 fprintf (file, "(%s)", reg_names[0]);
5017 return;
5019 case ADDRESS_SYMBOLIC:
5020 output_addr_const (file, x);
5021 return;
5023 gcc_unreachable ();
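/* For example, an ADDRESS_REG of $sp plus an offset of 16 prints as
   "16($sp)", an ADDRESS_LO_SUM prints its offset with the 'R'
   relocation prefix, and an ADDRESS_CONST_INT is printed relative to
   register 0, e.g. "400($0)".  The register names shown assume the
   default REGISTER_NAMES.  */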
5026 /* When using assembler macros, keep track of all of small-data externs
5027 so that mips_file_end can emit the appropriate declarations for them.
5029 In most cases it would be safe (though pointless) to emit .externs
5030 for other symbols too. One exception is when an object is within
5031 the -G limit but declared by the user to be in a section other
5032 than .sbss or .sdata. */
5034 int
5035 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5037 register struct extern_list *p;
5039 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5041 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5042 p->next = extern_head;
5043 p->name = name;
5044 p->size = int_size_in_bytes (TREE_TYPE (decl));
5045 extern_head = p;
5048 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5050 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5051 p->next = extern_head;
5052 p->name = name;
5053 p->size = -1;
5054 extern_head = p;
5057 return 0;
5060 #if TARGET_IRIX
5061 static void
5062 irix_output_external_libcall (rtx fun)
5064 register struct extern_list *p;
5066 if (mips_abi == ABI_32)
5068 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5069 p->next = extern_head;
5070 p->name = XSTR (fun, 0);
5071 p->size = -1;
5072 extern_head = p;
5075 #endif
5077 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5078 put out a MIPS ECOFF file and a stab. */
5080 void
5081 mips_output_filename (FILE *stream, const char *name)
5084 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5085 directives. */
5086 if (write_symbols == DWARF2_DEBUG)
5087 return;
5088 else if (mips_output_filename_first_time)
5090 mips_output_filename_first_time = 0;
5091 num_source_filenames += 1;
5092 current_function_file = name;
5093 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5094 output_quoted_string (stream, name);
5095 putc ('\n', stream);
5098 /* If we are emitting stabs, let dbxout.c handle this (except for
5099 the mips_output_filename_first_time case). */
5100 else if (write_symbols == DBX_DEBUG)
5101 return;
5103 else if (name != current_function_file
5104 && strcmp (name, current_function_file) != 0)
5106 num_source_filenames += 1;
5107 current_function_file = name;
5108 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5109 output_quoted_string (stream, name);
5110 putc ('\n', stream);
5114 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5115 that should be written before the opening quote, such as "\t.ascii\t"
5116 for real string data or "\t# " for a comment. */
5118 void
5119 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5120 const char *prefix)
5122 size_t i;
5123 int cur_pos = 17;
5124 register const unsigned char *string =
5125 (const unsigned char *)string_param;
5127 fprintf (stream, "%s\"", prefix);
5128 for (i = 0; i < len; i++)
5130 register int c = string[i];
5132 switch (c)
5134 case '\"':
5135 case '\\':
5136 putc ('\\', stream);
5137 putc (c, stream);
5138 cur_pos += 2;
5139 break;
5141 case TARGET_NEWLINE:
5142 fputs ("\\n", stream);
5143 if (i+1 < len
5144 && (((c = string[i+1]) >= '\040' && c <= '~')
5145 || c == TARGET_TAB))
5146 cur_pos = 32767; /* break right here */
5147 else
5148 cur_pos += 2;
5149 break;
5151 case TARGET_TAB:
5152 fputs ("\\t", stream);
5153 cur_pos += 2;
5154 break;
5156 case TARGET_FF:
5157 fputs ("\\f", stream);
5158 cur_pos += 2;
5159 break;
5161 case TARGET_BS:
5162 fputs ("\\b", stream);
5163 cur_pos += 2;
5164 break;
5166 case TARGET_CR:
5167 fputs ("\\r", stream);
5168 cur_pos += 2;
5169 break;
5171 default:
5172 if (c >= ' ' && c < 0177)
5174 putc (c, stream);
5175 cur_pos++;
5177 else
5179 fprintf (stream, "\\%03o", c);
5180 cur_pos += 4;
5184 if (cur_pos > 72 && i+1 < len)
5186 cur_pos = 17;
5187 fprintf (stream, "\"\n%s\"", prefix);
5190 fprintf (stream, "\"\n");
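/* For instance, calling this with the three characters a, '"', b
   followed by a newline, and the prefix "\t.ascii\t", would emit
   roughly

	.ascii	"a\"b\n"

   with long strings broken into multiple quoted chunks once the
   output column passes 72.  */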
5193 /* Implement TARGET_ASM_FILE_START. */
5195 static void
5196 mips_file_start (void)
5198 default_file_start ();
5200 if (!TARGET_IRIX)
5202 /* Generate a special section to describe the ABI switches used to
5203 produce the resultant binary. This used to be done by the assembler
5204 setting bits in the ELF header's flags field, but we have run out of
5205 bits. GDB needs this information in order to be able to correctly
5206 debug these binaries. See the function mips_gdbarch_init() in
5207 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5208 causes unnecessary IRIX 6 ld warnings. */
5209 const char * abi_string = NULL;
5211 switch (mips_abi)
5213 case ABI_32: abi_string = "abi32"; break;
5214 case ABI_N32: abi_string = "abiN32"; break;
5215 case ABI_64: abi_string = "abi64"; break;
5216 case ABI_O64: abi_string = "abiO64"; break;
5217 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5218 default:
5219 gcc_unreachable ();
5221 /* Note - we use fprintf directly rather than calling named_section()
5222 because in this way we can avoid creating an allocated section. We
5223 do not want this section to take up any space in the running
5224 executable. */
5225 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5227 /* There is no ELF header flag to distinguish long32 forms of the
5228 EABI from long64 forms. Emit a special section to help tools
5229 such as GDB. */
5230 if (mips_abi == ABI_EABI)
5231 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5232 TARGET_LONG64 ? 64 : 32);
5234 /* Restore the default section. */
5235 fprintf (asm_out_file, "\t.previous\n");
5238 /* Generate the pseudo ops that System V.4 wants. */
5239 if (TARGET_ABICALLS)
5240 /* ??? but do not want this (or want pic0) if -non-shared? */
5241 fprintf (asm_out_file, "\t.abicalls\n");
5243 if (TARGET_MIPS16)
5244 fprintf (asm_out_file, "\t.set\tmips16\n");
5246 if (flag_verbose_asm)
5247 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5248 ASM_COMMENT_START,
5249 mips_section_threshold, mips_arch_info->name, mips_isa);
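/* As a rough example of the preamble this emits for a non-IRIX
   -mabi=32 -mabicalls compilation:

	.section .mdebug.abi32
	.previous
	.abicalls

   plus the .gcc_compiled_long section for EABI targets and the
   verbose-asm comment when -fverbose-asm is given.  */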
5252 #ifdef BSS_SECTION_ASM_OP
5253 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5254 in the use of sbss. */
5256 void
5257 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5258 unsigned HOST_WIDE_INT size, int align)
5260 extern tree last_assemble_variable_decl;
5262 if (mips_in_small_data_p (decl))
5263 named_section (0, ".sbss", 0);
5264 else
5265 bss_section ();
5266 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5267 last_assemble_variable_decl = decl;
5268 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5269 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5271 #endif
5273 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5274 .externs for any small-data variables that turned out to be external. */
5276 static void
5277 mips_file_end (void)
5279 tree name_tree;
5280 struct extern_list *p;
5282 if (extern_head)
5284 fputs ("\n", asm_out_file);
5286 for (p = extern_head; p != 0; p = p->next)
5288 name_tree = get_identifier (p->name);
5290 /* Positively ensure only one .extern for any given symbol. */
5291 if (!TREE_ASM_WRITTEN (name_tree)
5292 && TREE_SYMBOL_REFERENCED (name_tree))
5294 TREE_ASM_WRITTEN (name_tree) = 1;
5295 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5296 `.global name .text' directive for every used but
5297 undefined function. If we don't, the linker may perform
5298 an optimization (skipping over the insns that set $gp)
5299 when it is unsafe. */
5300 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5302 fputs ("\t.globl ", asm_out_file);
5303 assemble_name (asm_out_file, p->name);
5304 fputs (" .text\n", asm_out_file);
5306 else
5308 fputs ("\t.extern\t", asm_out_file);
5309 assemble_name (asm_out_file, p->name);
5310 fprintf (asm_out_file, ", %d\n", p->size);
5317 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5318 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5320 void
5321 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5322 unsigned HOST_WIDE_INT size,
5323 unsigned int align)
5325 /* If the target wants uninitialized const declarations in
5326 .rdata then don't put them in .comm. */
5327 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5328 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5329 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5331 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5332 targetm.asm_out.globalize_label (stream, name);
5334 readonly_data_section ();
5335 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5336 mips_declare_object (stream, name, "",
5337 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5338 size);
5340 else
5341 mips_declare_common_object (stream, name, "\n\t.comm\t",
5342 size, align, true);
5345 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5346 NAME is the name of the object and ALIGN is the required alignment
5348 in bits. TAKES_ALIGNMENT_P is true if the directive takes a third
5348 alignment argument. */
5350 void
5351 mips_declare_common_object (FILE *stream, const char *name,
5352 const char *init_string,
5353 unsigned HOST_WIDE_INT size,
5354 unsigned int align, bool takes_alignment_p)
5356 if (!takes_alignment_p)
5358 size += (align / BITS_PER_UNIT) - 1;
5359 size -= size % (align / BITS_PER_UNIT);
5360 mips_declare_object (stream, name, init_string,
5361 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5363 else
5364 mips_declare_object (stream, name, init_string,
5365 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5366 size, align / BITS_PER_UNIT);
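/* A short worked example of the rounding above: a 10-byte object with
   64-bit alignment and !TAKES_ALIGNMENT_P becomes 10 + 7 = 17, minus
   17 % 8 = 1, i.e. ",16" in the directive; with TAKES_ALIGNMENT_P the
   raw size is kept and ",10,8" is printed instead.  */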
5369 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5370 macros, mark the symbol as written so that mips_file_end won't emit an
5371 .extern for it. STREAM is the output file, NAME is the name of the
5372 symbol, INIT_STRING is the string that should be written before the
5373 symbol and FINAL_STRING is the string that should be written after it.
5374 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5376 void
5377 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5378 const char *final_string, ...)
5380 va_list ap;
5382 fputs (init_string, stream);
5383 assemble_name (stream, name);
5384 va_start (ap, final_string);
5385 vfprintf (stream, final_string, ap);
5386 va_end (ap);
5388 if (!TARGET_EXPLICIT_RELOCS)
5390 tree name_tree = get_identifier (name);
5391 TREE_ASM_WRITTEN (name_tree) = 1;
5395 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5396 extern int size_directive_output;
5398 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5399 definitions except that it uses mips_declare_object() to emit the label. */
5401 void
5402 mips_declare_object_name (FILE *stream, const char *name,
5403 tree decl ATTRIBUTE_UNUSED)
5405 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5406 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
5407 #endif
5409 size_directive_output = 0;
5410 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5412 HOST_WIDE_INT size;
5414 size_directive_output = 1;
5415 size = int_size_in_bytes (TREE_TYPE (decl));
5416 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5419 mips_declare_object (stream, name, "", ":\n", 0);
5422 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5424 void
5425 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5427 const char *name;
5429 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5430 if (!flag_inhibit_size_directive
5431 && DECL_SIZE (decl) != 0
5432 && !at_end && top_level
5433 && DECL_INITIAL (decl) == error_mark_node
5434 && !size_directive_output)
5436 HOST_WIDE_INT size;
5438 size_directive_output = 1;
5439 size = int_size_in_bytes (TREE_TYPE (decl));
5440 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5443 #endif
5445 /* Return true if X is a small data address that can be rewritten
5446 as a LO_SUM. */
5448 static bool
5449 mips_rewrite_small_data_p (rtx x)
5451 enum mips_symbol_type symbol_type;
5453 return (TARGET_EXPLICIT_RELOCS
5454 && mips_symbolic_constant_p (x, &symbol_type)
5455 && symbol_type == SYMBOL_SMALL_DATA);
5459 /* A for_each_rtx callback for mips_small_data_pattern_p. */
5461 static int
5462 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5464 if (GET_CODE (*loc) == LO_SUM)
5465 return -1;
5467 return mips_rewrite_small_data_p (*loc);
5470 /* Return true if OP refers to small data symbols directly, not through
5471 a LO_SUM. */
5473 bool
5474 mips_small_data_pattern_p (rtx op)
5476 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5479 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
5481 static int
5482 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5484 if (mips_rewrite_small_data_p (*loc))
5485 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
5487 if (GET_CODE (*loc) == LO_SUM)
5488 return -1;
5490 return 0;
5493 /* If possible, rewrite OP so that it refers to small data using
5494 explicit relocations. */
5496 rtx
5497 mips_rewrite_small_data (rtx op)
5499 op = copy_insn (op);
5500 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5501 return op;
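/* As a rough illustration: after this rewrite, a reference such as
   (mem (symbol_ref "x")) for a small-data "x" becomes
   (mem (lo_sum $gp (symbol_ref "x"))), which the output code prints
   using the "%gp_rel(" relocation installed in override_options, so
   the final assembly looks something like "lw $2,%gp_rel(x)($28)".  */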
5504 /* Return true if the current function has an insn that implicitly
5505 refers to $gp. */
5507 static bool
5508 mips_function_has_gp_insn (void)
5510 /* Don't bother rechecking if we found one last time. */
5511 if (!cfun->machine->has_gp_insn_p)
5513 rtx insn;
5515 push_topmost_sequence ();
5516 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5517 if (INSN_P (insn)
5518 && GET_CODE (PATTERN (insn)) != USE
5519 && GET_CODE (PATTERN (insn)) != CLOBBER
5520 && (get_attr_got (insn) != GOT_UNSET
5521 || small_data_pattern (PATTERN (insn), VOIDmode)))
5522 break;
5523 pop_topmost_sequence ();
5525 cfun->machine->has_gp_insn_p = (insn != 0);
5527 return cfun->machine->has_gp_insn_p;
5531 /* Return the register that should be used as the global pointer
5532 within this function. Return 0 if the function doesn't need
5533 a global pointer. */
5535 static unsigned int
5536 mips_global_pointer (void)
5538 unsigned int regno;
5540 /* $gp is always available in non-abicalls code. */
5541 if (!TARGET_ABICALLS)
5542 return GLOBAL_POINTER_REGNUM;
5544 /* We must always provide $gp when it is used implicitly. */
5545 if (!TARGET_EXPLICIT_RELOCS)
5546 return GLOBAL_POINTER_REGNUM;
5548 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5549 a valid gp. */
5550 if (current_function_profile)
5551 return GLOBAL_POINTER_REGNUM;
5553 /* If the function has a nonlocal goto, $gp must hold the correct
5554 global pointer for the target function. */
5555 if (current_function_has_nonlocal_goto)
5556 return GLOBAL_POINTER_REGNUM;
5558 /* If the gp is never referenced, there's no need to initialize it.
5559 Note that reload can sometimes introduce constant pool references
5560 into a function that otherwise didn't need them. For example,
5561 suppose we have an instruction like:
5563 (set (reg:DF R1) (float:DF (reg:SI R2)))
5565 If R2 turns out to be constant such as 1, the instruction may have a
5566 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5567 using this constant if R2 doesn't get allocated to a register.
5569 In cases like these, reload will have added the constant to the pool
5570 but no instruction will yet refer to it. */
5571 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5572 && !current_function_uses_const_pool
5573 && !mips_function_has_gp_insn ())
5574 return 0;
5576 /* We need a global pointer, but perhaps we can use a call-clobbered
5577 register instead of $gp. */
5578 if (TARGET_NEWABI && current_function_is_leaf)
5579 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5580 if (!regs_ever_live[regno]
5581 && call_used_regs[regno]
5582 && !fixed_regs[regno]
5583 && regno != PIC_FUNCTION_ADDR_REGNUM)
5584 return regno;
5586 return GLOBAL_POINTER_REGNUM;
5590 /* Return true if the current function must save REGNO. */
5592 static bool
5593 mips_save_reg_p (unsigned int regno)
5595 /* We only need to save $gp for NewABI PIC. */
5596 if (regno == GLOBAL_POINTER_REGNUM)
5597 return (TARGET_ABICALLS && TARGET_NEWABI
5598 && cfun->machine->global_pointer == regno);
5600 /* Check call-saved registers. */
5601 if (regs_ever_live[regno] && !call_used_regs[regno])
5602 return true;
5604 /* We need to save the old frame pointer before setting up a new one. */
5605 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5606 return true;
5608 /* We need to save the incoming return address if it is ever clobbered
5609 within the function. */
5610 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
5611 return true;
5613 if (TARGET_MIPS16)
5615 tree return_type;
5617 return_type = DECL_RESULT (current_function_decl);
5619 /* $18 is a special case in mips16 code. It may be used to call
5620 a function which returns a floating point value, but it is
5621 marked in call_used_regs. */
5622 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5623 return true;
5625 /* $31 is also a special case. It will be used to copy a return
5626 value into the floating point registers if the return value is
5627 floating point. */
5628 if (regno == GP_REG_FIRST + 31
5629 && mips16_hard_float
5630 && !aggregate_value_p (return_type, current_function_decl)
5631 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5632 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5633 return true;
5636 return false;
5640 /* Return the bytes needed to compute the frame pointer from the current
5641 stack pointer. SIZE is the size (in bytes) of the local variables.
5643 Mips stack frames look like:
5645 Before call After call
5646 +-----------------------+ +-----------------------+
5647 high | | | |
5648 mem. | | | |
5649 | caller's temps. | | caller's temps. |
5650 | | | |
5651 +-----------------------+ +-----------------------+
5652 | | | |
5653 | arguments on stack. | | arguments on stack. |
5654 | | | |
5655 +-----------------------+ +-----------------------+
5656 | 4 words to save | | 4 words to save |
5657 | arguments passed | | arguments passed |
5658 | in registers, even | | in registers, even |
5659 SP->| if not passed. | VFP->| if not passed. |
5660 +-----------------------+ +-----------------------+
5662 | fp register save |
5664 +-----------------------+
5666 | gp register save |
5668 +-----------------------+
5670 | local variables |
5672 +-----------------------+
5674 | alloca allocations |
5676 +-----------------------+
5678 | GP save for V.4 abi |
5680 +-----------------------+
5682 | arguments on stack |
5684 +-----------------------+
5685 | 4 words to save |
5686 | arguments passed |
5687 | in registers, even |
5688 low SP->| if not passed. |
5689 memory +-----------------------+
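/* Worked example with illustrative figures (8-byte stack alignment,
   o32, non-abicalls): a function with 16 bytes of locals, 16 bytes of
   outgoing arguments, no cprestore slot and two saved GPRs ($16 and
   $31) gets

       var_size      = MIPS_STACK_ALIGN (16) = 16
       args_size     = 16, cprestore_size = 0
       gp_reg_size   = 2 * 4 = 8, fp_reg_size = 0
       total_size    = 16 + 16 + 0 + MIPS_STACK_ALIGN (8) = 40

   so compute_frame_size would report a 40-byte frame.  */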
5693 HOST_WIDE_INT
5694 compute_frame_size (HOST_WIDE_INT size)
5696 unsigned int regno;
5697 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5698 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5699 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5700 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5701 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5702 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5703 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5704 unsigned int mask; /* mask of saved gp registers */
5705 unsigned int fmask; /* mask of saved fp registers */
5707 cfun->machine->global_pointer = mips_global_pointer ();
5709 gp_reg_size = 0;
5710 fp_reg_size = 0;
5711 mask = 0;
5712 fmask = 0;
5713 var_size = MIPS_STACK_ALIGN (size);
5714 args_size = current_function_outgoing_args_size;
5715 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5717 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5718 functions. If the function has local variables, we're committed
5719 to allocating it anyway. Otherwise reclaim it here. */
5720 if (var_size == 0 && current_function_is_leaf)
5721 cprestore_size = args_size = 0;
5723 /* The MIPS 3.0 linker does not like functions that dynamically
5724 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5725 looks like we are trying to create a second frame pointer to the
5726 function, so allocate some stack space to make it happy. */
5728 if (args_size == 0 && current_function_calls_alloca)
5729 args_size = 4 * UNITS_PER_WORD;
5731 total_size = var_size + args_size + cprestore_size;
5733 /* Calculate space needed for gp registers. */
5734 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5735 if (mips_save_reg_p (regno))
5737 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5738 mask |= 1 << (regno - GP_REG_FIRST);
5741 /* We need to restore these for the handler. */
5742 if (current_function_calls_eh_return)
5744 unsigned int i;
5745 for (i = 0; ; ++i)
5747 regno = EH_RETURN_DATA_REGNO (i);
5748 if (regno == INVALID_REGNUM)
5749 break;
5750 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5751 mask |= 1 << (regno - GP_REG_FIRST);
5755 /* This loop must iterate over the same space as its companion in
5756 save_restore_insns. */
5757 for (regno = (FP_REG_LAST - FP_INC + 1);
5758 regno >= FP_REG_FIRST;
5759 regno -= FP_INC)
5761 if (mips_save_reg_p (regno))
5763 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5764 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5768 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5769 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5771 /* Add in space reserved on the stack by the callee for storing arguments
5772 passed in registers. */
5773 if (!TARGET_OLDABI)
5774 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5776 /* Save other computed information. */
5777 cfun->machine->frame.total_size = total_size;
5778 cfun->machine->frame.var_size = var_size;
5779 cfun->machine->frame.args_size = args_size;
5780 cfun->machine->frame.cprestore_size = cprestore_size;
5781 cfun->machine->frame.gp_reg_size = gp_reg_size;
5782 cfun->machine->frame.fp_reg_size = fp_reg_size;
5783 cfun->machine->frame.mask = mask;
5784 cfun->machine->frame.fmask = fmask;
5785 cfun->machine->frame.initialized = reload_completed;
5786 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5787 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
5789 if (mask)
5791 HOST_WIDE_INT offset;
5793 offset = (args_size + cprestore_size + var_size
5794 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
5795 cfun->machine->frame.gp_sp_offset = offset;
5796 cfun->machine->frame.gp_save_offset = offset - total_size;
5798 else
5800 cfun->machine->frame.gp_sp_offset = 0;
5801 cfun->machine->frame.gp_save_offset = 0;
5804 if (fmask)
5806 HOST_WIDE_INT offset;
5808 offset = (args_size + cprestore_size + var_size
5809 + gp_reg_rounded + fp_reg_size
5810 - FP_INC * UNITS_PER_FPREG);
5811 cfun->machine->frame.fp_sp_offset = offset;
5812 cfun->machine->frame.fp_save_offset = offset - total_size;
5814 else
5816 cfun->machine->frame.fp_sp_offset = 0;
5817 cfun->machine->frame.fp_save_offset = 0;
5820 /* Ok, we're done. */
5821 return total_size;
5824 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
5825 pointer or argument pointer. TO is either the stack pointer or
5826 hard frame pointer. */
5828 HOST_WIDE_INT
5829 mips_initial_elimination_offset (int from, int to)
5831 HOST_WIDE_INT offset;
5833 compute_frame_size (get_frame_size ());
5835 /* Set OFFSET to the offset from the stack pointer. */
5836 switch (from)
5838 case FRAME_POINTER_REGNUM:
5839 offset = 0;
5840 break;
5842 case ARG_POINTER_REGNUM:
5843 offset = cfun->machine->frame.total_size;
5844 if (TARGET_NEWABI)
5845 offset -= current_function_pretend_args_size;
5846 break;
5848 default:
5849 gcc_unreachable ();
5852 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
5853 offset -= cfun->machine->frame.args_size;
5855 return offset;
5858 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
5859 back to a previous frame. */
5861 rtx mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5863 if (count != 0)
5864 return const0_rtx;
5866 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
5869 /* Use FN to save or restore register REGNO. MODE is the register's
5870 mode and OFFSET is the offset of its save slot from the current
5871 stack pointer. */
5873 static void
5874 mips_save_restore_reg (enum machine_mode mode, int regno,
5875 HOST_WIDE_INT offset, mips_save_restore_fn fn)
5877 rtx mem;
5879 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
5881 fn (gen_rtx_REG (mode, regno), mem);
5885 /* Call FN for each register that is saved by the current function.
5886 SP_OFFSET is the offset of the current stack pointer from the start
5887 of the frame. */
5889 static void
5890 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
5892 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
5894 enum machine_mode fpr_mode;
5895 HOST_WIDE_INT offset;
5896 int regno;
5898 /* Save registers starting from high to low.  Debuggers prefer the
5899 return register to be stored at func+4, and this also lets us avoid
5900 a nop in the epilogue if at least one register is restored in
5901 addition to the return address. */
5902 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
5903 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
5904 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
5906 mips_save_restore_reg (gpr_mode, regno, offset, fn);
5907 offset -= GET_MODE_SIZE (gpr_mode);
5910 /* This loop must iterate over the same space as its companion in
5911 compute_frame_size. */
5912 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
5913 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
5914 for (regno = (FP_REG_LAST - FP_INC + 1);
5915 regno >= FP_REG_FIRST;
5916 regno -= FP_INC)
5917 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
5919 mips_save_restore_reg (fpr_mode, regno, offset, fn);
5920 offset -= GET_MODE_SIZE (fpr_mode);
5922 #undef BITSET_P
5925 /* If we're generating n32 or n64 abicalls, and the current function
5926 does not use $28 as its global pointer, emit a cplocal directive.
5927 Use pic_offset_table_rtx as the argument to the directive. */
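/* Illustrative example: if the current function's global pointer is
   $15 rather than $28 (as mips_output_mi_thunk arranges for NewABI
   thunks), the directive emitted below would be ".cplocal $15",
   telling the assembler to use $15 when expanding macros that need
   the global pointer.  */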
5929 static void
5930 mips_output_cplocal (void)
5932 if (!TARGET_EXPLICIT_RELOCS
5933 && cfun->machine->global_pointer > 0
5934 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
5935 output_asm_insn (".cplocal %+", 0);
5938 /* If we're generating n32 or n64 abicalls, emit instructions
5939 to set up the global pointer. */
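/* For reference (approximate, n64 flavour): the loadgp pattern emitted
   below expands to something like

       lui     $gp,%hi(%neg(%gp_rel(fn)))
       addiu   $gp,$gp,%lo(%neg(%gp_rel(fn)))
       daddu   $gp,$gp,$25

   i.e. it reconstructs $gp from the incoming function address in $25;
   the exact sequence depends on the ABI and assembler options.  */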
5941 static void
5942 mips_emit_loadgp (void)
5944 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
5946 rtx addr, offset, incoming_address;
5948 addr = XEXP (DECL_RTL (current_function_decl), 0);
5949 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
5950 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
5951 emit_insn (gen_loadgp (offset, incoming_address));
5952 if (!TARGET_EXPLICIT_RELOCS)
5953 emit_insn (gen_loadgp_blockage ());
5957 /* Set up the stack and frame (if desired) for the function. */
5959 static void
5960 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5962 const char *fnname;
5963 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
5965 #ifdef SDB_DEBUGGING_INFO
5966 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
5967 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
5968 #endif
5970 /* In mips16 mode, we may need to generate a 32 bit stub to handle
5971 floating point arguments. The linker will arrange for any 32 bit
5972 functions to call this stub, which will then jump to the 16 bit
5973 function proper. */
5974 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
5975 && current_function_args_info.fp_code != 0)
5976 build_mips16_function_stub (file);
5978 if (!FUNCTION_NAME_ALREADY_DECLARED)
5980 /* Get the function name the same way that toplev.c does before calling
5981 assemble_start_function. This is needed so that the name used here
5982 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
5983 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5985 if (!flag_inhibit_size_directive)
5987 fputs ("\t.ent\t", file);
5988 assemble_name (file, fnname);
5989 fputs ("\n", file);
5992 assemble_name (file, fnname);
5993 fputs (":\n", file);
5996 /* Stop mips_file_end from treating this function as external. */
5997 if (TARGET_IRIX && mips_abi == ABI_32)
5998 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6000 if (!flag_inhibit_size_directive)
6002 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6003 fprintf (file,
6004 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6005 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6006 ", args= " HOST_WIDE_INT_PRINT_DEC
6007 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6008 (reg_names[(frame_pointer_needed)
6009 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6010 ((frame_pointer_needed && TARGET_MIPS16)
6011 ? tsize - cfun->machine->frame.args_size
6012 : tsize),
6013 reg_names[GP_REG_FIRST + 31],
6014 cfun->machine->frame.var_size,
6015 cfun->machine->frame.num_gp,
6016 cfun->machine->frame.num_fp,
6017 cfun->machine->frame.args_size,
6018 cfun->machine->frame.cprestore_size);
6020 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6021 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6022 cfun->machine->frame.mask,
6023 cfun->machine->frame.gp_save_offset);
6024 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6025 cfun->machine->frame.fmask,
6026 cfun->machine->frame.fp_save_offset);
6028 /* Require:
6029 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6030 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
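/* Continuing the illustrative 40-byte frame from compute_frame_size
   above (two saved GPRs, $16 and $31), the directives emitted here
   would be roughly:

       .frame  $sp,40,$31
       .mask   0x80010000,-4
       .fmask  0x00000000,0

   0x80010000 has bits 16 and 31 set, and -4 is gp_save_offset: the
   highest save slot sits 4 bytes below the top of the frame.  */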
6033 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
6035 /* Handle the initialization of $gp for SVR4 PIC. */
6036 if (!cfun->machine->all_noreorder_p)
6037 output_asm_insn ("%(.cpload\t%^%)", 0);
6038 else
6039 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6041 else if (cfun->machine->all_noreorder_p)
6042 output_asm_insn ("%(%<", 0);
6044 /* Tell the assembler which register we're using as the global
6045 pointer. This is needed for thunks, since they can use either
6046 explicit relocs or assembler macros. */
6047 mips_output_cplocal ();
6050 /* Make the last instruction frame related and note that it performs
6051 the operation described by FRAME_PATTERN. */
6053 static void
6054 mips_set_frame_expr (rtx frame_pattern)
6056 rtx insn;
6058 insn = get_last_insn ();
6059 RTX_FRAME_RELATED_P (insn) = 1;
6060 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6061 frame_pattern,
6062 REG_NOTES (insn));
6066 /* Return a frame-related rtx that stores REG at MEM.
6067 REG must be a single register. */
6069 static rtx
6070 mips_frame_set (rtx mem, rtx reg)
6072 rtx set = gen_rtx_SET (VOIDmode, mem, reg);
6073 RTX_FRAME_RELATED_P (set) = 1;
6074 return set;
6078 /* Save register REG to MEM. Make the instruction frame-related. */
6080 static void
6081 mips_save_reg (rtx reg, rtx mem)
6083 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6085 rtx x1, x2;
6087 if (mips_split_64bit_move_p (mem, reg))
6088 mips_split_64bit_move (mem, reg);
6089 else
6090 emit_move_insn (mem, reg);
6092 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6093 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6094 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6096 else
6098 if (TARGET_MIPS16
6099 && REGNO (reg) != GP_REG_FIRST + 31
6100 && !M16_REG_P (REGNO (reg)))
6102 /* Save a non-mips16 register by moving it through a temporary.
6103 We don't need to do this for $31 since there's a special
6104 instruction for it. */
6105 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6106 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6108 else
6109 emit_move_insn (mem, reg);
6111 mips_set_frame_expr (mips_frame_set (mem, reg));
6116 /* Expand the prologue into a bunch of separate insns. */
6118 void
6119 mips_expand_prologue (void)
6121 HOST_WIDE_INT size;
6123 if (cfun->machine->global_pointer > 0)
6124 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6126 size = compute_frame_size (get_frame_size ());
6128 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6129 bytes beforehand; this is enough to cover the register save area
6130 without going out of range. */
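/* Illustrative example: for a 40016-byte frame on a !TARGET_MIPS16
   target, STEP1 is capped at MIPS_MAX_FIRST_STACK_STEP (0x7ff0 =
   32752 bytes); the registers are saved within that first chunk and
   the remaining 7264 bytes are allocated by the code further down,
   which falls back to a temporary register whenever the amount is
   not a SMALL_OPERAND.  */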
6131 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6133 HOST_WIDE_INT step1;
6135 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6136 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6137 stack_pointer_rtx,
6138 GEN_INT (-step1)))) = 1;
6139 size -= step1;
6140 mips_for_each_saved_reg (size, mips_save_reg);
6143 /* Allocate the rest of the frame. */
6144 if (size > 0)
6146 if (SMALL_OPERAND (-size))
6147 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6148 stack_pointer_rtx,
6149 GEN_INT (-size)))) = 1;
6150 else
6152 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6153 if (TARGET_MIPS16)
6155 /* There are no instructions to add or subtract registers
6156 from the stack pointer, so use the frame pointer as a
6157 temporary. We should always be using a frame pointer
6158 in this case anyway. */
6159 gcc_assert (frame_pointer_needed);
6160 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6161 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6162 hard_frame_pointer_rtx,
6163 MIPS_PROLOGUE_TEMP (Pmode)));
6164 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6166 else
6167 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6168 stack_pointer_rtx,
6169 MIPS_PROLOGUE_TEMP (Pmode)));
6171 /* Describe the combined effect of the previous instructions. */
6172 mips_set_frame_expr
6173 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6174 plus_constant (stack_pointer_rtx, -size)));
6178 /* Set up the frame pointer, if we're using one. In mips16 code,
6179 we point the frame pointer ahead of the outgoing argument area.
6180 This should allow more variables & incoming arguments to be
6181 accessed with unextended instructions. */
6182 if (frame_pointer_needed)
6184 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6186 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6187 RTX_FRAME_RELATED_P
6188 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6189 stack_pointer_rtx,
6190 offset))) = 1;
6192 else
6193 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6194 stack_pointer_rtx)) = 1;
6197 /* If generating o32/o64 abicalls, save $gp on the stack. */
6198 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6199 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6201 mips_emit_loadgp ();
6203 /* If we are profiling, make sure no instructions are scheduled before
6204 the call to mcount. */
6206 if (current_function_profile)
6207 emit_insn (gen_blockage ());
6210 /* Do any necessary cleanup after a function to restore stack, frame,
6211 and regs. */
6213 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6215 static void
6216 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6217 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6219 /* Reinstate the normal $gp. */
6220 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6221 mips_output_cplocal ();
6223 if (cfun->machine->all_noreorder_p)
6225 /* Avoid using %>%) since it adds excess whitespace. */
6226 output_asm_insn (".set\tmacro", 0);
6227 output_asm_insn (".set\treorder", 0);
6228 set_noreorder = set_nomacro = 0;
6231 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6233 const char *fnname;
6235 /* Get the function name the same way that toplev.c does before calling
6236 assemble_start_function. This is needed so that the name used here
6237 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6238 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6239 fputs ("\t.end\t", file);
6240 assemble_name (file, fnname);
6241 fputs ("\n", file);
6245 /* Emit instructions to restore register REG from slot MEM. */
6247 static void
6248 mips_restore_reg (rtx reg, rtx mem)
6250 /* There's no mips16 instruction to load $31 directly. Load into
6251 $7 instead and adjust the return insn appropriately. */
6252 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6253 reg = gen_rtx_REG (GET_MODE (reg), 7);
6255 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6257 /* Can't restore directly; move through a temporary. */
6258 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6259 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6261 else
6262 emit_move_insn (reg, mem);
6266 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6267 if this epilogue precedes a sibling call, false if it is for a normal
6268 "epilogue" pattern. */
6270 void
6271 mips_expand_epilogue (int sibcall_p)
6273 HOST_WIDE_INT step1, step2;
6274 rtx base, target;
6276 if (!sibcall_p && mips_can_use_return_insn ())
6278 emit_jump_insn (gen_return ());
6279 return;
6282 /* Split the frame into two. STEP1 is the amount of stack we should
6283 deallocate before restoring the registers. STEP2 is the amount we
6284 should deallocate afterwards.
6286 Start off by assuming that no registers need to be restored. */
6287 step1 = cfun->machine->frame.total_size;
6288 step2 = 0;
6290 /* Work out which register holds the frame address. Account for the
6291 frame pointer offset used by mips16 code. */
6292 if (!frame_pointer_needed)
6293 base = stack_pointer_rtx;
6294 else
6296 base = hard_frame_pointer_rtx;
6297 if (TARGET_MIPS16)
6298 step1 -= cfun->machine->frame.args_size;
6301 /* If we need to restore registers, deallocate as much stack as
6302 possible in the second step without going out of range. */
6303 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6305 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6306 step1 -= step2;
6309 /* Set TARGET to BASE + STEP1. */
6310 target = base;
6311 if (step1 > 0)
6313 rtx adjust;
6315 /* Get an rtx for STEP1 that we can add to BASE. */
6316 adjust = GEN_INT (step1);
6317 if (!SMALL_OPERAND (step1))
6319 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6320 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6323 /* Normal mode code can copy the result straight into $sp. */
6324 if (!TARGET_MIPS16)
6325 target = stack_pointer_rtx;
6327 emit_insn (gen_add3_insn (target, base, adjust));
6330 /* Copy TARGET into the stack pointer. */
6331 if (target != stack_pointer_rtx)
6332 emit_move_insn (stack_pointer_rtx, target);
6334 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6335 implicitly used by all SYMBOL_REFs. We must emit a blockage
6336 insn before restoring it. */
6337 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6338 emit_insn (gen_blockage ());
6340 /* Restore the registers. */
6341 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6342 mips_restore_reg);
6344 /* Deallocate the final bit of the frame. */
6345 if (step2 > 0)
6346 emit_insn (gen_add3_insn (stack_pointer_rtx,
6347 stack_pointer_rtx,
6348 GEN_INT (step2)));
6350 /* Add in the __builtin_eh_return stack adjustment. We need to
6351 use a temporary in mips16 code. */
6352 if (current_function_calls_eh_return)
6354 if (TARGET_MIPS16)
6356 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6357 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6358 MIPS_EPILOGUE_TEMP (Pmode),
6359 EH_RETURN_STACKADJ_RTX));
6360 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6362 else
6363 emit_insn (gen_add3_insn (stack_pointer_rtx,
6364 stack_pointer_rtx,
6365 EH_RETURN_STACKADJ_RTX));
6368 if (!sibcall_p)
6370 /* The mips16 loads the return address into $7, not $31. */
6371 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6372 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6373 GP_REG_FIRST + 7)));
6374 else
6375 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6376 GP_REG_FIRST + 31)));
6380 /* Return nonzero if this function is known to have a null epilogue.
6381 This allows the optimizer to omit jumps to jumps if no stack
6382 was created. */
6385 int mips_can_use_return_insn (void)
6387 tree return_type;
6389 if (! reload_completed)
6390 return 0;
6392 if (regs_ever_live[31] || current_function_profile)
6393 return 0;
6395 return_type = DECL_RESULT (current_function_decl);
6397 /* In mips16 mode, a function which returns a floating point value
6398 needs to arrange to copy the return value into the floating point
6399 registers. */
6400 if (TARGET_MIPS16
6401 && mips16_hard_float
6402 && ! aggregate_value_p (return_type, current_function_decl)
6403 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6404 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6405 return 0;
6407 if (cfun->machine->frame.initialized)
6408 return cfun->machine->frame.total_size == 0;
6410 return compute_frame_size (get_frame_size ()) == 0;
6413 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6414 in order to avoid duplicating too much logic from elsewhere. */
6416 static void
6417 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6418 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6419 tree function)
6421 rtx this, temp1, temp2, insn, fnaddr;
6423 /* Pretend to be a post-reload pass while generating rtl. */
6424 no_new_pseudos = 1;
6425 reload_completed = 1;
6426 reset_block_changes ();
6428 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6429 for TARGET_NEWABI since the latter is a call-saved register. */
6430 if (TARGET_ABICALLS)
6431 cfun->machine->global_pointer
6432 = REGNO (pic_offset_table_rtx)
6433 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6435 /* Set up the global pointer for n32 or n64 abicalls. */
6436 mips_emit_loadgp ();
6438 /* We need two temporary registers in some cases. */
6439 temp1 = gen_rtx_REG (Pmode, 2);
6440 temp2 = gen_rtx_REG (Pmode, 3);
6442 /* Find out which register contains the "this" pointer. */
6443 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6444 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
6445 else
6446 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6448 /* Add DELTA to THIS. */
6449 if (delta != 0)
6451 rtx offset = GEN_INT (delta);
6452 if (!SMALL_OPERAND (delta))
6454 emit_move_insn (temp1, offset);
6455 offset = temp1;
6457 emit_insn (gen_add3_insn (this, this, offset));
6460 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6461 if (vcall_offset != 0)
6463 rtx addr;
6465 /* Set TEMP1 to *THIS. */
6466 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6468 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6469 addr = mips_add_offset (temp2, temp1, vcall_offset);
6471 /* Load the offset and add it to THIS. */
6472 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6473 emit_insn (gen_add3_insn (this, this, temp1));
6476 /* Jump to the target function. Use a sibcall if direct jumps are
6477 allowed, otherwise load the address into a register first. */
6478 fnaddr = XEXP (DECL_RTL (function), 0);
6479 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6481 /* This is messy. gas treats "la $25,foo" as part of a call
6482 sequence and may allow a global "foo" to be lazily bound.
6483 The general move patterns therefore reject this combination.
6485 In this context, lazy binding would actually be OK for o32 and o64,
6486 but it's still wrong for n32 and n64; see mips_load_call_address.
6487 We must therefore load the address via a temporary register if
6488 mips_dangerous_for_la25_p.
6490 If we jump to the temporary register rather than $25, the assembler
6491 can use the move insn to fill the jump's delay slot. */
6492 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6493 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6494 mips_load_call_address (temp1, fnaddr, true);
6496 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6497 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6498 emit_jump_insn (gen_indirect_jump (temp1));
6500 else
6502 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6503 SIBLING_CALL_P (insn) = 1;
6506 /* Run just enough of rest_of_compilation. This sequence was
6507 "borrowed" from alpha.c. */
6508 insn = get_insns ();
6509 insn_locators_initialize ();
6510 split_all_insns_noflow ();
6511 if (TARGET_MIPS16)
6512 mips16_lay_out_constants ();
6513 shorten_branches (insn);
6514 final_start_function (insn, file, 1);
6515 final (insn, file, 1, 0);
6516 final_end_function ();
6518 /* Clean up the vars set above. Note that final_end_function resets
6519 the global pointer for us. */
6520 reload_completed = 0;
6521 no_new_pseudos = 0;
6524 /* Returns nonzero if X contains a SYMBOL_REF. */
6526 static int
6527 symbolic_expression_p (rtx x)
6529 if (GET_CODE (x) == SYMBOL_REF)
6530 return 1;
6532 if (GET_CODE (x) == CONST)
6533 return symbolic_expression_p (XEXP (x, 0));
6535 if (UNARY_P (x))
6536 return symbolic_expression_p (XEXP (x, 0));
6538 if (ARITHMETIC_P (x))
6539 return (symbolic_expression_p (XEXP (x, 0))
6540 || symbolic_expression_p (XEXP (x, 1)));
6542 return 0;
6545 /* Choose the section to use for the constant rtx expression X that has
6546 mode MODE. */
6548 static void
6549 mips_select_rtx_section (enum machine_mode mode, rtx x,
6550 unsigned HOST_WIDE_INT align)
6552 if (TARGET_MIPS16)
6554 /* In mips16 mode, the constant table always goes in the same section
6555 as the function, so that constants can be loaded using PC relative
6556 addressing. */
6557 function_section (current_function_decl);
6559 else if (TARGET_EMBEDDED_DATA)
6561 /* For embedded applications, always put constants in read-only data,
6562 in order to reduce RAM usage. */
6563 mergeable_constant_section (mode, align, 0);
6565 else
6567 /* For hosted applications, always put constants in small data if
6568 possible, as this gives the best performance. */
6569 /* ??? Consider using mergeable small data sections. */
6571 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6572 && mips_section_threshold > 0)
6573 named_section (0, ".sdata", 0);
6574 else if (flag_pic && symbolic_expression_p (x))
6575 named_section (0, ".data.rel.ro", 3);
6576 else
6577 mergeable_constant_section (mode, align, 0);
6581 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6583 The complication here is that, with the combination TARGET_ABICALLS
6584 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6585 therefore not be included in the read-only part of a DSO. Handle such
6586 cases by selecting a normal data section instead of a read-only one.
6587 The logic apes that in default_function_rodata_section. */
6589 static void
6590 mips_function_rodata_section (tree decl)
6592 if (!TARGET_ABICALLS || TARGET_GPWORD)
6593 default_function_rodata_section (decl);
6594 else if (decl && DECL_SECTION_NAME (decl))
6596 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6597 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6599 char *rname = ASTRDUP (name);
6600 rname[14] = 'd';
6601 named_section_real (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6603 else if (flag_function_sections && flag_data_sections
6604 && strncmp (name, ".text.", 6) == 0)
6606 char *rname = ASTRDUP (name);
6607 memcpy (rname + 1, "data", 4);
6608 named_section_flags (rname, SECTION_WRITE);
6610 else
6611 data_section ();
6613 else
6614 data_section ();
6617 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6618 access DECL using %gp_rel(...)($gp). */
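/* Illustrative example (hypothetical variable): with the default
   threshold (-G 8) and !TARGET_ABICALLS, an 8-byte "int counters[2];"
   is placed in .sdata and can be reached as %gp_rel(counters)($gp),
   whereas a 16-byte "int counters[4];" is not.  */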
6620 static bool
6621 mips_in_small_data_p (tree decl)
6623 HOST_WIDE_INT size;
6625 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6626 return false;
6628 /* We don't yet generate small-data references for -mabicalls. See related
6629 -G handling in override_options. */
6630 if (TARGET_ABICALLS)
6631 return false;
6633 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6635 const char *name;
6637 /* Reject anything that isn't in a known small-data section. */
6638 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6639 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6640 return false;
6642 /* If a symbol is defined externally, the assembler will use the
6643 usual -G rules when deciding how to implement macros. */
6644 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6645 return true;
6647 else if (TARGET_EMBEDDED_DATA)
6649 /* Don't put constants into the small data section: we want them
6650 to be in ROM rather than RAM. */
6651 if (TREE_CODE (decl) != VAR_DECL)
6652 return false;
6654 if (TREE_READONLY (decl)
6655 && !TREE_SIDE_EFFECTS (decl)
6656 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6657 return false;
6660 size = int_size_in_bytes (TREE_TYPE (decl));
6661 return (size > 0 && size <= mips_section_threshold);
6664 /* See whether VALTYPE is a record whose fields should be returned in
6665 floating-point registers. If so, return the number of fields and
6666 list them in FIELDS (which should have two elements). Return 0
6667 otherwise.
6669 For n32 & n64, a structure with one or two fields is returned in
6670 floating-point registers as long as every field has a floating-point
6671 type. */
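/* Illustrative examples for the n32/n64 rule below:

       struct s { double d; float f; };

   has two floating-point fields and is returned in $f0 and $f2,
   whereas

       struct t { double d; int i; };

   is not (the second field is not a REAL_TYPE) and is returned in
   the integer result registers instead.  */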
6673 static int
6674 mips_fpr_return_fields (tree valtype, tree *fields)
6676 tree field;
6677 int i;
6679 if (!TARGET_NEWABI)
6680 return 0;
6682 if (TREE_CODE (valtype) != RECORD_TYPE)
6683 return 0;
6685 i = 0;
6686 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6688 if (TREE_CODE (field) != FIELD_DECL)
6689 continue;
6691 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
6692 return 0;
6694 if (i == 2)
6695 return 0;
6697 fields[i++] = field;
6699 return i;
6703 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6704 a value in the most significant part of $2/$3 if:
6706 - the target is big-endian;
6708 - the value has a structure or union type (we generalize this to
6709 cover aggregates from other languages too); and
6711 - the structure is not returned in floating-point registers. */
6713 static bool
6714 mips_return_in_msb (tree valtype)
6716 tree fields[2];
6718 return (TARGET_NEWABI
6719 && TARGET_BIG_ENDIAN
6720 && AGGREGATE_TYPE_P (valtype)
6721 && mips_fpr_return_fields (valtype, fields) == 0);
6725 /* Return a composite value in a pair of floating-point registers.
6726 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6727 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6728 complete value.
6730 For n32 & n64, $f0 always holds the first value and $f2 the second.
6731 Otherwise the values are packed together as closely as possible. */
6733 static rtx
6734 mips_return_fpr_pair (enum machine_mode mode,
6735 enum machine_mode mode1, HOST_WIDE_INT offset1,
6736 enum machine_mode mode2, HOST_WIDE_INT offset2)
6738 int inc;
6740 inc = (TARGET_NEWABI ? 2 : FP_INC);
6741 return gen_rtx_PARALLEL
6742 (mode,
6743 gen_rtvec (2,
6744 gen_rtx_EXPR_LIST (VOIDmode,
6745 gen_rtx_REG (mode1, FP_RETURN),
6746 GEN_INT (offset1)),
6747 gen_rtx_EXPR_LIST (VOIDmode,
6748 gen_rtx_REG (mode2, FP_RETURN + inc),
6749 GEN_INT (offset2))));
6754 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6755 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6756 VALTYPE is null and MODE is the mode of the return value. */
6759 rtx mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6760 enum machine_mode mode)
6762 if (valtype)
6764 tree fields[2];
6765 int unsignedp;
6767 mode = TYPE_MODE (valtype);
6768 unsignedp = TYPE_UNSIGNED (valtype);
6770 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6771 true, we must promote the mode just as PROMOTE_MODE does. */
6772 mode = promote_mode (valtype, mode, &unsignedp, 1);
6774 /* Handle structures whose fields are returned in $f0/$f2. */
6775 switch (mips_fpr_return_fields (valtype, fields))
6777 case 1:
6778 return gen_rtx_REG (mode, FP_RETURN);
6780 case 2:
6781 return mips_return_fpr_pair (mode,
6782 TYPE_MODE (TREE_TYPE (fields[0])),
6783 int_byte_position (fields[0]),
6784 TYPE_MODE (TREE_TYPE (fields[1])),
6785 int_byte_position (fields[1]));
6788 /* If a value is passed in the most significant part of a register, see
6789 whether we have to round the mode up to a whole number of words. */
6790 if (mips_return_in_msb (valtype))
6792 HOST_WIDE_INT size = int_size_in_bytes (valtype);
6793 if (size % UNITS_PER_WORD != 0)
6795 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6796 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6800 /* For EABI, the class of return register depends entirely on MODE.
6801 For example, "struct { some_type x; }" and "union { some_type x; }"
6802 are returned in the same way as a bare "some_type" would be.
6803 Other ABIs only use FPRs for scalar, complex or vector types. */
6804 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
6805 return gen_rtx_REG (mode, GP_RETURN);
6808 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
6809 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
6810 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
6811 return gen_rtx_REG (mode, FP_RETURN);
6813 /* Handle long doubles for n32 & n64. */
6814 if (mode == TFmode)
6815 return mips_return_fpr_pair (mode,
6816 DImode, 0,
6817 DImode, GET_MODE_SIZE (mode) / 2);
6819 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6820 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
6821 return mips_return_fpr_pair (mode,
6822 GET_MODE_INNER (mode), 0,
6823 GET_MODE_INNER (mode),
6824 GET_MODE_SIZE (mode) / 2);
6826 return gen_rtx_REG (mode, GP_RETURN);
6829 /* Return nonzero when an argument must be passed by reference. */
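/* Illustrative example: under ABI_EABI a hypothetical 16-byte
   "struct big { int w[4]; }" argument (larger than UNITS_PER_WORD)
   is passed by reference, while plain DImode/DFmode scalars are
   passed by value even though they can exceed the word size.  */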
6831 static bool
6832 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6833 enum machine_mode mode, tree type,
6834 bool named ATTRIBUTE_UNUSED)
6836 if (mips_abi == ABI_EABI)
6838 int size;
6840 /* ??? How should SCmode be handled? */
6841 if (type == NULL_TREE || mode == DImode || mode == DFmode)
6842 return 0;
6844 size = int_size_in_bytes (type);
6845 return size == -1 || size > UNITS_PER_WORD;
6847 else
6849 /* If we have a variable-sized parameter, we have no choice. */
6850 return targetm.calls.must_pass_in_stack (mode, type);
6854 static bool
6855 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6856 enum machine_mode mode ATTRIBUTE_UNUSED,
6857 tree type ATTRIBUTE_UNUSED, bool named)
6859 return mips_abi == ABI_EABI && named;
6862 /* Return true if registers of class CLASS cannot change from mode FROM
6863 to mode TO. */
6865 bool
6866 mips_cannot_change_mode_class (enum machine_mode from,
6867 enum machine_mode to, enum reg_class class)
6869 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
6870 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
6872 if (TARGET_BIG_ENDIAN)
6874 /* When a multi-word value is stored in paired floating-point
6875 registers, the first register always holds the low word.
6876 We therefore can't allow FPRs to change between single-word
6877 and multi-word modes. */
6878 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
6879 return true;
6881 else
6883 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
6884 in LO and HI, the high word always comes first. We therefore
6885 can't allow values stored in HI to change between single-word
6886 and multi-word modes. */
6887 if (reg_classes_intersect_p (HI_REG, class))
6888 return true;
6891 /* Loading a 32-bit value into a 64-bit floating-point register
6892 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
6893 We can't allow 64-bit float registers to change from SImode to
6894 a wider mode. */
6895 if (TARGET_FLOAT64
6896 && from == SImode
6897 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
6898 && reg_classes_intersect_p (FP_REGS, class))
6899 return true;
6900 return false;
6903 /* Return true if X should not be moved directly into register $25.
6904 We need this because many versions of GAS will treat "la $25,foo" as
6905 part of a call sequence and so allow a global "foo" to be lazily bound. */
6907 bool
6908 mips_dangerous_for_la25_p (rtx x)
6910 HOST_WIDE_INT offset;
6912 if (TARGET_EXPLICIT_RELOCS)
6913 return false;
6915 mips_split_const (x, &x, &offset);
6916 return global_got_operand (x, VOIDmode);
6919 /* Implement PREFERRED_RELOAD_CLASS. */
6921 enum reg_class
6922 mips_preferred_reload_class (rtx x, enum reg_class class)
6924 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
6925 return LEA_REGS;
6927 if (TARGET_HARD_FLOAT
6928 && FLOAT_MODE_P (GET_MODE (x))
6929 && reg_class_subset_p (FP_REGS, class))
6930 return FP_REGS;
6932 if (reg_class_subset_p (GR_REGS, class))
6933 class = GR_REGS;
6935 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
6936 class = M16_REGS;
6938 return class;
6941 /* This function returns the register class required for a secondary
6942 register when copying between one of the registers in CLASS, and X,
6943 using MODE. If IN_P is nonzero, the copy is going from X to the
6944 register, otherwise the register is the source. A return value of
6945 NO_REGS means that no secondary register is required. */
6947 enum reg_class
6948 mips_secondary_reload_class (enum reg_class class,
6949 enum machine_mode mode, rtx x, int in_p)
6951 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
6952 int regno = -1;
6953 int gp_reg_p;
6955 if (REG_P (x)|| GET_CODE (x) == SUBREG)
6956 regno = true_regnum (x);
6958 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
6960 if (mips_dangerous_for_la25_p (x))
6962 gr_regs = LEA_REGS;
6963 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
6964 return gr_regs;
6967 /* Copying from HI or LO to anywhere other than a general register
6968 requires a general register. */
6969 if (class == HI_REG || class == LO_REG || class == MD_REGS)
6971 if (TARGET_MIPS16 && in_p)
6973 /* We can't really copy to HI or LO at all in mips16 mode. */
6974 return M16_REGS;
6976 return gp_reg_p ? NO_REGS : gr_regs;
6978 if (MD_REG_P (regno))
6980 if (TARGET_MIPS16 && ! in_p)
6982 /* We can't really copy to HI or LO at all in mips16 mode. */
6983 return M16_REGS;
6985 return class == gr_regs ? NO_REGS : gr_regs;
6988 /* We can only copy a value to a condition code register from a
6989 floating point register, and even then we require a scratch
6990 floating point register. We can only copy a value out of a
6991 condition code register into a general register. */
6992 if (class == ST_REGS)
6994 if (in_p)
6995 return FP_REGS;
6996 return gp_reg_p ? NO_REGS : gr_regs;
6998 if (ST_REG_P (regno))
7000 if (! in_p)
7001 return FP_REGS;
7002 return class == gr_regs ? NO_REGS : gr_regs;
7005 if (class == FP_REGS)
7007 if (MEM_P (x))
7009 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7010 return NO_REGS;
7012 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7014 /* We can use the l.s and l.d macros to load floating-point
7015 constants. ??? For l.s, we could probably get better
7016 code by returning GR_REGS here. */
7017 return NO_REGS;
7019 else if (gp_reg_p || x == CONST0_RTX (mode))
7021 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7022 return NO_REGS;
7024 else if (FP_REG_P (regno))
7026 /* In this case we can use mov.s or mov.d. */
7027 return NO_REGS;
7029 else
7031 /* Otherwise, we need to reload through an integer register. */
7032 return gr_regs;
7036 /* In mips16 mode, going between memory and anything but M16_REGS
7037 requires an M16_REG. */
7038 if (TARGET_MIPS16)
7040 if (class != M16_REGS && class != M16_NA_REGS)
7042 if (gp_reg_p)
7043 return NO_REGS;
7044 return M16_REGS;
7046 if (! gp_reg_p)
7048 if (class == M16_REGS || class == M16_NA_REGS)
7049 return NO_REGS;
7050 return M16_REGS;
7054 return NO_REGS;
7057 /* Implement CLASS_MAX_NREGS.
7059 Usually all registers are word-sized. The only supported exception
7060 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7061 registers. A word-based calculation is correct even in that case,
7062 since -msingle-float disallows multi-FPR values.
7064 The FP status registers are an exception to this rule. They are always
7065 4 bytes wide as they only hold condition code modes, and CCmode is always
7066 considered to be 4 bytes wide. */
7069 int mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7070 enum machine_mode mode)
7072 if (class == ST_REGS)
7073 return (GET_MODE_SIZE (mode) + 3) / 4;
7074 else
7075 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7078 static bool
7079 mips_valid_pointer_mode (enum machine_mode mode)
7081 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7084 /* Define this so that we can deal with a testcase like:
7086 char foo __attribute__ ((mode (SI)));
7088 then compiled with -mabi=64 and -mint64. We have no
7089 32-bit type at that point and so the default case
7090 always fails. Instead of special casing everything
7091 it's easier to accept SImode in this function and
7092 then punt to the default which will work for all
7093 of the cases where we deal with TARGET_64BIT, etc. */
7094 static bool
7095 mips_scalar_mode_supported_p (enum machine_mode mode)
7097 /* We can always handle SImode. */
7098 if (mode == SImode)
7099 return true;
7100 else
7101 return default_scalar_mode_supported_p (mode);
7106 /* Target hook for vector_mode_supported_p. */
7107 static bool
7108 mips_vector_mode_supported_p (enum machine_mode mode)
7110 if (mode == V2SFmode && TARGET_PAIRED_SINGLE_FLOAT)
7111 return true;
7112 else
7113 return false;
7116 /* If we can access small data directly (using gp-relative relocation
7117 operators) return the small data pointer, otherwise return null.
7119 For each mips16 function which refers to GP relative symbols, we
7120 use a pseudo register, initialized at the start of the function, to
7121 hold the $gp value. */
7123 static rtx
7124 mips16_gp_pseudo_reg (void)
7126 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7128 rtx unspec;
7129 rtx insn, scan;
7131 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7133 /* We want to initialize this to a value which gcc will believe
7134 is constant. */
7135 start_sequence ();
7136 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7137 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7138 gen_rtx_CONST (Pmode, unspec));
7139 insn = get_insns ();
7140 end_sequence ();
7142 push_topmost_sequence ();
7143 /* We need to emit the initialization after the FUNCTION_BEG
7144 note, so that it will be integrated. */
7145 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7146 if (NOTE_P (scan)
7147 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7148 break;
7149 if (scan == NULL_RTX)
7150 scan = get_insns ();
7151 insn = emit_insn_after (insn, scan);
7152 pop_topmost_sequence ();
7155 return cfun->machine->mips16_gp_pseudo_rtx;
7158 /* Write out code to move floating point arguments in or out of
7159 general registers. Output the instructions to FILE. FP_CODE is
7160 the code describing which arguments are present (see the comment at
7161 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7162 we are copying from the floating point registers. */
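/* Worked example of the FP_CODE encoding consumed here: two bits per
   argument, least-significant pair first, 1 = float, 2 = double, 0
   terminates.  So fp_code == 9 (binary 10 01) describes a float
   followed by a double; the loop below emits an mtc1/mfc1 for the
   float and either a dmtc1/dmfc1 or a pair of moves for the double.  */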
7164 static void
7165 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7167 const char *s;
7168 int gparg, fparg;
7169 unsigned int f;
7171 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7172 gcc_assert (TARGET_OLDABI);
7174 if (from_fp_p)
7175 s = "mfc1";
7176 else
7177 s = "mtc1";
7178 gparg = GP_ARG_FIRST;
7179 fparg = FP_ARG_FIRST;
7180 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7182 if ((f & 3) == 1)
7184 if ((fparg & 1) != 0)
7185 ++fparg;
7186 fprintf (file, "\t%s\t%s,%s\n", s,
7187 reg_names[gparg], reg_names[fparg]);
7189 else if ((f & 3) == 2)
7191 if (TARGET_64BIT)
7192 fprintf (file, "\td%s\t%s,%s\n", s,
7193 reg_names[gparg], reg_names[fparg]);
7194 else
7196 if ((fparg & 1) != 0)
7197 ++fparg;
7198 if (TARGET_BIG_ENDIAN)
7199 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7200 reg_names[gparg], reg_names[fparg + 1], s,
7201 reg_names[gparg + 1], reg_names[fparg]);
7202 else
7203 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7204 reg_names[gparg], reg_names[fparg], s,
7205 reg_names[gparg + 1], reg_names[fparg + 1]);
7206 ++gparg;
7207 ++fparg;
7210 else
7211 gcc_unreachable ();
7213 ++gparg;
7214 ++fparg;
7218 /* Build a mips16 function stub. This is used for functions which
7219 take arguments in the floating point registers. It is 32 bit code
7220 that moves the floating point args into the general registers, and
7221 then jumps to the 16 bit code. */
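/* Illustrative example (hypothetical function name): for a mips16
   function "f" taking a single double, the code below emits a
   nomips16 stub named __fn_stub_f in section .mips16.fn.f; the stub
   moves the incoming argument from $f12/$f13 into $4/$5 with mfc1
   (a single dmfc1 on 64-bit targets) and then jumps to f itself.  */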
7223 static void
7224 build_mips16_function_stub (FILE *file)
7226 const char *fnname;
7227 char *secname, *stubname;
7228 tree stubid, stubdecl;
7229 int need_comma;
7230 unsigned int f;
7232 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7233 secname = (char *) alloca (strlen (fnname) + 20);
7234 sprintf (secname, ".mips16.fn.%s", fnname);
7235 stubname = (char *) alloca (strlen (fnname) + 20);
7236 sprintf (stubname, "__fn_stub_%s", fnname);
7237 stubid = get_identifier (stubname);
7238 stubdecl = build_decl (FUNCTION_DECL, stubid,
7239 build_function_type (void_type_node, NULL_TREE));
7240 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7242 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7243 need_comma = 0;
7244 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7246 fprintf (file, "%s%s",
7247 need_comma ? ", " : "",
7248 (f & 3) == 1 ? "float" : "double");
7249 need_comma = 1;
7251 fprintf (file, ")\n");
7253 fprintf (file, "\t.set\tnomips16\n");
7254 function_section (stubdecl);
7255 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7257 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7258 within a .ent, and we cannot emit another .ent. */
7259 if (!FUNCTION_NAME_ALREADY_DECLARED)
7261 fputs ("\t.ent\t", file);
7262 assemble_name (file, stubname);
7263 fputs ("\n", file);
7266 assemble_name (file, stubname);
7267 fputs (":\n", file);
7269 /* We don't want the assembler to insert any nops here. */
7270 fprintf (file, "\t.set\tnoreorder\n");
7272 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7274 fprintf (asm_out_file, "\t.set\tnoat\n");
7275 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7276 assemble_name (file, fnname);
7277 fprintf (file, "\n");
7278 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7279 fprintf (asm_out_file, "\t.set\tat\n");
7281 /* Unfortunately, we can't fill the jump delay slot.  We can't fill it
7282 with one of the mfc1 instructions, because the result is not
7283 available for one instruction, so if the very first instruction
7284 in the function refers to the register, it will see the wrong
7285 value. */
7286 fprintf (file, "\tnop\n");
7288 fprintf (file, "\t.set\treorder\n");
7290 if (!FUNCTION_NAME_ALREADY_DECLARED)
7292 fputs ("\t.end\t", file);
7293 assemble_name (file, stubname);
7294 fputs ("\n", file);
7297 fprintf (file, "\t.set\tmips16\n");
7299 function_section (current_function_decl);
7302 /* We keep a list of functions for which we have already built stubs
7303 in build_mips16_call_stub. */
7305 struct mips16_stub
7307 struct mips16_stub *next;
7308 char *name;
7309 int fpret;
7312 static struct mips16_stub *mips16_stubs;
7314 /* Build a call stub for a mips16 call. A stub is needed if we are
7315 passing any floating point values which should go into the floating
7316 point registers. If we are, and the call turns out to be to a 32
7317 bit function, the stub will be used to move the values into the
7318 floating point registers before calling the 32 bit function. The
7319 linker will magically adjust the function call to either the 16 bit
7320 function or the 32 bit stub, depending upon where the function call
7321 is actually defined.
7323 Similarly, we need a stub if the return value might come back in a
7324 floating point register.
7326 RETVAL is the location of the return value, or null if this is
7327 a call rather than a call_value. FN is the address of the
7328 function and ARG_SIZE is the size of the arguments. FP_CODE
7329 is the code built by function_arg. This function returns a nonzero
7330 value if it builds the call instruction itself. */
7333 int build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7335 int fpret;
7336 const char *fnname;
7337 char *secname, *stubname;
7338 struct mips16_stub *l;
7339 tree stubid, stubdecl;
7340 int need_comma;
7341 unsigned int f;
7343 /* We don't need to do anything if we aren't in mips16 mode, or if
7344 we were invoked with the -msoft-float option. */
7345 if (! TARGET_MIPS16 || ! mips16_hard_float)
7346 return 0;
7348 /* Figure out whether the value might come back in a floating point
7349 register. */
7350 fpret = (retval != 0
7351 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7352 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7354 /* We don't need to do anything if there were no floating point
7355 arguments and the value will not be returned in a floating point
7356 register. */
7357 if (fp_code == 0 && ! fpret)
7358 return 0;
7360 /* We don't need to do anything if this is a call to a special
7361 mips16 support function. */
7362 if (GET_CODE (fn) == SYMBOL_REF
7363 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7364 return 0;
7366 /* This code will only work for the o32 and o64 ABIs.  The other ABIs
7367 require more sophisticated support. */
7368 gcc_assert (TARGET_OLDABI);
7370 /* We can only handle SFmode and DFmode floating point return
7371 values. */
7372 if (fpret)
7373 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
7375 /* If we're calling via a function pointer, then we must always call
7376 via a stub. There are magic stubs provided in libgcc.a for each
7377 of the required cases. Each of them expects the function address
7378 to arrive in register $2. */
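/* Illustrative example: an indirect call (through a function pointer)
   to a routine taking (float, double) and returning a double would
   use the libgcc stub __mips16_call_stub_df_9, where "df_" encodes
   the DFmode return value and 9 is the fp_code for the argument
   list, as constructed by the sprintf below.  */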
7380 if (GET_CODE (fn) != SYMBOL_REF)
7382 char buf[30];
7383 tree id;
7384 rtx stub_fn, insn;
7386 /* ??? If this code is modified to support other ABI's, we need
7387 to handle PARALLEL return values here. */
7389 sprintf (buf, "__mips16_call_stub_%s%d",
7390 (fpret
7391 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7392 : ""),
7393 fp_code);
7394 id = get_identifier (buf);
7395 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7397 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7399 if (retval == NULL_RTX)
7400 insn = gen_call_internal (stub_fn, arg_size);
7401 else
7402 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7403 insn = emit_call_insn (insn);
7405 /* Put the register usage information on the CALL. */
7406 CALL_INSN_FUNCTION_USAGE (insn) =
7407 gen_rtx_EXPR_LIST (VOIDmode,
7408 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7409 CALL_INSN_FUNCTION_USAGE (insn));
7411 /* If we are handling a floating point return value, we need to
7412 save $18 in the function prologue. Putting a note on the
7413 call will mean that regs_ever_live[$18] will be true if the
7414 call is not eliminated, and we can check that in the prologue
7415 code. */
7416 if (fpret)
7417 CALL_INSN_FUNCTION_USAGE (insn) =
7418 gen_rtx_EXPR_LIST (VOIDmode,
7419 gen_rtx_USE (VOIDmode,
7420 gen_rtx_REG (word_mode, 18)),
7421 CALL_INSN_FUNCTION_USAGE (insn));
7423 /* Return 1 to tell the caller that we've generated the call
7424 insn. */
7425 return 1;
7428 /* We know the function we are going to call. If we have already
7429 built a stub, we don't need to do anything further. */
7431 fnname = XSTR (fn, 0);
7432 for (l = mips16_stubs; l != NULL; l = l->next)
7433 if (strcmp (l->name, fnname) == 0)
7434 break;
7436 if (l == NULL)
7438 /* Build a special purpose stub. When the linker sees a
7439 function call in mips16 code, it will check where the target
7440 is defined. If the target is a 32 bit call, the linker will
7441 search for the section defined here. It can tell which
7442 symbol this section is associated with by looking at the
7443 relocation information (the name is unreliable, since this
7444 might be a static function). If such a section is found, the
7445 linker will redirect the call to the start of the magic
7446 section.
7448 If the function does not return a floating point value, the
7449 special stub section is named
7450 .mips16.call.FNNAME
7452 If the function does return a floating point value, the stub
7453 section is named
7454 .mips16.call.fp.FNNAME
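	 For illustration, using a hypothetical function "foo" that
	 returns a double: the section would be .mips16.call.fp.foo and
	 the stub symbol __call_stub_fp_foo, matching the sprintf calls
	 below.  */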
7457 secname = (char *) alloca (strlen (fnname) + 40);
7458 sprintf (secname, ".mips16.call.%s%s",
7459 fpret ? "fp." : "",
7460 fnname);
7461 stubname = (char *) alloca (strlen (fnname) + 20);
7462 sprintf (stubname, "__call_stub_%s%s",
7463 fpret ? "fp_" : "",
7464 fnname);
7465 stubid = get_identifier (stubname);
7466 stubdecl = build_decl (FUNCTION_DECL, stubid,
7467 build_function_type (void_type_node, NULL_TREE));
7468 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7470 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7471 (fpret
7472 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7473 : ""),
7474 fnname);
7475 need_comma = 0;
7476 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7478 fprintf (asm_out_file, "%s%s",
7479 need_comma ? ", " : "",
7480 (f & 3) == 1 ? "float" : "double");
7481 need_comma = 1;
7483 fprintf (asm_out_file, ")\n");
7485 fprintf (asm_out_file, "\t.set\tnomips16\n");
7486 assemble_start_function (stubdecl, stubname);
7488 if (!FUNCTION_NAME_ALREADY_DECLARED)
7490 fputs ("\t.ent\t", asm_out_file);
7491 assemble_name (asm_out_file, stubname);
7492 fputs ("\n", asm_out_file);
7494 assemble_name (asm_out_file, stubname);
7495 fputs (":\n", asm_out_file);
7498 /* We build the stub code by hand. That's the only way we can
7499 do it, since we can't generate 32 bit code during a 16 bit
7500 compilation. */
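      /* A sketch of the code emitted below for a hypothetical function
	 "foo" that returns a double on a big-endian target and takes no
	 floating-point arguments (illustrative only, with the .ent and
	 label directives omitted; the authoritative output is the
	 fprintf calls that follow):

	     .set	noreorder
	     move	$18,$31
	     jal	foo
	     nop
	     mfc1	$2,$f1
	     mfc1	$3,$f0
	     j		$18
	     nop
	     .set	reorder  */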
7502 /* We don't want the assembler to insert any nops here. */
7503 fprintf (asm_out_file, "\t.set\tnoreorder\n");
7505 mips16_fp_args (asm_out_file, fp_code, 0);
7507 if (! fpret)
7509 fprintf (asm_out_file, "\t.set\tnoat\n");
7510 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7511 fnname);
7512 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7513 fprintf (asm_out_file, "\t.set\tat\n");
7514 /* Unfortunately, we can't fill the jump delay slot. We
7515 can't fill it with one of the mtc1 instructions, because the
7516 result is not available for one instruction, so if the
7517 very first instruction in the function refers to the
7518 register, it will see the wrong value. */
7519 fprintf (asm_out_file, "\tnop\n");
7521 else
7523 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7524 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7525 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7526 /* As above, we can't fill the delay slot. */
7527 fprintf (asm_out_file, "\tnop\n");
7528 if (GET_MODE (retval) == SFmode)
7529 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7530 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
7531 else
7533 if (TARGET_BIG_ENDIAN)
7535 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7536 reg_names[GP_REG_FIRST + 2],
7537 reg_names[FP_REG_FIRST + 1]);
7538 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7539 reg_names[GP_REG_FIRST + 3],
7540 reg_names[FP_REG_FIRST + 0]);
7542 else
7544 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7545 reg_names[GP_REG_FIRST + 2],
7546 reg_names[FP_REG_FIRST + 0]);
7547 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7548 reg_names[GP_REG_FIRST + 3],
7549 reg_names[FP_REG_FIRST + 1]);
7552 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7553 /* As above, we can't fill the delay slot. */
7554 fprintf (asm_out_file, "\tnop\n");
7557 fprintf (asm_out_file, "\t.set\treorder\n");
7559 #ifdef ASM_DECLARE_FUNCTION_SIZE
7560 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7561 #endif
7563 if (!FUNCTION_NAME_ALREADY_DECLARED)
7565 fputs ("\t.end\t", asm_out_file);
7566 assemble_name (asm_out_file, stubname);
7567 fputs ("\n", asm_out_file);
7570 fprintf (asm_out_file, "\t.set\tmips16\n");
7572 /* Record this stub. */
7573 l = (struct mips16_stub *) xmalloc (sizeof *l);
7574 l->name = xstrdup (fnname);
7575 l->fpret = fpret;
7576 l->next = mips16_stubs;
7577 mips16_stubs = l;
7580 /* If we expect a floating point return value, but we've built a
7581 stub which does not expect one, then we're in trouble. We can't
7582 use the existing stub, because it won't handle the floating point
7583 value. We can't build a new stub, because the linker won't know
7584 which stub to use for the various calls in this object file.
7585 Fortunately, this case is illegal, since it means that a function
7586 was declared in two different ways in a single compilation. */
7587 if (fpret && ! l->fpret)
7588 error ("cannot handle inconsistent calls to %qs", fnname);
7590 /* If we are calling a stub which handles a floating point return
7591 value, we need to arrange to save $18 in the prologue. We do
7592 this by marking the function call as using the register. The
7593 prologue will later see that it is used, and emit code to save
7594 it. */
7596 if (l->fpret)
7598 rtx insn;
7600 if (retval == NULL_RTX)
7601 insn = gen_call_internal (fn, arg_size);
7602 else
7603 insn = gen_call_value_internal (retval, fn, arg_size);
7604 insn = emit_call_insn (insn);
7606 CALL_INSN_FUNCTION_USAGE (insn) =
7607 gen_rtx_EXPR_LIST (VOIDmode,
7608 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7609 CALL_INSN_FUNCTION_USAGE (insn));
7611 /* Return 1 to tell the caller that we've generated the call
7612 insn. */
7613 return 1;
7616 /* Return 0 to let the caller generate the call insn. */
7617 return 0;
7620 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7621 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
7623 struct mips16_constant {
7624 struct mips16_constant *next;
7625 rtx value;
7626 rtx label;
7627 enum machine_mode mode;
7630 /* Information about an incomplete mips16 constant pool. FIRST is the
7631 first constant, HIGHEST_ADDRESS is the highest address that the first
7632 byte of the pool can have, and INSN_ADDRESS is the current instruction
7633 address. */
7635 struct mips16_constant_pool {
7636 struct mips16_constant *first;
7637 int highest_address;
7638 int insn_address;
7641 /* Add constant VALUE to POOL and return its label. MODE is the
7642 value's mode (used for CONST_INTs, etc.). */
7644 static rtx
7645 add_constant (struct mips16_constant_pool *pool,
7646 rtx value, enum machine_mode mode)
7648 struct mips16_constant **p, *c;
7649 bool first_of_size_p;
7651 /* See whether the constant is already in the pool. If so, return the
7652 existing label, otherwise leave P pointing to the place where the
7653 constant should be added.
7655 Keep the pool sorted in increasing order of mode size so that we can
7656 reduce the number of alignments needed. */
7657 first_of_size_p = true;
7658 for (p = &pool->first; *p != 0; p = &(*p)->next)
7660 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
7661 return (*p)->label;
7662 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7663 break;
7664 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7665 first_of_size_p = false;
7668 /* In the worst case, the constant needed by the earliest instruction
7669 will end up at the end of the pool. The entire pool must then be
7670 accessible from that instruction.
7672 When adding the first constant, set the pool's highest address to
7673 the address of the first out-of-range byte. Adjust this address
7674 downwards each time a new constant is added. */
7675 if (pool->first == 0)
7676 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7677 is the address of the instruction with the lowest two bits clear.
7678 The base PC value for ld has the lowest three bits clear. Assume
7679 the worst case here. */
7680 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7681 pool->highest_address -= GET_MODE_SIZE (mode);
7682 if (first_of_size_p)
7683 /* Take into account the worst possible padding due to alignment. */
7684 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
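  /* A rough worked example (illustrative numbers, 32-bit target): if
     the first constant added is an SImode value and insn_address is
     0x100, highest_address starts at 0x100 - 2 + 0x8000 = 0x80fe, then
     drops by 4 bytes for the constant itself and by a further 3 bytes
     of worst-case alignment padding, giving 0x80f7.  */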
7686 /* Create a new entry. */
7687 c = (struct mips16_constant *) xmalloc (sizeof *c);
7688 c->value = value;
7689 c->mode = mode;
7690 c->label = gen_label_rtx ();
7691 c->next = *p;
7692 *p = c;
7694 return c->label;
7697 /* Output constant VALUE after instruction INSN and return the last
7698 instruction emitted. MODE is the mode of the constant. */
7700 static rtx
7701 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7703 switch (GET_MODE_CLASS (mode))
7705 case MODE_INT:
7707 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7708 return emit_insn_after (gen_consttable_int (value, size), insn);
7711 case MODE_FLOAT:
7712 return emit_insn_after (gen_consttable_float (value), insn);
7714 case MODE_VECTOR_FLOAT:
7715 case MODE_VECTOR_INT:
7717 int i;
7718 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7719 insn = dump_constants_1 (GET_MODE_INNER (mode),
7720 CONST_VECTOR_ELT (value, i), insn);
7721 return insn;
7724 default:
7725 gcc_unreachable ();
7730 /* Dump out the constants in CONSTANTS after INSN. */
7732 static void
7733 dump_constants (struct mips16_constant *constants, rtx insn)
7735 struct mips16_constant *c, *next;
7736 int align;
7738 align = 0;
7739 for (c = constants; c != NULL; c = next)
7741 /* If necessary, increase the alignment of PC. */
7742 if (align < GET_MODE_SIZE (c->mode))
7744 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7745 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7747 align = GET_MODE_SIZE (c->mode);
7749 insn = emit_label_after (c->label, insn);
7750 insn = dump_constants_1 (c->mode, c->value, insn);
7752 next = c->next;
7753 free (c);
7756 emit_barrier_after (insn);
7759 /* Return the length of instruction INSN.
7761 ??? MIPS16 switch tables go in .text, but we don't define
7762 JUMP_TABLES_IN_TEXT_SECTION, so get_attr_length will not
7763 compute their lengths correctly. */
7765 static int
7766 mips16_insn_length (rtx insn)
7768 if (JUMP_P (insn))
7770 rtx body = PATTERN (insn);
7771 if (GET_CODE (body) == ADDR_VEC)
7772 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
7773 if (GET_CODE (body) == ADDR_DIFF_VEC)
7774 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
7776 return get_attr_length (insn);
7779 /* Rewrite *X so that constant pool references refer to the constant's
7780 label instead. DATA points to the constant pool structure. */
7782 static int
7783 mips16_rewrite_pool_refs (rtx *x, void *data)
7785 struct mips16_constant_pool *pool = data;
7786 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
7787 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
7788 get_pool_constant (*x),
7789 get_pool_mode (*x)));
7790 return 0;
7793 /* Build MIPS16 constant pools. */
7795 static void
7796 mips16_lay_out_constants (void)
7798 struct mips16_constant_pool pool;
7799 rtx insn, barrier;
7801 barrier = 0;
7802 memset (&pool, 0, sizeof (pool));
7803 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7805 /* Rewrite constant pool references in INSN. */
7806 if (INSN_P (insn))
7807 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
7809 pool.insn_address += mips16_insn_length (insn);
7811 if (pool.first != NULL)
7813 /* If there are no natural barriers between the first user of
7814 the pool and the highest acceptable address, we'll need to
7815 create a new instruction to jump around the constant pool.
7816 In the worst case, this instruction will be 4 bytes long.
7818 If it's too late to do this transformation after INSN,
7819 do it immediately before INSN. */
7820 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
7822 rtx label, jump;
7824 label = gen_label_rtx ();
7826 jump = emit_jump_insn_before (gen_jump (label), insn);
7827 JUMP_LABEL (jump) = label;
7828 LABEL_NUSES (label) = 1;
7829 barrier = emit_barrier_after (jump);
7831 emit_label_after (label, barrier);
7832 pool.insn_address += 4;
7835 /* See whether the constant pool is now out of range of the first
7836 user. If so, output the constants after the previous barrier.
7837 Note that any instructions between BARRIER and INSN (inclusive)
7838 will use negative offsets to refer to the pool. */
7839 if (pool.insn_address > pool.highest_address)
7841 dump_constants (pool.first, barrier);
7842 pool.first = NULL;
7843 barrier = 0;
7845 else if (BARRIER_P (insn))
7846 barrier = insn;
7849 dump_constants (pool.first, get_last_insn ());
7852 /* A temporary variable used by for_each_rtx callbacks, etc. */
7853 static rtx mips_sim_insn;
7855 /* A structure representing the state of the processor pipeline.
7856 Used by the mips_sim_* family of functions. */
7857 struct mips_sim {
7858 /* The maximum number of instructions that can be issued in a cycle.
7859 (Caches mips_issue_rate.) */
7860 unsigned int issue_rate;
7862 /* The current simulation time. */
7863 unsigned int time;
7865 /* How many more instructions can be issued in the current cycle. */
7866 unsigned int insns_left;
7868 /* LAST_SET[X].INSN is the last instruction to set register X.
7869 LAST_SET[X].TIME is the time at which that instruction was issued.
7870 INSN is null if no instruction has yet set register X. */
7871 struct {
7872 rtx insn;
7873 unsigned int time;
7874 } last_set[FIRST_PSEUDO_REGISTER];
7876 /* The pipeline's current DFA state. */
7877 state_t dfa_state;
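/* A typical use of the mips_sim_* routines (a sketch that mirrors
   vr4130_align_insns below):

       struct mips_sim state;
       mips_sim_init (&state, alloca (state_size ()));
       for each instruction INSN in turn:
	 mips_sim_wait_insn (&state, INSN);    stall until it can issue
	 mips_sim_issue_insn (&state, INSN);   issue it
	 mips_sim_finish_insn (&state, INSN);  account for delay slots, calls, etc.  */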
7880 /* Reset STATE to the initial simulation state. */
7882 static void
7883 mips_sim_reset (struct mips_sim *state)
7885 state->time = 0;
7886 state->insns_left = state->issue_rate;
7887 memset (&state->last_set, 0, sizeof (state->last_set));
7888 state_reset (state->dfa_state);
7891 /* Initialize STATE before its first use. DFA_STATE points to an
7892 allocated but uninitialized DFA state. */
7894 static void
7895 mips_sim_init (struct mips_sim *state, state_t dfa_state)
7897 state->issue_rate = mips_issue_rate ();
7898 state->dfa_state = dfa_state;
7899 mips_sim_reset (state);
7902 /* Advance STATE by one clock cycle. */
7904 static void
7905 mips_sim_next_cycle (struct mips_sim *state)
7907 state->time++;
7908 state->insns_left = state->issue_rate;
7909 state_transition (state->dfa_state, 0);
7912 /* Advance simulation state STATE until instruction INSN can read
7913 register REG. */
7915 static void
7916 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
7918 unsigned int i;
7920 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
7921 if (state->last_set[REGNO (reg) + i].insn != 0)
7923 unsigned int t;
7925 t = state->last_set[REGNO (reg) + i].time;
7926 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
7927 while (state->time < t)
7928 mips_sim_next_cycle (state);
7932 /* A for_each_rtx callback. If *X is a register, advance simulation state
7933 DATA until mips_sim_insn can read the register's value. */
7935 static int
7936 mips_sim_wait_regs_2 (rtx *x, void *data)
7938 if (REG_P (*x))
7939 mips_sim_wait_reg (data, mips_sim_insn, *x);
7940 return 0;
7943 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
7945 static void
7946 mips_sim_wait_regs_1 (rtx *x, void *data)
7948 for_each_rtx (x, mips_sim_wait_regs_2, data);
7951 /* Advance simulation state STATE until all of INSN's register
7952 dependencies are satisfied. */
7954 static void
7955 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
7957 mips_sim_insn = insn;
7958 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
7961 /* Advance simulation state STATE until the units required by
7962 instruction INSN are available. */
7964 static void
7965 mips_sim_wait_units (struct mips_sim *state, rtx insn)
7967 state_t tmp_state;
7969 tmp_state = alloca (state_size ());
7970 while (state->insns_left == 0
7971 || (memcpy (tmp_state, state->dfa_state, state_size ()),
7972 state_transition (tmp_state, insn) >= 0))
7973 mips_sim_next_cycle (state);
7976 /* Advance simulation state STATE until INSN is ready to issue. */
7978 static void
7979 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
7981 mips_sim_wait_regs (state, insn);
7982 mips_sim_wait_units (state, insn);
7985 /* mips_sim_insn has just set X. Update the LAST_SET array
7986 in simulation state DATA. */
7988 static void
7989 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
7991 struct mips_sim *state;
7992 unsigned int i;
7994 state = data;
7995 if (REG_P (x))
7996 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
7998 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
7999 state->last_set[REGNO (x) + i].time = state->time;
8003 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8004 can issue immediately (i.e., that mips_sim_wait_insn has already
8005 been called). */
8007 static void
8008 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8010 state_transition (state->dfa_state, insn);
8011 state->insns_left--;
8013 mips_sim_insn = insn;
8014 note_stores (PATTERN (insn), mips_sim_record_set, state);
8017 /* Simulate issuing a NOP in state STATE. */
8019 static void
8020 mips_sim_issue_nop (struct mips_sim *state)
8022 if (state->insns_left == 0)
8023 mips_sim_next_cycle (state);
8024 state->insns_left--;
8027 /* Update simulation state STATE so that it's ready to accept the instruction
8028 after INSN. INSN should be part of the main rtl chain, not a member of a
8029 SEQUENCE. */
8031 static void
8032 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8034 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8035 if (JUMP_P (insn))
8036 mips_sim_issue_nop (state);
8038 switch (GET_CODE (SEQ_BEGIN (insn)))
8040 case CODE_LABEL:
8041 case CALL_INSN:
8042 /* We can't predict the processor state after a call or label. */
8043 mips_sim_reset (state);
8044 break;
8046 case JUMP_INSN:
8047 /* The delay slots of branch likely instructions are only executed
8048 when the branch is taken. Therefore, if the caller has simulated
8049 the delay slot instruction, STATE does not really reflect the state
8050 of the pipeline for the instruction after the delay slot. Also,
8051 branch likely instructions tend to incur a penalty when not taken,
8052 so there will probably be an extra delay between the branch and
8053 the instruction after the delay slot. */
8054 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8055 mips_sim_reset (state);
8056 break;
8058 default:
8059 break;
8063 /* The VR4130 pipeline issues aligned pairs of instructions together,
8064 but it stalls the second instruction if it depends on the first.
8065 In order to cut down the amount of logic required, this dependence
8066 check is not based on a full instruction decode. Instead, any non-SPECIAL
8067 instruction is assumed to modify the register specified by bits 20-16
8068 (which is usually the "rt" field).
8070 In beq, beql, bne and bnel instructions, the rt field is actually an
8071 input, so we can end up with a false dependence between the branch
8072 and its delay slot. If this situation occurs in instruction INSN,
8073 try to avoid it by swapping rs and rt. */
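/* For example (illustrative registers): in

       beq	$4,$5,label

   where the delay slot instruction reads $5 but not $4, the hardware
   described above would see a false dependence on $5 (the rt field).
   The code below rewrites the branch as beq $5,$4,label, which tests
   the same condition but moves the register that the delay slot reads
   into the rs field.  */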
8075 static void
8076 vr4130_avoid_branch_rt_conflict (rtx insn)
8078 rtx first, second;
8080 first = SEQ_BEGIN (insn);
8081 second = SEQ_END (insn);
8082 if (JUMP_P (first)
8083 && NONJUMP_INSN_P (second)
8084 && GET_CODE (PATTERN (first)) == SET
8085 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8086 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8088 /* Check for the right kind of condition. */
8089 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8090 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8091 && REG_P (XEXP (cond, 0))
8092 && REG_P (XEXP (cond, 1))
8093 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8094 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8096 /* SECOND mentions the rt register but not the rs register. */
8097 rtx tmp = XEXP (cond, 0);
8098 XEXP (cond, 0) = XEXP (cond, 1);
8099 XEXP (cond, 1) = tmp;
8104 /* Implement -mvr4130-align. Go through each basic block and simulate the
8105 processor pipeline. If we find that a pair of instructions could execute
8106 in parallel, and the first of those instructions is not 8-byte aligned,
8107 insert a nop to make it aligned. */
8109 static void
8110 vr4130_align_insns (void)
8112 struct mips_sim state;
8113 rtx insn, subinsn, last, last2, next;
8114 bool aligned_p;
8116 dfa_start ();
8118 /* LAST is the last instruction before INSN to have a nonzero length.
8119 LAST2 is the last such instruction before LAST. */
8120 last = 0;
8121 last2 = 0;
8123 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8124 aligned_p = true;
8126 mips_sim_init (&state, alloca (state_size ()));
8127 for (insn = get_insns (); insn != 0; insn = next)
8129 unsigned int length;
8131 next = NEXT_INSN (insn);
8133 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8134 This isn't really related to the alignment pass, but we do it on
8135 the fly to avoid a separate instruction walk. */
8136 vr4130_avoid_branch_rt_conflict (insn);
8138 if (USEFUL_INSN_P (insn))
8139 FOR_EACH_SUBINSN (subinsn, insn)
8141 mips_sim_wait_insn (&state, subinsn);
8143 /* If we want this instruction to issue in parallel with the
8144 previous one, make sure that the previous instruction is
8145 aligned. There are several reasons why this isn't worthwhile
8146 when the second instruction is a call:
8148 - Calls are less likely to be performance critical,
8149 - There's a good chance that the delay slot can execute
8150 in parallel with the call.
8151 - The return address would then be unaligned.
8153 In general, if we're going to insert a nop between instructions
8154 X and Y, it's better to insert it immediately after X. That
8155 way, if the nop makes Y aligned, it will also align any labels
8156 between X and Y. */
8157 if (state.insns_left != state.issue_rate
8158 && !CALL_P (subinsn))
8160 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8162 /* SUBINSN is the first instruction in INSN and INSN is
8163 aligned. We want to align the previous instruction
8164 instead, so insert a nop between LAST2 and LAST.
8166 Note that LAST could be either a single instruction
8167 or a branch with a delay slot. In the latter case,
8168 LAST, like INSN, is already aligned, but the delay
8169 slot must have some extra delay that stops it from
8170 issuing at the same time as the branch. We therefore
8171 insert a nop before the branch in order to align its
8172 delay slot. */
8173 emit_insn_after (gen_nop (), last2);
8174 aligned_p = false;
8176 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8178 /* SUBINSN is the delay slot of INSN, but INSN is
8179 currently unaligned. Insert a nop between
8180 LAST and INSN to align it. */
8181 emit_insn_after (gen_nop (), last);
8182 aligned_p = true;
8185 mips_sim_issue_insn (&state, subinsn);
8187 mips_sim_finish_insn (&state, insn);
8189 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8190 length = get_attr_length (insn);
8191 if (length > 0)
8193 /* If the instruction is an asm statement or multi-instruction
8194 mips.md pattern, the length is only an estimate. Insert an
8195 8 byte alignment after it so that the following instructions
8196 can be handled correctly. */
8197 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8198 && (recog_memoized (insn) < 0 || length >= 8))
8200 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8201 next = NEXT_INSN (next);
8202 mips_sim_next_cycle (&state);
8203 aligned_p = true;
8205 else if (length & 4)
8206 aligned_p = !aligned_p;
8207 last2 = last;
8208 last = insn;
8211 /* See whether INSN is an aligned label. */
8212 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8213 aligned_p = true;
8215 dfa_finish ();
8218 /* Subroutine of mips_reorg. If there is a hazard between INSN
8219 and a previous instruction, avoid it by inserting nops after
8220 instruction AFTER.
8222 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8223 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8224 before using the value of that register. *HILO_DELAY counts the
8225 number of instructions since the last hilo hazard (that is,
8226 the number of instructions since the last mflo or mfhi).
8228 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8229 for the next instruction.
8231 LO_REG is an rtx for the LO register, used in dependence checking. */
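/* For example (a sketch of the rules below): if the previous
   instruction was an mflo, so that *HILO_DELAY was reset to 0, and
   INSN starts a multiplication or division that writes LO, two hazard
   nops are inserted after AFTER; if one unrelated instruction had
   already separated them, a single nop would suffice.  */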
8233 static void
8234 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8235 rtx *delayed_reg, rtx lo_reg)
8237 rtx pattern, set;
8238 int nops, ninsns;
8240 if (!INSN_P (insn))
8241 return;
8243 pattern = PATTERN (insn);
8245 /* Do not put the whole function in .set noreorder if it contains
8246 an asm statement. We don't know whether there will be hazards
8247 between the asm statement and the gcc-generated code. */
8248 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8249 cfun->machine->all_noreorder_p = false;
8251 /* Ignore zero-length instructions (barriers and the like). */
8252 ninsns = get_attr_length (insn) / 4;
8253 if (ninsns == 0)
8254 return;
8256 /* Work out how many nops are needed. Note that we only care about
8257 registers that are explicitly mentioned in the instruction's pattern.
8258 It doesn't matter that calls use the argument registers or that they
8259 clobber hi and lo. */
8260 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8261 nops = 2 - *hilo_delay;
8262 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8263 nops = 1;
8264 else
8265 nops = 0;
8267 /* Insert the nops between this instruction and the previous one.
8268 Each new nop takes us further from the last hilo hazard. */
8269 *hilo_delay += nops;
8270 while (nops-- > 0)
8271 emit_insn_after (gen_hazard_nop (), after);
8273 /* Set up the state for the next instruction. */
8274 *hilo_delay += ninsns;
8275 *delayed_reg = 0;
8276 if (INSN_CODE (insn) >= 0)
8277 switch (get_attr_hazard (insn))
8279 case HAZARD_NONE:
8280 break;
8282 case HAZARD_HILO:
8283 *hilo_delay = 0;
8284 break;
8286 case HAZARD_DELAY:
8287 set = single_set (insn);
8288 gcc_assert (set != 0);
8289 *delayed_reg = SET_DEST (set);
8290 break;
8295 /* Go through the instruction stream and insert nops where necessary.
8296 See if the whole function can then be put into .set noreorder &
8297 .set nomacro. */
8299 static void
8300 mips_avoid_hazards (void)
8302 rtx insn, last_insn, lo_reg, delayed_reg;
8303 int hilo_delay, i;
8305 /* Force all instructions to be split into their final form. */
8306 split_all_insns_noflow ();
8308 /* Recalculate instruction lengths without taking nops into account. */
8309 cfun->machine->ignore_hazard_length_p = true;
8310 shorten_branches (get_insns ());
8312 /* The profiler code uses assembler macros. -mfix-vr4120 relies on
8313 assembler nop insertion. */
8314 cfun->machine->all_noreorder_p = (!current_function_profile
8315 && !TARGET_FIX_VR4120);
8317 last_insn = 0;
8318 hilo_delay = 2;
8319 delayed_reg = 0;
8320 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8322 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
8323 if (INSN_P (insn))
8325 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8326 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8327 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8328 &hilo_delay, &delayed_reg, lo_reg);
8329 else
8330 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8331 &delayed_reg, lo_reg);
8333 last_insn = insn;
8338 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
8340 static void
8341 mips_reorg (void)
8343 if (TARGET_MIPS16)
8344 mips16_lay_out_constants ();
8345 else if (TARGET_EXPLICIT_RELOCS)
8347 if (mips_flag_delayed_branch)
8348 dbr_schedule (get_insns (), dump_file);
8349 mips_avoid_hazards ();
8350 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8351 vr4130_align_insns ();
8355 /* This function does three things:
8357 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8358 - Register the mips16 hardware floating point stubs.
8359 - Register the gofast functions if selected using --enable-gofast. */
8361 #include "config/gofast.h"
8363 static void
8364 mips_init_libfuncs (void)
8366 if (TARGET_FIX_VR4120)
8368 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8369 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8372 if (TARGET_MIPS16 && mips16_hard_float)
8374 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8375 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8376 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8377 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8379 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8380 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8381 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8382 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8383 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8384 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8386 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8387 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8389 if (TARGET_DOUBLE_FLOAT)
8391 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8392 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8393 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8394 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8396 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8397 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8398 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8399 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8400 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8401 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8403 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8404 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8406 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8407 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8410 else
8411 gofast_maybe_init_libfuncs ();
8414 /* Return a number assessing the cost of moving a register in class
8415 FROM to class TO. The classes are expressed using the enumeration
8416 values such as `GENERAL_REGS'. A value of 2 is the default; other
8417 values are interpreted relative to that.
8419 It is not required that the cost always equal 2 when FROM is the
8420 same as TO; on some machines it is expensive to move between
8421 registers if they are not general registers.
8423 If reload sees an insn consisting of a single `set' between two
8424 hard registers, and if `REGISTER_MOVE_COST' applied to their
8425 classes returns a value of 2, reload does not check to ensure that
8426 the constraints of the insn are met. Setting a cost of other than
8427 2 will allow reload to verify that the constraints are met. You
8428 should do this if the `movM' pattern's constraints do not allow
8429 such copying.
8431 ??? We make the cost of moving from HI/LO into general
8432 registers the same as for one of moving general registers to
8433 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8434 pseudo to HI/LO. This might hurt optimizations, though; it
8435 isn't clear whether it is wise. And it might not work in all cases. We
8436 could solve the DImode LO reg problem by using a multiply, just
8437 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8438 problem by using divide instructions. divu puts the remainder in
8439 the HI reg, so doing a divide by -1 will move the value in the HI
8440 reg for all values except -1. We could handle that case by using a
8441 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8442 a compare/branch to test the input value to see which instruction
8443 we need to use. This gets pretty messy, but it is feasible. */
8446 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8447 enum reg_class to, enum reg_class from)
8449 if (from == M16_REGS && GR_REG_CLASS_P (to))
8450 return 2;
8451 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
8452 return 2;
8453 else if (GR_REG_CLASS_P (from))
8455 if (to == M16_REGS)
8456 return 2;
8457 else if (to == M16_NA_REGS)
8458 return 2;
8459 else if (GR_REG_CLASS_P (to))
8461 if (TARGET_MIPS16)
8462 return 4;
8463 else
8464 return 2;
8466 else if (to == FP_REGS)
8467 return 4;
8468 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8470 if (TARGET_MIPS16)
8471 return 12;
8472 else
8473 return 6;
8475 else if (COP_REG_CLASS_P (to))
8477 return 5;
8479 } /* GR_REG_CLASS_P (from) */
8480 else if (from == FP_REGS)
8482 if (GR_REG_CLASS_P (to))
8483 return 4;
8484 else if (to == FP_REGS)
8485 return 2;
8486 else if (to == ST_REGS)
8487 return 8;
8488 } /* from == FP_REGS */
8489 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8491 if (GR_REG_CLASS_P (to))
8493 if (TARGET_MIPS16)
8494 return 12;
8495 else
8496 return 6;
8498 } /* from == HI_REG, etc. */
8499 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8500 return 4;
8501 else if (COP_REG_CLASS_P (from))
8503 return 5;
8504 } /* COP_REG_CLASS_P (from) */
8506 /* Fall through. */
8508 return 12;
8511 /* Return the length of INSN. LENGTH is the initial length computed by
8512 attributes in the machine-description file. */
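   For illustration (a sketch of the adjustments below, assuming the
   usual hazard attributes): on a non-mips16 target a conditional
   branch whose mips.md length is 4 is adjusted to 8 bytes to allow for
   an unfilled delay slot, and an mflo carrying a HILO hazard becomes
   4 + 8 = 12 bytes; on mips16 the final figure is halved.  */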
8515 mips_adjust_insn_length (rtx insn, int length)
8517 /* An unconditional jump has an unfilled delay slot if it is not part
8518 of a sequence. A conditional jump normally has a delay slot, but
8519 does not on MIPS16. */
8520 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
8521 length += 4;
8523 /* See how many nops might be needed to avoid hardware hazards. */
8524 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8525 switch (get_attr_hazard (insn))
8527 case HAZARD_NONE:
8528 break;
8530 case HAZARD_DELAY:
8531 length += 4;
8532 break;
8534 case HAZARD_HILO:
8535 length += 8;
8536 break;
8539 /* All MIPS16 instructions are a measly two bytes. */
8540 if (TARGET_MIPS16)
8541 length /= 2;
8543 return length;
8547 /* Return an asm sequence to start a noat block and load the address
8548 of a label into $1. */
8550 const char *
8551 mips_output_load_label (void)
8553 if (TARGET_EXPLICIT_RELOCS)
8554 switch (mips_abi)
8556 case ABI_N32:
8557 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8559 case ABI_64:
8560 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
8562 default:
8563 if (ISA_HAS_LOAD_DELAY)
8564 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8565 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
8567 else
8569 if (Pmode == DImode)
8570 return "%[dla\t%@,%0";
8571 else
8572 return "%[la\t%@,%0";
8577 /* Output assembly instructions to perform a conditional branch.
8579 INSN is the branch instruction. OPERANDS[0] is the condition.
8580 OPERANDS[1] is the target of the branch. OPERANDS[2] is the first
8581 operand to the condition. If TWO_OPERANDS_P is
8582 nonzero the comparison takes two operands; OPERANDS[3] will be the
8583 second operand.
8585 If INVERTED_P is nonzero we are to branch if the condition does
8586 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8588 LENGTH is the length (in bytes) of the sequence we are to generate.
8589 That tells us whether to generate a simple conditional branch, or a
8590 reversed conditional branch around a `jr' instruction. */
8591 const char *
8592 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8593 int float_p, int inverted_p, int length)
8595 static char buffer[200];
8596 /* The kind of comparison we are doing. */
8597 enum rtx_code code = GET_CODE (operands[0]);
8598 /* Nonzero if the opcode for the comparison needs a `z' indicating
8599 that it is a comparison against zero. */
8600 int need_z_p;
8601 /* A string to use in the assembly output to represent the first
8602 operand. */
8603 const char *op1 = "%z2";
8604 /* A string to use in the assembly output to represent the second
8605 operand. Use the hard-wired zero register if there's no second
8606 operand. */
8607 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8608 /* The operand-printing string for the comparison. */
8609 const char *const comp = (float_p ? "%F0" : "%C0");
8610 /* The operand-printing string for the inverted comparison. */
8611 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8613 /* The MIPS processors (for ISA levels of at least two) have
8614 "likely" variants of each branch instruction. These instructions
8615 annul the instruction in the delay slot if the branch is not
8616 taken. */
8617 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8619 if (!two_operands_p)
8621 /* To compute whether A > B, for example, we normally
8622 subtract B from A and then look at the sign bit. But, if we
8623 are doing an unsigned comparison, and B is zero, we don't
8624 have to do the subtraction. Instead, we can just check to
8625 see if A is nonzero. Thus, we change the CODE here to
8626 reflect the simpler comparison operation. */
8627 switch (code)
8629 case GTU:
8630 code = NE;
8631 break;
8633 case LEU:
8634 code = EQ;
8635 break;
8637 case GEU:
8638 /* A condition which will always be true. */
8639 code = EQ;
8640 op1 = "%.";
8641 break;
8643 case LTU:
8644 /* A condition which will always be false. */
8645 code = NE;
8646 op1 = "%.";
8647 break;
8649 default:
8650 /* Not a special case. */
8651 break;
8655 /* Relative comparisons are always done against zero. But
8656 equality comparisons are done between two operands, and therefore
8657 do not require a `z' in the assembly language output. */
8658 need_z_p = (!float_p && code != EQ && code != NE);
8659 /* For comparisons against zero, the zero is not provided
8660 explicitly. */
8661 if (need_z_p)
8662 op2 = "";
8664 /* Begin by terminating the buffer. That way we can always use
8665 strcat to add to it. */
8666 buffer[0] = '\0';
8668 switch (length)
8670 case 4:
8671 case 8:
8672 /* Just a simple conditional branch. */
8673 if (float_p)
8674 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8675 inverted_p ? inverted_comp : comp);
8676 else
8677 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8678 inverted_p ? inverted_comp : comp,
8679 need_z_p ? "z" : "",
8680 op1,
8681 op2);
8682 return buffer;
8684 case 12:
8685 case 16:
8686 case 24:
8687 case 28:
8689 /* Generate a reversed conditional branch around a `j'
8690 instruction:
8692 .set noreorder
8693 .set nomacro
8694 bc l
8695 delay_slot or #nop
8696 j target
8697 #nop
8698 l:
8699 .set macro
8700 .set reorder
8702 If the original branch was a likely branch, the delay slot
8703 must be executed only if the branch is taken, so generate:
8705 .set noreorder
8706 .set nomacro
8707 bc l
8708 #nop
8709 j target
8710 delay slot or #nop
8711 l:
8712 .set macro
8713 .set reorder
8715 When generating PIC, instead of:
8717 j target
8719 we emit:
8721 .set noat
8722 la $at, target
8723 jr $at
8724 .set at
8727 rtx orig_target;
8728 rtx target = gen_label_rtx ();
8730 orig_target = operands[1];
8731 operands[1] = target;
8732 /* Generate the reversed comparison. This takes four
8733 bytes. */
8734 if (float_p)
8735 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8736 inverted_p ? comp : inverted_comp);
8737 else
8738 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8739 inverted_p ? comp : inverted_comp,
8740 need_z_p ? "z" : "",
8741 op1,
8742 op2);
8743 output_asm_insn (buffer, operands);
8745 if (length != 16 && length != 28 && ! mips_branch_likely)
8747 /* Output delay slot instruction. */
8748 rtx insn = final_sequence;
8749 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8750 optimize, 0, 1, NULL);
8751 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8753 else
8754 output_asm_insn ("%#", 0);
8756 if (length <= 16)
8757 output_asm_insn ("j\t%0", &orig_target);
8758 else
8760 output_asm_insn (mips_output_load_label (), &orig_target);
8761 output_asm_insn ("jr\t%@%]", 0);
8764 if (length != 16 && length != 28 && mips_branch_likely)
8766 /* Output delay slot instruction. */
8767 rtx insn = final_sequence;
8768 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8769 optimize, 0, 1, NULL);
8770 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8772 else
8773 output_asm_insn ("%#", 0);
8775 (*targetm.asm_out.internal_label) (asm_out_file, "L",
8776 CODE_LABEL_NUMBER (target));
8778 return "";
8781 default:
8782 gcc_unreachable ();
8785 /* NOTREACHED */
8786 return 0;
8789 /* Used to output div or ddiv instruction DIVISION, which has the operands
8790 given by OPERANDS. Add in a divide-by-zero check if needed.
8792 When working around R4000 and R4400 errata, we need to make sure that
8793 the division is not immediately followed by a shift[1][2]. We also
8794 need to stop the division from being put into a branch delay slot[3].
8795 The easiest way to avoid both problems is to add a nop after the
8796 division. When a divide-by-zero check is needed, this nop can be
8797 used to fill the branch delay slot.
8799 [1] If a double-word or a variable shift executes immediately
8800 after starting an integer division, the shift may give an
8801 incorrect result. See quotations of errata #16 and #28 from
8802 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8803 in mips.md for details.
8805 [2] A similar bug to [1] exists for all revisions of the
8806 R4000 and the R4400 when run in an MC configuration.
8807 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
8809 "19. In this following sequence:
8811 ddiv (or ddivu or div or divu)
8812 dsll32 (or dsrl32, dsra32)
8814 if an MPT stall occurs, while the divide is slipping the cpu
8815 pipeline, then the following double shift would end up with an
8816 incorrect result.
8818 Workaround: The compiler needs to avoid generating any
8819 sequence with divide followed by extended double shift."
8821 This erratum is also present in "MIPS R4400MC Errata, Processor
8822 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
8823 & 3.0" as errata #10 and #4, respectively.
8825 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8826 (also valid for MIPS R4000MC processors):
8828 "52. R4000SC: This bug does not apply for the R4000PC.
8830 There are two flavors of this bug:
8832 1) If the instruction just after divide takes an RF exception
8833 (tlb-refill, tlb-invalid) and gets an instruction cache
8834 miss (both primary and secondary) and the line which is
8835 currently in secondary cache at this index had the first
8836 data word, where the bits 5..2 are set, then R4000 would
8837 get a wrong result for the div.
8841 div r8, r9
8842 ------------------- # end-of page. -tlb-refill
8846 div r8, r9
8847 ------------------- # end-of page. -tlb-invalid
8850 2) If the divide is in the taken branch delay slot, where the
8851 target takes RF exception and gets an I-cache miss for the
8852 exception vector or where I-cache miss occurs for the
8853 target address, under the above mentioned scenarios, the
8854 div would get wrong results.
8857 j r2 # to next page mapped or unmapped
8858 div r8,r9 # this bug would be there as long
8859 # as there is an ICache miss and
8860 nop # the "data pattern" is present
8863 beq r0, r0, NextPage # to Next page
8864 div r8,r9
8867 This bug is present for div, divu, ddiv, and ddivu
8868 instructions.
8870 Workaround: For item 1), OS could make sure that the next page
8871 after the divide instruction is also mapped. For item 2), the
8872 compiler could make sure that the divide instruction is not in
8873 the branch delay slot."
8875 These processors have PRId values of 0x00004220 and 0x00004300 for
8876 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
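/* An illustrative sketch of the output (hypothetical operands, with
   the divisor in $5; the DIVISION template itself comes from mips.md):
   with -mcheck-zero-division on an ISA that supports conditional
   traps, the division is followed by

	teq	$5,$0,7

   whereas with only the R4000/R4400 workarounds in effect it is
   followed by a single nop.  */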
8878 const char *
8879 mips_output_division (const char *division, rtx *operands)
8881 const char *s;
8883 s = division;
8884 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
8886 output_asm_insn (s, operands);
8887 s = "nop";
8889 if (TARGET_CHECK_ZERO_DIV)
8891 if (TARGET_MIPS16)
8893 output_asm_insn (s, operands);
8894 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
8896 else if (GENERATE_DIVIDE_TRAPS)
8898 output_asm_insn (s, operands);
8899 s = "teq\t%2,%.,7";
8901 else
8903 output_asm_insn ("%(bne\t%2,%.,1f", operands);
8904 output_asm_insn (s, operands);
8905 s = "break\t7%)\n1:";
8908 return s;
8911 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
8912 with a final "000" replaced by "k". Ignore case.
8914 Note: this function is shared between GCC and GAS. */
8916 static bool
8917 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
8919 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
8920 given++, canonical++;
8922 return ((*given == 0 && *canonical == 0)
8923 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
8927 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
8928 CPU name. We've traditionally allowed a lot of variation here.
8930 Note: this function is shared between GCC and GAS. */
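/* For illustration (examples implied by the rules below): "r4000",
   "4000", "r4k" and "4k" are all accepted for the canonical name
   "r4000", and "4300" or "r4300" for the canonical name "vr4300".  */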
8932 static bool
8933 mips_matching_cpu_name_p (const char *canonical, const char *given)
8935 /* First see if the name matches exactly, or with a final "000"
8936 turned into "k". */
8937 if (mips_strict_matching_cpu_name_p (canonical, given))
8938 return true;
8940 /* If not, try comparing based on numerical designation alone.
8941 See if GIVEN is an unadorned number, or 'r' followed by a number. */
8942 if (TOLOWER (*given) == 'r')
8943 given++;
8944 if (!ISDIGIT (*given))
8945 return false;
8947 /* Skip over some well-known prefixes in the canonical name,
8948 hoping to find a number there too. */
8949 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
8950 canonical += 2;
8951 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
8952 canonical += 2;
8953 else if (TOLOWER (canonical[0]) == 'r')
8954 canonical += 1;
8956 return mips_strict_matching_cpu_name_p (canonical, given);
8960 /* Parse an option that takes the name of a processor as its argument.
8961 OPTION is the name of the option and CPU_STRING is the argument.
8962 Return the corresponding processor enumeration if the CPU_STRING is
8963 recognized, otherwise report an error and return null.
8965 A similar function exists in GAS. */
8967 static const struct mips_cpu_info *
8968 mips_parse_cpu (const char *option, const char *cpu_string)
8970 const struct mips_cpu_info *p;
8971 const char *s;
8973 /* In the past, we allowed upper-case CPU names, but it doesn't
8974 work well with the multilib machinery. */
8975 for (s = cpu_string; *s != 0; s++)
8976 if (ISUPPER (*s))
8978 warning ("the cpu name must be lower case");
8979 break;
8982 /* 'from-abi' selects the most compatible architecture for the given
8983 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
8984 EABIs, we have to decide whether we're using the 32-bit or 64-bit
8985 version. Look first at the -mgp options, if given, otherwise base
8986 the choice on MASK_64BIT in TARGET_DEFAULT. */
8987 if (strcasecmp (cpu_string, "from-abi") == 0)
8988 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
8989 : ABI_NEEDS_64BIT_REGS ? 3
8990 : (TARGET_64BIT ? 3 : 1));
8992 /* 'default' has traditionally been a no-op. Probably not very useful. */
8993 if (strcasecmp (cpu_string, "default") == 0)
8994 return 0;
8996 for (p = mips_cpu_info_table; p->name != 0; p++)
8997 if (mips_matching_cpu_name_p (p->name, cpu_string))
8998 return p;
9000 error ("bad value (%s) for %s", cpu_string, option);
9001 return 0;
9005 /* Return the processor associated with the given ISA level, or null
9006 if the ISA isn't valid. */
9008 static const struct mips_cpu_info *
9009 mips_cpu_info_from_isa (int isa)
9011 const struct mips_cpu_info *p;
9013 for (p = mips_cpu_info_table; p->name != 0; p++)
9014 if (p->isa == isa)
9015 return p;
9017 return 0;
9020 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9021 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9022 they only hold condition code modes, and CCmode is always considered to
9023 be 4 bytes wide. All other registers are word sized. */
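/* For example (with the usual values): a DFmode value occupies two FP
   registers when UNITS_PER_FPREG is 4 but only one when it is 8, a
   CCmode value always counts as a single status register, and a DImode
   value needs two GPRs on a 32-bit target.  */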
9025 unsigned int
9026 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9028 if (ST_REG_P (regno))
9029 return ((GET_MODE_SIZE (mode) + 3) / 4);
9030 else if (! FP_REG_P (regno))
9031 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9032 else
9033 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9036 /* Implement TARGET_RETURN_IN_MEMORY. Under the old ABIs (o32 and o64),
9037 all BLKmode objects are returned in memory. Under the new (N32 and
9038 64-bit MIPS ABIs) small structures are returned in a register.
9039 Objects with varying size must still be returned in memory, of
9040 course. */
9042 static bool
9043 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9045 if (TARGET_OLDABI)
9046 return (TYPE_MODE (type) == BLKmode);
9047 else
9048 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9049 || (int_size_in_bytes (type) == -1));
9052 static bool
9053 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9055 return !TARGET_OLDABI;
9058 /* Return true if INSN is a multiply-add or multiply-subtract
9059 instruction and PREV assigns to the accumulator operand. */
9061 bool
9062 mips_linked_madd_p (rtx prev, rtx insn)
9064 rtx x;
9066 x = single_set (insn);
9067 if (x == 0)
9068 return false;
9070 x = SET_SRC (x);
9072 if (GET_CODE (x) == PLUS
9073 && GET_CODE (XEXP (x, 0)) == MULT
9074 && reg_set_p (XEXP (x, 1), prev))
9075 return true;
9077 if (GET_CODE (x) == MINUS
9078 && GET_CODE (XEXP (x, 1)) == MULT
9079 && reg_set_p (XEXP (x, 0), prev))
9080 return true;
9082 return false;
9085 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9086 that may clobber hi or lo. */
9088 static rtx mips_macc_chains_last_hilo;
9090 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9091 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9093 static void
9094 mips_macc_chains_record (rtx insn)
9096 if (get_attr_may_clobber_hilo (insn))
9097 mips_macc_chains_last_hilo = insn;
9100 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9101 has NREADY elements, looking for a multiply-add or multiply-subtract
9102 instruction that is cumulative with mips_macc_chains_last_hilo.
9103 If there is one, promote it ahead of anything else that might
9104 clobber hi or lo. */
9106 static void
9107 mips_macc_chains_reorder (rtx *ready, int nready)
9109 int i, j;
9111 if (mips_macc_chains_last_hilo != 0)
9112 for (i = nready - 1; i >= 0; i--)
9113 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9115 for (j = nready - 1; j > i; j--)
9116 if (recog_memoized (ready[j]) >= 0
9117 && get_attr_may_clobber_hilo (ready[j]))
9119 mips_promote_ready (ready, i, j);
9120 break;
9122 break;
9126 /* The last instruction to be scheduled. */
9128 static rtx vr4130_last_insn;
9130 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9131 points to an rtx that is initially an instruction. Nullify the rtx
9132 if the instruction uses the value of register X. */
9134 static void
9135 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9137 rtx *insn_ptr = data;
9138 if (REG_P (x)
9139 && *insn_ptr != 0
9140 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9141 *insn_ptr = 0;
9144 /* Return true if there is true register dependence between vr4130_last_insn
9145 and INSN. */
9147 static bool
9148 vr4130_true_reg_dependence_p (rtx insn)
9150 note_stores (PATTERN (vr4130_last_insn),
9151 vr4130_true_reg_dependence_p_1, &insn);
9152 return insn == 0;
9155 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9156 the ready queue and that INSN2 is the instruction after it, return
9157 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9158 in which INSN1 and INSN2 can probably issue in parallel, but for
9159 which (INSN2, INSN1) should be less sensitive to instruction
9160 alignment than (INSN1, INSN2). See 4130.md for more details. */
9162 static bool
9163 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9165 rtx dep;
9167 /* Check for the following case:
9169 1) there is some other instruction X with an anti dependence on INSN1;
9170 2) X has a higher priority than INSN2; and
9171 3) X is an arithmetic instruction (and thus has no unit restrictions).
9173 If INSN1 is the last instruction blocking X, it would be better to
9174 choose (INSN1, X) over (INSN2, INSN1). */
9175 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9176 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9177 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9178 && recog_memoized (XEXP (dep, 0)) >= 0
9179 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9180 return false;
9182 if (vr4130_last_insn != 0
9183 && recog_memoized (insn1) >= 0
9184 && recog_memoized (insn2) >= 0)
9186 /* See whether INSN1 and INSN2 use different execution units,
9187 or if they are both ALU-type instructions. If so, they can
9188 probably execute in parallel. */
9189 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9190 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9191 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9193 /* If only one of the instructions has a dependence on
9194 vr4130_last_insn, prefer to schedule the other one first. */
9195 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9196 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9197 if (dep1 != dep2)
9198 return dep1;
9200 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9201 is not an ALU-type instruction and if INSN1 uses the same
9202 execution unit. (Note that if this condition holds, we already
9203 know that INSN2 uses a different execution unit.) */
9204 if (class1 != VR4130_CLASS_ALU
9205 && recog_memoized (vr4130_last_insn) >= 0
9206 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9207 return true;
9210 return false;
9213 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9214 queue with at least two instructions. Swap the first two if
9215 vr4130_swap_insns_p says that it could be worthwhile. */
9217 static void
9218 vr4130_reorder (rtx *ready, int nready)
9220 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9221 mips_promote_ready (ready, nready - 2, nready - 1);
9224 /* Remove the instruction at index LOWER from ready queue READY and
9225 reinsert it in front of the instruction at index HIGHER. LOWER must
9226 be <= HIGHER. */
9228 static void
9229 mips_promote_ready (rtx *ready, int lower, int higher)
9231 rtx new_head;
9232 int i;
9234 new_head = ready[lower];
9235 for (i = lower; i < higher; i++)
9236 ready[i] = ready[i + 1];
9237 ready[i] = new_head;
9240 /* Implement TARGET_SCHED_REORDER. */
9242 static int
9243 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9244 rtx *ready, int *nreadyp, int cycle)
9246 if (!reload_completed && TUNE_MACC_CHAINS)
9248 if (cycle == 0)
9249 mips_macc_chains_last_hilo = 0;
9250 if (*nreadyp > 0)
9251 mips_macc_chains_reorder (ready, *nreadyp);
9253 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9255 if (cycle == 0)
9256 vr4130_last_insn = 0;
9257 if (*nreadyp > 1)
9258 vr4130_reorder (ready, *nreadyp);
9260 return mips_issue_rate ();
9263 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9265 static int
9266 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9267 rtx insn, int more)
9269 switch (GET_CODE (PATTERN (insn)))
9271 case USE:
9272 case CLOBBER:
9273 /* Don't count USEs and CLOBBERs against the issue rate. */
9274 break;
9276 default:
9277 more--;
9278 if (!reload_completed && TUNE_MACC_CHAINS)
9279 mips_macc_chains_record (insn);
9280 vr4130_last_insn = insn;
9281 break;
9283 return more;
9286 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9287 dependencies have no cost. */
9289 static int
9290 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9291 rtx dep ATTRIBUTE_UNUSED, int cost)
9293 if (REG_NOTE_KIND (link) != 0)
9294 return 0;
9295 return cost;
9298 /* Return the number of instructions that can be issued per cycle. */
9300 static int
9301 mips_issue_rate (void)
9303 switch (mips_tune)
9305 case PROCESSOR_R4130:
9306 case PROCESSOR_R5400:
9307 case PROCESSOR_R5500:
9308 case PROCESSOR_R7000:
9309 case PROCESSOR_R9000:
9310 return 2;
9312 case PROCESSOR_SB1:
9313 /* This is actually 4, but we get better performance if we claim 3.
9314 This is partly because of unwanted speculative code motion with the
9315 larger number, and partly because in most common cases we can't
9316 reach the theoretical max of 4. */
9317 return 3;
9319 default:
9320 return 1;
9324 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
9325 be as wide as the scheduling freedom in the DFA. */
9327 static int
9328 mips_multipass_dfa_lookahead (void)
9330 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9331 if (mips_tune == PROCESSOR_SB1)
9332 return 4;
9334 return 0;
9337 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9338 return the first operand of the associated "pref" or "prefx" insn. */
9340 rtx
9341 mips_prefetch_cookie (rtx write, rtx locality)
9343 /* store_streamed / load_streamed. */
9344 if (INTVAL (locality) <= 0)
9345 return GEN_INT (INTVAL (write) + 4);
9347 /* store / load. */
9348 if (INTVAL (locality) <= 2)
9349 return write;
9351 /* store_retained / load_retained. */
9352 return GEN_INT (INTVAL (write) + 6);
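/* For illustration (not part of the original source), the mapping above
   yields the following hint operands, with WRITE being 0 for a read
   prefetch and 1 for a write prefetch:

       locality 0        ->  4 (load_streamed)  /  5 (store_streamed)
       locality 1 or 2   ->  0 (load)           /  1 (store)
       locality 3        ->  6 (load_retained)  /  7 (store_retained)  */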
9355 /* MIPS builtin function support. */
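/* As a sketch of how these builtins appear in user code (illustrative,
   not part of the original source; it assumes GCC's vector_size extension
   and compilation with -mpaired-single):

       typedef float v2sf __attribute__ ((vector_size (8)));

       v2sf
       absolute_values (v2sf x)
       {
         return __builtin_mips_abs_ps (x);
       }

       int
       upper_halves_equal (v2sf a, v2sf b)
       {
         return __builtin_mips_upper_c_eq_ps (a, b);
       }

   The prototypes follow the MIPS_*_FTYPE_* fields in the tables below:
   abs_ps is MIPS_V2SF_FTYPE_V2SF and upper_c_eq_ps is
   MIPS_INT_FTYPE_V2SF_V2SF.  The function names in the sketch are made up
   for illustration.  */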
9357 struct builtin_description
9359 /* The code of the main .md file instruction. See mips_builtin_type
9360 for more information. */
9361 enum insn_code icode;
9363 /* The floating-point comparison code to use with ICODE, if any. */
9364 enum mips_fp_condition cond;
9366 /* The name of the builtin function. */
9367 const char *name;
9369 /* Specifies how the function should be expanded. */
9370 enum mips_builtin_type builtin_type;
9372 /* The function's prototype. */
9373 enum mips_function_type function_type;
9375 /* The target flags required for this function. */
9376 int target_flags;
9379 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9380 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9381 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9382 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9383 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
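/* For example (illustrative), the mips_bdesc entry below

       DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE)

   expands to

       { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
         MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
         MASK_PAIRED_SINGLE }.  */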
9385 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9386 TARGET_FLAGS. */
9387 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9388 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9389 "__builtin_mips_" #INSN "_" #COND "_s", \
9390 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9391 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9392 "__builtin_mips_" #INSN "_" #COND "_d", \
9393 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9395 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9396 The lower and upper forms require TARGET_FLAGS while the any and all
9397 forms require MASK_MIPS3D. */
9398 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9399 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9400 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9401 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9402 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9403 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9404 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9405 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9406 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9407 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9408 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9409 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9410 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9412 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9413 require MASK_MIPS3D. */
9414 #define CMP_4S_BUILTINS(INSN, COND) \
9415 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9416 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9417 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9418 MASK_MIPS3D }, \
9419 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9420 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9421 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9422 MASK_MIPS3D }
9424 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9425 instruction requires TARGET_FLAGS. */
9426 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9427 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9428 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9429 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9430 TARGET_FLAGS }, \
9431 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9432 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9433 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9434 TARGET_FLAGS }
9436 /* Define all the builtins related to c.cond.fmt condition COND. */
9437 #define CMP_BUILTINS(COND) \
9438 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9439 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9440 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9441 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9442 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9443 CMP_4S_BUILTINS (c, COND), \
9444 CMP_4S_BUILTINS (cabs, COND)
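/* Counting the macros above, each CMP_BUILTINS (COND) therefore adds 18
   table entries: two movt/movf entries each for c and cabs (4), two
   scalar cabs comparisons (2), four paired-single forms each for c and
   cabs (8), and two four-single forms each for c and cabs (4).  (This
   tally is illustrative and not part of the original source.)  */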
9446 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9447 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
9449 static const struct builtin_description mips_bdesc[] =
9451 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9452 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9453 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9454 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9455 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE),
9456 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9457 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9458 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9460 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, MASK_PAIRED_SINGLE),
9461 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9462 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9463 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9464 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9466 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9467 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9468 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9469 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9470 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9471 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9473 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9474 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9475 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9476 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9477 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9478 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9480 MIPS_FP_CONDITIONS (CMP_BUILTINS)
9483 /* Builtin functions for the SB-1 processor. */
9485 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
9487 static const struct builtin_description sb1_bdesc[] =
9489 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE)
9492 /* This helps provide a mapping from builtin function codes to bdesc
9493 arrays. */
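/* Illustrative note (not part of the original source): function codes are
   handed out sequentially across bdesc_arrays, so if mips_bdesc has N
   entries, codes 0 .. N - 1 refer to mips_bdesc and code N refers to
   sb1_bdesc[0].  mips_init_builtins below assigns the codes using the
   running OFFSET, and mips_expand_builtin recovers the right array by
   subtracting m->size from the code until the remainder fits.  */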
9495 struct bdesc_map
9497 /* The builtin function table that this entry describes. */
9498 const struct builtin_description *bdesc;
9500 /* The number of entries in the builtin function table. */
9501 unsigned int size;
9503 /* The target processor that supports these builtin functions.
9504 PROCESSOR_DEFAULT means we enable them for all processors. */
9505 enum processor_type proc;
9508 static const struct bdesc_map bdesc_arrays[] =
9510 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_DEFAULT },
9511 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 }
9514 /* Take the head of argument list *ARGLIST and convert it into a form
9515 suitable for input operand OP of instruction ICODE. Return the value
9516 and point *ARGLIST at the next element of the list. */
9518 static rtx
9519 mips_prepare_builtin_arg (enum insn_code icode,
9520 unsigned int op, tree *arglist)
9522 rtx value;
9523 enum machine_mode mode;
9525 value = expand_expr (TREE_VALUE (*arglist), NULL_RTX, VOIDmode, 0);
9526 mode = insn_data[icode].operand[op].mode;
9527 if (!insn_data[icode].operand[op].predicate (value, mode))
9528 value = copy_to_mode_reg (mode, value);
9530 *arglist = TREE_CHAIN (*arglist);
9531 return value;
9534 /* Return an rtx suitable for output operand OP of instruction ICODE.
9535 If TARGET is non-null, try to use it where possible. */
9537 static rtx
9538 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
9540 enum machine_mode mode;
9542 mode = insn_data[icode].operand[op].mode;
9543 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
9544 target = gen_reg_rtx (mode);
9546 return target;
9549 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
9551 rtx
9552 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9553 enum machine_mode mode ATTRIBUTE_UNUSED,
9554 int ignore ATTRIBUTE_UNUSED)
9556 enum insn_code icode;
9557 enum mips_builtin_type type;
9558 tree fndecl, arglist;
9559 unsigned int fcode;
9560 const struct builtin_description *bdesc;
9561 const struct bdesc_map *m;
9563 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9564 arglist = TREE_OPERAND (exp, 1);
9565 fcode = DECL_FUNCTION_CODE (fndecl);
9567 bdesc = NULL;
9568 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9570 if (fcode < m->size)
9572 bdesc = m->bdesc;
9573 icode = bdesc[fcode].icode;
9574 type = bdesc[fcode].builtin_type;
9575 break;
9577 fcode -= m->size;
9579 if (bdesc == NULL)
9580 return 0;
9582 switch (type)
9584 case MIPS_BUILTIN_DIRECT:
9585 return mips_expand_builtin_direct (icode, target, arglist);
9587 case MIPS_BUILTIN_MOVT:
9588 case MIPS_BUILTIN_MOVF:
9589 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
9590 target, arglist);
9592 case MIPS_BUILTIN_CMP_ANY:
9593 case MIPS_BUILTIN_CMP_ALL:
9594 case MIPS_BUILTIN_CMP_UPPER:
9595 case MIPS_BUILTIN_CMP_LOWER:
9596 case MIPS_BUILTIN_CMP_SINGLE:
9597 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
9598 target, arglist);
9600 default:
9601 return 0;
9605 /* Init builtin functions.  This is called from TARGET_INIT_BUILTINS.  */
9607 void
9608 mips_init_builtins (void)
9610 const struct builtin_description *d;
9611 const struct bdesc_map *m;
9612 tree types[(int) MIPS_MAX_FTYPE_MAX];
9613 tree V2SF_type_node;
9614 unsigned int offset;
9616 /* We have only builtins for -mpaired-single and -mips3d. */
9617 if (!TARGET_PAIRED_SINGLE_FLOAT)
9618 return;
9620 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
9622 types[MIPS_V2SF_FTYPE_V2SF]
9623 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
9625 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
9626 = build_function_type_list (V2SF_type_node,
9627 V2SF_type_node, V2SF_type_node, NULL_TREE);
9629 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
9630 = build_function_type_list (V2SF_type_node,
9631 V2SF_type_node, V2SF_type_node,
9632 integer_type_node, NULL_TREE);
9634 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
9635 = build_function_type_list (V2SF_type_node,
9636 V2SF_type_node, V2SF_type_node,
9637 V2SF_type_node, V2SF_type_node, NULL_TREE);
9639 types[MIPS_V2SF_FTYPE_SF_SF]
9640 = build_function_type_list (V2SF_type_node,
9641 float_type_node, float_type_node, NULL_TREE);
9643 types[MIPS_INT_FTYPE_V2SF_V2SF]
9644 = build_function_type_list (integer_type_node,
9645 V2SF_type_node, V2SF_type_node, NULL_TREE);
9647 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
9648 = build_function_type_list (integer_type_node,
9649 V2SF_type_node, V2SF_type_node,
9650 V2SF_type_node, V2SF_type_node, NULL_TREE);
9652 types[MIPS_INT_FTYPE_SF_SF]
9653 = build_function_type_list (integer_type_node,
9654 float_type_node, float_type_node, NULL_TREE);
9656 types[MIPS_INT_FTYPE_DF_DF]
9657 = build_function_type_list (integer_type_node,
9658 double_type_node, double_type_node, NULL_TREE);
9660 types[MIPS_SF_FTYPE_V2SF]
9661 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
9663 types[MIPS_SF_FTYPE_SF]
9664 = build_function_type_list (float_type_node,
9665 float_type_node, NULL_TREE);
9667 types[MIPS_SF_FTYPE_SF_SF]
9668 = build_function_type_list (float_type_node,
9669 float_type_node, float_type_node, NULL_TREE);
9671 types[MIPS_DF_FTYPE_DF]
9672 = build_function_type_list (double_type_node,
9673 double_type_node, NULL_TREE);
9675 types[MIPS_DF_FTYPE_DF_DF]
9676 = build_function_type_list (double_type_node,
9677 double_type_node, double_type_node, NULL_TREE);
9679 /* Iterate through all of the bdesc arrays, initializing all of the
9680 builtin functions. */
9682 offset = 0;
9683 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9685 if (m->proc == PROCESSOR_DEFAULT || (m->proc == mips_arch))
9686 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
9687 if ((d->target_flags & target_flags) == d->target_flags)
9688 lang_hooks.builtin_function (d->name, types[d->function_type],
9689 d - m->bdesc + offset,
9690 BUILT_IN_MD, NULL, NULL);
9691 offset += m->size;
9695 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
9696 .md pattern and ARGLIST is the list of function arguments. TARGET,
9697 if nonnull, suggests a good place to put the result. */
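/* For example (an illustrative sketch): for __builtin_mips_pll_ps, ICODE
   is CODE_FOR_mips_pll_ps; assuming its mips.md pattern has one output
   and two input operands (as the MIPS_V2SF_FTYPE_V2SF_V2SF prototype
   suggests), n_operands is 3 and the switch below emits
   GEN_FCN (icode) (target, ops[1], ops[2]).  */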
9699 static rtx
9700 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist)
9702 rtx ops[MAX_RECOG_OPERANDS];
9703 int i;
9705 target = mips_prepare_builtin_target (icode, 0, target);
9706 for (i = 1; i < insn_data[icode].n_operands; i++)
9707 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9709 switch (insn_data[icode].n_operands)
9711 case 2:
9712 emit_insn (GEN_FCN (icode) (target, ops[1]));
9713 break;
9715 case 3:
9716 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2]));
9717 break;
9719 case 4:
9720 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2], ops[3]));
9721 break;
9723 default:
9724 gcc_unreachable ();
9726 return target;
9729 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
9730 function (TYPE says which). ARGLIST is the list of arguments to the
9731 function, ICODE is the instruction that should be used to compare
9732 the first two arguments, and COND is the condition it should test.
9733 TARGET, if nonnull, suggests a good place to put the result. */
9735 static rtx
9736 mips_expand_builtin_movtf (enum mips_builtin_type type,
9737 enum insn_code icode, enum mips_fp_condition cond,
9738 rtx target, tree arglist)
9740 rtx cmp_result, op0, op1;
9742 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9743 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9744 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9745 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
9747 icode = CODE_FOR_mips_cond_move_tf_ps;
9748 target = mips_prepare_builtin_target (icode, 0, target);
9749 if (type == MIPS_BUILTIN_MOVT)
9751 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9752 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9754 else
9756 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9757 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9759 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
9760 return target;
9763 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
9764 of the comparison instruction and COND is the condition it should test.
9765 ARGLIST is the list of function arguments and TARGET, if nonnull,
9766 suggests a good place to put the boolean result. */
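/* In outline, the expansion below produces (an illustrative sketch, not
   part of the original source):

       target = TARGET_IF_EQUAL
       <comparison instruction, setting CMP_RESULT>
       branch to label1 if CMP_RESULT != CMP_VALUE
       goto label2
     label1:
       target = TARGET_IF_UNEQUAL
     label2:

   so a MIPS_BUILTIN_CMP_ALL builtin returns 1 only when CMP_RESULT is -1
   (every condition register true), and a MIPS_BUILTIN_CMP_ANY builtin
   returns 0 only when CMP_RESULT is 0 (every condition register false).  */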
9768 static rtx
9769 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
9770 enum insn_code icode, enum mips_fp_condition cond,
9771 rtx target, tree arglist)
9773 rtx label1, label2, if_then_else;
9774 rtx pat, cmp_result, ops[MAX_RECOG_OPERANDS];
9775 rtx target_if_equal, target_if_unequal;
9776 int cmp_value, i;
9778 if (target == 0 || GET_MODE (target) != SImode)
9779 target = gen_reg_rtx (SImode);
9781 /* Prepare the operands to the comparison. */
9782 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9783 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
9784 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9786 switch (insn_data[icode].n_operands)
9788 case 4:
9789 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond));
9790 break;
9792 case 6:
9793 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2],
9794 ops[3], ops[4], GEN_INT (cond));
9795 break;
9797 default:
9798 gcc_unreachable ();
9801 /* If the comparison sets more than one register, we define the result
9802 to be 0 if all registers are false and -1 if all registers are true.
9803 The value of the complete result is indeterminate otherwise. It is
9804 possible to test individual registers using SUBREGs.
9806 Set up CMP_RESULT, CMP_VALUE, TARGET_IF_EQUAL and TARGET_IF_UNEQUAL so
9807 that the result should be TARGET_IF_EQUAL if (EQ CMP_RESULT CMP_VALUE)
9808 and TARGET_IF_UNEQUAL otherwise. */
9809 if (builtin_type == MIPS_BUILTIN_CMP_ALL)
9811 cmp_value = -1;
9812 target_if_equal = const1_rtx;
9813 target_if_unequal = const0_rtx;
9815 else
9817 cmp_value = 0;
9818 target_if_equal = const0_rtx;
9819 target_if_unequal = const1_rtx;
9820 if (builtin_type == MIPS_BUILTIN_CMP_UPPER)
9821 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 4);
9822 else if (builtin_type == MIPS_BUILTIN_CMP_LOWER)
9823 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 0);
9826 /* First assume that CMP_RESULT == CMP_VALUE. */
9827 emit_move_insn (target, target_if_equal);
9829 /* Branch to LABEL1 if CMP_RESULT != CMP_VALUE. */
9830 emit_insn (pat);
9831 label1 = gen_label_rtx ();
9832 label2 = gen_label_rtx ();
9833 if_then_else
9834 = gen_rtx_IF_THEN_ELSE (VOIDmode,
9835 gen_rtx_fmt_ee (NE, GET_MODE (cmp_result),
9836 cmp_result, GEN_INT (cmp_value)),
9837 gen_rtx_LABEL_REF (VOIDmode, label1), pc_rtx);
9838 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
9839 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
9840 gen_rtx_LABEL_REF (VOIDmode, label2)));
9841 emit_barrier ();
9842 emit_label (label1);
9844 /* Fix TARGET for CMP_RESULT != CMP_VALUE. */
9845 emit_move_insn (target, target_if_unequal);
9846 emit_label (label2);
9848 return target;
9851 #include "gt-mips.h"