/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include <signal.h>
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "output.h"
#include "tree.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "ggc.h"
#include "gstab.h"
#include "hashtab.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-gimple.h"
#include "bitmap.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* The maximum distance between the top of the stack frame and the
   value $sp has when we save & restore registers.

   Use a maximum gap of 0x100 in the mips16 case.  We can then use
   unextended instructions to save and restore registers, and to
   allocate and deallocate the top part of the frame.

   The value in the !mips16 case must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  */
#define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
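
/* Illustrative note (a sketch, not from the original sources): with the
   !mips16 limit of 0x7ff0, a frame of 0x9000 bytes cannot be set up with
   a single in-range "addiu $sp,$sp,-N".  The prologue therefore first
   drops $sp by at most MIPS_MAX_FIRST_STACK_STEP, saves the registers
   within that first step, and then allocates the remaining 0x1010 bytes
   separately; the epilogue undoes the two steps in reverse.  */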

/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN) \
  (INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER \
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, 0) \
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
  for ((SUBINSN) = SEQ_BEGIN (INSN); \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
       (SUBINSN) = NEXT_INSN (SUBINSN))
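
/* For illustration (a sketch, not code from the original file), a pass
   that needs to visit every real instruction, including those hidden
   inside delayed-branch SEQUENCEs, can combine the macros above like so:

     rtx subinsn;
     FOR_EACH_SUBINSN (subinsn, insn)
       if (USEFUL_INSN_P (subinsn))
         process (subinsn);

   where "process" is a placeholder for the per-instruction work.  */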

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address (equivalent to CONSTANT_SYMBOLIC).  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Classifies the prototype of a builtin function.  */
enum mips_function_type
{
  MIPS_V2SF_FTYPE_V2SF,
  MIPS_V2SF_FTYPE_V2SF_V2SF,
  MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
  MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
  MIPS_V2SF_FTYPE_SF_SF,
  MIPS_INT_FTYPE_V2SF_V2SF,
  MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
  MIPS_INT_FTYPE_SF_SF,
  MIPS_INT_FTYPE_DF_DF,
  MIPS_SF_FTYPE_V2SF,
  MIPS_SF_FTYPE_SF,
  MIPS_SF_FTYPE_SF_SF,
  MIPS_DF_FTYPE_DF,
  MIPS_DF_FTYPE_DF_DF,

  /* For MIPS DSP ASE */
  MIPS_DI_FTYPE_DI_SI,
  MIPS_DI_FTYPE_DI_SI_SI,
  MIPS_DI_FTYPE_DI_V2HI_V2HI,
  MIPS_DI_FTYPE_DI_V4QI_V4QI,
  MIPS_SI_FTYPE_DI_SI,
  MIPS_SI_FTYPE_PTR_SI,
  MIPS_SI_FTYPE_SI,
  MIPS_SI_FTYPE_SI_SI,
  MIPS_SI_FTYPE_V2HI,
  MIPS_SI_FTYPE_V2HI_V2HI,
  MIPS_SI_FTYPE_V4QI,
  MIPS_SI_FTYPE_V4QI_V4QI,
  MIPS_SI_FTYPE_VOID,
  MIPS_V2HI_FTYPE_SI,
  MIPS_V2HI_FTYPE_SI_SI,
  MIPS_V2HI_FTYPE_V2HI,
  MIPS_V2HI_FTYPE_V2HI_SI,
  MIPS_V2HI_FTYPE_V2HI_V2HI,
  MIPS_V2HI_FTYPE_V4QI,
  MIPS_V2HI_FTYPE_V4QI_V2HI,
  MIPS_V4QI_FTYPE_SI,
  MIPS_V4QI_FTYPE_V2HI_V2HI,
  MIPS_V4QI_FTYPE_V4QI_SI,
  MIPS_V4QI_FTYPE_V4QI_V4QI,
  MIPS_VOID_FTYPE_SI_SI,
  MIPS_VOID_FTYPE_V2HI_V2HI,
  MIPS_VOID_FTYPE_V4QI_V4QI,

  /* The last type.  */
  MIPS_MAX_FTYPE_MAX
};

/* Specifies how a builtin function should be converted into rtl.  */
enum mips_builtin_type
{
  /* The builtin corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The builtin corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The builtin corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The builtin corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};

/* Invokes MACRO (COND) for each c.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f), \
  MACRO (un), \
  MACRO (eq), \
  MACRO (ueq), \
  MACRO (olt), \
  MACRO (ult), \
  MACRO (ole), \
  MACRO (ule), \
  MACRO (sf), \
  MACRO (ngle), \
  MACRO (seq), \
  MACRO (ngl), \
  MACRO (lt), \
  MACRO (nge), \
  MACRO (le), \
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
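
/* For illustration (not part of the original sources): after expansion,
   enum mips_fp_condition contains MIPS_FP_COND_f, MIPS_FP_COND_un,
   MIPS_FP_COND_eq, ..., MIPS_FP_COND_ngt, and mips_fp_conditions[] holds
   the matching strings "f", "un", "eq", ..., "ngt", so that
   mips_fp_conditions[MIPS_FP_COND_eq] yields the "eq" used in a
   c.eq.fmt mnemonic.  */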

/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);

struct mips16_constant;
struct mips_arg_info;
struct mips_address_info;
struct mips_integer_op;
struct mips_sim;

static enum mips_symbol_type mips_classify_symbol (rtx);
static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
static bool mips_classify_address (struct mips_address_info *, rtx,
				   enum machine_mode, int);
static bool mips_cannot_force_const_mem (rtx);
static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
static int mips_symbol_insns (enum mips_symbol_type);
static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
static rtx mips_force_temporary (rtx, rtx);
static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
static unsigned int mips_build_lower (struct mips_integer_op *,
				      unsigned HOST_WIDE_INT);
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);
static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
static int m16_check_op (rtx, int, int, int);
static bool mips_rtx_costs (rtx, int, int, int *);
static int mips_address_cost (rtx);
static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
static void mips_load_call_address (rtx, rtx, int);
static bool mips_function_ok_for_sibcall (tree, tree);
static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
			   tree, int, struct mips_arg_info *);
static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
static void mips_set_architecture (const struct mips_cpu_info *);
static void mips_set_tune (const struct mips_cpu_info *);
static bool mips_handle_option (size_t, const char *, int);
static struct machine_function *mips_init_machine_status (void);
static void print_operand_reloc (FILE *, rtx, const char **);
#if TARGET_IRIX
static void irix_output_external_libcall (rtx);
#endif
static void mips_file_start (void);
static void mips_file_end (void);
static bool mips_rewrite_small_data_p (rtx);
static int mips_small_data_pattern_1 (rtx *, void *);
static int mips_rewrite_small_data_1 (rtx *, void *);
static bool mips_function_has_gp_insn (void);
static unsigned int mips_global_pointer (void);
static bool mips_save_reg_p (unsigned int);
static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
				   mips_save_restore_fn);
static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
static void mips_output_cplocal (void);
static void mips_emit_loadgp (void);
static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
static void mips_set_frame_expr (rtx);
static rtx mips_frame_set (rtx, rtx);
static void mips_save_reg (rtx, rtx);
static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void mips_restore_reg (rtx, rtx);
static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static int symbolic_expression_p (rtx);
static section *mips_select_rtx_section (enum machine_mode, rtx,
					 unsigned HOST_WIDE_INT);
static section *mips_function_rodata_section (tree);
static bool mips_in_small_data_p (tree);
static bool mips_use_anchors_for_symbol_p (rtx);
static int mips_fpr_return_fields (tree, tree *);
static bool mips_return_in_msb (tree);
static rtx mips_return_fpr_pair (enum machine_mode mode,
				 enum machine_mode mode1, HOST_WIDE_INT,
				 enum machine_mode mode2, HOST_WIDE_INT);
static rtx mips16_gp_pseudo_reg (void);
static void mips16_fp_args (FILE *, int, int);
static void build_mips16_function_stub (FILE *);
static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
static void dump_constants (struct mips16_constant *, rtx);
static int mips16_insn_length (rtx);
static int mips16_rewrite_pool_refs (rtx *, void *);
static void mips16_lay_out_constants (void);
static void mips_sim_reset (struct mips_sim *);
static void mips_sim_init (struct mips_sim *, state_t);
static void mips_sim_next_cycle (struct mips_sim *);
static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
static int mips_sim_wait_regs_2 (rtx *, void *);
static void mips_sim_wait_regs_1 (rtx *, void *);
static void mips_sim_wait_regs (struct mips_sim *, rtx);
static void mips_sim_wait_units (struct mips_sim *, rtx);
static void mips_sim_wait_insn (struct mips_sim *, rtx);
static void mips_sim_record_set (rtx, rtx, void *);
static void mips_sim_issue_insn (struct mips_sim *, rtx);
static void mips_sim_issue_nop (struct mips_sim *);
static void mips_sim_finish_insn (struct mips_sim *, rtx);
static void vr4130_avoid_branch_rt_conflict (rtx);
static void vr4130_align_insns (void);
static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
static void mips_avoid_hazards (void);
static void mips_reorg (void);
static bool mips_strict_matching_cpu_name_p (const char *, const char *);
static bool mips_matching_cpu_name_p (const char *, const char *);
static const struct mips_cpu_info *mips_parse_cpu (const char *);
static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
static bool mips_return_in_memory (tree, tree);
static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
static void mips_macc_chains_record (rtx);
static void mips_macc_chains_reorder (rtx *, int);
static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
static bool vr4130_true_reg_dependence_p (rtx);
static bool vr4130_swap_insns_p (rtx, rtx);
static void vr4130_reorder (rtx *, int);
static void mips_promote_ready (rtx *, int, int);
static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
static int mips_variable_issue (FILE *, int, rtx, int);
static int mips_adjust_cost (rtx, rtx, rtx, int);
static int mips_issue_rate (void);
static int mips_multipass_dfa_lookahead (void);
static void mips_init_libfuncs (void);
static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree mips_build_builtin_va_list (void);
static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
				    tree, bool);
static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
				tree, bool);
static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
				   tree, bool);
static bool mips_valid_pointer_mode (enum machine_mode);
static bool mips_vector_mode_supported_p (enum machine_mode);
static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void mips_init_builtins (void);
static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
				      enum insn_code, enum mips_fp_condition,
				      rtx, tree);
static rtx mips_expand_builtin_compare (enum mips_builtin_type,
					enum insn_code, enum mips_fp_condition,
					rtx, tree);
static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
static void mips_encode_section_info (tree, rtx, int);
static void mips_extra_live_on_entry (bitmap);
static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);

/* Structure to be filled in by compute_frame_size with register
   save masks, and offsets for the current function.  */

struct mips_frame_info GTY(())
{
  HOST_WIDE_INT total_size;	/* # bytes that the entire frame takes up */
  HOST_WIDE_INT var_size;	/* # bytes that variables take up */
  HOST_WIDE_INT args_size;	/* # bytes that outgoing arguments take up */
  HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
  HOST_WIDE_INT gp_reg_size;	/* # bytes needed to store gp regs */
  HOST_WIDE_INT fp_reg_size;	/* # bytes needed to store fp regs */
  unsigned int mask;		/* mask of saved gp registers */
  unsigned int fmask;		/* mask of saved fp registers */
  HOST_WIDE_INT gp_save_offset;	/* offset from vfp to store gp registers */
  HOST_WIDE_INT fp_save_offset;	/* offset from vfp to store fp registers */
  HOST_WIDE_INT gp_sp_offset;	/* offset from new sp to store gp registers */
  HOST_WIDE_INT fp_sp_offset;	/* offset from new sp to store fp registers */
  bool initialized;		/* true if frame size already calculated */
  int num_gp;			/* number of gp registers saved */
  int num_fp;			/* number of fp registers saved */
};

struct machine_function GTY(()) {
  /* Pseudo-reg holding the value of $28 in a mips16 function which
     refers to GP relative global variables.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* Current frame information, calculated by compute_frame_size.  */
  struct mips_frame_info frame;

  /* The register to use as the global pointer within this function.  */
  unsigned int global_pointer;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function is known to have an instruction that needs $gp.  */
  bool has_gp_insn_p;
};

/* Information about a single argument.  */
struct mips_arg_info
{
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};

/* Information about an address described by mips_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG is the register that contains the high part of the address,
       OFFSET is the symbolic address being referenced and SYMBOL_TYPE
       is the type of OFFSET's symbol.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol being referenced.  */

struct mips_address_info
{
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};

/* One stage in a constant building sequence.  These sequences have
   the form:

	A = VALUE[0]
	A = A CODE[1] VALUE[1]
	A = A CODE[2] VALUE[2]
	...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
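
/* A worked example (for illustration only): loading 0x12345678 takes two
   mips_integer_op entries,

     value = 0x12340000               (A = 0x12340000, emitted as LUI)
     code = IOR, value = 0x5678       (A = A | 0x5678, emitted as ORI)

   while a 64-bit constant such as 0x1234567800000000 would add ASHIFT
   steps, staying within the MIPS_MAX_INTEGER_OPS limit above.  */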

/* Global variables for machine-dependent things.  */

/* Threshold for data being put into the small data/bss area, instead
   of the normal data area.  */
int mips_section_threshold = -1;

/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Count the number of sdb-related labels that are generated (to find
   block start and end boundaries).  */
int sdb_label_count = 0;

/* Next label # for each statement for Silicon Graphics IRIS systems.  */
int sym_lineno = 0;

/* Linked list of all externals that are to be emitted when optimizing
   for the global pointer if they haven't been declared by the end of
   the program with an appropriate .comm or initialization.  */

struct extern_list GTY (())
{
  struct extern_list *next;	/* next external */
  const char *name;		/* name of the external */
  int size;			/* size in bytes */
};

static GTY (()) struct extern_list *extern_head = 0;

/* Name of the file containing the current function.  */
const char *current_function_file = "";

/* Number of nested .set noreorder, noat, nomacro, and volatile requests.  */
int set_noreorder;
int set_noat;
int set_nomacro;
int set_volatile;

/* The next branch instruction is a branch likely, not branch normal.  */
int mips_branch_likely;

/* The operands passed to the last cmpMM expander.  */
rtx cmp_operands[2];

/* The target cpu for code generation.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The target cpu for optimization and scheduling.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* Which instruction set architecture to use.  */
int mips_isa;

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* Whether we are generating mips16 hard float code.  In mips16 mode
   we always set TARGET_SOFT_FLOAT; this variable is nonzero if
   -msoft-float was not specified by the user, which means that we
   should arrange to call mips32 hard floating point code.  */
int mips16_hard_float;

/* The architecture selected by -mipsN.  */
static const struct mips_cpu_info *mips_isa_info;

/* If TRUE, we split addresses into their high and low parts in the RTL.  */
int mips_split_addresses;

/* Mode used for saving/restoring general purpose registers.  */
static enum machine_mode gpr_mode;

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* List of all MIPS punctuation characters used by print_operand.  */
char mips_print_operand_punct[256];

/* Map GCC register number to debugger register number.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];

/* A copy of the original flag_delayed_branch: see override_options.  */
static int mips_flag_delayed_branch;

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol().  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
643 const enum reg_class mips_regno_to_class[] =
645 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
646 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
647 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
648 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
649 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
650 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
651 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
652 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
653 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
654 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
655 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
656 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
661 HI_REG, LO_REG, NO_REGS, ST_REGS,
662 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
663 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
664 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
665 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
666 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
667 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
668 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
669 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
670 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
671 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
672 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
673 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
674 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
675 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
676 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
677 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
678 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
679 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
680 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
681 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
682 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
683 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
684 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
685 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
686 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
687 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
688 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
689 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
690 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
691 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS

/* Table of machine dependent attributes.  */
const struct attribute_spec mips_attribute_table[] =
{
  { "long_call",   0, 0, false, true,  true,  NULL },
  { NULL,	   0, 0, false, false, false, NULL }
};

/* A table describing all the processors gcc knows about.  Names are
   matched in the order listed.  The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order as
   gas's mips_cpu_info_table[].  */
const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs */
  { "mips1", PROCESSOR_R3000, 1 },
  { "mips2", PROCESSOR_R6000, 2 },
  { "mips3", PROCESSOR_R4000, 3 },
  { "mips4", PROCESSOR_R8000, 4 },
  { "mips32", PROCESSOR_4KC, 32 },
  { "mips32r2", PROCESSOR_M4K, 33 },
  { "mips64", PROCESSOR_5KC, 64 },

  /* MIPS I */
  { "r3000", PROCESSOR_R3000, 1 },
  { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
  { "r3900", PROCESSOR_R3900, 1 },

  /* MIPS II */
  { "r6000", PROCESSOR_R6000, 2 },

  /* MIPS III */
  { "r4000", PROCESSOR_R4000, 3 },
  { "vr4100", PROCESSOR_R4100, 3 },
  { "vr4111", PROCESSOR_R4111, 3 },
  { "vr4120", PROCESSOR_R4120, 3 },
  { "vr4130", PROCESSOR_R4130, 3 },
  { "vr4300", PROCESSOR_R4300, 3 },
  { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
  { "r4600", PROCESSOR_R4600, 3 },
  { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
  { "r4650", PROCESSOR_R4650, 3 },

  /* MIPS IV */
  { "r8000", PROCESSOR_R8000, 4 },
  { "vr5000", PROCESSOR_R5000, 4 },
  { "vr5400", PROCESSOR_R5400, 4 },
  { "vr5500", PROCESSOR_R5500, 4 },
  { "rm7000", PROCESSOR_R7000, 4 },
  { "rm9000", PROCESSOR_R9000, 4 },

  /* MIPS32 */
  { "4kc", PROCESSOR_4KC, 32 },
  { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
  { "4kp", PROCESSOR_4KP, 32 },

  /* MIPS32 Release 2 */
  { "m4k", PROCESSOR_M4K, 33 },
  { "4kec", PROCESSOR_4KC, 33 },
  { "4kem", PROCESSOR_4KC, 33 },
  { "4kep", PROCESSOR_4KP, 33 },
  { "24kc", PROCESSOR_24KC, 33 },  /* 24K  no FPU */
  { "24kf", PROCESSOR_24KF, 33 },  /* 24K 1:2 FPU */
  { "24kx", PROCESSOR_24KX, 33 },  /* 24K 1:1 FPU */
  { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
  { "24kef", PROCESSOR_24KF, 33 },
  { "24kex", PROCESSOR_24KX, 33 },
  { "34kc", PROCESSOR_24KC, 33 },  /* 34K with MT/DSP */
  { "34kf", PROCESSOR_24KF, 33 },
  { "34kx", PROCESSOR_24KX, 33 },

  /* MIPS64 */
  { "5kc", PROCESSOR_5KC, 64 },
  { "5kf", PROCESSOR_5KF, 64 },
  { "20kc", PROCESSOR_20KC, 64 },
  { "sb1", PROCESSOR_SB1, 64 },
  { "sb1a", PROCESSOR_SB1A, 64 },
  { "sr71000", PROCESSOR_SR71000, 64 },

  /* End marker */
  { 0, 0, 0 }
};

/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                      2,                  /* branch_cost */  \
                      4                   /* memory_latency */
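
/* Note added for clarity (not in the original sources): COSTS_N_INSNS (N)
   expresses a cost equivalent to N typical single instructions in the
   units used by the rtx-cost framework, so the entries in this macro and
   in the tables below are relative instruction-count estimates rather
   than exact cycle counts.  */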

/* Need to replace these with the costs of calling the appropriate
   libgcc routine.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */     \
                      COSTS_N_INSNS (256), /* fp_mult_sf */ \
                      COSTS_N_INSNS (256), /* fp_mult_df */ \
                      COSTS_N_INSNS (256), /* fp_div_sf */  \
                      COSTS_N_INSNS (256)  /* fp_div_df */

static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
  {
    COSTS_N_INSNS (1),  /* fp_add */
    COSTS_N_INSNS (1),  /* fp_mult_sf */
    COSTS_N_INSNS (1),  /* fp_mult_df */
    COSTS_N_INSNS (1),  /* fp_div_sf */
    COSTS_N_INSNS (1),  /* fp_div_df */
    COSTS_N_INSNS (1),  /* int_mult_si */
    COSTS_N_INSNS (1),  /* int_mult_di */
    COSTS_N_INSNS (1),  /* int_div_si */
    COSTS_N_INSNS (1),  /* int_div_di */
    2,                  /* branch_cost */
    4                   /* memory_latency */
  };

static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
  {
    { /* R3000 */
      COSTS_N_INSNS (2),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (5),  /* fp_mult_df */
      COSTS_N_INSNS (12), /* fp_div_sf */
      COSTS_N_INSNS (19), /* fp_div_df */
      COSTS_N_INSNS (12), /* int_mult_si */
      COSTS_N_INSNS (12), /* int_mult_di */
      COSTS_N_INSNS (35), /* int_div_si */
      COSTS_N_INSNS (35), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 4KC */
      SOFT_FP_COSTS,
      COSTS_N_INSNS (6),  /* int_mult_si */
      COSTS_N_INSNS (6),  /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (36), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 4KP */
      SOFT_FP_COSTS,
      COSTS_N_INSNS (36), /* int_mult_si */
      COSTS_N_INSNS (36), /* int_mult_di */
      COSTS_N_INSNS (37), /* int_div_si */
      COSTS_N_INSNS (37), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 5KC */
      SOFT_FP_COSTS,
      COSTS_N_INSNS (4),  /* int_mult_si */
      COSTS_N_INSNS (11), /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (68), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 5KF */
      COSTS_N_INSNS (4),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (5),  /* fp_mult_df */
      COSTS_N_INSNS (17), /* fp_div_sf */
      COSTS_N_INSNS (32), /* fp_div_df */
      COSTS_N_INSNS (4),  /* int_mult_si */
      COSTS_N_INSNS (11), /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (68), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 20KC */
      DEFAULT_COSTS
    },
    { /* 24KC */
      SOFT_FP_COSTS,
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (5),  /* int_mult_di */
      COSTS_N_INSNS (41), /* int_div_si */
      COSTS_N_INSNS (41), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 24KF */
      COSTS_N_INSNS (8),  /* fp_add */
      COSTS_N_INSNS (8),  /* fp_mult_sf */
      COSTS_N_INSNS (10), /* fp_mult_df */
      COSTS_N_INSNS (34), /* fp_div_sf */
      COSTS_N_INSNS (64), /* fp_div_df */
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (5),  /* int_mult_di */
      COSTS_N_INSNS (41), /* int_div_si */
      COSTS_N_INSNS (41), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* 24KX */
      COSTS_N_INSNS (4),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (5),  /* fp_mult_df */
      COSTS_N_INSNS (17), /* fp_div_sf */
      COSTS_N_INSNS (32), /* fp_div_df */
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (5),  /* int_mult_di */
      COSTS_N_INSNS (41), /* int_div_si */
      COSTS_N_INSNS (41), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* M4k */
      DEFAULT_COSTS
    },
    { /* R3900 */
      COSTS_N_INSNS (2),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (5),  /* fp_mult_df */
      COSTS_N_INSNS (12), /* fp_div_sf */
      COSTS_N_INSNS (19), /* fp_div_df */
      COSTS_N_INSNS (2),  /* int_mult_si */
      COSTS_N_INSNS (2),  /* int_mult_di */
      COSTS_N_INSNS (35), /* int_div_si */
      COSTS_N_INSNS (35), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R6000 */
      COSTS_N_INSNS (3),  /* fp_add */
      COSTS_N_INSNS (5),  /* fp_mult_sf */
      COSTS_N_INSNS (6),  /* fp_mult_df */
      COSTS_N_INSNS (15), /* fp_div_sf */
      COSTS_N_INSNS (16), /* fp_div_df */
      COSTS_N_INSNS (17), /* int_mult_si */
      COSTS_N_INSNS (17), /* int_mult_di */
      COSTS_N_INSNS (38), /* int_div_si */
      COSTS_N_INSNS (38), /* int_div_di */
      2,                  /* branch_cost */
      6                   /* memory_latency */
    },
    { /* R4000 */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (7),  /* fp_mult_sf */
      COSTS_N_INSNS (8),  /* fp_mult_df */
      COSTS_N_INSNS (23), /* fp_div_sf */
      COSTS_N_INSNS (36), /* fp_div_df */
      COSTS_N_INSNS (10), /* int_mult_si */
      COSTS_N_INSNS (10), /* int_mult_di */
      COSTS_N_INSNS (69), /* int_div_si */
      COSTS_N_INSNS (69), /* int_div_di */
      2,                  /* branch_cost */
      6                   /* memory_latency */
    },
    { /* R4100 */
      DEFAULT_COSTS
    },
    { /* R4111 */
      DEFAULT_COSTS
    },
    { /* R4120 */
      DEFAULT_COSTS
    },
    { /* R4130 */
      /* The only costs that appear to be updated here are
	 integer multiplication.  */
      SOFT_FP_COSTS,
      COSTS_N_INSNS (4),  /* int_mult_si */
      COSTS_N_INSNS (6),  /* int_mult_di */
      COSTS_N_INSNS (69), /* int_div_si */
      COSTS_N_INSNS (69), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R4300 */
      DEFAULT_COSTS
    },
    { /* R4600 */
      DEFAULT_COSTS
    },
    { /* R4650 */
      DEFAULT_COSTS
    },
    { /* R5000 */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (5),  /* fp_mult_df */
      COSTS_N_INSNS (23), /* fp_div_sf */
      COSTS_N_INSNS (36), /* fp_div_df */
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (5),  /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (36), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R5400 */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (5),  /* fp_mult_sf */
      COSTS_N_INSNS (6),  /* fp_mult_df */
      COSTS_N_INSNS (30), /* fp_div_sf */
      COSTS_N_INSNS (59), /* fp_div_df */
      COSTS_N_INSNS (3),  /* int_mult_si */
      COSTS_N_INSNS (4),  /* int_mult_di */
      COSTS_N_INSNS (42), /* int_div_si */
      COSTS_N_INSNS (74), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R5500 */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (5),  /* fp_mult_sf */
      COSTS_N_INSNS (6),  /* fp_mult_df */
      COSTS_N_INSNS (30), /* fp_div_sf */
      COSTS_N_INSNS (59), /* fp_div_df */
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (9),  /* int_mult_di */
      COSTS_N_INSNS (42), /* int_div_si */
      COSTS_N_INSNS (74), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R7000 */
      /* The only costs that are changed here are
	 integer multiplication.  */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (7),  /* fp_mult_sf */
      COSTS_N_INSNS (8),  /* fp_mult_df */
      COSTS_N_INSNS (23), /* fp_div_sf */
      COSTS_N_INSNS (36), /* fp_div_df */
      COSTS_N_INSNS (5),  /* int_mult_si */
      COSTS_N_INSNS (9),  /* int_mult_di */
      COSTS_N_INSNS (69), /* int_div_si */
      COSTS_N_INSNS (69), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* R8000 */
      DEFAULT_COSTS
    },
    { /* R9000 */
      /* The only costs that are changed here are
	 integer multiplication.  */
      COSTS_N_INSNS (6),  /* fp_add */
      COSTS_N_INSNS (7),  /* fp_mult_sf */
      COSTS_N_INSNS (8),  /* fp_mult_df */
      COSTS_N_INSNS (23), /* fp_div_sf */
      COSTS_N_INSNS (36), /* fp_div_df */
      COSTS_N_INSNS (3),  /* int_mult_si */
      COSTS_N_INSNS (8),  /* int_mult_di */
      COSTS_N_INSNS (69), /* int_div_si */
      COSTS_N_INSNS (69), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* SB1 */
      /* These costs are the same as the SB-1A below.  */
      COSTS_N_INSNS (4),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (4),  /* fp_mult_df */
      COSTS_N_INSNS (24), /* fp_div_sf */
      COSTS_N_INSNS (32), /* fp_div_df */
      COSTS_N_INSNS (3),  /* int_mult_si */
      COSTS_N_INSNS (4),  /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (68), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* SB1-A */
      /* These costs are the same as the SB-1 above.  */
      COSTS_N_INSNS (4),  /* fp_add */
      COSTS_N_INSNS (4),  /* fp_mult_sf */
      COSTS_N_INSNS (4),  /* fp_mult_df */
      COSTS_N_INSNS (24), /* fp_div_sf */
      COSTS_N_INSNS (32), /* fp_div_df */
      COSTS_N_INSNS (3),  /* int_mult_si */
      COSTS_N_INSNS (4),  /* int_mult_di */
      COSTS_N_INSNS (36), /* int_div_si */
      COSTS_N_INSNS (68), /* int_div_di */
      1,                  /* branch_cost */
      4                   /* memory_latency */
    },
    { /* SR71000 */
      DEFAULT_COSTS
    }
  };

/* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT.  */
#ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
#define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
#endif

/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_CPU_DEFAULT \
   | TARGET_ENDIAN_DEFAULT \
   | TARGET_FP_EXCEPTIONS_DEFAULT \
   | MASK_CHECK_ZERO_DIV \
   | MASK_FUSED_MADD)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION mips_handle_option

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef TARGET_ASM_FILE_START
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_START mips_file_start
#define TARGET_ASM_FILE_END mips_file_end
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -32768
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 32767
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF.  */

static enum mips_symbol_type
mips_classify_symbol (rtx x)
{
  tree decl;

  if (GET_CODE (x) == LABEL_REF)
    {
      if (TARGET_MIPS16)
	return SYMBOL_CONSTANT_POOL;
      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_LOCAL;
      return SYMBOL_GENERAL;
    }

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (SYMBOL_REF_TLS_MODEL (x))
    return SYMBOL_TLS;

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (TARGET_MIPS16)
	return SYMBOL_CONSTANT_POOL;

      if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
	return SYMBOL_SMALL_DATA;
    }

  /* Do not use small-data accesses for weak symbols; they may end up
     being zero.  */
  if (SYMBOL_REF_SMALL_P (x)
      && !SYMBOL_REF_WEAK (x))
    return SYMBOL_SMALL_DATA;

  if (TARGET_ABICALLS)
    {
      decl = SYMBOL_REF_DECL (x);
      if (decl == 0)
	{
	  if (!SYMBOL_REF_LOCAL_P (x))
	    return SYMBOL_GOT_GLOBAL;
	}
      else
	{
	  /* Don't use GOT accesses for locally-binding symbols if
	     TARGET_ABSOLUTE_ABICALLS.  Otherwise, there are three
	     cases to consider:

	       - o32 PIC (either with or without explicit relocs)
	       - n32/n64 PIC without explicit relocs
	       - n32/n64 PIC with explicit relocs

	     In the first case, both local and global accesses will use an
	     R_MIPS_GOT16 relocation.  We must correctly predict which of
	     the two semantics (local or global) the assembler and linker
	     will apply.  The choice doesn't depend on the symbol's
	     visibility, so we deliberately ignore decl_visibility and
	     binds_local_p here.

	     In the second case, the assembler will not use R_MIPS_GOT16
	     relocations, but it chooses between local and global accesses
	     in the same way as for o32 PIC.

	     In the third case we have more freedom since both forms of
	     access will work for any kind of symbol.  However, there seems
	     little point in doing things differently.

	     Note that weakref symbols are not TREE_PUBLIC, but their
	     targets are global or weak symbols.  Relocations in the
	     object file will be against the target symbol, so it's
	     that symbol's binding that matters here.  */
	  if (DECL_P (decl)
	      && (TREE_PUBLIC (decl) || DECL_WEAK (decl))
	      && !(TARGET_ABSOLUTE_ABICALLS && targetm.binds_local_p (decl)))
	    return SYMBOL_GOT_GLOBAL;
	}

      if (!TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_LOCAL;
    }

  return SYMBOL_GENERAL;
}

/* Split X into a base and a constant offset, storing them in *BASE
   and *OFFSET respectively.  */

static void
mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
{
  *offset = 0;

  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  *offset += INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
    }
  *base = x;
}
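
/* Example (illustrative only): for the rtx
     (const (plus (symbol_ref "foo") (const_int 12)))
   this stores (symbol_ref "foo") in *BASE and 12 in *OFFSET, while a
   bare (symbol_ref "foo") comes back with *OFFSET == 0.  */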

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to the same object as SYMBOL, or to the same object_block.  */

static bool
mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
{
  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (CONSTANT_POOL_ADDRESS_P (symbol)
      && offset >= 0
      && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
    return true;

  if (SYMBOL_REF_DECL (symbol) != 0
      && offset >= 0
      && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
    return true;

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Return true if X is a symbolic constant that can be calculated in
   the same way as a bare symbol.  If it is, store the type of the
   symbol in *SYMBOL_TYPE.  */

bool
mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
{
  HOST_WIDE_INT offset;

  mips_split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    *symbol_type = UNSPEC_ADDRESS_TYPE (x);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    {
      *symbol_type = mips_classify_symbol (x);
      if (*symbol_type == SYMBOL_TLS)
	return false;
    }
  else
    return false;

  if (offset == 0)
    return true;

  /* Check whether a nonzero offset is valid for the underlying
     relocations.  */
  switch (*symbol_type)
    {
    case SYMBOL_GENERAL:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
      /* If the target has 64-bit pointers and the object file only
	 supports 32-bit symbols, the values of those symbols will be
	 sign-extended.  In this case we can't allow an arbitrary offset
	 in case the 32-bit value X + OFFSET has a different sign from X.  */
      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
	return mips_offset_within_object_p (x, offset);

      /* In other cases the relocations can handle any offset.  */
      return true;

    case SYMBOL_CONSTANT_POOL:
      /* Allow constant pool references to be converted to LABEL+CONSTANT.
	 In this case, we no longer have access to the underlying constant,
	 but the original symbol-based access was known to be valid.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;

      /* Fall through.  */

    case SYMBOL_SMALL_DATA:
      /* Make sure that the offset refers to something within the
	 underlying object.  This should guarantee that the final
	 PC- or GP-relative offset is within the 16-bit limit.  */
      return mips_offset_within_object_p (x, offset);

    case SYMBOL_GOT_LOCAL:
    case SYMBOL_GOTOFF_PAGE:
      /* The linker should provide enough local GOT entries for a
	 16-bit offset.  Larger offsets may lead to GOT overflow.  */
      return SMALL_OPERAND (offset);

    case SYMBOL_GOT_GLOBAL:
    case SYMBOL_GOTOFF_GLOBAL:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_DTPREL:
    case SYMBOL_TPREL:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TLS:
      return false;
    }
  gcc_unreachable ();
}

/* This function is used to implement REG_MODE_OK_FOR_BASE_P.  */

int
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  /* In mips16 mode, the stack pointer can only address word and doubleword
     values, nothing smaller.  There are two problems here:

       (a) Instantiating virtual registers can introduce new uses of the
	   stack pointer.  If these virtual registers are valid addresses,
	   the stack pointer should be too.

       (b) Most uses of the stack pointer are not made explicit until
	   FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
	   We don't know until that stage whether we'll be eliminating to the
	   stack pointer (which needs the restriction) or the hard frame
	   pointer (which doesn't).

     All in all, it seems more consistent to only enforce this restriction
     during and after reload.  */
  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
    return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;

  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
}

/* Return true if X is a valid base register for the given mode.
   Allow only hard registers if STRICT.  */

static bool
mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
{
  if (!strict && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
}

/* Return true if symbols of type SYMBOL_TYPE can directly address a value
   with mode MODE.  This is used for both symbolic and LO_SUM addresses.  */

static bool
mips_symbolic_address_p (enum mips_symbol_type symbol_type,
			 enum machine_mode mode)
{
  switch (symbol_type)
    {
    case SYMBOL_GENERAL:
      return !TARGET_MIPS16;

    case SYMBOL_SMALL_DATA:
      return true;

    case SYMBOL_CONSTANT_POOL:
      /* PC-relative addressing is only available for lw and ld.  */
      return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;

    case SYMBOL_GOT_LOCAL:
      return true;

    case SYMBOL_GOT_GLOBAL:
      /* The address will have to be loaded from the GOT first.  */
      return false;

    case SYMBOL_GOTOFF_PAGE:
    case SYMBOL_GOTOFF_GLOBAL:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_TLS:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_DTPREL:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TPREL:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
      return true;
    }
  gcc_unreachable ();
}

/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT is true if we should only accept
   hard base registers.  */

static bool
mips_classify_address (struct mips_address_info *info, rtx x,
		       enum machine_mode mode, int strict)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return mips_valid_base_register_p (info->reg, mode, strict);

    case PLUS:
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (mips_valid_base_register_p (info->reg, mode, strict)
	      && const_arith_operand (info->offset, VOIDmode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (mips_valid_base_register_p (info->reg, mode, strict)
	      && mips_symbolic_constant_p (info->offset, &info->symbol_type)
	      && mips_symbolic_address_p (info->symbol_type, mode)
	      && mips_lo_relocs[info->symbol_type] != 0);

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if $0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return !TARGET_MIPS16 && SMALL_INT (x);

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      info->type = ADDRESS_SYMBOLIC;
      return (mips_symbolic_constant_p (x, &info->symbol_type)
	      && mips_symbolic_address_p (info->symbol_type, mode)
	      && !mips_split_p[info->symbol_type]);

    default:
      return false;
    }
}
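
/* Example (illustrative only): for the SImode address
     (plus (reg $sp) (const_int 16))
   this returns true with info->type == ADDRESS_REG, info->reg == $sp and
   info->offset == 16, whereas (plus (reg $sp) (reg $t0)) is rejected
   because the second operand is not a const_arith_operand.  */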

/* Return true if X is a thread-local symbol.  */

static bool
mips_tls_operand_p (rtx x)
{
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if X can not be forced into a constant pool.  */

static int
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return mips_tls_operand_p (*x);
}

/* Return true if X can not be forced into a constant pool.  */

static bool
mips_cannot_force_const_mem (rtx x)
{
  rtx base;
  HOST_WIDE_INT offset;

  if (!TARGET_MIPS16)
    {
      /* As an optimization, reject constants that mips_legitimize_move
	 can expand inline.

	 Suppose we have a multi-instruction sequence that loads constant C
	 into register R.  If R does not get allocated a hard register, and
	 R is used in an operand that allows both registers and memory
	 references, reload will consider forcing C into memory and using
	 one of the instruction's memory alternatives.  Returning false
	 here will force it to use an input reload instead.  */
      if (GET_CODE (x) == CONST_INT)
	return true;

      mips_split_const (x, &base, &offset);
      if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
	return true;
    }

  if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
    return true;

  return false;
}
1645 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1646 constant pools, but normal-mode code doesn't need to. */
1648 static bool
1649 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1650 rtx x ATTRIBUTE_UNUSED)
1652 return !TARGET_MIPS16;
1655 /* Return the number of instructions needed to load a symbol of the
1656 given type into a register. If valid in an address, the same number
1657 of instructions are needed for loads and stores. Treat extended
1658 mips16 instructions as two instructions. */
1660 static int
1661 mips_symbol_insns (enum mips_symbol_type type)
1663 switch (type)
1665 case SYMBOL_GENERAL:
1666 /* In mips16 code, general symbols must be fetched from the
1667 constant pool. */
1668 if (TARGET_MIPS16)
1669 return 0;
1671 /* When using 64-bit symbols, we need 5 preparatory instructions,
1672 such as:
1674 lui $at,%highest(symbol)
1675 daddiu $at,$at,%higher(symbol)
1676 dsll $at,$at,16
1677 daddiu $at,$at,%hi(symbol)
1678 dsll $at,$at,16
1680 The final address is then $at + %lo(symbol). With 32-bit
1681 symbols we just need a preparatory lui. */
1682 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1684 case SYMBOL_SMALL_DATA:
1685 return 1;
1687 case SYMBOL_CONSTANT_POOL:
1688 /* This case is for mips16 only. Assume we'll need an
1689 extended instruction. */
1690 return 2;
1692 case SYMBOL_GOT_LOCAL:
1693 case SYMBOL_GOT_GLOBAL:
1694 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1695 the local/global classification is accurate. See override_options
1696 for details.
1698 The worst cases are:
1700 (1) For local symbols when generating o32 or o64 code. The assembler
1701 will use:
1703 lw $at,%got(symbol)
1704 nop
1706 ...and the final address will be $at + %lo(symbol).
1708 (2) For global symbols when -mxgot. The assembler will use:
1710 lui $at,%got_hi(symbol)
1711 (d)addu $at,$at,$gp
1713 ...and the final address will be $at + %got_lo(symbol). */
1714 return 3;
1716 case SYMBOL_GOTOFF_PAGE:
1717 case SYMBOL_GOTOFF_GLOBAL:
1718 case SYMBOL_GOTOFF_CALL:
1719 case SYMBOL_GOTOFF_LOADGP:
1720 case SYMBOL_64_HIGH:
1721 case SYMBOL_64_MID:
1722 case SYMBOL_64_LOW:
1723 case SYMBOL_TLSGD:
1724 case SYMBOL_TLSLDM:
1725 case SYMBOL_DTPREL:
1726 case SYMBOL_GOTTPREL:
1727 case SYMBOL_TPREL:
1728 /* Check whether the offset is a 16- or 32-bit value. */
1729 return mips_split_p[type] ? 2 : 1;
1731 case SYMBOL_TLS:
1732 /* We don't treat a bare TLS symbol as a constant. */
1733 return 0;
1735 gcc_unreachable ();
1738 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1740 bool
1741 mips_stack_address_p (rtx x, enum machine_mode mode)
1743 struct mips_address_info addr;
1745 return (mips_classify_address (&addr, x, mode, false)
1746 && addr.type == ADDRESS_REG
1747 && addr.reg == stack_pointer_rtx);
1750 /* Return true if a value at OFFSET bytes from BASE can be accessed
1751 using an unextended mips16 instruction. MODE is the mode of the
1752 value.
1754 Usually the offset in an unextended instruction is a 5-bit field.
1755 The offset is unsigned and shifted left once for HIs, twice
1756 for SIs, and so on. An exception is SImode accesses off the
1757 stack pointer, which have an 8-bit immediate field. */
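/* For example (matching the checks below), an unextended SImode access
   from a general base register can reach byte offsets 0, 4, ..., 124,
   while an SImode access based on $sp can reach offsets 0, 4, ..., 1020.  */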
1759 static bool
1760 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1762 if (TARGET_MIPS16
1763 && GET_CODE (offset) == CONST_INT
1764 && INTVAL (offset) >= 0
1765 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1767 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1768 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1769 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1771 return false;
1775 /* Return the number of instructions needed to load or store a value
1776 of mode MODE at X. Return 0 if X isn't valid for MODE.
1778 For mips16 code, count extended instructions as two instructions. */
1781 mips_address_insns (rtx x, enum machine_mode mode)
1783 struct mips_address_info addr;
1784 int factor;
1786 if (mode == BLKmode)
1787 /* BLKmode is used for single unaligned loads and stores. */
1788 factor = 1;
1789 else
1790 /* Each word of a multi-word value will be accessed individually. */
1791 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1793 if (mips_classify_address (&addr, x, mode, false))
1794 switch (addr.type)
1796 case ADDRESS_REG:
1797 if (TARGET_MIPS16
1798 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1799 return factor * 2;
1800 return factor;
1802 case ADDRESS_LO_SUM:
1803 return (TARGET_MIPS16 ? factor * 2 : factor);
1805 case ADDRESS_CONST_INT:
1806 return factor;
1808 case ADDRESS_SYMBOLIC:
1809 return factor * mips_symbol_insns (addr.symbol_type);
1811 return 0;
1815 /* Likewise for constant X. */
1818 mips_const_insns (rtx x)
1820 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1821 enum mips_symbol_type symbol_type;
1822 HOST_WIDE_INT offset;
1824 switch (GET_CODE (x))
1826 case HIGH:
1827 if (TARGET_MIPS16
1828 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1829 || !mips_split_p[symbol_type])
1830 return 0;
1832 return 1;
1834 case CONST_INT:
1835 if (TARGET_MIPS16)
1836 /* Unsigned 8-bit constants can be loaded using an unextended
1837 LI instruction. Unsigned 16-bit constants can be loaded
1838 using an extended LI. Negative constants must be loaded
1839 using LI and then negated. */
1840 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1841 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1842 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1843 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1844 : 0);
1846 return mips_build_integer (codes, INTVAL (x));
1848 case CONST_DOUBLE:
1849 case CONST_VECTOR:
1850 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1852 case CONST:
1853 if (CONST_GP_P (x))
1854 return 1;
1856 /* See if we can refer to X directly. */
1857 if (mips_symbolic_constant_p (x, &symbol_type))
1858 return mips_symbol_insns (symbol_type);
1860 /* Otherwise try splitting the constant into a base and offset.
1861 16-bit offsets can be added using an extra addiu. Larger offsets
1862 must be calculated separately and then added to the base. */
1863 mips_split_const (x, &x, &offset);
1864 if (offset != 0)
1866 int n = mips_const_insns (x);
1867 if (n != 0)
1869 if (SMALL_OPERAND (offset))
1870 return n + 1;
1871 else
1872 return n + 1 + mips_build_integer (codes, offset);
1875 return 0;
1877 case SYMBOL_REF:
1878 case LABEL_REF:
1879 return mips_symbol_insns (mips_classify_symbol (x));
1881 default:
1882 return 0;
1887 /* Return the number of instructions needed for memory reference X.
1888 Count extended mips16 instructions as two instructions. */
1891 mips_fetch_insns (rtx x)
1893 gcc_assert (MEM_P (x));
1894 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1898 /* Return the number of instructions needed for an integer division. */
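/* For example, with -mcheck-zero-division on a target that supports
   divide traps, the count below is 2: the divide itself plus a single
   trap-on-zero-divisor check.  Without trap support the zero check is
   counted as two instructions, and -mfix-r4000/-mfix-r4400 add one more.  */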
1901 mips_idiv_insns (void)
1903 int count;
1905 count = 1;
1906 if (TARGET_CHECK_ZERO_DIV)
1908 if (GENERATE_DIVIDE_TRAPS)
1909 count++;
1910 else
1911 count += 2;
1914 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1915 count++;
1916 return count;
1919 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1920 returns a nonzero value if X is a legitimate address for a memory
1921 operand of the indicated MODE. STRICT is nonzero if this function
1922 is called during reload. */
1924 bool
1925 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1927 struct mips_address_info addr;
1929 return mips_classify_address (&addr, x, mode, strict);
1933 /* Copy VALUE to a register and return that register.  If new pseudos
1934 are allowed, copy it into a new register, otherwise use DEST. */
1936 static rtx
1937 mips_force_temporary (rtx dest, rtx value)
1939 if (!no_new_pseudos)
1940 return force_reg (Pmode, value);
1941 else
1943 emit_move_insn (copy_rtx (dest), value);
1944 return dest;
1949 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1950 and is used to load the high part into a register. */
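/* In non-mips16 code the split below typically corresponds to a pair
   such as:

	lui	$at,%hi(symbol)
	...	%lo(symbol)($at)

   with the HIGH part loaded via mips_force_temporary and the LO_SUM
   used directly in the memory access.  */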
1953 mips_split_symbol (rtx temp, rtx addr)
1955 rtx high;
1957 if (TARGET_MIPS16)
1958 high = mips16_gp_pseudo_reg ();
1959 else
1960 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1961 return gen_rtx_LO_SUM (Pmode, high, addr);
1965 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1966 type SYMBOL_TYPE. */
1969 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1971 rtx base;
1972 HOST_WIDE_INT offset;
1974 mips_split_const (address, &base, &offset);
1975 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1976 UNSPEC_ADDRESS_FIRST + symbol_type);
1977 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1981 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1982 high part to BASE and return the result. Just return BASE otherwise.
1983 TEMP is available as a temporary register if needed.
1985 The returned expression can be used as the first operand to a LO_SUM. */
1987 static rtx
1988 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1989 enum mips_symbol_type symbol_type)
1991 if (mips_split_p[symbol_type])
1993 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1994 addr = mips_force_temporary (temp, addr);
1995 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1997 return base;
2001 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2002 mips_force_temporary; it is only needed when OFFSET is not a
2003 SMALL_OPERAND. */
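/* For illustration: in non-mips16 code with OFFSET 0x1c000, which is not
   a SMALL_OPERAND, the excess is loaded as CONST_HIGH_PART (0x20000) and
   added to REG, and the returned address keeps CONST_LOW_PART (-0x4000)
   as its displacement, which fits in a signed 16-bit field.  */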
2005 static rtx
2006 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2008 if (!SMALL_OPERAND (offset))
2010 rtx high;
2011 if (TARGET_MIPS16)
2013 /* Load the full offset into a register so that we can use
2014 an unextended instruction for the address itself. */
2015 high = GEN_INT (offset);
2016 offset = 0;
2018 else
2020 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2021 high = GEN_INT (CONST_HIGH_PART (offset));
2022 offset = CONST_LOW_PART (offset);
2024 high = mips_force_temporary (temp, high);
2025 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2027 return plus_constant (reg, offset);
2030 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2031 referencing, and TYPE is the symbol type to use (either global
2032 dynamic or local dynamic). V0 is an RTX for the return value
2033 location. The entire insn sequence is returned. */
2035 static GTY(()) rtx mips_tls_symbol;
2037 static rtx
2038 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2040 rtx insn, loc, tga, a0;
2042 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2044 if (!mips_tls_symbol)
2045 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2047 loc = mips_unspec_address (sym, type);
2049 start_sequence ();
2051 emit_insn (gen_rtx_SET (Pmode, a0,
2052 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2053 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2054 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2055 CONST_OR_PURE_CALL_P (insn) = 1;
2056 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2057 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2058 insn = get_insns ();
2060 end_sequence ();
2062 return insn;
2065 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2066 return value will be a valid address and move_operand (either a REG
2067 or a LO_SUM). */
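/* As a rough sketch of the local-exec case handled below (assuming a
   MIPS32r2-style rdhwr read of the thread pointer and a variable x):

	rdhwr	$3,$29
	lui	$at,%tprel_hi(x)
	addu	$at,$at,$3

   ...after which x is accessed through a %tprel_lo(x)($at) displacement.  */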
2069 static rtx
2070 mips_legitimize_tls_address (rtx loc)
2072 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2073 enum tls_model model;
2075 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2076 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2078 model = SYMBOL_REF_TLS_MODEL (loc);
2079 /* Only TARGET_ABICALLS code can have more than one module; other
2080 code must be static and should not use a GOT.  All TLS models
2081 reduce to local exec in this situation. */
2082 if (!TARGET_ABICALLS)
2083 model = TLS_MODEL_LOCAL_EXEC;
2085 switch (model)
2087 case TLS_MODEL_GLOBAL_DYNAMIC:
2088 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2089 dest = gen_reg_rtx (Pmode);
2090 emit_libcall_block (insn, dest, v0, loc);
2091 break;
2093 case TLS_MODEL_LOCAL_DYNAMIC:
2094 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2095 tmp1 = gen_reg_rtx (Pmode);
2097 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2098 share the LDM result with other LD model accesses. */
2099 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2100 UNSPEC_TLS_LDM);
2101 emit_libcall_block (insn, tmp1, v0, eqv);
2103 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2104 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2105 mips_unspec_address (loc, SYMBOL_DTPREL));
2106 break;
2108 case TLS_MODEL_INITIAL_EXEC:
2109 tmp1 = gen_reg_rtx (Pmode);
2110 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2111 if (Pmode == DImode)
2113 emit_insn (gen_tls_get_tp_di (v1));
2114 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2116 else
2118 emit_insn (gen_tls_get_tp_si (v1));
2119 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2121 dest = gen_reg_rtx (Pmode);
2122 emit_insn (gen_add3_insn (dest, tmp1, v1));
2123 break;
2125 case TLS_MODEL_LOCAL_EXEC:
2126 if (Pmode == DImode)
2127 emit_insn (gen_tls_get_tp_di (v1));
2128 else
2129 emit_insn (gen_tls_get_tp_si (v1));
2131 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2132 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2133 mips_unspec_address (loc, SYMBOL_TPREL));
2134 break;
2136 default:
2137 gcc_unreachable ();
2140 return dest;
2143 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2144 be legitimized in a way that the generic machinery might not expect,
2145 put the new address in *XLOC and return true. MODE is the mode of
2146 the memory being accessed. */
2148 bool
2149 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2151 enum mips_symbol_type symbol_type;
2153 if (mips_tls_operand_p (*xloc))
2155 *xloc = mips_legitimize_tls_address (*xloc);
2156 return true;
2159 /* See if the address can split into a high part and a LO_SUM. */
2160 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2161 && mips_symbolic_address_p (symbol_type, mode)
2162 && mips_split_p[symbol_type])
2164 *xloc = mips_split_symbol (0, *xloc);
2165 return true;
2168 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2170 /* Handle REG + CONSTANT using mips_add_offset. */
2171 rtx reg;
2173 reg = XEXP (*xloc, 0);
2174 if (!mips_valid_base_register_p (reg, mode, 0))
2175 reg = copy_to_mode_reg (Pmode, reg);
2176 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2177 return true;
2180 return false;
2184 /* Subroutine of mips_build_integer (with the same interface).
2185 Assume that the final action in the sequence should be a left shift. */
2187 static unsigned int
2188 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2190 unsigned int i, shift;
2192 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2193 since signed numbers are easier to load than unsigned ones. */
2194 shift = 0;
2195 while ((value & 1) == 0)
2196 value /= 2, shift++;
2198 i = mips_build_integer (codes, value);
2199 codes[i].code = ASHIFT;
2200 codes[i].value = shift;
2201 return i + 1;
2205 /* As for mips_build_shift, but assume that the final action will be
2206 an IOR or PLUS operation. */
2208 static unsigned int
2209 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2211 unsigned HOST_WIDE_INT high;
2212 unsigned int i;
2214 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2215 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2217 /* The constant is too complex to load with a simple lui/ori pair
2218 so our goal is to clear as many trailing zeros as possible.
2219 In this case, we know bit 16 is set and that the low 16 bits
2220 form a negative number. If we subtract that number from VALUE,
2221 we will clear at least the lowest 17 bits, maybe more. */
2222 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2223 codes[i].code = PLUS;
2224 codes[i].value = CONST_LOW_PART (value);
2226 else
2228 i = mips_build_integer (codes, high);
2229 codes[i].code = IOR;
2230 codes[i].value = value & 0xffff;
2232 return i + 1;
2236 /* Fill CODES with a sequence of rtl operations to load VALUE.
2237 Return the number of operations needed. */
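/* Two worked examples of the splitting done below, assuming a 64-bit
   target:

   - 0x12345678 has a LUI-loadable high part, so mips_build_lower gives
     { UNKNOWN 0x12340000, IOR 0x5678 }, i.e. a lui/ori pair.

   - 0x123400000000 ends in 34 zero bits, so mips_build_shift gives
     { UNKNOWN 0x48d, ASHIFT 34 }, i.e. a load-immediate followed by a
     34-bit doubleword shift left.  */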
2239 static unsigned int
2240 mips_build_integer (struct mips_integer_op *codes,
2241 unsigned HOST_WIDE_INT value)
2243 if (SMALL_OPERAND (value)
2244 || SMALL_OPERAND_UNSIGNED (value)
2245 || LUI_OPERAND (value))
2247 /* The value can be loaded with a single instruction. */
2248 codes[0].code = UNKNOWN;
2249 codes[0].value = value;
2250 return 1;
2252 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2254 /* Either the constant is a simple LUI/ORI combination or its
2255 lowest bit is set. We don't want to shift in this case. */
2256 return mips_build_lower (codes, value);
2258 else if ((value & 0xffff) == 0)
2260 /* The constant will need at least three actions. The lowest
2261 16 bits are clear, so the final action will be a shift. */
2262 return mips_build_shift (codes, value);
2264 else
2266 /* The final action could be a shift, add or inclusive OR.
2267 Rather than use a complex condition to select the best
2268 approach, try both mips_build_shift and mips_build_lower
2269 and pick the one that gives the shortest sequence.
2270 Note that this case is only used once per constant. */
2271 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2272 unsigned int cost, alt_cost;
2274 cost = mips_build_shift (codes, value);
2275 alt_cost = mips_build_lower (alt_codes, value);
2276 if (alt_cost < cost)
2278 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2279 cost = alt_cost;
2281 return cost;
2286 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2288 void
2289 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2291 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2292 enum machine_mode mode;
2293 unsigned int i, cost;
2294 rtx x;
2296 mode = GET_MODE (dest);
2297 cost = mips_build_integer (codes, value);
2299 /* Apply each binary operation to X. Invariant: X is a legitimate
2300 source operand for a SET pattern. */
2301 x = GEN_INT (codes[0].value);
2302 for (i = 1; i < cost; i++)
2304 if (no_new_pseudos)
2306 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2307 x = temp;
2309 else
2310 x = force_reg (mode, x);
2311 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2314 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2318 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2319 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2320 move_operand. */
2322 static void
2323 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2325 rtx base;
2326 HOST_WIDE_INT offset;
2328 /* Split moves of big integers into smaller pieces. */
2329 if (splittable_const_int_operand (src, mode))
2331 mips_move_integer (dest, dest, INTVAL (src));
2332 return;
2335 /* Split moves of symbolic constants into high/low pairs. */
2336 if (splittable_symbolic_operand (src, mode))
2338 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2339 return;
2342 if (mips_tls_operand_p (src))
2344 emit_move_insn (dest, mips_legitimize_tls_address (src));
2345 return;
2348 /* If we have (const (plus symbol offset)), load the symbol first
2349 and then add in the offset. This is usually better than forcing
2350 the constant into memory, at least in non-mips16 code. */
2351 mips_split_const (src, &base, &offset);
2352 if (!TARGET_MIPS16
2353 && offset != 0
2354 && (!no_new_pseudos || SMALL_OPERAND (offset)))
2356 base = mips_force_temporary (dest, base);
2357 emit_move_insn (dest, mips_add_offset (0, base, offset));
2358 return;
2361 src = force_const_mem (mode, src);
2363 /* When using explicit relocs, constant pool references are sometimes
2364 not legitimate addresses. */
2365 if (!memory_operand (src, VOIDmode))
2366 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2367 emit_move_insn (dest, src);
2371 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2372 sequence that is valid. */
2374 bool
2375 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2377 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2379 emit_move_insn (dest, force_reg (mode, src));
2380 return true;
2383 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2384 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2385 && REG_P (src) && MD_REG_P (REGNO (src))
2386 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2388 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2389 if (GET_MODE_SIZE (mode) <= 4)
2390 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2391 gen_rtx_REG (SImode, REGNO (src)),
2392 gen_rtx_REG (SImode, other_regno)));
2393 else
2394 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2395 gen_rtx_REG (DImode, REGNO (src)),
2396 gen_rtx_REG (DImode, other_regno)));
2397 return true;
2400 /* We need to deal with constants that would be legitimate
2401 immediate_operands but not legitimate move_operands. */
2402 if (CONSTANT_P (src) && !move_operand (src, mode))
2404 mips_legitimize_const_move (mode, dest, src);
2405 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2406 return true;
2408 return false;
2411 /* We need a lot of little routines to check constant values on the
2412 mips16. These are used to figure out how long the instruction will
2413 be. It would be much better to do this using constraints, but
2414 there aren't nearly enough letters available. */
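/* For example, m16_uimm8_4 below accepts unsigned multiples of 4 in the
   range [0, 0x3fc], i.e. an 8-bit unsigned field scaled by 4, while
   m16_simm8_8 accepts signed multiples of 8 in [-0x400, 0x3f8].  */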
2416 static int
2417 m16_check_op (rtx op, int low, int high, int mask)
2419 return (GET_CODE (op) == CONST_INT
2420 && INTVAL (op) >= low
2421 && INTVAL (op) <= high
2422 && (INTVAL (op) & mask) == 0);
2426 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2428 return m16_check_op (op, 0x1, 0x8, 0);
2432 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2434 return m16_check_op (op, - 0x8, 0x7, 0);
2438 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2440 return m16_check_op (op, - 0x7, 0x8, 0);
2444 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2446 return m16_check_op (op, - 0x10, 0xf, 0);
2450 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2452 return m16_check_op (op, - 0xf, 0x10, 0);
2456 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2458 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2462 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2464 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2468 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2470 return m16_check_op (op, - 0x80, 0x7f, 0);
2474 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2476 return m16_check_op (op, - 0x7f, 0x80, 0);
2480 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2482 return m16_check_op (op, 0x0, 0xff, 0);
2486 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2488 return m16_check_op (op, - 0xff, 0x0, 0);
2492 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2494 return m16_check_op (op, - 0x1, 0xfe, 0);
2498 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2500 return m16_check_op (op, 0x0, 0xff << 2, 3);
2504 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2506 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2510 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2512 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2516 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2518 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2521 static bool
2522 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2524 enum machine_mode mode = GET_MODE (x);
2525 bool float_mode_p = FLOAT_MODE_P (mode);
2527 switch (code)
2529 case CONST_INT:
2530 if (TARGET_MIPS16)
2532 /* A number between 1 and 8 inclusive is efficient for a shift.
2533 Otherwise, we will need an extended instruction. */
2534 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2535 || (outer_code) == LSHIFTRT)
2537 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2538 *total = 0;
2539 else
2540 *total = COSTS_N_INSNS (1);
2541 return true;
2544 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2545 if ((outer_code) == XOR
2546 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2548 *total = 0;
2549 return true;
2552 /* We may be able to use slt or sltu for a comparison with a
2553 signed 16 bit value. (The boundary conditions aren't quite
2554 right, but this is just a heuristic anyhow.) */
2555 if (((outer_code) == LT || (outer_code) == LE
2556 || (outer_code) == GE || (outer_code) == GT
2557 || (outer_code) == LTU || (outer_code) == LEU
2558 || (outer_code) == GEU || (outer_code) == GTU)
2559 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2561 *total = 0;
2562 return true;
2565 /* Equality comparisons with 0 are cheap. */
2566 if (((outer_code) == EQ || (outer_code) == NE)
2567 && INTVAL (x) == 0)
2569 *total = 0;
2570 return true;
2573 /* Constants in the range 0...255 can be loaded with an unextended
2574 instruction. They are therefore as cheap as a register move.
2576 Given the choice between "li R1,0...255" and "move R1,R2"
2577 (where R2 is a known constant), it is usually better to use "li",
2578 since we do not want to unnecessarily extend the lifetime
2579 of R2. */
2580 if (outer_code == SET
2581 && INTVAL (x) >= 0
2582 && INTVAL (x) < 256)
2584 *total = 0;
2585 return true;
2588 else
2590 /* These can be used anywhere. */
2591 *total = 0;
2592 return true;
2595 /* Otherwise fall through to the handling below because
2596 we'll need to construct the constant. */
2598 case CONST:
2599 case SYMBOL_REF:
2600 case LABEL_REF:
2601 case CONST_DOUBLE:
2602 if (LEGITIMATE_CONSTANT_P (x))
2604 *total = COSTS_N_INSNS (1);
2605 return true;
2607 else
2609 /* The value will need to be fetched from the constant pool. */
2610 *total = CONSTANT_POOL_COST;
2611 return true;
2614 case MEM:
2616 /* If the address is legitimate, return the number of
2617 instructions it needs, otherwise use the default handling. */
2618 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2619 if (n > 0)
2621 *total = COSTS_N_INSNS (n + 1);
2622 return true;
2624 return false;
2627 case FFS:
2628 *total = COSTS_N_INSNS (6);
2629 return true;
2631 case NOT:
2632 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2633 return true;
2635 case AND:
2636 case IOR:
2637 case XOR:
2638 if (mode == DImode && !TARGET_64BIT)
2640 *total = COSTS_N_INSNS (2);
2641 return true;
2643 return false;
2645 case ASHIFT:
2646 case ASHIFTRT:
2647 case LSHIFTRT:
2648 if (mode == DImode && !TARGET_64BIT)
2650 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2651 ? 4 : 12);
2652 return true;
2654 return false;
2656 case ABS:
2657 if (float_mode_p)
2658 *total = COSTS_N_INSNS (1);
2659 else
2660 *total = COSTS_N_INSNS (4);
2661 return true;
2663 case LO_SUM:
2664 *total = COSTS_N_INSNS (1);
2665 return true;
2667 case PLUS:
2668 case MINUS:
2669 if (float_mode_p)
2671 *total = mips_cost->fp_add;
2672 return true;
2675 else if (mode == DImode && !TARGET_64BIT)
2677 *total = COSTS_N_INSNS (4);
2678 return true;
2680 return false;
2682 case NEG:
2683 if (mode == DImode && !TARGET_64BIT)
2685 *total = COSTS_N_INSNS (4);
2686 return true;
2688 return false;
2690 case MULT:
2691 if (mode == SFmode)
2692 *total = mips_cost->fp_mult_sf;
2694 else if (mode == DFmode)
2695 *total = mips_cost->fp_mult_df;
2697 else if (mode == SImode)
2698 *total = mips_cost->int_mult_si;
2700 else
2701 *total = mips_cost->int_mult_di;
2703 return true;
2705 case DIV:
2706 case MOD:
2707 if (float_mode_p)
2709 if (mode == SFmode)
2710 *total = mips_cost->fp_div_sf;
2711 else
2712 *total = mips_cost->fp_div_df;
2714 return true;
2716 /* Fall through. */
2718 case UDIV:
2719 case UMOD:
2720 if (mode == DImode)
2721 *total = mips_cost->int_div_di;
2722 else
2723 *total = mips_cost->int_div_si;
2725 return true;
2727 case SIGN_EXTEND:
2728 /* A sign extend from SImode to DImode in 64 bit mode is often
2729 zero instructions, because the result can often be used
2730 directly by another instruction; we'll call it one. */
2731 if (TARGET_64BIT && mode == DImode
2732 && GET_MODE (XEXP (x, 0)) == SImode)
2733 *total = COSTS_N_INSNS (1);
2734 else
2735 *total = COSTS_N_INSNS (2);
2736 return true;
2738 case ZERO_EXTEND:
2739 if (TARGET_64BIT && mode == DImode
2740 && GET_MODE (XEXP (x, 0)) == SImode)
2741 *total = COSTS_N_INSNS (2);
2742 else
2743 *total = COSTS_N_INSNS (1);
2744 return true;
2746 case FLOAT:
2747 case UNSIGNED_FLOAT:
2748 case FIX:
2749 case FLOAT_EXTEND:
2750 case FLOAT_TRUNCATE:
2751 case SQRT:
2752 *total = mips_cost->fp_add;
2753 return true;
2755 default:
2756 return false;
2760 /* Provide the costs of an addressing mode that contains ADDR.
2761 If ADDR is not a valid address, its cost is irrelevant. */
2763 static int
2764 mips_address_cost (rtx addr)
2766 return mips_address_insns (addr, SImode);
2769 /* Return one word of double-word value OP, taking into account the fixed
2770 endianness of certain registers. HIGH_P is true to select the high part,
2771 false to select the low part. */
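/* For instance, on a big-endian target the most-significant word of a
   64-bit value is at byte offset 0, so the low part is taken from byte
   offset UNITS_PER_WORD; little-endian targets use the opposite offsets.
   FPRs and HI/LO-style accumulators are instead handled by register
   number, as coded below.  */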
2774 mips_subword (rtx op, int high_p)
2776 unsigned int byte;
2777 enum machine_mode mode;
2779 mode = GET_MODE (op);
2780 if (mode == VOIDmode)
2781 mode = DImode;
2783 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2784 byte = UNITS_PER_WORD;
2785 else
2786 byte = 0;
2788 if (REG_P (op))
2790 if (FP_REG_P (REGNO (op)))
2791 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2792 if (ACC_HI_REG_P (REGNO (op)))
2793 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2796 if (MEM_P (op))
2797 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2799 return simplify_gen_subreg (word_mode, op, mode, byte);
2803 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2805 bool
2806 mips_split_64bit_move_p (rtx dest, rtx src)
2808 if (TARGET_64BIT)
2809 return false;
2811 /* FP->FP moves can be done in a single instruction. */
2812 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2813 return false;
2815 /* Check for floating-point loads and stores. They can be done using
2816 ldc1 and sdc1 on MIPS II and above. */
2817 if (mips_isa > 1)
2819 if (FP_REG_RTX_P (dest) && MEM_P (src))
2820 return false;
2821 if (FP_REG_RTX_P (src) && MEM_P (dest))
2822 return false;
2824 return true;
2828 /* Split a 64-bit move from SRC to DEST assuming that
2829 mips_split_64bit_move_p holds.
2831 Moves into and out of FPRs cause some difficulty here. Such moves
2832 will always be DFmode, since paired FPRs are not allowed to store
2833 DImode values. The most natural representation would be two separate
2834 32-bit moves, such as:
2836 (set (reg:SI $f0) (mem:SI ...))
2837 (set (reg:SI $f1) (mem:SI ...))
2839 However, the second insn is invalid because odd-numbered FPRs are
2840 not allowed to store independent values. Use the patterns load_df_low,
2841 load_df_high and store_df_high instead. */
2843 void
2844 mips_split_64bit_move (rtx dest, rtx src)
2846 if (FP_REG_RTX_P (dest))
2848 /* Loading an FPR from memory or from GPRs. */
2849 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2850 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2851 copy_rtx (dest)));
2853 else if (FP_REG_RTX_P (src))
2855 /* Storing an FPR into memory or GPRs. */
2856 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2857 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2859 else
2861 /* The operation can be split into two normal moves. Decide in
2862 which order to do them. */
2863 rtx low_dest;
2865 low_dest = mips_subword (dest, 0);
2866 if (REG_P (low_dest)
2867 && reg_overlap_mentioned_p (low_dest, src))
2869 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2870 emit_move_insn (low_dest, mips_subword (src, 0));
2872 else
2874 emit_move_insn (low_dest, mips_subword (src, 0));
2875 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2880 /* Return the appropriate instructions to move SRC into DEST. Assume
2881 that SRC is operand 1 and DEST is operand 0. */
2883 const char *
2884 mips_output_move (rtx dest, rtx src)
2886 enum rtx_code dest_code, src_code;
2887 bool dbl_p;
2889 dest_code = GET_CODE (dest);
2890 src_code = GET_CODE (src);
2891 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2893 if (dbl_p && mips_split_64bit_move_p (dest, src))
2894 return "#";
2896 if ((src_code == REG && GP_REG_P (REGNO (src)))
2897 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2899 if (dest_code == REG)
2901 if (GP_REG_P (REGNO (dest)))
2902 return "move\t%0,%z1";
2904 if (MD_REG_P (REGNO (dest)))
2905 return "mt%0\t%z1";
2907 if (DSP_ACC_REG_P (REGNO (dest)))
2909 static char retval[] = "mt__\t%z1,%q0";
2910 retval[2] = reg_names[REGNO (dest)][4];
2911 retval[3] = reg_names[REGNO (dest)][5];
2912 return retval;
2915 if (FP_REG_P (REGNO (dest)))
2916 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2918 if (ALL_COP_REG_P (REGNO (dest)))
2920 static char retval[] = "dmtc_\t%z1,%0";
2922 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2923 return (dbl_p ? retval : retval + 1);
2926 if (dest_code == MEM)
2927 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2929 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2931 if (src_code == REG)
2933 if (DSP_ACC_REG_P (REGNO (src)))
2935 static char retval[] = "mf__\t%0,%q1";
2936 retval[2] = reg_names[REGNO (src)][4];
2937 retval[3] = reg_names[REGNO (src)][5];
2938 return retval;
2941 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2942 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2944 if (FP_REG_P (REGNO (src)))
2945 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2947 if (ALL_COP_REG_P (REGNO (src)))
2949 static char retval[] = "dmfc_\t%0,%1";
2951 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2952 return (dbl_p ? retval : retval + 1);
2956 if (src_code == MEM)
2957 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2959 if (src_code == CONST_INT)
2961 /* Don't use the X format, because that will give out of
2962 range numbers for 64 bit hosts and 32 bit targets. */
2963 if (!TARGET_MIPS16)
2964 return "li\t%0,%1\t\t\t# %X1";
2966 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2967 return "li\t%0,%1";
2969 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2970 return "#";
2973 if (src_code == HIGH)
2974 return "lui\t%0,%h1";
2976 if (CONST_GP_P (src))
2977 return "move\t%0,%1";
2979 if (symbolic_operand (src, VOIDmode))
2980 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2982 if (src_code == REG && FP_REG_P (REGNO (src)))
2984 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2986 if (GET_MODE (dest) == V2SFmode)
2987 return "mov.ps\t%0,%1";
2988 else
2989 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2992 if (dest_code == MEM)
2993 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2995 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2997 if (src_code == MEM)
2998 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3000 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3002 static char retval[] = "l_c_\t%0,%1";
3004 retval[1] = (dbl_p ? 'd' : 'w');
3005 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3006 return retval;
3008 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3010 static char retval[] = "s_c_\t%1,%0";
3012 retval[1] = (dbl_p ? 'd' : 'w');
3013 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3014 return retval;
3016 gcc_unreachable ();
3019 /* Restore $gp from its save slot. Valid only when using o32 or
3020 o64 abicalls. */
3022 void
3023 mips_restore_gp (void)
3025 rtx address, slot;
3027 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3029 address = mips_add_offset (pic_offset_table_rtx,
3030 frame_pointer_needed
3031 ? hard_frame_pointer_rtx
3032 : stack_pointer_rtx,
3033 current_function_outgoing_args_size);
3034 slot = gen_rtx_MEM (Pmode, address);
3036 emit_move_insn (pic_offset_table_rtx, slot);
3037 if (!TARGET_EXPLICIT_RELOCS)
3038 emit_insn (gen_blockage ());
3041 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3043 static void
3044 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3046 emit_insn (gen_rtx_SET (VOIDmode, target,
3047 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3050 /* Return true if CMP1 is a suitable second operand for relational
3051 operator CODE. See also the *sCC patterns in mips.md. */
3053 static bool
3054 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3056 switch (code)
3058 case GT:
3059 case GTU:
3060 return reg_or_0_operand (cmp1, VOIDmode);
3062 case GE:
3063 case GEU:
3064 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3066 case LT:
3067 case LTU:
3068 return arith_operand (cmp1, VOIDmode);
3070 case LE:
3071 return sle_operand (cmp1, VOIDmode);
3073 case LEU:
3074 return sleu_operand (cmp1, VOIDmode);
3076 default:
3077 gcc_unreachable ();
3081 /* Canonicalize LE or LEU comparisons into LT comparisons when
3082 possible to avoid extra instructions or inverting the
3083 comparison. */
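/* For example, (le x 4) becomes (lt x 5) with 5 forced into a register,
   while (leu x 0xffffffff) is left alone because adding one would wrap
   around to zero.  */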
3085 static bool
3086 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3087 enum machine_mode mode)
3089 HOST_WIDE_INT original, plus_one;
3091 if (GET_CODE (*cmp1) != CONST_INT)
3092 return false;
3094 original = INTVAL (*cmp1);
3095 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3097 switch (*code)
3099 case LE:
3100 if (original < plus_one)
3102 *code = LT;
3103 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3104 return true;
3106 break;
3108 case LEU:
3109 if (plus_one != 0)
3111 *code = LTU;
3112 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3113 return true;
3115 break;
3117 default:
3118 return false;
3121 return false;
3125 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3126 result in TARGET. CMP0 and TARGET are register_operands that have
3127 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3128 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3130 static void
3131 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3132 rtx target, rtx cmp0, rtx cmp1)
3134 /* First see if there is a MIPS instruction that can do this operation
3135 with CMP1 in its current form. If not, try to canonicalize the
3136 comparison to LT. If that fails, try doing the same for the
3137 inverse operation. If that also fails, force CMP1 into a register
3138 and try again. */
3139 if (mips_relational_operand_ok_p (code, cmp1))
3140 mips_emit_binary (code, target, cmp0, cmp1);
3141 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3142 mips_emit_binary (code, target, cmp0, cmp1);
3143 else
3145 enum rtx_code inv_code = reverse_condition (code);
3146 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3148 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3149 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3151 else if (invert_ptr == 0)
3153 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3154 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3155 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3157 else
3159 *invert_ptr = !*invert_ptr;
3160 mips_emit_binary (inv_code, target, cmp0, cmp1);
3165 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3166 The register will have the same mode as CMP0. */
3168 static rtx
3169 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3171 if (cmp1 == const0_rtx)
3172 return cmp0;
3174 if (uns_arith_operand (cmp1, VOIDmode))
3175 return expand_binop (GET_MODE (cmp0), xor_optab,
3176 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3178 return expand_binop (GET_MODE (cmp0), sub_optab,
3179 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3182 /* Convert *CODE into a code that can be used in a floating-point
3183 scc instruction (c.<cond>.<fmt>). Return true if the values of
3184 the condition code registers will be inverted, with 0 indicating
3185 that the condition holds. */
3187 static bool
3188 mips_reverse_fp_cond_p (enum rtx_code *code)
3190 switch (*code)
3192 case NE:
3193 case LTGT:
3194 case ORDERED:
3195 *code = reverse_condition_maybe_unordered (*code);
3196 return true;
3198 default:
3199 return false;
3203 /* Convert a comparison into something that can be used in a branch or
3204 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3205 being compared and *CODE is the code used to compare them.
3207 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3208 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3209 otherwise any standard branch condition can be used. The standard branch
3210 conditions are:
3212 - EQ/NE between two registers.
3213 - any comparison between a register and zero. */
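/* For example, a branch on (lt a b) between two registers is implemented
   below as an slt into a fresh register followed by a branch on that
   register being nonzero, whereas a floating-point comparison is done
   with a c.cond.fmt instruction and the branch then tests the resulting
   condition-code register.  */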
3215 static void
3216 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3218 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3220 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3222 *op0 = cmp_operands[0];
3223 *op1 = cmp_operands[1];
3225 else if (*code == EQ || *code == NE)
3227 if (need_eq_ne_p)
3229 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3230 *op1 = const0_rtx;
3232 else
3234 *op0 = cmp_operands[0];
3235 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3238 else
3240 /* The comparison needs a separate scc instruction. Store the
3241 result of the scc in *OP0 and compare it against zero. */
3242 bool invert = false;
3243 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3244 *op1 = const0_rtx;
3245 mips_emit_int_relational (*code, &invert, *op0,
3246 cmp_operands[0], cmp_operands[1]);
3247 *code = (invert ? EQ : NE);
3250 else
3252 enum rtx_code cmp_code;
3254 /* Floating-point tests use a separate c.cond.fmt comparison to
3255 set a condition code register. The branch or conditional move
3256 will then compare that register against zero.
3258 Set CMP_CODE to the code of the comparison instruction and
3259 *CODE to the code that the branch or move should use. */
3260 cmp_code = *code;
3261 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3262 *op0 = (ISA_HAS_8CC
3263 ? gen_reg_rtx (CCmode)
3264 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3265 *op1 = const0_rtx;
3266 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3270 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3271 Store the result in TARGET and return true if successful.
3273 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3275 bool
3276 mips_emit_scc (enum rtx_code code, rtx target)
3278 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3279 return false;
3281 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3282 if (code == EQ || code == NE)
3284 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3285 mips_emit_binary (code, target, zie, const0_rtx);
3287 else
3288 mips_emit_int_relational (code, 0, target,
3289 cmp_operands[0], cmp_operands[1]);
3290 return true;
3293 /* Emit the common code for doing conditional branches.
3294 operand[0] is the label to jump to.
3295 The comparison operands are saved away by cmp{si,di,sf,df}. */
3297 void
3298 gen_conditional_branch (rtx *operands, enum rtx_code code)
3300 rtx op0, op1, condition;
3302 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3303 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3304 emit_jump_insn (gen_condjump (condition, operands[0]));
3307 /* Implement:
3309 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3310 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3312 void
3313 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3314 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3316 rtx cmp_result;
3317 bool reversed_p;
3319 reversed_p = mips_reverse_fp_cond_p (&cond);
3320 cmp_result = gen_reg_rtx (CCV2mode);
3321 emit_insn (gen_scc_ps (cmp_result,
3322 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3323 if (reversed_p)
3324 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3325 cmp_result));
3326 else
3327 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3328 cmp_result));
3331 /* Emit the common code for conditional moves. OPERANDS is the array
3332 of operands passed to the conditional move define_expand. */
3334 void
3335 gen_conditional_move (rtx *operands)
3337 enum rtx_code code;
3338 rtx op0, op1;
3340 code = GET_CODE (operands[1]);
3341 mips_emit_compare (&code, &op0, &op1, true);
3342 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3343 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3344 gen_rtx_fmt_ee (code,
3345 GET_MODE (op0),
3346 op0, op1),
3347 operands[2], operands[3])));
3350 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3351 the conditional_trap expander. */
3353 void
3354 mips_gen_conditional_trap (rtx *operands)
3356 rtx op0, op1;
3357 enum rtx_code cmp_code = GET_CODE (operands[0]);
3358 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3360 /* MIPS conditional trap machine instructions don't have GT or LE
3361 flavors, so we must invert the comparison and convert to LT and
3362 GE, respectively. */
3363 switch (cmp_code)
3365 case GT: cmp_code = LT; break;
3366 case LE: cmp_code = GE; break;
3367 case GTU: cmp_code = LTU; break;
3368 case LEU: cmp_code = GEU; break;
3369 default: break;
3371 if (cmp_code == GET_CODE (operands[0]))
3373 op0 = cmp_operands[0];
3374 op1 = cmp_operands[1];
3376 else
3378 op0 = cmp_operands[1];
3379 op1 = cmp_operands[0];
3381 op0 = force_reg (mode, op0);
3382 if (!arith_operand (op1, mode))
3383 op1 = force_reg (mode, op1);
3385 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3386 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3387 operands[1]));
3390 /* Load function address ADDR into register DEST. SIBCALL_P is true
3391 if the address is needed for a sibling call. */
3393 static void
3394 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3396 /* If we're generating PIC, and this call is to a global function,
3397 try to allow its address to be resolved lazily. This isn't
3398 possible for NewABI sibcalls since the value of $gp on entry
3399 to the stub would be our caller's gp, not ours. */
3400 if (TARGET_EXPLICIT_RELOCS
3401 && !(sibcall_p && TARGET_NEWABI)
3402 && global_got_operand (addr, VOIDmode))
3404 rtx high, lo_sum_symbol;
3406 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3407 addr, SYMBOL_GOTOFF_CALL);
3408 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3409 if (Pmode == SImode)
3410 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3411 else
3412 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3414 else
3415 emit_move_insn (dest, addr);
3419 /* Expand a call or call_value instruction. RESULT is where the
3420 result will go (null for calls), ADDR is the address of the
3421 function, ARGS_SIZE is the size of the arguments and AUX is
3422 the value passed to us by mips_function_arg. SIBCALL_P is true
3423 if we are expanding a sibling call, false if we're expanding
3424 a normal call. */
3426 void
3427 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3429 rtx orig_addr, pattern, insn;
3431 orig_addr = addr;
3432 if (!call_insn_operand (addr, VOIDmode))
3434 addr = gen_reg_rtx (Pmode);
3435 mips_load_call_address (addr, orig_addr, sibcall_p);
3438 if (TARGET_MIPS16
3439 && mips16_hard_float
3440 && build_mips16_call_stub (result, addr, args_size,
3441 aux == 0 ? 0 : (int) GET_MODE (aux)))
3442 return;
3444 if (result == 0)
3445 pattern = (sibcall_p
3446 ? gen_sibcall_internal (addr, args_size)
3447 : gen_call_internal (addr, args_size));
3448 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3450 rtx reg1, reg2;
3452 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3453 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3454 pattern =
3455 (sibcall_p
3456 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3457 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3459 else
3460 pattern = (sibcall_p
3461 ? gen_sibcall_value_internal (result, addr, args_size)
3462 : gen_call_value_internal (result, addr, args_size));
3464 insn = emit_call_insn (pattern);
3466 /* Lazy-binding stubs require $gp to be valid on entry. */
3467 if (global_got_operand (orig_addr, VOIDmode))
3468 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3472 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3474 static bool
3475 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3476 tree exp ATTRIBUTE_UNUSED)
3478 return TARGET_SIBCALLS;
3481 /* Emit code to move general operand SRC into condition-code
3482 register DEST. SCRATCH is a scratch TFmode float register.
3483 The sequence is:
3485 FP1 = SRC
3486 FP2 = 0.0f
3487 DEST = FP2 < FP1
3489 where FP1 and FP2 are single-precision float registers
3490 taken from SCRATCH. */
3492 void
3493 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3495 rtx fp1, fp2;
3497 /* Change the source to SFmode. */
3498 if (MEM_P (src))
3499 src = adjust_address (src, SFmode, 0);
3500 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3501 src = gen_rtx_REG (SFmode, true_regnum (src));
3503 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3504 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3506 emit_move_insn (copy_rtx (fp1), src);
3507 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3508 emit_insn (gen_slt_sf (dest, fp2, fp1));
3511 /* Emit code to change the current function's return address to
3512 ADDRESS. SCRATCH is available as a scratch register, if needed.
3513 ADDRESS and SCRATCH are both word-mode GPRs. */
3515 void
3516 mips_set_return_address (rtx address, rtx scratch)
3518 rtx slot_address;
3520 compute_frame_size (get_frame_size ());
3521 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3522 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3523 cfun->machine->frame.gp_sp_offset);
3525 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3528 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3529 Assume that the areas do not overlap. */
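/* For example (given the chunk selection below, on a 32-bit target),
   copying 9 bytes between word-aligned buffers emits two word loads, two
   word stores and a final single-byte move; operands with less than word
   alignment instead use the lwl/lwr and swl/swr pairs generated by
   mips_expand_unaligned_load and mips_expand_unaligned_store.  */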
3531 static void
3532 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3534 HOST_WIDE_INT offset, delta;
3535 unsigned HOST_WIDE_INT bits;
3536 int i;
3537 enum machine_mode mode;
3538 rtx *regs;
3540 /* Work out how many bits to move at a time. If both operands have
3541 half-word alignment, it is usually better to move in half words.
3542 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3543 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3544 Otherwise move word-sized chunks. */
3545 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3546 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3547 bits = BITS_PER_WORD / 2;
3548 else
3549 bits = BITS_PER_WORD;
3551 mode = mode_for_size (bits, MODE_INT, 0);
3552 delta = bits / BITS_PER_UNIT;
3554 /* Allocate a buffer for the temporary registers. */
3555 regs = alloca (sizeof (rtx) * length / delta);
3557 /* Load as many BITS-sized chunks as possible. Use a normal load if
3558 the source has enough alignment, otherwise use left/right pairs. */
3559 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3561 regs[i] = gen_reg_rtx (mode);
3562 if (MEM_ALIGN (src) >= bits)
3563 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3564 else
3566 rtx part = adjust_address (src, BLKmode, offset);
3567 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3568 gcc_unreachable ();
3572 /* Copy the chunks to the destination. */
3573 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3574 if (MEM_ALIGN (dest) >= bits)
3575 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3576 else
3578 rtx part = adjust_address (dest, BLKmode, offset);
3579 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3580 gcc_unreachable ();
3583 /* Mop up any left-over bytes. */
3584 if (offset < length)
3586 src = adjust_address (src, BLKmode, offset);
3587 dest = adjust_address (dest, BLKmode, offset);
3588 move_by_pieces (dest, src, length - offset,
3589 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3593 #define MAX_MOVE_REGS 4
3594 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3597 /* Helper function for doing a loop-based block operation on memory
3598 reference MEM. Each iteration of the loop will operate on LENGTH
3599 bytes of MEM.
3601 Create a new base register for use within the loop and point it to
3602 the start of MEM. Create a new memory reference that uses this
3603 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3605 static void
3606 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3607 rtx *loop_reg, rtx *loop_mem)
3609 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3611 /* Although the new mem does not refer to a known location,
3612 it does keep up to LENGTH bytes of alignment. */
3613 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3614 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3618 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3619 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3620 memory regions do not overlap. */
3622 static void
3623 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3625 rtx label, src_reg, dest_reg, final_src;
3626 HOST_WIDE_INT leftover;
3628 leftover = length % MAX_MOVE_BYTES;
3629 length -= leftover;
3631 /* Create registers and memory references for use within the loop. */
3632 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3633 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3635 /* Calculate the value that SRC_REG should have after the last iteration
3636 of the loop. */
3637 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3638 0, 0, OPTAB_WIDEN);
3640 /* Emit the start of the loop. */
3641 label = gen_label_rtx ();
3642 emit_label (label);
3644 /* Emit the loop body. */
3645 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3647 /* Move on to the next block. */
3648 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3649 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3651 /* Emit the loop condition. */
3652 if (Pmode == DImode)
3653 emit_insn (gen_cmpdi (src_reg, final_src));
3654 else
3655 emit_insn (gen_cmpsi (src_reg, final_src));
3656 emit_jump_insn (gen_bne (label));
3658 /* Mop up any left-over bytes. */
3659 if (leftover)
3660 mips_block_move_straight (dest, src, leftover);
3663 /* Expand a movmemsi instruction. */
3665 bool
3666 mips_expand_block_move (rtx dest, rtx src, rtx length)
3668 if (GET_CODE (length) == CONST_INT)
3670 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3672 mips_block_move_straight (dest, src, INTVAL (length));
3673 return true;
3675 else if (optimize)
3677 mips_block_move_loop (dest, src, INTVAL (length));
3678 return true;
3681 return false;
3684 /* Argument support functions. */
3686 /* Initialize CUMULATIVE_ARGS for a function. */
3688 void
3689 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3690 rtx libname ATTRIBUTE_UNUSED)
3692 static CUMULATIVE_ARGS zero_cum;
3693 tree param, next_param;
3695 *cum = zero_cum;
3696 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3698 /* Determine if this function has variable arguments. This is
3699 indicated by the last argument being 'void_type_node' if there
3700 are no variable arguments. The standard MIPS calling sequence
3701 passes all arguments in the general purpose registers in this case. */
3703 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3704 param != 0; param = next_param)
3706 next_param = TREE_CHAIN (param);
3707 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3708 cum->gp_reg_found = 1;
3713 /* Fill INFO with information about a single argument. CUM is the
3714 cumulative state for earlier arguments. MODE is the mode of this
3715 argument and TYPE is its type (if known). NAMED is true if this
3716 is a named (fixed) argument rather than a variable one. */
3718 static void
3719 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3720 tree type, int named, struct mips_arg_info *info)
3722 bool doubleword_aligned_p;
3723 unsigned int num_bytes, num_words, max_regs;
3725 /* Work out the size of the argument. */
3726 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3727 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3729 /* Decide whether it should go in a floating-point register, assuming
3730 one is free. Later code checks for availability.
3732 The checks against UNITS_PER_FPVALUE handle the soft-float and
3733 single-float cases. */
3734 switch (mips_abi)
3736 case ABI_EABI:
3737 /* The EABI conventions have traditionally been defined in terms
3738 of TYPE_MODE, regardless of the actual type. */
3739 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3740 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3741 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3742 break;
3744 case ABI_32:
3745 case ABI_O64:
3746 /* Only leading floating-point scalars are passed in
3747 floating-point registers. We also handle vector floats the same
3748 way, which is OK because they are not covered by the standard ABI. */
3749 info->fpr_p = (!cum->gp_reg_found
3750 && cum->arg_number < 2
3751 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3752 || VECTOR_FLOAT_TYPE_P (type))
3753 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3754 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3755 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3756 break;
3758 case ABI_N32:
3759 case ABI_64:
3760 /* Scalar and complex floating-point types are passed in
3761 floating-point registers. */
3762 info->fpr_p = (named
3763 && (type == 0 || FLOAT_TYPE_P (type))
3764 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3765 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3766 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3767 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3769 /* ??? According to the ABI documentation, the real and imaginary
3770 parts of complex floats should be passed in individual registers.
3771 The real and imaginary parts of stack arguments are supposed
3772 to be contiguous and there should be an extra word of padding
3773 at the end.
3775 This has two problems. First, it makes it impossible to use a
3776 single "void *" va_list type, since register and stack arguments
3777 are passed differently. (At the time of writing, MIPSpro cannot
3778 handle complex float varargs correctly.) Second, it's unclear
3779 what should happen when there is only one register free.
3781 For now, we assume that named complex floats should go into FPRs
3782 if there are two FPRs free, otherwise they should be passed in the
3783 same way as a struct containing two floats. */
3784 if (info->fpr_p
3785 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3786 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3788 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3789 info->fpr_p = false;
3790 else
3791 num_words = 2;
3793 break;
3795 default:
3796 gcc_unreachable ();
3799 /* See whether the argument has doubleword alignment. */
3800 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3802 /* Set REG_OFFSET to the register count we're interested in.
3803 The EABI allocates the floating-point registers separately,
3804 but the other ABIs allocate them like integer registers. */
3805 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3806 ? cum->num_fprs
3807 : cum->num_gprs);
3809 /* Advance to an even register if the argument is doubleword-aligned. */
3810 if (doubleword_aligned_p)
3811 info->reg_offset += info->reg_offset & 1;
3813 /* Work out the offset of a stack argument. */
3814 info->stack_offset = cum->stack_words;
3815 if (doubleword_aligned_p)
3816 info->stack_offset += info->stack_offset & 1;
3818 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3820 /* Partition the argument between registers and stack. */
3821 info->reg_words = MIN (num_words, max_regs);
3822 info->stack_words = num_words - info->reg_words;
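/* Worked example (illustrative only, assuming MAX_ARGS_IN_REGISTERS
   is 8 for n32): if six argument registers are already in use
   (reg_offset == 6) and a 24-byte structure arrives, then
   num_words == 3 and max_regs == 2, so reg_words == 2 and
   stack_words == 1; the first 16 bytes travel in registers and the
   final 8 bytes on the stack.  */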
3826 /* Implement FUNCTION_ARG_ADVANCE. */
3828 void
3829 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3830 tree type, int named)
3832 struct mips_arg_info info;
3834 mips_arg_info (cum, mode, type, named, &info);
3836 if (!info.fpr_p)
3837 cum->gp_reg_found = true;
3839 /* See the comment above the cumulative args structure in mips.h
3840 for an explanation of what this code does. It assumes the O32
3841 ABI, which passes at most 2 arguments in float registers. */
3842 if (cum->arg_number < 2 && info.fpr_p)
3843 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
3845 if (mips_abi != ABI_EABI || !info.fpr_p)
3846 cum->num_gprs = info.reg_offset + info.reg_words;
3847 else if (info.reg_words > 0)
3848 cum->num_fprs += FP_INC;
3850 if (info.stack_words > 0)
3851 cum->stack_words = info.stack_offset + info.stack_words;
3853 cum->arg_number++;
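/* Illustrative sketch (not part of the original source): for a mips16
   call to a function taking (float, double) under o32, the first
   argument contributes 1 << 0 and the second 2 << 2, so cum->fp_code
   ends up as 9.  function_arg later returns this value as the mode of
   a dummy REG when it is called with VOIDmode.  */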
3856 /* Implement FUNCTION_ARG. */
3858 struct rtx_def *
3859 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3860 tree type, int named)
3862 struct mips_arg_info info;
3864 /* We will be called with a mode of VOIDmode after the last argument
3865 has been seen. Whatever we return will be passed to the call
3866 insn. If we need a mips16 fp_code, return a REG with the code
3867 stored as the mode. */
3868 if (mode == VOIDmode)
3870 if (TARGET_MIPS16 && cum->fp_code != 0)
3871 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3873 else
3874 return 0;
3877 mips_arg_info (cum, mode, type, named, &info);
3879 /* Return straight away if the whole argument is passed on the stack. */
3880 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3881 return 0;
3883 if (type != 0
3884 && TREE_CODE (type) == RECORD_TYPE
3885 && TARGET_NEWABI
3886 && TYPE_SIZE_UNIT (type)
3887 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3888 && named)
3890 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3891 structure contains a double in its entirety, then that 64 bit
3892 chunk is passed in a floating point register. */
3893 tree field;
3895 /* First check to see if there is any such field. */
3896 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3897 if (TREE_CODE (field) == FIELD_DECL
3898 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3899 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3900 && host_integerp (bit_position (field), 0)
3901 && int_bit_position (field) % BITS_PER_WORD == 0)
3902 break;
3904 if (field != 0)
3906 /* Now handle the special case by returning a PARALLEL
3907 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3908 chunks are passed in registers. */
3909 unsigned int i;
3910 HOST_WIDE_INT bitpos;
3911 rtx ret;
3913 /* assign_parms checks the mode of ENTRY_PARM, so we must
3914 use the actual mode here. */
3915 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3917 bitpos = 0;
3918 field = TYPE_FIELDS (type);
3919 for (i = 0; i < info.reg_words; i++)
3921 rtx reg;
3923 for (; field; field = TREE_CHAIN (field))
3924 if (TREE_CODE (field) == FIELD_DECL
3925 && int_bit_position (field) >= bitpos)
3926 break;
3928 if (field
3929 && int_bit_position (field) == bitpos
3930 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3931 && !TARGET_SOFT_FLOAT
3932 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3933 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3934 else
3935 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3937 XVECEXP (ret, 0, i)
3938 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3939 GEN_INT (bitpos / BITS_PER_UNIT));
3941 bitpos += BITS_PER_WORD;
3943 return ret;
3947 /* Handle the n32/n64 conventions for passing complex floating-point
3948 arguments in FPR pairs. The real part goes in the lower register
3949 and the imaginary part goes in the upper register. */
3950 if (TARGET_NEWABI
3951 && info.fpr_p
3952 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3954 rtx real, imag;
3955 enum machine_mode inner;
3956 int reg;
3958 inner = GET_MODE_INNER (mode);
3959 reg = FP_ARG_FIRST + info.reg_offset;
3960 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
3962 /* Real part in registers, imaginary part on stack. */
3963 gcc_assert (info.stack_words == info.reg_words);
3964 return gen_rtx_REG (inner, reg);
3966 else
3968 gcc_assert (info.stack_words == 0);
3969 real = gen_rtx_EXPR_LIST (VOIDmode,
3970 gen_rtx_REG (inner, reg),
3971 const0_rtx);
3972 imag = gen_rtx_EXPR_LIST (VOIDmode,
3973 gen_rtx_REG (inner,
3974 reg + info.reg_words / 2),
3975 GEN_INT (GET_MODE_SIZE (inner)));
3976 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3980 if (!info.fpr_p)
3981 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3982 else if (info.reg_offset == 1)
3983 /* This code handles the special o32 case in which the second word
3984 of the argument structure is passed in floating-point registers. */
3985 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3986 else
3987 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
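/* Illustrative example (not from the original source): a named
   _Complex double argument under n64 with both halves in registers
   yields a PARALLEL whose first element puts the real part in the
   first free FPR at byte offset 0 and whose second element puts the
   imaginary part in the next FPR at byte offset 8
   (GET_MODE_SIZE (DFmode)).  */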
3991 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3993 static int
3994 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3995 enum machine_mode mode, tree type, bool named)
3997 struct mips_arg_info info;
3999 mips_arg_info (cum, mode, type, named, &info);
4000 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
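/* Example (illustrative): an argument with reg_words == 2 and
   stack_words == 1 is reported as having 2 * UNITS_PER_WORD bytes
   passed in registers; an argument that fits entirely in registers
   is reported as 0.  */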
4004 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4005 PARM_BOUNDARY bits of alignment, but will be given anything up
4006 to STACK_BOUNDARY bits if the type requires it. */
4008 int
4009 function_arg_boundary (enum machine_mode mode, tree type)
4011 unsigned int alignment;
4013 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4014 if (alignment < PARM_BOUNDARY)
4015 alignment = PARM_BOUNDARY;
4016 if (alignment > STACK_BOUNDARY)
4017 alignment = STACK_BOUNDARY;
4018 return alignment;
4021 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4022 upward rather than downward. In other words, return true if the
4023 first byte of the stack slot has useful data, false if the last
4024 byte does. */
4026 bool
4027 mips_pad_arg_upward (enum machine_mode mode, tree type)
4029 /* On little-endian targets, the first byte of every stack argument
4030 is passed in the first byte of the stack slot. */
4031 if (!BYTES_BIG_ENDIAN)
4032 return true;
4034 /* Otherwise, integral types are padded downward: the last byte of a
4035 stack argument is passed in the last byte of the stack slot. */
4036 if (type != 0
4037 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4038 : GET_MODE_CLASS (mode) == MODE_INT)
4039 return false;
4041 /* Big-endian o64 pads floating-point arguments downward. */
4042 if (mips_abi == ABI_O64)
4043 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4044 return false;
4046 /* Other types are padded upward for o32, o64, n32 and n64. */
4047 if (mips_abi != ABI_EABI)
4048 return true;
4050 /* Arguments smaller than a stack slot are padded downward. */
4051 if (mode != BLKmode)
4052 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4053 else
4054 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
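/* Example (illustrative): on a big-endian o32 target a short is
   padded downward, so its two bytes occupy the end of the 4-byte
   argument slot, whereas a two-byte structure is padded upward and
   starts at the beginning of its slot.  */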
4058 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4059 if the least significant byte of the register has useful data. Return
4060 the opposite if the most significant byte does. */
4062 bool
4063 mips_pad_reg_upward (enum machine_mode mode, tree type)
4065 /* No shifting is required for floating-point arguments. */
4066 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4067 return !BYTES_BIG_ENDIAN;
4069 /* Otherwise, apply the same padding to register arguments as we do
4070 to stack arguments. */
4071 return mips_pad_arg_upward (mode, type);
4074 static void
4075 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4076 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4077 int no_rtl)
4079 CUMULATIVE_ARGS local_cum;
4080 int gp_saved, fp_saved;
4082 /* The caller has advanced CUM up to, but not beyond, the last named
4083 argument. Advance a local copy of CUM past the last "real" named
4084 argument, to find out how many registers are left over. */
4086 local_cum = *cum;
4087 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4089 /* Find out how many registers we need to save. */
4090 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4091 fp_saved = (EABI_FLOAT_VARARGS_P
4092 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4093 : 0);
4095 if (!no_rtl)
4097 if (gp_saved > 0)
4099 rtx ptr, mem;
4101 ptr = plus_constant (virtual_incoming_args_rtx,
4102 REG_PARM_STACK_SPACE (cfun->decl)
4103 - gp_saved * UNITS_PER_WORD);
4104 mem = gen_rtx_MEM (BLKmode, ptr);
4105 set_mem_alias_set (mem, get_varargs_alias_set ());
4107 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4108 mem, gp_saved);
4110 if (fp_saved > 0)
4112 /* We can't use move_block_from_reg, because it will use
4113 the wrong mode. */
4114 enum machine_mode mode;
4115 int off, i;
4117 /* Set OFF to the offset from virtual_incoming_args_rtx of
4118 the first float register. The FP save area lies below
4119 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4120 off = -gp_saved * UNITS_PER_WORD;
4121 off &= ~(UNITS_PER_FPVALUE - 1);
4122 off -= fp_saved * UNITS_PER_FPREG;
4124 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4126 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4128 rtx ptr, mem;
4130 ptr = plus_constant (virtual_incoming_args_rtx, off);
4131 mem = gen_rtx_MEM (mode, ptr);
4132 set_mem_alias_set (mem, get_varargs_alias_set ());
4133 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4134 off += UNITS_PER_HWFPVALUE;
4138 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4139 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4140 + fp_saved * UNITS_PER_FPREG);
4143 /* Create the va_list data type.
4144 We keep 3 pointers, and two offsets.
4145 Two pointers are to the overflow area, which starts at the CFA.
4146 One of these is constant, for addressing into the GPR save area below it.
4147 The other is advanced up the stack through the overflow region.
4148 The third pointer is to the GPR save area. Since the FPR save area
4149 is just below it, we can address FPR slots off this pointer.
4150 We also keep two one-byte offsets, which are to be subtracted from the
4151 constant pointers to yield addresses in the GPR and FPR save areas.
4152 These are downcounted as float or non-float arguments are used,
4153 and when they get to zero, the argument must be obtained from the
4154 overflow region.
4155 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4156 pointer is enough. It's started at the GPR save area, and is
4157 advanced, period.
4158 Note that the GPR save area is not constant size, due to optimization
4159 in the prologue. Hence, we can't use a design with two pointers
4160 and two offsets, although we could have designed this with two pointers
4161 and three offsets. */
4163 static tree
4164 mips_build_builtin_va_list (void)
4166 if (EABI_FLOAT_VARARGS_P)
4168 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4169 tree array, index;
4171 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4173 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4174 ptr_type_node);
4175 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4176 ptr_type_node);
4177 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4178 ptr_type_node);
4179 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4180 unsigned_char_type_node);
4181 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4182 unsigned_char_type_node);
4183 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4184 warn on every user file. */
4185 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4186 array = build_array_type (unsigned_char_type_node,
4187 build_index_type (index));
4188 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4190 DECL_FIELD_CONTEXT (f_ovfl) = record;
4191 DECL_FIELD_CONTEXT (f_gtop) = record;
4192 DECL_FIELD_CONTEXT (f_ftop) = record;
4193 DECL_FIELD_CONTEXT (f_goff) = record;
4194 DECL_FIELD_CONTEXT (f_foff) = record;
4195 DECL_FIELD_CONTEXT (f_res) = record;
4197 TYPE_FIELDS (record) = f_ovfl;
4198 TREE_CHAIN (f_ovfl) = f_gtop;
4199 TREE_CHAIN (f_gtop) = f_ftop;
4200 TREE_CHAIN (f_ftop) = f_goff;
4201 TREE_CHAIN (f_goff) = f_foff;
4202 TREE_CHAIN (f_foff) = f_res;
4204 layout_type (record);
4205 return record;
4207 else if (TARGET_IRIX && TARGET_IRIX6)
4208 /* On IRIX 6, this type is 'char *'. */
4209 return build_pointer_type (char_type_node);
4210 else
4211 /* Otherwise, we use 'void *'. */
4212 return ptr_type_node;
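/* For illustration only: with 32-bit pointers, the EABI va_list record
   built above is laid out roughly like

     struct {
       void *__overflow_argptr;     -- next overflow (stack) argument
       void *__gpr_top;             -- top of the GPR save area
       void *__fpr_top;             -- top of the FPR save area
       unsigned char __gpr_offset;  -- bytes left in the GPR save area
       unsigned char __fpr_offset;  -- bytes left in the FPR save area
       unsigned char __reserved[2]; -- explicit padding (see above)
     };

   The two offsets count down towards zero; once an offset reaches
   zero, the corresponding arguments are fetched from the overflow
   area instead.  */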
4215 /* Implement va_start. */
4217 void
4218 mips_va_start (tree valist, rtx nextarg)
4220 if (EABI_FLOAT_VARARGS_P)
4222 const CUMULATIVE_ARGS *cum;
4223 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4224 tree ovfl, gtop, ftop, goff, foff;
4225 tree t;
4226 int gpr_save_area_size;
4227 int fpr_save_area_size;
4228 int fpr_offset;
4230 cum = &current_function_args_info;
4231 gpr_save_area_size
4232 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4233 fpr_save_area_size
4234 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4236 f_ovfl = TYPE_FIELDS (va_list_type_node);
4237 f_gtop = TREE_CHAIN (f_ovfl);
4238 f_ftop = TREE_CHAIN (f_gtop);
4239 f_goff = TREE_CHAIN (f_ftop);
4240 f_foff = TREE_CHAIN (f_goff);
4242 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4243 NULL_TREE);
4244 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4245 NULL_TREE);
4246 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4247 NULL_TREE);
4248 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4249 NULL_TREE);
4250 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4251 NULL_TREE);
4253 /* Emit code to initialize OVFL, which points to the next varargs
4254 stack argument. CUM->STACK_WORDS gives the number of stack
4255 words used by named arguments. */
4256 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4257 if (cum->stack_words > 0)
4258 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4259 build_int_cst (NULL_TREE,
4260 cum->stack_words * UNITS_PER_WORD));
4261 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4262 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4264 /* Emit code to initialize GTOP, the top of the GPR save area. */
4265 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4266 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4267 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4269 /* Emit code to initialize FTOP, the top of the FPR save area.
4270 This address is gpr_save_area_bytes below GTOP, rounded
4271 down to the next fp-aligned boundary. */
4272 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4273 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4274 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4275 if (fpr_offset)
4276 t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4277 build_int_cst (NULL_TREE, -fpr_offset));
4278 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4279 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4281 /* Emit code to initialize GOFF, the offset from GTOP of the
4282 next GPR argument. */
4283 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4284 build_int_cst (NULL_TREE, gpr_save_area_size));
4285 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4287 /* Likewise emit code to initialize FOFF, the offset from FTOP
4288 of the next FPR argument. */
4289 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4290 build_int_cst (NULL_TREE, fpr_save_area_size));
4291 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4293 else
4295 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4296 std_expand_builtin_va_start (valist, nextarg);
4300 /* Implement va_arg. */
4302 static tree
4303 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4305 HOST_WIDE_INT size, rsize;
4306 tree addr;
4307 bool indirect;
4309 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4311 if (indirect)
4312 type = build_pointer_type (type);
4314 size = int_size_in_bytes (type);
4315 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4317 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4318 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4319 else
4321 /* Not a simple merged stack. */
4323 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4324 tree ovfl, top, off, align;
4325 HOST_WIDE_INT osize;
4326 tree t, u;
4328 f_ovfl = TYPE_FIELDS (va_list_type_node);
4329 f_gtop = TREE_CHAIN (f_ovfl);
4330 f_ftop = TREE_CHAIN (f_gtop);
4331 f_goff = TREE_CHAIN (f_ftop);
4332 f_foff = TREE_CHAIN (f_goff);
4334 /* We maintain separate pointers and offsets for floating-point
4335 and integer arguments, but we need similar code in both cases.
4336 Let:
4338 TOP be the top of the register save area;
4339 OFF be the offset from TOP of the next register;
4340 ADDR_RTX be the address of the argument;
4341 RSIZE be the number of bytes used to store the argument
4342 when it's in the register save area;
4343 OSIZE be the number of bytes used to store it when it's
4344 in the stack overflow area; and
4345 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4347 The code we want is:
4349 1: off &= -rsize; // round down
4350 2: if (off != 0)
4351 3: {
4352 4: addr_rtx = top - off;
4353 5: off -= rsize;
4354 6: }
4355 7: else
4356 8: {
4357 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
4358 10: addr_rtx = ovfl + PADDING;
4359 11: ovfl += osize;
4360 12: }
4362 [1] and [9] can sometimes be optimized away. */
4364 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4365 NULL_TREE);
4367 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4368 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4370 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4371 NULL_TREE);
4372 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4373 NULL_TREE);
4375 /* When floating-point registers are saved to the stack,
4376 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4377 of the float's precision. */
4378 rsize = UNITS_PER_HWFPVALUE;
4380 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4381 (= PARM_BOUNDARY bits). This can be different from RSIZE
4382 in two cases:
4384 (1) On 32-bit targets when TYPE is a structure such as:
4386 struct s { float f; };
4388 Such structures are passed in paired FPRs, so RSIZE
4389 will be 8 bytes. However, the structure only takes
4390 up 4 bytes of memory, so OSIZE will only be 4.
4392 (2) In combinations such as -mgp64 -msingle-float
4393 -fshort-double. Doubles passed in registers
4394 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4395 but those passed on the stack take up
4396 UNITS_PER_WORD bytes. */
4397 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4399 else
4401 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4402 NULL_TREE);
4403 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4404 NULL_TREE);
4405 if (rsize > UNITS_PER_WORD)
4407 /* [1] Emit code for: off &= -rsize. */
4408 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4409 build_int_cst (NULL_TREE, -rsize));
4410 t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
4411 gimplify_and_add (t, pre_p);
4413 osize = rsize;
4416 /* [2] Emit code to branch if off == 0. */
4417 t = build2 (NE_EXPR, boolean_type_node, off,
4418 build_int_cst (TREE_TYPE (off), 0));
4419 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4421 /* [5] Emit code for: off -= rsize. We do this as a form of
4422 post-decrement not available to C. Also widen for the coming
4423 coming pointer arithmetic. */
4424 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4425 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4426 t = fold_convert (sizetype, t);
4427 t = fold_convert (TREE_TYPE (top), t);
4429 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4430 the argument has RSIZE - SIZE bytes of leading padding. */
4431 t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4432 if (BYTES_BIG_ENDIAN && rsize > size)
4434 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4435 rsize - size));
4436 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4438 COND_EXPR_THEN (addr) = t;
4440 if (osize > UNITS_PER_WORD)
4442 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
4443 u = fold_convert (TREE_TYPE (ovfl),
4444 build_int_cst (NULL_TREE, osize - 1));
4445 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4446 u = fold_convert (TREE_TYPE (ovfl),
4447 build_int_cst (NULL_TREE, -osize));
4448 t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4449 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4451 else
4452 align = NULL;
4454 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4455 post-increment ovfl by osize. On big-endian machines,
4456 the argument has OSIZE - SIZE bytes of leading padding. */
4457 u = fold_convert (TREE_TYPE (ovfl),
4458 build_int_cst (NULL_TREE, osize));
4459 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4460 if (BYTES_BIG_ENDIAN && osize > size)
4462 u = fold_convert (TREE_TYPE (t),
4463 build_int_cst (NULL_TREE, osize - size));
4464 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4467 /* String [9] and [10,11] together. */
4468 if (align)
4469 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4470 COND_EXPR_ELSE (addr) = t;
4472 addr = fold_convert (build_pointer_type (type), addr);
4473 addr = build_va_arg_indirect_ref (addr);
4476 if (indirect)
4477 addr = build_va_arg_indirect_ref (addr);
4479 return addr;
4482 /* Return true if it is possible to use left/right accesses for a
4483 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4484 returning true, update *OP, *LEFT and *RIGHT as follows:
4486 *OP is a BLKmode reference to the whole field.
4488 *LEFT is a QImode reference to the first byte if big endian or
4489 the last byte if little endian. This address can be used in the
4490 left-side instructions (lwl, swl, ldl, sdl).
4492 *RIGHT is a QImode reference to the opposite end of the field and
4493 can be used in the partnering right-side instructions (lwr, swr, ldr, sdr). */
4495 static bool
4496 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4497 rtx *left, rtx *right)
4499 rtx first, last;
4501 /* Check that the operand really is a MEM. Not all the extv and
4502 extzv predicates are checked. */
4503 if (!MEM_P (*op))
4504 return false;
4506 /* Check that the size is valid. */
4507 if (width != 32 && (!TARGET_64BIT || width != 64))
4508 return false;
4510 /* We can only access byte-aligned values. Since we are always passed
4511 a reference to the first byte of the field, it is not necessary to
4512 do anything with BITPOS after this check. */
4513 if (bitpos % BITS_PER_UNIT != 0)
4514 return false;
4516 /* Reject aligned bitfields: we want to use a normal load or store
4517 instead of a left/right pair. */
4518 if (MEM_ALIGN (*op) >= width)
4519 return false;
4521 /* Adjust *OP to refer to the whole field. This also has the effect
4522 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4523 *op = adjust_address (*op, BLKmode, 0);
4524 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4526 /* Get references to both ends of the field. We deliberately don't
4527 use the original QImode *OP for FIRST since the new BLKmode one
4528 might have a simpler address. */
4529 first = adjust_address (*op, QImode, 0);
4530 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4532 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4533 be the upper word and RIGHT the lower word. */
4534 if (TARGET_BIG_ENDIAN)
4535 *left = first, *right = last;
4536 else
4537 *left = last, *right = first;
4539 return true;
4543 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4544 Return true on success. We only handle cases where zero_extract is
4545 equivalent to sign_extract. */
4547 bool
4548 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4550 rtx left, right, temp;
4552 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4553 paradoxical word_mode subreg. This is the only case in which
4554 we allow the destination to be larger than the source. */
4555 if (GET_CODE (dest) == SUBREG
4556 && GET_MODE (dest) == DImode
4557 && SUBREG_BYTE (dest) == 0
4558 && GET_MODE (SUBREG_REG (dest)) == SImode)
4559 dest = SUBREG_REG (dest);
4561 /* After the above adjustment, the destination must be the same
4562 width as the source. */
4563 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4564 return false;
4566 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4567 return false;
4569 temp = gen_reg_rtx (GET_MODE (dest));
4570 if (GET_MODE (dest) == DImode)
4572 emit_insn (gen_mov_ldl (temp, src, left));
4573 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4575 else
4577 emit_insn (gen_mov_lwl (temp, src, left));
4578 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4580 return true;
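/* Illustrative example (not from the original source): for a 32-bit
   unaligned load on a big-endian target, LEFT addresses the first
   byte and RIGHT the last, so the sequence above amounts to

	lwl	$dest, 0($addr)
	lwr	$dest, 3($addr)

   with the lwl result forwarded to the lwr pattern through TEMP.  */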
4584 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4585 true on success. */
4587 bool
4588 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4590 rtx left, right;
4591 enum machine_mode mode;
4593 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4594 return false;
4596 mode = mode_for_size (width, MODE_INT, 0);
4597 src = gen_lowpart (mode, src);
4599 if (mode == DImode)
4601 emit_insn (gen_mov_sdl (dest, src, left));
4602 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4604 else
4606 emit_insn (gen_mov_swl (dest, src, left));
4607 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4609 return true;
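/* Likewise for stores (illustrative): a 32-bit unaligned store on a
   big-endian target becomes

	swl	$src, 0($addr)
	swr	$src, 3($addr)

   where LEFT again addresses the first byte and RIGHT the last.  */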
4612 /* Return true if X is a MEM with the same size as MODE. */
4614 bool
4615 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4617 rtx size;
4619 if (!MEM_P (x))
4620 return false;
4622 size = MEM_SIZE (x);
4623 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4626 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4627 source of an "ext" instruction or the destination of an "ins"
4628 instruction. OP must be a register operand and the following
4629 conditions must hold:
4631 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4632 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4633 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4635 Also reject lengths equal to a word as they are better handled
4636 by the move patterns. */
4638 bool
4639 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4641 HOST_WIDE_INT len, pos;
4643 if (!ISA_HAS_EXT_INS
4644 || !register_operand (op, VOIDmode)
4645 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4646 return false;
4648 len = INTVAL (size);
4649 pos = INTVAL (position);
4651 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4652 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4653 return false;
4655 return true;
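/* Example (illustrative): on an ISA with ext/ins, extracting an 8-bit
   field at bit position 3 from an SImode register passes the checks
   above (0 < 8 < 32 and 3 + 8 <= 32), so the extract patterns may use
   a single "ext" instruction.  */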
4658 /* Set up globals to generate code for the ISA or processor
4659 described by INFO. */
4661 static void
4662 mips_set_architecture (const struct mips_cpu_info *info)
4664 if (info != 0)
4666 mips_arch_info = info;
4667 mips_arch = info->cpu;
4668 mips_isa = info->isa;
4673 /* Likewise for tuning. */
4675 static void
4676 mips_set_tune (const struct mips_cpu_info *info)
4678 if (info != 0)
4680 mips_tune_info = info;
4681 mips_tune = info->cpu;
4685 /* Implement TARGET_HANDLE_OPTION. */
4687 static bool
4688 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4690 switch (code)
4692 case OPT_mabi_:
4693 if (strcmp (arg, "32") == 0)
4694 mips_abi = ABI_32;
4695 else if (strcmp (arg, "o64") == 0)
4696 mips_abi = ABI_O64;
4697 else if (strcmp (arg, "n32") == 0)
4698 mips_abi = ABI_N32;
4699 else if (strcmp (arg, "64") == 0)
4700 mips_abi = ABI_64;
4701 else if (strcmp (arg, "eabi") == 0)
4702 mips_abi = ABI_EABI;
4703 else
4704 return false;
4705 return true;
4707 case OPT_march_:
4708 case OPT_mtune_:
4709 return mips_parse_cpu (arg) != 0;
4711 case OPT_mips:
4712 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4713 return mips_isa_info != 0;
4715 case OPT_mno_flush_func:
4716 mips_cache_flush_func = NULL;
4717 return true;
4719 default:
4720 return true;
4724 /* Set up the threshold for data to go into the small data area, instead
4725 of the normal data area, and detect any conflicts in the switches. */
4727 void
4728 override_options (void)
4730 int i, start, regno;
4731 enum machine_mode mode;
4733 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4735 /* The following code determines the architecture and register size.
4736 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4737 The GAS and GCC code should be kept in sync as much as possible. */
4739 if (mips_arch_string != 0)
4740 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4742 if (mips_isa_info != 0)
4744 if (mips_arch_info == 0)
4745 mips_set_architecture (mips_isa_info);
4746 else if (mips_arch_info->isa != mips_isa_info->isa)
4747 error ("-%s conflicts with the other architecture options, "
4748 "which specify a %s processor",
4749 mips_isa_info->name,
4750 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4753 if (mips_arch_info == 0)
4755 #ifdef MIPS_CPU_STRING_DEFAULT
4756 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4757 #else
4758 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4759 #endif
4762 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4763 error ("-march=%s is not compatible with the selected ABI",
4764 mips_arch_info->name);
4766 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4767 if (mips_tune_string != 0)
4768 mips_set_tune (mips_parse_cpu (mips_tune_string));
4770 if (mips_tune_info == 0)
4771 mips_set_tune (mips_arch_info);
4773 /* Set cost structure for the processor. */
4774 if (optimize_size)
4775 mips_cost = &mips_rtx_cost_optimize_size;
4776 else
4777 mips_cost = &mips_rtx_cost_data[mips_tune];
4779 if ((target_flags_explicit & MASK_64BIT) != 0)
4781 /* The user specified the size of the integer registers. Make sure
4782 it agrees with the ABI and ISA. */
4783 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4784 error ("-mgp64 used with a 32-bit processor");
4785 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4786 error ("-mgp32 used with a 64-bit ABI");
4787 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4788 error ("-mgp64 used with a 32-bit ABI");
4790 else
4792 /* Infer the integer register size from the ABI and processor.
4793 Restrict ourselves to 32-bit registers if that's all the
4794 processor has, or if the ABI cannot handle 64-bit registers. */
4795 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4796 target_flags &= ~MASK_64BIT;
4797 else
4798 target_flags |= MASK_64BIT;
4801 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4803 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4804 only one right answer here. */
4805 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4806 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4807 else if (!TARGET_64BIT && TARGET_FLOAT64)
4808 error ("unsupported combination: %s", "-mgp32 -mfp64");
4809 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4810 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4812 else
4814 /* -msingle-float selects 32-bit float registers. Otherwise the
4815 float registers should be the same size as the integer ones. */
4816 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4817 target_flags |= MASK_FLOAT64;
4818 else
4819 target_flags &= ~MASK_FLOAT64;
4822 /* End of code shared with GAS. */
4824 if ((target_flags_explicit & MASK_LONG64) == 0)
4826 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4827 target_flags |= MASK_LONG64;
4828 else
4829 target_flags &= ~MASK_LONG64;
4832 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4833 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4835 /* For some configurations, it is useful to have -march control
4836 the default setting of MASK_SOFT_FLOAT. */
4837 switch ((int) mips_arch)
4839 case PROCESSOR_R4100:
4840 case PROCESSOR_R4111:
4841 case PROCESSOR_R4120:
4842 case PROCESSOR_R4130:
4843 target_flags |= MASK_SOFT_FLOAT;
4844 break;
4846 default:
4847 target_flags &= ~MASK_SOFT_FLOAT;
4848 break;
4852 if (!TARGET_OLDABI)
4853 flag_pcc_struct_return = 0;
4855 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4857 /* If neither -mbranch-likely nor -mno-branch-likely was given
4858 on the command line, set MASK_BRANCHLIKELY based on the target
4859 architecture.
4861 By default, we enable use of Branch Likely instructions on
4862 all architectures which support them with the following
4863 exceptions: when creating MIPS32 or MIPS64 code, and when
4864 tuning for architectures where their use tends to hurt
4865 performance.
4867 The MIPS32 and MIPS64 architecture specifications say "Software
4868 is strongly encouraged to avoid use of Branch Likely
4869 instructions, as they will be removed from a future revision
4870 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4871 issue those instructions unless instructed to do so by
4872 -mbranch-likely. */
4873 if (ISA_HAS_BRANCHLIKELY
4874 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4875 && !(TUNE_MIPS5500 || TUNE_SB1))
4876 target_flags |= MASK_BRANCHLIKELY;
4877 else
4878 target_flags &= ~MASK_BRANCHLIKELY;
4880 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4881 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4883 /* The effect of -mabicalls isn't defined for the EABI. */
4884 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4886 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4887 target_flags &= ~MASK_ABICALLS;
4890 if (TARGET_ABICALLS)
4892 /* We need to set flag_pic for executables as well as DSOs
4893 because we may reference symbols that are not defined in
4894 the final executable. (MIPS does not use things like
4895 copy relocs, for example.)
4897 Also, there is a body of code that uses __PIC__ to distinguish
4898 between -mabicalls and -mno-abicalls code. */
4899 flag_pic = 1;
4900 if (mips_section_threshold > 0)
4901 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4904 /* mips_split_addresses is a half-way house between explicit
4905 relocations and the traditional assembler macros. It can
4906 split absolute 32-bit symbolic constants into a high/lo_sum
4907 pair but uses macros for other sorts of access.
4909 Like explicit relocation support for REL targets, it relies
4910 on GNU extensions in the assembler and the linker.
4912 Although this code should work for -O0, it has traditionally
4913 been treated as an optimization. */
4914 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4915 && optimize && !flag_pic
4916 && !ABI_HAS_64BIT_SYMBOLS)
4917 mips_split_addresses = 1;
4918 else
4919 mips_split_addresses = 0;
4921 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4922 faster code, but at the expense of more nops. Enable it at -O3 and
4923 above. */
4924 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4925 target_flags |= MASK_VR4130_ALIGN;
4927 /* When compiling for the mips16, we cannot use floating point. We
4928 record the original hard float value in mips16_hard_float. */
4929 if (TARGET_MIPS16)
4931 if (TARGET_SOFT_FLOAT)
4932 mips16_hard_float = 0;
4933 else
4934 mips16_hard_float = 1;
4935 target_flags |= MASK_SOFT_FLOAT;
4937 /* Don't run the scheduler before reload, since it tends to
4938 increase register pressure. */
4939 flag_schedule_insns = 0;
4941 /* Don't do hot/cold partitioning. The constant layout code expects
4942 the whole function to be in a single section. */
4943 flag_reorder_blocks_and_partition = 0;
4945 /* Silently disable -mexplicit-relocs since it doesn't apply
4946 to mips16 code. Even so, it would be overly pedantic to warn
4947 about "-mips16 -mexplicit-relocs", especially given that
4948 we use a %gprel() operator. */
4949 target_flags &= ~MASK_EXPLICIT_RELOCS;
4952 /* When using explicit relocs, we call dbr_schedule from within
4953 mips_reorg. */
4954 if (TARGET_EXPLICIT_RELOCS)
4956 mips_flag_delayed_branch = flag_delayed_branch;
4957 flag_delayed_branch = 0;
4960 #ifdef MIPS_TFMODE_FORMAT
4961 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4962 #endif
4964 /* Make sure that the user didn't turn off paired single support when
4965 MIPS-3D support is requested. */
4966 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4967 && !TARGET_PAIRED_SINGLE_FLOAT)
4968 error ("-mips3d requires -mpaired-single");
4970 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4971 if (TARGET_MIPS3D)
4972 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4974 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4975 and TARGET_HARD_FLOAT are both true. */
4976 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4977 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4979 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4980 enabled. */
4981 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4982 error ("-mips3d/-mpaired-single must be used with -mips64");
4984 if (TARGET_MIPS16 && TARGET_DSP)
4985 error ("-mips16 and -mdsp cannot be used together");
4987 mips_print_operand_punct['?'] = 1;
4988 mips_print_operand_punct['#'] = 1;
4989 mips_print_operand_punct['/'] = 1;
4990 mips_print_operand_punct['&'] = 1;
4991 mips_print_operand_punct['!'] = 1;
4992 mips_print_operand_punct['*'] = 1;
4993 mips_print_operand_punct['@'] = 1;
4994 mips_print_operand_punct['.'] = 1;
4995 mips_print_operand_punct['('] = 1;
4996 mips_print_operand_punct[')'] = 1;
4997 mips_print_operand_punct['['] = 1;
4998 mips_print_operand_punct[']'] = 1;
4999 mips_print_operand_punct['<'] = 1;
5000 mips_print_operand_punct['>'] = 1;
5001 mips_print_operand_punct['{'] = 1;
5002 mips_print_operand_punct['}'] = 1;
5003 mips_print_operand_punct['^'] = 1;
5004 mips_print_operand_punct['$'] = 1;
5005 mips_print_operand_punct['+'] = 1;
5006 mips_print_operand_punct['~'] = 1;
5008 /* Set up array to map GCC register number to debug register number.
5009 Ignore the special purpose register numbers. */
5011 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5012 mips_dbx_regno[i] = -1;
5014 start = GP_DBX_FIRST - GP_REG_FIRST;
5015 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5016 mips_dbx_regno[i] = i + start;
5018 start = FP_DBX_FIRST - FP_REG_FIRST;
5019 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5020 mips_dbx_regno[i] = i + start;
5022 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5023 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5025 /* Set up array giving whether a given register can hold a given mode. */
5027 for (mode = VOIDmode;
5028 mode != MAX_MACHINE_MODE;
5029 mode = (enum machine_mode) ((int)mode + 1))
5031 register int size = GET_MODE_SIZE (mode);
5032 register enum mode_class class = GET_MODE_CLASS (mode);
5034 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5036 register int temp;
5038 if (mode == CCV2mode)
5039 temp = (ISA_HAS_8CC
5040 && ST_REG_P (regno)
5041 && (regno - ST_REG_FIRST) % 2 == 0);
5043 else if (mode == CCV4mode)
5044 temp = (ISA_HAS_8CC
5045 && ST_REG_P (regno)
5046 && (regno - ST_REG_FIRST) % 4 == 0);
5048 else if (mode == CCmode)
5050 if (! ISA_HAS_8CC)
5051 temp = (regno == FPSW_REGNUM);
5052 else
5053 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5054 || FP_REG_P (regno));
5057 else if (GP_REG_P (regno))
5058 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5060 else if (FP_REG_P (regno))
5061 temp = ((regno % FP_INC) == 0)
5062 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5063 || class == MODE_VECTOR_FLOAT)
5064 && size <= UNITS_PER_FPVALUE)
5065 /* Allow integer modes that fit into a single
5066 register. We need to put integers into FPRs
5067 when using instructions like cvt and trunc.
5068 We can't allow sizes smaller than a word, since
5069 the FPU has no appropriate load/store
5070 instructions for those. */
5071 || (class == MODE_INT
5072 && size >= MIN_UNITS_PER_WORD
5073 && size <= UNITS_PER_FPREG)
5074 /* Allow TFmode for CCmode reloads. */
5075 || (ISA_HAS_8CC && mode == TFmode));
5077 else if (ACC_REG_P (regno))
5078 temp = (INTEGRAL_MODE_P (mode)
5079 && (size <= UNITS_PER_WORD
5080 || (ACC_HI_REG_P (regno)
5081 && size == 2 * UNITS_PER_WORD)));
5083 else if (ALL_COP_REG_P (regno))
5084 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5085 else
5086 temp = 0;
5088 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5092 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5093 initialized yet, so we can't use that here. */
5094 gpr_mode = TARGET_64BIT ? DImode : SImode;
5096 /* Provide default values for align_* for 64-bit targets. */
5097 if (TARGET_64BIT && !TARGET_MIPS16)
5099 if (align_loops == 0)
5100 align_loops = 8;
5101 if (align_jumps == 0)
5102 align_jumps = 8;
5103 if (align_functions == 0)
5104 align_functions = 8;
5107 /* Function to allocate machine-dependent function status. */
5108 init_machine_status = &mips_init_machine_status;
5110 if (ABI_HAS_64BIT_SYMBOLS)
5112 if (TARGET_EXPLICIT_RELOCS)
5114 mips_split_p[SYMBOL_64_HIGH] = true;
5115 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5116 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5118 mips_split_p[SYMBOL_64_MID] = true;
5119 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5120 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5122 mips_split_p[SYMBOL_64_LOW] = true;
5123 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5124 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5126 mips_split_p[SYMBOL_GENERAL] = true;
5127 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5130 else
5132 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5134 mips_split_p[SYMBOL_GENERAL] = true;
5135 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5136 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5140 if (TARGET_MIPS16)
5142 /* The high part is provided by a pseudo copy of $gp. */
5143 mips_split_p[SYMBOL_SMALL_DATA] = true;
5144 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5147 if (TARGET_EXPLICIT_RELOCS)
5149 /* Small data constants are kept whole until after reload,
5150 then lowered by mips_rewrite_small_data. */
5151 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5153 mips_split_p[SYMBOL_GOT_LOCAL] = true;
5154 if (TARGET_NEWABI)
5156 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5157 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
5159 else
5161 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5162 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
5165 if (TARGET_XGOT)
5167 /* The HIGH and LO_SUM are matched by special .md patterns. */
5168 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
5170 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
5171 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
5172 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
5174 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5175 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5176 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5178 else
5180 if (TARGET_NEWABI)
5181 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
5182 else
5183 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
5184 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5188 if (TARGET_NEWABI)
5190 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5191 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5192 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5195 /* Thread-local relocation operators. */
5196 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5197 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5198 mips_split_p[SYMBOL_DTPREL] = 1;
5199 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5200 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5201 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5202 mips_split_p[SYMBOL_TPREL] = 1;
5203 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5204 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5206 /* We don't have a thread pointer access instruction on MIPS16, or
5207 appropriate TLS relocations. */
5208 if (TARGET_MIPS16)
5209 targetm.have_tls = false;
5211 /* Default to working around R4000 errata only if the processor
5212 was selected explicitly. */
5213 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5214 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5215 target_flags |= MASK_FIX_R4000;
5217 /* Default to working around R4400 errata only if the processor
5218 was selected explicitly. */
5219 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5220 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5221 target_flags |= MASK_FIX_R4400;
5224 /* Implement CONDITIONAL_REGISTER_USAGE. */
5226 void
5227 mips_conditional_register_usage (void)
5229 if (!TARGET_DSP)
5231 int regno;
5233 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5234 fixed_regs[regno] = call_used_regs[regno] = 1;
5236 if (!TARGET_HARD_FLOAT)
5238 int regno;
5240 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5241 fixed_regs[regno] = call_used_regs[regno] = 1;
5242 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5243 fixed_regs[regno] = call_used_regs[regno] = 1;
5245 else if (! ISA_HAS_8CC)
5247 int regno;
5249 /* We only have a single condition code register. We
5250 implement this by hiding all the condition code registers,
5251 and generating RTL that refers directly to ST_REG_FIRST. */
5252 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5253 fixed_regs[regno] = call_used_regs[regno] = 1;
5255 /* In mips16 mode, we permit the $t temporary registers to be used
5256 for reload. We prohibit the unused $s registers, since they
5257 are call-saved, and saving them via a mips16 register would
5258 probably waste more time than just reloading the value. */
5259 if (TARGET_MIPS16)
5261 fixed_regs[18] = call_used_regs[18] = 1;
5262 fixed_regs[19] = call_used_regs[19] = 1;
5263 fixed_regs[20] = call_used_regs[20] = 1;
5264 fixed_regs[21] = call_used_regs[21] = 1;
5265 fixed_regs[22] = call_used_regs[22] = 1;
5266 fixed_regs[23] = call_used_regs[23] = 1;
5267 fixed_regs[26] = call_used_regs[26] = 1;
5268 fixed_regs[27] = call_used_regs[27] = 1;
5269 fixed_regs[30] = call_used_regs[30] = 1;
5271 /* fp20-23 are now caller saved. */
5272 if (mips_abi == ABI_64)
5274 int regno;
5275 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5276 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5278 /* Odd registers from fp21 to fp31 are now caller saved. */
5279 if (mips_abi == ABI_N32)
5281 int regno;
5282 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5283 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5287 /* Allocate a chunk of memory for per-function machine-dependent data. */
5288 static struct machine_function *
5289 mips_init_machine_status (void)
5291 return ((struct machine_function *)
5292 ggc_alloc_cleared (sizeof (struct machine_function)));
5295 /* On the mips16, we want to allocate $24 (T_REG) before other
5296 registers for instructions for which it is possible. This helps
5297 avoid shuffling registers around in order to set up for an xor,
5298 encouraging the compiler to use a cmp instead. */
5300 void
5301 mips_order_regs_for_local_alloc (void)
5303 register int i;
5305 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5306 reg_alloc_order[i] = i;
5308 if (TARGET_MIPS16)
5310 /* It really doesn't matter where we put register 0, since it is
5311 a fixed register anyhow. */
5312 reg_alloc_order[0] = 24;
5313 reg_alloc_order[24] = 0;
5318 /* The MIPS debug format wants all automatic variables and arguments
5319 to be in terms of the virtual frame pointer (stack pointer before
5320 any adjustment in the function), while the MIPS 3.0 linker wants
5321 the frame pointer to be the stack pointer after the initial
5322 adjustment. So, we do the adjustment here. The arg pointer (which
5323 is eliminated) points to the virtual frame pointer, while the frame
5324 pointer (which may be eliminated) points to the stack pointer after
5325 the initial adjustments. */
5327 HOST_WIDE_INT
5328 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5330 rtx offset2 = const0_rtx;
5331 rtx reg = eliminate_constant_term (addr, &offset2);
5333 if (offset == 0)
5334 offset = INTVAL (offset2);
5336 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5337 || reg == hard_frame_pointer_rtx)
5339 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5340 ? compute_frame_size (get_frame_size ())
5341 : cfun->machine->frame.total_size;
5343 /* MIPS16 frame is smaller */
5344 if (frame_pointer_needed && TARGET_MIPS16)
5345 frame_size -= cfun->machine->frame.args_size;
5347 offset = offset - frame_size;
5350 /* sdbout_parms does not want this to crash for unrecognized cases. */
5351 #if 0
5352 else if (reg != arg_pointer_rtx)
5353 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5354 addr);
5355 #endif
5357 return offset;
5360 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5362 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5363 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5364 'h' OP is HIGH, prints %hi(X),
5365 'd' output integer constant in decimal,
5366 'z' if the operand is 0, use $0 instead of normal operand.
5367 'D' print second part of double-word register or memory operand.
5368 'L' print low-order register of double-word register operand.
5369 'M' print high-order register of double-word register operand.
5370 'C' print part of opcode for a branch condition.
5371 'F' print part of opcode for a floating-point branch condition.
5372 'N' print part of opcode for a branch condition, inverted.
5373 'W' print part of opcode for a floating-point branch condition, inverted.
5374 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5375 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5376 't' like 'T', but with the EQ/NE cases reversed
5377 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5378 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5379 'R' print the reloc associated with LO_SUM
5380 'q' print DSP accumulator registers
5382 The punctuation characters are:
5384 '(' Turn on .set noreorder
5385 ')' Turn on .set reorder
5386 '[' Turn on .set noat
5387 ']' Turn on .set at
5388 '<' Turn on .set nomacro
5389 '>' Turn on .set macro
5390 '{' Turn on .set volatile (not GAS)
5391 '}' Turn on .set novolatile (not GAS)
5392 '&' Turn on .set noreorder if filling delay slots
5393 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5394 '!' Turn on .set nomacro if filling delay slots
5395 '#' Print nop if in a .set noreorder section.
5396 '/' Like '#', but does nothing within a delayed branch sequence
5397 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5398 '@' Print the name of the assembler temporary register (at or $1).
5399 '.' Print the name of the register with a hard-wired zero (zero or $0).
5400 '^' Print the name of the pic call-through register (t9 or $25).
5401 '$' Print the name of the stack pointer register (sp or $29).
5402 '+' Print the name of the gp register (usually gp or $28).
5403 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5405 void
5406 print_operand (FILE *file, rtx op, int letter)
5408 register enum rtx_code code;
5410 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5412 switch (letter)
5414 case '?':
5415 if (mips_branch_likely)
5416 putc ('l', file);
5417 break;
5419 case '@':
5420 fputs (reg_names [GP_REG_FIRST + 1], file);
5421 break;
5423 case '^':
5424 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5425 break;
5427 case '.':
5428 fputs (reg_names [GP_REG_FIRST + 0], file);
5429 break;
5431 case '$':
5432 fputs (reg_names[STACK_POINTER_REGNUM], file);
5433 break;
5435 case '+':
5436 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5437 break;
5439 case '&':
5440 if (final_sequence != 0 && set_noreorder++ == 0)
5441 fputs (".set\tnoreorder\n\t", file);
5442 break;
5444 case '*':
5445 if (final_sequence != 0)
5447 if (set_noreorder++ == 0)
5448 fputs (".set\tnoreorder\n\t", file);
5450 if (set_nomacro++ == 0)
5451 fputs (".set\tnomacro\n\t", file);
5453 break;
5455 case '!':
5456 if (final_sequence != 0 && set_nomacro++ == 0)
5457 fputs ("\n\t.set\tnomacro", file);
5458 break;
5460 case '#':
5461 if (set_noreorder != 0)
5462 fputs ("\n\tnop", file);
5463 break;
5465 case '/':
5466 /* Print an extra newline so that the delayed insn is separated
5467 from the following ones. This looks neater and is consistent
5468 with non-nop delayed sequences. */
5469 if (set_noreorder != 0 && final_sequence == 0)
5470 fputs ("\n\tnop\n", file);
5471 break;
5473 case '(':
5474 if (set_noreorder++ == 0)
5475 fputs (".set\tnoreorder\n\t", file);
5476 break;
5478 case ')':
5479 if (set_noreorder == 0)
5480 error ("internal error: %%) found without a %%( in assembler pattern");
5482 else if (--set_noreorder == 0)
5483 fputs ("\n\t.set\treorder", file);
5485 break;
5487 case '[':
5488 if (set_noat++ == 0)
5489 fputs (".set\tnoat\n\t", file);
5490 break;
5492 case ']':
5493 if (set_noat == 0)
5494 error ("internal error: %%] found without a %%[ in assembler pattern");
5495 else if (--set_noat == 0)
5496 fputs ("\n\t.set\tat", file);
5498 break;
5500 case '<':
5501 if (set_nomacro++ == 0)
5502 fputs (".set\tnomacro\n\t", file);
5503 break;
5505 case '>':
5506 if (set_nomacro == 0)
5507 error ("internal error: %%> found without a %%< in assembler pattern");
5508 else if (--set_nomacro == 0)
5509 fputs ("\n\t.set\tmacro", file);
5511 break;
5513 case '{':
5514 if (set_volatile++ == 0)
5515 fputs ("#.set\tvolatile\n\t", file);
5516 break;
5518 case '}':
5519 if (set_volatile == 0)
5520 error ("internal error: %%} found without a %%{ in assembler pattern");
5521 else if (--set_volatile == 0)
5522 fputs ("\n\t#.set\tnovolatile", file);
5524 break;
5526 case '~':
5528 if (align_labels_log > 0)
5529 ASM_OUTPUT_ALIGN (file, align_labels_log);
5531 break;
5533 default:
5534 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5535 break;
5538 return;
5541 if (! op)
5543 error ("PRINT_OPERAND null pointer");
5544 return;
5547 code = GET_CODE (op);
5549 if (letter == 'C')
5550 switch (code)
5552 case EQ: fputs ("eq", file); break;
5553 case NE: fputs ("ne", file); break;
5554 case GT: fputs ("gt", file); break;
5555 case GE: fputs ("ge", file); break;
5556 case LT: fputs ("lt", file); break;
5557 case LE: fputs ("le", file); break;
5558 case GTU: fputs ("gtu", file); break;
5559 case GEU: fputs ("geu", file); break;
5560 case LTU: fputs ("ltu", file); break;
5561 case LEU: fputs ("leu", file); break;
5562 default:
5563 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5566 else if (letter == 'N')
5567 switch (code)
5569 case EQ: fputs ("ne", file); break;
5570 case NE: fputs ("eq", file); break;
5571 case GT: fputs ("le", file); break;
5572 case GE: fputs ("lt", file); break;
5573 case LT: fputs ("ge", file); break;
5574 case LE: fputs ("gt", file); break;
5575 case GTU: fputs ("leu", file); break;
5576 case GEU: fputs ("ltu", file); break;
5577 case LTU: fputs ("geu", file); break;
5578 case LEU: fputs ("gtu", file); break;
5579 default:
5580 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5583 else if (letter == 'F')
5584 switch (code)
5586 case EQ: fputs ("c1f", file); break;
5587 case NE: fputs ("c1t", file); break;
5588 default:
5589 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5592 else if (letter == 'W')
5593 switch (code)
5595 case EQ: fputs ("c1t", file); break;
5596 case NE: fputs ("c1f", file); break;
5597 default:
5598 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5601 else if (letter == 'h')
5603 if (GET_CODE (op) == HIGH)
5604 op = XEXP (op, 0);
5606 print_operand_reloc (file, op, mips_hi_relocs);
5609 else if (letter == 'R')
5610 print_operand_reloc (file, op, mips_lo_relocs);
5612 else if (letter == 'Y')
5614 if (GET_CODE (op) == CONST_INT
5615 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5616 < ARRAY_SIZE (mips_fp_conditions)))
5617 fputs (mips_fp_conditions[INTVAL (op)], file);
5618 else
5619 output_operand_lossage ("invalid %%Y value");
5622 else if (letter == 'Z')
5624 if (ISA_HAS_8CC)
5626 print_operand (file, op, 0);
5627 fputc (',', file);
5631 else if (letter == 'q')
5633 int regnum;
5635 if (code != REG)
5636 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5638 regnum = REGNO (op);
5639 if (MD_REG_P (regnum))
5640 fprintf (file, "$ac0");
5641 else if (DSP_ACC_REG_P (regnum))
5642 fprintf (file, "$ac%c", reg_names[regnum][3]);
5643 else
5644 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5647 else if (code == REG || code == SUBREG)
5649 register int regnum;
5651 if (code == REG)
5652 regnum = REGNO (op);
5653 else
5654 regnum = true_regnum (op);
5656 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5657 || (letter == 'L' && WORDS_BIG_ENDIAN)
5658 || letter == 'D')
5659 regnum++;
5661 fprintf (file, "%s", reg_names[regnum]);
5664 else if (code == MEM)
5666 if (letter == 'D')
5667 output_address (plus_constant (XEXP (op, 0), 4));
5668 else
5669 output_address (XEXP (op, 0));
5672 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5673 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5675 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5676 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5678 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5679 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5681 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5682 fputs (reg_names[GP_REG_FIRST], file);
5684 else if (letter == 'd' || letter == 'x' || letter == 'X')
5685 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5687 else if (letter == 'T' || letter == 't')
5689 int truth = (code == NE) == (letter == 'T');
5690 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5693 else if (CONST_GP_P (op))
5694 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5696 else
5697 output_addr_const (file, op);
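/* An illustrative sketch, not part of the original source: on a
   little-endian 32-bit target with a DImode operand in registers
   $4/$5, "%L0" would print the low-order register ($4), "%M0" the
   high-order register ($5), and "%D0" the second word ($5).  "%z0"
   prints the zero register when operand 0 is the constant zero, and
   "%d0"/"%x0"/"%X0" print a CONST_INT in decimal, low-16-bit hex, or
   full hex.  The exact register names come from reg_names[].  */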
5701 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5702 RELOCS is the array of relocations to use. */
5704 static void
5705 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5707 enum mips_symbol_type symbol_type;
5708 const char *p;
5709 rtx base;
5710 HOST_WIDE_INT offset;
5712 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5713 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5715 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5716 mips_split_const (op, &base, &offset);
5717 if (UNSPEC_ADDRESS_P (base))
5718 op = plus_constant (UNSPEC_ADDRESS (base), offset);
5720 fputs (relocs[symbol_type], file);
5721 output_addr_const (file, op);
5722 for (p = relocs[symbol_type]; *p != 0; p++)
5723 if (*p == '(')
5724 fputc (')', file);
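/* An illustrative sketch, not part of the original source: assuming
   the reloc table entry for the symbol's type is the string "%lo(",
   a call such as

     print_operand_reloc (file, op, mips_lo_relocs);

   for the address "foo + 4" prints the reloc prefix, then the
   constant, then one closing paren for each '(' in the prefix,
   giving roughly "%lo(foo+4)".  */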
5727 /* Output address operand X to FILE. */
5729 void
5730 print_operand_address (FILE *file, rtx x)
5732 struct mips_address_info addr;
5734 if (mips_classify_address (&addr, x, word_mode, true))
5735 switch (addr.type)
5737 case ADDRESS_REG:
5738 print_operand (file, addr.offset, 0);
5739 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5740 return;
5742 case ADDRESS_LO_SUM:
5743 print_operand (file, addr.offset, 'R');
5744 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5745 return;
5747 case ADDRESS_CONST_INT:
5748 output_addr_const (file, x);
5749 fprintf (file, "(%s)", reg_names[0]);
5750 return;
5752 case ADDRESS_SYMBOLIC:
5753 output_addr_const (file, x);
5754 return;
5756 gcc_unreachable ();
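/* An illustrative sketch, not part of the original source, of what
   the cases above produce (register names depend on reg_names[],
   offsets and registers are made up):

     ADDRESS_REG        ->  16($sp)
     ADDRESS_LO_SUM     ->  %lo(foo)($2)
     ADDRESS_CONST_INT  ->  1024($0)
     ADDRESS_SYMBOLIC   ->  foo

   The %lo prefix shown for ADDRESS_LO_SUM assumes the usual
   mips_lo_relocs entry for that symbol type.  */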
5759 /* When using assembler macros, keep track of all of the small-data externs
5760 so that mips_file_end can emit the appropriate declarations for them.
5762 In most cases it would be safe (though pointless) to emit .externs
5763 for other symbols too. One exception is when an object is within
5764 the -G limit but declared by the user to be in a section other
5765 than .sbss or .sdata. */
5767 int
5768 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5770 register struct extern_list *p;
5772 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5774 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5775 p->next = extern_head;
5776 p->name = name;
5777 p->size = int_size_in_bytes (TREE_TYPE (decl));
5778 extern_head = p;
5781 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5783 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5784 p->next = extern_head;
5785 p->name = name;
5786 p->size = -1;
5787 extern_head = p;
5790 return 0;
5793 #if TARGET_IRIX
5794 static void
5795 irix_output_external_libcall (rtx fun)
5797 register struct extern_list *p;
5799 if (mips_abi == ABI_32)
5801 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5802 p->next = extern_head;
5803 p->name = XSTR (fun, 0);
5804 p->size = -1;
5805 extern_head = p;
5808 #endif
5810 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5811 put out a MIPS ECOFF file and a stab. */
5813 void
5814 mips_output_filename (FILE *stream, const char *name)
5817 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5818 directives. */
5819 if (write_symbols == DWARF2_DEBUG)
5820 return;
5821 else if (mips_output_filename_first_time)
5823 mips_output_filename_first_time = 0;
5824 num_source_filenames += 1;
5825 current_function_file = name;
5826 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5827 output_quoted_string (stream, name);
5828 putc ('\n', stream);
5831 /* If we are emitting stabs, let dbxout.c handle this (except for
5832 the mips_output_filename_first_time case). */
5833 else if (write_symbols == DBX_DEBUG)
5834 return;
5836 else if (name != current_function_file
5837 && strcmp (name, current_function_file) != 0)
5839 num_source_filenames += 1;
5840 current_function_file = name;
5841 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5842 output_quoted_string (stream, name);
5843 putc ('\n', stream);
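/* An illustrative sketch, not part of the original source: for a
   hypothetical source file "foo.c", the non-DWARF/non-stabs paths
   above emit roughly

	.file	1 "foo.c"

   with the number taken from num_source_filenames.  */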
5847 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5848 that should be written before the opening quote, such as "\t.ascii\t"
5849 for real string data or "\t# " for a comment. */
5851 void
5852 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5853 const char *prefix)
5855 size_t i;
5856 int cur_pos = 17;
5857 register const unsigned char *string =
5858 (const unsigned char *)string_param;
5860 fprintf (stream, "%s\"", prefix);
5861 for (i = 0; i < len; i++)
5863 register int c = string[i];
5865 if (ISPRINT (c))
5867 if (c == '\\' || c == '\"')
5869 putc ('\\', stream);
5870 cur_pos++;
5872 putc (c, stream);
5873 cur_pos++;
5875 else
5877 fprintf (stream, "\\%03o", c);
5878 cur_pos += 4;
5881 if (cur_pos > 72 && i+1 < len)
5883 cur_pos = 17;
5884 fprintf (stream, "\"\n%s\"", prefix);
5887 fprintf (stream, "\"\n");
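/* An illustrative sketch, not part of the original source: a call
   such as

     mips_output_ascii (stream, "a\"b\n", 4, "\t.ascii\t");

   would emit roughly

	.ascii	"a\"b\012"

   with printable characters copied (backslash and quote escaped),
   other bytes written as three-digit octal escapes, and the string
   re-opened on a new line once the column count passes 72.  */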
5890 /* Implement TARGET_ASM_FILE_START. */
5892 static void
5893 mips_file_start (void)
5895 default_file_start ();
5897 if (!TARGET_IRIX)
5899 /* Generate a special section to describe the ABI switches used to
5900 produce the resultant binary. This used to be done by the assembler
5901 setting bits in the ELF header's flags field, but we have run out of
5902 bits. GDB needs this information in order to be able to correctly
5903 debug these binaries. See the function mips_gdbarch_init() in
5904 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5905 causes unnecessary IRIX 6 ld warnings. */
5906 const char * abi_string = NULL;
5908 switch (mips_abi)
5910 case ABI_32: abi_string = "abi32"; break;
5911 case ABI_N32: abi_string = "abiN32"; break;
5912 case ABI_64: abi_string = "abi64"; break;
5913 case ABI_O64: abi_string = "abiO64"; break;
5914 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5915 default:
5916 gcc_unreachable ();
5918 /* Note - we use fprintf directly rather than calling switch_to_section
5919 because in this way we can avoid creating an allocated section. We
5920 do not want this section to take up any space in the running
5921 executable. */
5922 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5924 /* There is no ELF header flag to distinguish long32 forms of the
5925 EABI from long64 forms. Emit a special section to help tools
5926 such as GDB. Do the same for o64, which is sometimes used with
5927 -mlong64. */
5928 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
5929 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5930 TARGET_LONG64 ? 64 : 32);
5932 /* Restore the default section. */
5933 fprintf (asm_out_file, "\t.previous\n");
5936 /* Generate the pseudo ops that System V.4 wants. */
5937 if (TARGET_ABICALLS)
5938 fprintf (asm_out_file, "\t.abicalls\n");
5940 if (TARGET_MIPS16)
5941 fprintf (asm_out_file, "\t.set\tmips16\n");
5943 if (flag_verbose_asm)
5944 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5945 ASM_COMMENT_START,
5946 mips_section_threshold, mips_arch_info->name, mips_isa);
5949 #ifdef BSS_SECTION_ASM_OP
5950 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5951 in the use of sbss. */
5953 void
5954 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5955 unsigned HOST_WIDE_INT size, int align)
5957 extern tree last_assemble_variable_decl;
5959 if (mips_in_small_data_p (decl))
5960 switch_to_section (get_named_section (NULL, ".sbss", 0));
5961 else
5962 switch_to_section (bss_section);
5963 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5964 last_assemble_variable_decl = decl;
5965 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5966 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5968 #endif
5970 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5971 .externs for any small-data variables that turned out to be external. */
5973 static void
5974 mips_file_end (void)
5976 tree name_tree;
5977 struct extern_list *p;
5979 if (extern_head)
5981 fputs ("\n", asm_out_file);
5983 for (p = extern_head; p != 0; p = p->next)
5985 name_tree = get_identifier (p->name);
5987 /* Positively ensure only one .extern for any given symbol. */
5988 if (!TREE_ASM_WRITTEN (name_tree)
5989 && TREE_SYMBOL_REFERENCED (name_tree))
5991 TREE_ASM_WRITTEN (name_tree) = 1;
5992 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5993 `.global name .text' directive for every used but
5994 undefined function. If we don't, the linker may perform
5995 an optimization (skipping over the insns that set $gp)
5996 when it is unsafe. */
5997 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5999 fputs ("\t.globl ", asm_out_file);
6000 assemble_name (asm_out_file, p->name);
6001 fputs (" .text\n", asm_out_file);
6003 else
6005 fputs ("\t.extern\t", asm_out_file);
6006 assemble_name (asm_out_file, p->name);
6007 fprintf (asm_out_file, ", %d\n", p->size);
6014 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6015 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6017 void
6018 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6019 unsigned HOST_WIDE_INT size,
6020 unsigned int align)
6022 /* If the target wants uninitialized const declarations in
6023 .rdata then don't put them in .comm. */
6024 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6025 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6026 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6028 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6029 targetm.asm_out.globalize_label (stream, name);
6031 switch_to_section (readonly_data_section);
6032 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6033 mips_declare_object (stream, name, "",
6034 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6035 size);
6037 else
6038 mips_declare_common_object (stream, name, "\n\t.comm\t",
6039 size, align, true);
6042 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6043 NAME is the name of the object and ALIGN is the required alignment
6044 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6045 alignment argument. */
6047 void
6048 mips_declare_common_object (FILE *stream, const char *name,
6049 const char *init_string,
6050 unsigned HOST_WIDE_INT size,
6051 unsigned int align, bool takes_alignment_p)
6053 if (!takes_alignment_p)
6055 size += (align / BITS_PER_UNIT) - 1;
6056 size -= size % (align / BITS_PER_UNIT);
6057 mips_declare_object (stream, name, init_string,
6058 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6060 else
6061 mips_declare_object (stream, name, init_string,
6062 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6063 size, align / BITS_PER_UNIT);
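/* An illustrative sketch, not part of the original source: for a
   hypothetical 16-byte object "foo" with 8-byte alignment, the two
   paths above would emit roughly

	.comm	foo,16,8	# takes_alignment_p == true
	.comm	foo,16		# takes_alignment_p == false; size already
				# rounded up to a multiple of the alignment

   where the ".comm" prefix is whatever INIT_STRING the caller passes
   (mips_output_aligned_decl_common uses "\n\t.comm\t").  */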
6066 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6067 macros, mark the symbol as written so that mips_file_end won't emit an
6068 .extern for it. STREAM is the output file, NAME is the name of the
6069 symbol, INIT_STRING is the string that should be written before the
6070 symbol and FINAL_STRING is the string that should be written after it.
6071 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6073 void
6074 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6075 const char *final_string, ...)
6077 va_list ap;
6079 fputs (init_string, stream);
6080 assemble_name (stream, name);
6081 va_start (ap, final_string);
6082 vfprintf (stream, final_string, ap);
6083 va_end (ap);
6085 if (!TARGET_EXPLICIT_RELOCS)
6087 tree name_tree = get_identifier (name);
6088 TREE_ASM_WRITTEN (name_tree) = 1;
6092 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6093 extern int size_directive_output;
6095 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6096 definitions except that it uses mips_declare_object() to emit the label. */
6098 void
6099 mips_declare_object_name (FILE *stream, const char *name,
6100 tree decl ATTRIBUTE_UNUSED)
6102 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6103 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6104 #endif
6106 size_directive_output = 0;
6107 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6109 HOST_WIDE_INT size;
6111 size_directive_output = 1;
6112 size = int_size_in_bytes (TREE_TYPE (decl));
6113 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6116 mips_declare_object (stream, name, "", ":\n");
6119 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6121 void
6122 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6124 const char *name;
6126 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6127 if (!flag_inhibit_size_directive
6128 && DECL_SIZE (decl) != 0
6129 && !at_end && top_level
6130 && DECL_INITIAL (decl) == error_mark_node
6131 && !size_directive_output)
6133 HOST_WIDE_INT size;
6135 size_directive_output = 1;
6136 size = int_size_in_bytes (TREE_TYPE (decl));
6137 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6140 #endif
6142 /* Return true if X is a small data address that can be rewritten
6143 as a LO_SUM. */
6145 static bool
6146 mips_rewrite_small_data_p (rtx x)
6148 enum mips_symbol_type symbol_type;
6150 return (TARGET_EXPLICIT_RELOCS
6151 && mips_symbolic_constant_p (x, &symbol_type)
6152 && symbol_type == SYMBOL_SMALL_DATA);
6156 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6158 static int
6159 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6161 if (GET_CODE (*loc) == LO_SUM)
6162 return -1;
6164 return mips_rewrite_small_data_p (*loc);
6167 /* Return true if OP refers to small data symbols directly, not through
6168 a LO_SUM. */
6170 bool
6171 mips_small_data_pattern_p (rtx op)
6173 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6176 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6178 static int
6179 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6181 if (mips_rewrite_small_data_p (*loc))
6182 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6184 if (GET_CODE (*loc) == LO_SUM)
6185 return -1;
6187 return 0;
6190 /* If possible, rewrite OP so that it refers to small data using
6191 explicit relocations. */
6193 rtx
6194 mips_rewrite_small_data (rtx op)
6196 op = copy_insn (op);
6197 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6198 return op;
6201 /* Return true if the current function has an insn that implicitly
6202 refers to $gp. */
6204 static bool
6205 mips_function_has_gp_insn (void)
6207 /* Don't bother rechecking if we found one last time. */
6208 if (!cfun->machine->has_gp_insn_p)
6210 rtx insn;
6212 push_topmost_sequence ();
6213 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6214 if (INSN_P (insn)
6215 && GET_CODE (PATTERN (insn)) != USE
6216 && GET_CODE (PATTERN (insn)) != CLOBBER
6217 && (get_attr_got (insn) != GOT_UNSET
6218 || small_data_pattern (PATTERN (insn), VOIDmode)))
6219 break;
6220 pop_topmost_sequence ();
6222 cfun->machine->has_gp_insn_p = (insn != 0);
6224 return cfun->machine->has_gp_insn_p;
6228 /* Return the register that should be used as the global pointer
6229 within this function. Return 0 if the function doesn't need
6230 a global pointer. */
6232 static unsigned int
6233 mips_global_pointer (void)
6235 unsigned int regno;
6237 /* $gp is always available in non-abicalls code. */
6238 if (!TARGET_ABICALLS)
6239 return GLOBAL_POINTER_REGNUM;
6241 /* We must always provide $gp when it is used implicitly. */
6242 if (!TARGET_EXPLICIT_RELOCS)
6243 return GLOBAL_POINTER_REGNUM;
6245 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6246 a valid gp. */
6247 if (current_function_profile)
6248 return GLOBAL_POINTER_REGNUM;
6250 /* If the function has a nonlocal goto, $gp must hold the correct
6251 global pointer for the target function. */
6252 if (current_function_has_nonlocal_goto)
6253 return GLOBAL_POINTER_REGNUM;
6255 /* If the gp is never referenced, there's no need to initialize it.
6256 Note that reload can sometimes introduce constant pool references
6257 into a function that otherwise didn't need them. For example,
6258 suppose we have an instruction like:
6260 (set (reg:DF R1) (float:DF (reg:SI R2)))
6262 If R2 turns out to be constant such as 1, the instruction may have a
6263 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6264 using this constant if R2 doesn't get allocated to a register.
6266 In cases like these, reload will have added the constant to the pool
6267 but no instruction will yet refer to it. */
6268 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6269 && !current_function_uses_const_pool
6270 && !mips_function_has_gp_insn ())
6271 return 0;
6273 /* We need a global pointer, but perhaps we can use a call-clobbered
6274 register instead of $gp. */
6275 if (TARGET_NEWABI && current_function_is_leaf)
6276 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6277 if (!regs_ever_live[regno]
6278 && call_used_regs[regno]
6279 && !fixed_regs[regno]
6280 && regno != PIC_FUNCTION_ADDR_REGNUM)
6281 return regno;
6283 return GLOBAL_POINTER_REGNUM;
6287 /* Return true if the current function must save REGNO. */
6289 static bool
6290 mips_save_reg_p (unsigned int regno)
6292 /* We only need to save $gp for NewABI PIC. */
6293 if (regno == GLOBAL_POINTER_REGNUM)
6294 return (TARGET_ABICALLS && TARGET_NEWABI
6295 && cfun->machine->global_pointer == regno);
6297 /* Check call-saved registers. */
6298 if (regs_ever_live[regno] && !call_used_regs[regno])
6299 return true;
6301 /* We need to save the old frame pointer before setting up a new one. */
6302 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6303 return true;
6305 /* We need to save the incoming return address if it is ever clobbered
6306 within the function. */
6307 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
6308 return true;
6310 if (TARGET_MIPS16)
6312 tree return_type;
6314 return_type = DECL_RESULT (current_function_decl);
6316 /* $18 is a special case in mips16 code. It may be used to call
6317 a function which returns a floating point value, but it is
6318 marked in call_used_regs. */
6319 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6320 return true;
6322 /* $31 is also a special case. It will be used to copy a return
6323 value into the floating point registers if the return value is
6324 floating point. */
6325 if (regno == GP_REG_FIRST + 31
6326 && mips16_hard_float
6327 && !aggregate_value_p (return_type, current_function_decl)
6328 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6329 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6330 return true;
6333 return false;
6337 /* Return the bytes needed to compute the frame pointer from the current
6338 stack pointer. SIZE is the size (in bytes) of the local variables.
6340 MIPS stack frames look like:
6342 Before call After call
6343 +-----------------------+ +-----------------------+
6344 high | | | |
6345 mem. | | | |
6346 | caller's temps. | | caller's temps. |
6347 | | | |
6348 +-----------------------+ +-----------------------+
6349 | | | |
6350 | arguments on stack. | | arguments on stack. |
6351 | | | |
6352 +-----------------------+ +-----------------------+
6353 | 4 words to save | | 4 words to save |
6354 | arguments passed | | arguments passed |
6355 | in registers, even | | in registers, even |
6356 SP->| if not passed. | VFP->| if not passed. |
6357 +-----------------------+ +-----------------------+
6359 | fp register save |
6361 +-----------------------+
6363 | gp register save |
6365 +-----------------------+
6367 | local variables |
6369 +-----------------------+
6371 | alloca allocations |
6373 +-----------------------+
6375 | GP save for V.4 abi |
6377 +-----------------------+
6379 | arguments on stack |
6381 +-----------------------+
6382 | 4 words to save |
6383 | arguments passed |
6384 | in registers, even |
6385 low SP->| if not passed. |
6386 memory +-----------------------+
6390 HOST_WIDE_INT
6391 compute_frame_size (HOST_WIDE_INT size)
6393 unsigned int regno;
6394 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6395 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6396 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6397 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6398 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6399 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6400 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6401 unsigned int mask; /* mask of saved gp registers */
6402 unsigned int fmask; /* mask of saved fp registers */
6404 cfun->machine->global_pointer = mips_global_pointer ();
6406 gp_reg_size = 0;
6407 fp_reg_size = 0;
6408 mask = 0;
6409 fmask = 0;
6410 var_size = MIPS_STACK_ALIGN (size);
6411 args_size = current_function_outgoing_args_size;
6412 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6414 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6415 functions. If the function has local variables, we're committed
6416 to allocating it anyway. Otherwise reclaim it here. */
6417 if (var_size == 0 && current_function_is_leaf)
6418 cprestore_size = args_size = 0;
6420 /* The MIPS 3.0 linker does not like functions that dynamically
6421 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6422 looks like we are trying to create a second frame pointer to the
6423 function, so allocate some stack space to make it happy. */
6425 if (args_size == 0 && current_function_calls_alloca)
6426 args_size = 4 * UNITS_PER_WORD;
6428 total_size = var_size + args_size + cprestore_size;
6430 /* Calculate space needed for gp registers. */
6431 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6432 if (mips_save_reg_p (regno))
6434 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6435 mask |= 1 << (regno - GP_REG_FIRST);
6438 /* We need to restore these for the handler. */
6439 if (current_function_calls_eh_return)
6441 unsigned int i;
6442 for (i = 0; ; ++i)
6444 regno = EH_RETURN_DATA_REGNO (i);
6445 if (regno == INVALID_REGNUM)
6446 break;
6447 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6448 mask |= 1 << (regno - GP_REG_FIRST);
6452 /* This loop must iterate over the same space as its companion in
6453 mips_for_each_saved_reg. */
6454 for (regno = (FP_REG_LAST - FP_INC + 1);
6455 regno >= FP_REG_FIRST;
6456 regno -= FP_INC)
6458 if (mips_save_reg_p (regno))
6460 fp_reg_size += FP_INC * UNITS_PER_FPREG;
6461 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6465 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6466 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6468 /* Add in the space required for saving incoming register arguments. */
6469 total_size += current_function_pretend_args_size;
6470 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6472 /* Save other computed information. */
6473 cfun->machine->frame.total_size = total_size;
6474 cfun->machine->frame.var_size = var_size;
6475 cfun->machine->frame.args_size = args_size;
6476 cfun->machine->frame.cprestore_size = cprestore_size;
6477 cfun->machine->frame.gp_reg_size = gp_reg_size;
6478 cfun->machine->frame.fp_reg_size = fp_reg_size;
6479 cfun->machine->frame.mask = mask;
6480 cfun->machine->frame.fmask = fmask;
6481 cfun->machine->frame.initialized = reload_completed;
6482 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6483 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
6485 if (mask)
6487 HOST_WIDE_INT offset;
6489 offset = (args_size + cprestore_size + var_size
6490 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6491 cfun->machine->frame.gp_sp_offset = offset;
6492 cfun->machine->frame.gp_save_offset = offset - total_size;
6494 else
6496 cfun->machine->frame.gp_sp_offset = 0;
6497 cfun->machine->frame.gp_save_offset = 0;
6500 if (fmask)
6502 HOST_WIDE_INT offset;
6504 offset = (args_size + cprestore_size + var_size
6505 + gp_reg_rounded + fp_reg_size
6506 - FP_INC * UNITS_PER_FPREG);
6507 cfun->machine->frame.fp_sp_offset = offset;
6508 cfun->machine->frame.fp_save_offset = offset - total_size;
6510 else
6512 cfun->machine->frame.fp_sp_offset = 0;
6513 cfun->machine->frame.fp_save_offset = 0;
6516 /* Ok, we're done. */
6517 return total_size;
6520 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6521 pointer or argument pointer. TO is either the stack pointer or
6522 hard frame pointer. */
6524 HOST_WIDE_INT
6525 mips_initial_elimination_offset (int from, int to)
6527 HOST_WIDE_INT offset;
6529 compute_frame_size (get_frame_size ());
6531 /* Set OFFSET to the offset from the stack pointer. */
6532 switch (from)
6534 case FRAME_POINTER_REGNUM:
6535 offset = 0;
6536 break;
6538 case ARG_POINTER_REGNUM:
6539 offset = (cfun->machine->frame.total_size
6540 - current_function_pretend_args_size);
6541 break;
6543 default:
6544 gcc_unreachable ();
6547 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6548 offset -= cfun->machine->frame.args_size;
6550 return offset;
6553 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6554 back to a previous frame. */
6555 rtx
6556 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6558 if (count != 0)
6559 return const0_rtx;
6561 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6564 /* Use FN to save or restore register REGNO. MODE is the register's
6565 mode and OFFSET is the offset of its save slot from the current
6566 stack pointer. */
6568 static void
6569 mips_save_restore_reg (enum machine_mode mode, int regno,
6570 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6572 rtx mem;
6574 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6576 fn (gen_rtx_REG (mode, regno), mem);
6580 /* Call FN for each register that is saved by the current function.
6581 SP_OFFSET is the offset of the current stack pointer from the start
6582 of the frame. */
6584 static void
6585 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6587 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6589 enum machine_mode fpr_mode;
6590 HOST_WIDE_INT offset;
6591 int regno;
6593 /* Save registers starting from high to low. The debuggers prefer that
6594 at least the return register be stored at func+4; it also lets us
6595 avoid a nop in the epilogue if at least one register is reloaded in
6596 addition to the return address. */
6597 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6598 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6599 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6601 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6602 offset -= GET_MODE_SIZE (gpr_mode);
6605 /* This loop must iterate over the same space as its companion in
6606 compute_frame_size. */
6607 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6608 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6609 for (regno = (FP_REG_LAST - FP_INC + 1);
6610 regno >= FP_REG_FIRST;
6611 regno -= FP_INC)
6612 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6614 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6615 offset -= GET_MODE_SIZE (fpr_mode);
6617 #undef BITSET_P
6620 /* If we're generating n32 or n64 abicalls, and the current function
6621 does not use $28 as its global pointer, emit a cplocal directive.
6622 Use pic_offset_table_rtx as the argument to the directive. */
6624 static void
6625 mips_output_cplocal (void)
6627 if (!TARGET_EXPLICIT_RELOCS
6628 && cfun->machine->global_pointer > 0
6629 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6630 output_asm_insn (".cplocal %+", 0);
6633 /* Return the style of GP load sequence that is being used for the
6634 current function. */
6636 enum mips_loadgp_style
6637 mips_current_loadgp_style (void)
6639 if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
6640 return LOADGP_NONE;
6642 if (TARGET_ABSOLUTE_ABICALLS)
6643 return LOADGP_ABSOLUTE;
6645 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6648 /* The __gnu_local_gp symbol. */
6650 static GTY(()) rtx mips_gnu_local_gp;
6652 /* If we're generating n32 or n64 abicalls, emit instructions
6653 to set up the global pointer. */
6655 static void
6656 mips_emit_loadgp (void)
6658 rtx addr, offset, incoming_address;
6660 switch (mips_current_loadgp_style ())
6662 case LOADGP_ABSOLUTE:
6663 if (mips_gnu_local_gp == NULL)
6665 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6666 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6668 emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
6669 break;
6671 case LOADGP_NEWABI:
6672 addr = XEXP (DECL_RTL (current_function_decl), 0);
6673 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6674 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6675 emit_insn (gen_loadgp (offset, incoming_address));
6676 if (!TARGET_EXPLICIT_RELOCS)
6677 emit_insn (gen_loadgp_blockage ());
6678 break;
6680 default:
6681 break;
6685 /* Set up the stack and frame (if desired) for the function. */
6687 static void
6688 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6690 const char *fnname;
6691 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6693 #ifdef SDB_DEBUGGING_INFO
6694 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6695 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6696 #endif
6698 /* In mips16 mode, we may need to generate a 32-bit stub to handle
6699 floating-point arguments. The linker will arrange for any 32-bit
6700 functions to call this stub, which will then jump to the 16-bit
6701 function proper. */
6702 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6703 && current_function_args_info.fp_code != 0)
6704 build_mips16_function_stub (file);
6706 if (!FUNCTION_NAME_ALREADY_DECLARED)
6708 /* Get the function name the same way that toplev.c does before calling
6709 assemble_start_function. This is needed so that the name used here
6710 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6711 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6713 if (!flag_inhibit_size_directive)
6715 fputs ("\t.ent\t", file);
6716 assemble_name (file, fnname);
6717 fputs ("\n", file);
6720 assemble_name (file, fnname);
6721 fputs (":\n", file);
6724 /* Stop mips_file_end from treating this function as external. */
6725 if (TARGET_IRIX && mips_abi == ABI_32)
6726 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6728 if (!flag_inhibit_size_directive)
6730 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6731 fprintf (file,
6732 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6733 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6734 ", args= " HOST_WIDE_INT_PRINT_DEC
6735 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6736 (reg_names[(frame_pointer_needed)
6737 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6738 ((frame_pointer_needed && TARGET_MIPS16)
6739 ? tsize - cfun->machine->frame.args_size
6740 : tsize),
6741 reg_names[GP_REG_FIRST + 31],
6742 cfun->machine->frame.var_size,
6743 cfun->machine->frame.num_gp,
6744 cfun->machine->frame.num_fp,
6745 cfun->machine->frame.args_size,
6746 cfun->machine->frame.cprestore_size);
6748 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6749 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6750 cfun->machine->frame.mask,
6751 cfun->machine->frame.gp_save_offset);
6752 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6753 cfun->machine->frame.fmask,
6754 cfun->machine->frame.fp_save_offset);
6756 /* Require:
6757 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6758 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6761 if (mips_current_loadgp_style () == LOADGP_OLDABI)
6763 /* Handle the initialization of $gp for SVR4 PIC. */
6764 if (!cfun->machine->all_noreorder_p)
6765 output_asm_insn ("%(.cpload\t%^%)", 0);
6766 else
6767 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6769 else if (cfun->machine->all_noreorder_p)
6770 output_asm_insn ("%(%<", 0);
6772 /* Tell the assembler which register we're using as the global
6773 pointer. This is needed for thunks, since they can use either
6774 explicit relocs or assembler macros. */
6775 mips_output_cplocal ();
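/* An illustrative sketch, not part of the original source: for a
   hypothetical function "foo" that saves only $31 in a 32-byte frame,
   the directives emitted above would look roughly like

	.ent	foo
   foo:
	.frame	$sp,32,$31		# vars= 16, regs= 1/0, args= 0, gp= 8
	.mask	0x80000000,-8
	.fmask	0x00000000,0

   All numbers are made up and only meant to show the shape of the
   output; the real values come from cfun->machine->frame.  */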
6778 /* Make the last instruction frame related and note that it performs
6779 the operation described by FRAME_PATTERN. */
6781 static void
6782 mips_set_frame_expr (rtx frame_pattern)
6784 rtx insn;
6786 insn = get_last_insn ();
6787 RTX_FRAME_RELATED_P (insn) = 1;
6788 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6789 frame_pattern,
6790 REG_NOTES (insn));
6794 /* Return a frame-related rtx that stores REG at MEM.
6795 REG must be a single register. */
6797 static rtx
6798 mips_frame_set (rtx mem, rtx reg)
6800 rtx set;
6802 /* If we're saving the return address register and the dwarf return
6803 address column differs from the hard register number, adjust the
6804 note reg to refer to the former. */
6805 if (REGNO (reg) == GP_REG_FIRST + 31
6806 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6807 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6809 set = gen_rtx_SET (VOIDmode, mem, reg);
6810 RTX_FRAME_RELATED_P (set) = 1;
6812 return set;
6816 /* Save register REG to MEM. Make the instruction frame-related. */
6818 static void
6819 mips_save_reg (rtx reg, rtx mem)
6821 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6823 rtx x1, x2;
6825 if (mips_split_64bit_move_p (mem, reg))
6826 mips_split_64bit_move (mem, reg);
6827 else
6828 emit_move_insn (mem, reg);
6830 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6831 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6832 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6834 else
6836 if (TARGET_MIPS16
6837 && REGNO (reg) != GP_REG_FIRST + 31
6838 && !M16_REG_P (REGNO (reg)))
6840 /* Save a non-mips16 register by moving it through a temporary.
6841 We don't need to do this for $31 since there's a special
6842 instruction for it. */
6843 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6844 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6846 else
6847 emit_move_insn (mem, reg);
6849 mips_set_frame_expr (mips_frame_set (mem, reg));
6854 /* Expand the prologue into a bunch of separate insns. */
6856 void
6857 mips_expand_prologue (void)
6859 HOST_WIDE_INT size;
6861 if (cfun->machine->global_pointer > 0)
6862 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6864 size = compute_frame_size (get_frame_size ());
6866 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6867 bytes beforehand; this is enough to cover the register save area
6868 without going out of range. */
6869 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6871 HOST_WIDE_INT step1;
6873 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6874 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6875 stack_pointer_rtx,
6876 GEN_INT (-step1)))) = 1;
6877 size -= step1;
6878 mips_for_each_saved_reg (size, mips_save_reg);
6881 /* Allocate the rest of the frame. */
6882 if (size > 0)
6884 if (SMALL_OPERAND (-size))
6885 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6886 stack_pointer_rtx,
6887 GEN_INT (-size)))) = 1;
6888 else
6890 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6891 if (TARGET_MIPS16)
6893 /* There are no instructions to add or subtract registers
6894 from the stack pointer, so use the frame pointer as a
6895 temporary. We should always be using a frame pointer
6896 in this case anyway. */
6897 gcc_assert (frame_pointer_needed);
6898 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6899 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6900 hard_frame_pointer_rtx,
6901 MIPS_PROLOGUE_TEMP (Pmode)));
6902 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6904 else
6905 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6906 stack_pointer_rtx,
6907 MIPS_PROLOGUE_TEMP (Pmode)));
6909 /* Describe the combined effect of the previous instructions. */
6910 mips_set_frame_expr
6911 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6912 plus_constant (stack_pointer_rtx, -size)));
6916 /* Set up the frame pointer, if we're using one. In mips16 code,
6917 we point the frame pointer ahead of the outgoing argument area.
6918 This should allow more variables & incoming arguments to be
6919 accessed with unextended instructions. */
6920 if (frame_pointer_needed)
6922 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6924 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6925 if (SMALL_OPERAND (cfun->machine->frame.args_size))
6926 RTX_FRAME_RELATED_P
6927 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6928 stack_pointer_rtx,
6929 offset))) = 1;
6930 else
6932 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6933 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6934 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6935 hard_frame_pointer_rtx,
6936 MIPS_PROLOGUE_TEMP (Pmode)));
6937 mips_set_frame_expr
6938 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6939 plus_constant (stack_pointer_rtx,
6940 cfun->machine->frame.args_size)));
6943 else
6944 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6945 stack_pointer_rtx)) = 1;
6948 mips_emit_loadgp ();
6950 /* If generating o32/o64 abicalls, save $gp on the stack. */
6951 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6952 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6954 /* If we are profiling, make sure no instructions are scheduled before
6955 the call to mcount. */
6957 if (current_function_profile)
6958 emit_insn (gen_blockage ());
6961 /* Do any necessary cleanup after a function to restore stack, frame,
6962 and regs. */
6964 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6966 static void
6967 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6968 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6970 /* Reinstate the normal $gp. */
6971 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6972 mips_output_cplocal ();
6974 if (cfun->machine->all_noreorder_p)
6976 /* Avoid using %>%) since it adds excess whitespace. */
6977 output_asm_insn (".set\tmacro", 0);
6978 output_asm_insn (".set\treorder", 0);
6979 set_noreorder = set_nomacro = 0;
6982 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6984 const char *fnname;
6986 /* Get the function name the same way that toplev.c does before calling
6987 assemble_start_function. This is needed so that the name used here
6988 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6989 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6990 fputs ("\t.end\t", file);
6991 assemble_name (file, fnname);
6992 fputs ("\n", file);
6996 /* Emit instructions to restore register REG from slot MEM. */
6998 static void
6999 mips_restore_reg (rtx reg, rtx mem)
7001 /* There's no mips16 instruction to load $31 directly. Load into
7002 $7 instead and adjust the return insn appropriately. */
7003 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7004 reg = gen_rtx_REG (GET_MODE (reg), 7);
7006 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7008 /* Can't restore directly; move through a temporary. */
7009 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7010 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7012 else
7013 emit_move_insn (reg, mem);
7017 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7018 if this epilogue precedes a sibling call, false if it is for a normal
7019 "epilogue" pattern. */
7021 void
7022 mips_expand_epilogue (int sibcall_p)
7024 HOST_WIDE_INT step1, step2;
7025 rtx base, target;
7027 if (!sibcall_p && mips_can_use_return_insn ())
7029 emit_jump_insn (gen_return ());
7030 return;
7033 /* Split the frame into two. STEP1 is the amount of stack we should
7034 deallocate before restoring the registers. STEP2 is the amount we
7035 should deallocate afterwards.
7037 Start off by assuming that no registers need to be restored. */
7038 step1 = cfun->machine->frame.total_size;
7039 step2 = 0;
7041 /* Work out which register holds the frame address. Account for the
7042 frame pointer offset used by mips16 code. */
7043 if (!frame_pointer_needed)
7044 base = stack_pointer_rtx;
7045 else
7047 base = hard_frame_pointer_rtx;
7048 if (TARGET_MIPS16)
7049 step1 -= cfun->machine->frame.args_size;
7052 /* If we need to restore registers, deallocate as much stack as
7053 possible in the second step without going out of range. */
7054 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7056 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7057 step1 -= step2;
7060 /* Set TARGET to BASE + STEP1. */
7061 target = base;
7062 if (step1 > 0)
7064 rtx adjust;
7066 /* Get an rtx for STEP1 that we can add to BASE. */
7067 adjust = GEN_INT (step1);
7068 if (!SMALL_OPERAND (step1))
7070 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7071 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7074 /* Normal mode code can copy the result straight into $sp. */
7075 if (!TARGET_MIPS16)
7076 target = stack_pointer_rtx;
7078 emit_insn (gen_add3_insn (target, base, adjust));
7081 /* Copy TARGET into the stack pointer. */
7082 if (target != stack_pointer_rtx)
7083 emit_move_insn (stack_pointer_rtx, target);
7085 /* If we're using addressing macros for n32/n64 abicalls, $gp is
7086 implicitly used by all SYMBOL_REFs. We must emit a blockage
7087 insn before restoring it. */
7088 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
7089 emit_insn (gen_blockage ());
7091 /* Restore the registers. */
7092 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7093 mips_restore_reg);
7095 /* Deallocate the final bit of the frame. */
7096 if (step2 > 0)
7097 emit_insn (gen_add3_insn (stack_pointer_rtx,
7098 stack_pointer_rtx,
7099 GEN_INT (step2)));
7101 /* Add in the __builtin_eh_return stack adjustment. We need to
7102 use a temporary in mips16 code. */
7103 if (current_function_calls_eh_return)
7105 if (TARGET_MIPS16)
7107 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7108 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7109 MIPS_EPILOGUE_TEMP (Pmode),
7110 EH_RETURN_STACKADJ_RTX));
7111 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7113 else
7114 emit_insn (gen_add3_insn (stack_pointer_rtx,
7115 stack_pointer_rtx,
7116 EH_RETURN_STACKADJ_RTX));
7119 if (!sibcall_p)
7121 /* The mips16 loads the return address into $7, not $31. */
7122 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
7123 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7124 GP_REG_FIRST + 7)));
7125 else
7126 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7127 GP_REG_FIRST + 31)));
7131 /* Return nonzero if this function is known to have a null epilogue.
7132 This allows the optimizer to omit jumps to jumps if no stack
7133 was created. */
7135 int
7136 mips_can_use_return_insn (void)
7138 tree return_type;
7140 if (! reload_completed)
7141 return 0;
7143 if (regs_ever_live[31] || current_function_profile)
7144 return 0;
7146 return_type = DECL_RESULT (current_function_decl);
7148 /* In mips16 mode, a function which returns a floating point value
7149 needs to arrange to copy the return value into the floating point
7150 registers. */
7151 if (TARGET_MIPS16
7152 && mips16_hard_float
7153 && ! aggregate_value_p (return_type, current_function_decl)
7154 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7155 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
7156 return 0;
7158 if (cfun->machine->frame.initialized)
7159 return cfun->machine->frame.total_size == 0;
7161 return compute_frame_size (get_frame_size ()) == 0;
7164 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
7165 in order to avoid duplicating too much logic from elsewhere. */
7167 static void
7168 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7169 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7170 tree function)
7172 rtx this, temp1, temp2, insn, fnaddr;
7174 /* Pretend to be a post-reload pass while generating rtl. */
7175 no_new_pseudos = 1;
7176 reload_completed = 1;
7177 reset_block_changes ();
7179 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
7180 for TARGET_NEWABI since the latter is a call-saved register. */
7181 if (TARGET_ABICALLS)
7182 cfun->machine->global_pointer
7183 = REGNO (pic_offset_table_rtx)
7184 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
7186 /* Set up the global pointer for n32 or n64 abicalls. */
7187 mips_emit_loadgp ();
7189 /* We need two temporary registers in some cases. */
7190 temp1 = gen_rtx_REG (Pmode, 2);
7191 temp2 = gen_rtx_REG (Pmode, 3);
7193 /* Find out which register contains the "this" pointer. */
7194 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7195 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7196 else
7197 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7199 /* Add DELTA to THIS. */
7200 if (delta != 0)
7202 rtx offset = GEN_INT (delta);
7203 if (!SMALL_OPERAND (delta))
7205 emit_move_insn (temp1, offset);
7206 offset = temp1;
7208 emit_insn (gen_add3_insn (this, this, offset));
7211 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
7212 if (vcall_offset != 0)
7214 rtx addr;
7216 /* Set TEMP1 to *THIS. */
7217 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7219 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
7220 addr = mips_add_offset (temp2, temp1, vcall_offset);
7222 /* Load the offset and add it to THIS. */
7223 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7224 emit_insn (gen_add3_insn (this, this, temp1));
7227 /* Jump to the target function. Use a sibcall if direct jumps are
7228 allowed, otherwise load the address into a register first. */
7229 fnaddr = XEXP (DECL_RTL (function), 0);
7230 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
7232 /* This is messy. gas treats "la $25,foo" as part of a call
7233 sequence and may allow a global "foo" to be lazily bound.
7234 The general move patterns therefore reject this combination.
7236 In this context, lazy binding would actually be OK for o32 and o64,
7237 but it's still wrong for n32 and n64; see mips_load_call_address.
7238 We must therefore load the address via a temporary register if
7239 mips_dangerous_for_la25_p.
7241 If we jump to the temporary register rather than $25, the assembler
7242 can use the move insn to fill the jump's delay slot. */
7243 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
7244 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7245 mips_load_call_address (temp1, fnaddr, true);
7247 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7248 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7249 emit_jump_insn (gen_indirect_jump (temp1));
7251 else
7253 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7254 SIBLING_CALL_P (insn) = 1;
7257 /* Run just enough of rest_of_compilation. This sequence was
7258 "borrowed" from alpha.c. */
7259 insn = get_insns ();
7260 insn_locators_initialize ();
7261 split_all_insns_noflow ();
7262 if (TARGET_MIPS16)
7263 mips16_lay_out_constants ();
7264 shorten_branches (insn);
7265 final_start_function (insn, file, 1);
7266 final (insn, file, 1);
7267 final_end_function ();
7269 /* Clean up the vars set above. Note that final_end_function resets
7270 the global pointer for us. */
7271 reload_completed = 0;
7272 no_new_pseudos = 0;
7275 /* Returns nonzero if X contains a SYMBOL_REF. */
7277 static int
7278 symbolic_expression_p (rtx x)
7280 if (GET_CODE (x) == SYMBOL_REF)
7281 return 1;
7283 if (GET_CODE (x) == CONST)
7284 return symbolic_expression_p (XEXP (x, 0));
7286 if (UNARY_P (x))
7287 return symbolic_expression_p (XEXP (x, 0));
7289 if (ARITHMETIC_P (x))
7290 return (symbolic_expression_p (XEXP (x, 0))
7291 || symbolic_expression_p (XEXP (x, 1)));
7293 return 0;
7296 /* Choose the section to use for the constant rtx expression X that has
7297 mode MODE. */
7299 static section *
7300 mips_select_rtx_section (enum machine_mode mode, rtx x,
7301 unsigned HOST_WIDE_INT align)
7303 if (TARGET_MIPS16)
7305 /* In mips16 mode, the constant table always goes in the same section
7306 as the function, so that constants can be loaded using PC relative
7307 addressing. */
7308 return function_section (current_function_decl);
7310 else if (TARGET_EMBEDDED_DATA)
7312 /* For embedded applications, always put constants in read-only data,
7313 in order to reduce RAM usage. */
7314 return mergeable_constant_section (mode, align, 0);
7316 else
7318 /* For hosted applications, always put constants in small data if
7319 possible, as this gives the best performance. */
7320 /* ??? Consider using mergeable small data sections. */
7322 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7323 && mips_section_threshold > 0)
7324 return get_named_section (NULL, ".sdata", 0);
7325 else if (flag_pic && symbolic_expression_p (x))
7326 return get_named_section (NULL, ".data.rel.ro", 3);
7327 else
7328 return mergeable_constant_section (mode, align, 0);
7332 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7334 The complication here is that, with the combination TARGET_ABICALLS
7335 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7336 therefore not be included in the read-only part of a DSO. Handle such
7337 cases by selecting a normal data section instead of a read-only one.
7338 The logic apes that in default_function_rodata_section. */
7340 static section *
7341 mips_function_rodata_section (tree decl)
7343 if (!TARGET_ABICALLS || TARGET_GPWORD)
7344 return default_function_rodata_section (decl);
7346 if (decl && DECL_SECTION_NAME (decl))
7348 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7349 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7351 char *rname = ASTRDUP (name);
7352 rname[14] = 'd';
7353 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7355 else if (flag_function_sections && flag_data_sections
7356 && strncmp (name, ".text.", 6) == 0)
7358 char *rname = ASTRDUP (name);
7359 memcpy (rname + 1, "data", 4);
7360 return get_section (rname, SECTION_WRITE, decl);
7363 return data_section;
7366 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
7367 locally-defined objects go in a small data section. It also controls
7368 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
7369 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
7371 static bool
7372 mips_in_small_data_p (tree decl)
7374 HOST_WIDE_INT size;
7376 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7377 return false;
7379 /* We don't yet generate small-data references for -mabicalls. See related
7380 -G handling in override_options. */
7381 if (TARGET_ABICALLS)
7382 return false;
7384 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7386 const char *name;
7388 /* Reject anything that isn't in a known small-data section. */
7389 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7390 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7391 return false;
7393 /* If a symbol is defined externally, the assembler will use the
7394 usual -G rules when deciding how to implement macros. */
7395 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7396 return true;
7398 else if (TARGET_EMBEDDED_DATA)
7400 /* Don't put constants into the small data section: we want them
7401 to be in ROM rather than RAM. */
7402 if (TREE_CODE (decl) != VAR_DECL)
7403 return false;
7405 if (TREE_READONLY (decl)
7406 && !TREE_SIDE_EFFECTS (decl)
7407 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7408 return false;
7411 size = int_size_in_bytes (TREE_TYPE (decl));
7412 return (size > 0 && size <= mips_section_threshold);
7415 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7416 anchors for small data: the GP register acts as an anchor in that
7417 case. We also don't want to use them for PC-relative accesses,
7418 where the PC acts as an anchor. */
7420 static bool
7421 mips_use_anchors_for_symbol_p (rtx symbol)
7423 switch (mips_classify_symbol (symbol))
7425 case SYMBOL_CONSTANT_POOL:
7426 case SYMBOL_SMALL_DATA:
7427 return false;
7429 default:
7430 return true;
7434 /* See whether VALTYPE is a record whose fields should be returned in
7435 floating-point registers. If so, return the number of fields and
7436 list them in FIELDS (which should have two elements). Return 0
7437 otherwise.
7439 For n32 & n64, a structure with one or two fields is returned in
7440 floating-point registers as long as every field has a floating-point
7441 type. */
7443 static int
7444 mips_fpr_return_fields (tree valtype, tree *fields)
7446 tree field;
7447 int i;
7449 if (!TARGET_NEWABI)
7450 return 0;
7452 if (TREE_CODE (valtype) != RECORD_TYPE)
7453 return 0;
7455 i = 0;
7456 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7458 if (TREE_CODE (field) != FIELD_DECL)
7459 continue;
7461 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7462 return 0;
7464 if (i == 2)
7465 return 0;
7467 fields[i++] = field;
7469 return i;
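/* Illustration (type names hypothetical): under n32/n64,
   "struct s { double d; float f; };" yields 2, with FIELDS pointing at
   the two FIELD_DECLs, so the value is returned in $f0/$f2; adding a
   third field, or any field whose type is not REAL_TYPE, makes this
   function return 0 and the structure goes back to the GPRs.  */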
7473 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
7474 a value in the most significant part of $2/$3 if:
7476 - the target is big-endian;
7478 - the value has a structure or union type (we generalize this to
7479 cover aggregates from other languages too); and
7481 - the structure is not returned in floating-point registers. */
7483 static bool
7484 mips_return_in_msb (tree valtype)
7486 tree fields[2];
7488 return (TARGET_NEWABI
7489 && TARGET_BIG_ENDIAN
7490 && AGGREGATE_TYPE_P (valtype)
7491 && mips_fpr_return_fields (valtype, fields) == 0);
7495 /* Return a composite value in a pair of floating-point registers.
7496 MODE1 and OFFSET1 are the mode and byte offset for the first value,
7497 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
7498 complete value.
7500 For n32 & n64, $f0 always holds the first value and $f2 the second.
7501 Otherwise the values are packed together as closely as possible. */
7503 static rtx
7504 mips_return_fpr_pair (enum machine_mode mode,
7505 enum machine_mode mode1, HOST_WIDE_INT offset1,
7506 enum machine_mode mode2, HOST_WIDE_INT offset2)
7508 int inc;
7510 inc = (TARGET_NEWABI ? 2 : FP_INC);
7511 return gen_rtx_PARALLEL
7512 (mode,
7513 gen_rtvec (2,
7514 gen_rtx_EXPR_LIST (VOIDmode,
7515 gen_rtx_REG (mode1, FP_RETURN),
7516 GEN_INT (offset1)),
7517 gen_rtx_EXPR_LIST (VOIDmode,
7518 gen_rtx_REG (mode2, FP_RETURN + inc),
7519 GEN_INT (offset2))));
7524 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
7525 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
7526 VALTYPE is null and MODE is the mode of the return value. */
7529 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7530 enum machine_mode mode)
7532 if (valtype)
7534 tree fields[2];
7535 int unsignedp;
7537 mode = TYPE_MODE (valtype);
7538 unsignedp = TYPE_UNSIGNED (valtype);
7540 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
7541 true, we must promote the mode just as PROMOTE_MODE does. */
7542 mode = promote_mode (valtype, mode, &unsignedp, 1);
7544 /* Handle structures whose fields are returned in $f0/$f2. */
7545 switch (mips_fpr_return_fields (valtype, fields))
7547 case 1:
7548 return gen_rtx_REG (mode, FP_RETURN);
7550 case 2:
7551 return mips_return_fpr_pair (mode,
7552 TYPE_MODE (TREE_TYPE (fields[0])),
7553 int_byte_position (fields[0]),
7554 TYPE_MODE (TREE_TYPE (fields[1])),
7555 int_byte_position (fields[1]));
7558 /* If a value is passed in the most significant part of a register, see
7559 whether we have to round the mode up to a whole number of words. */
7560 if (mips_return_in_msb (valtype))
7562 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7563 if (size % UNITS_PER_WORD != 0)
7565 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7566 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7570 /* For EABI, the class of return register depends entirely on MODE.
7571 For example, "struct { some_type x; }" and "union { some_type x; }"
7572 are returned in the same way as a bare "some_type" would be.
7573 Other ABIs only use FPRs for scalar, complex or vector types. */
7574 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7575 return gen_rtx_REG (mode, GP_RETURN);
7578 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7579 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7580 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7581 return gen_rtx_REG (mode, FP_RETURN);
7583 /* Handle long doubles for n32 & n64. */
7584 if (mode == TFmode)
7585 return mips_return_fpr_pair (mode,
7586 DImode, 0,
7587 DImode, GET_MODE_SIZE (mode) / 2);
7589 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7590 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7591 return mips_return_fpr_pair (mode,
7592 GET_MODE_INNER (mode), 0,
7593 GET_MODE_INNER (mode),
7594 GET_MODE_SIZE (mode) / 2);
7596 return gen_rtx_REG (mode, GP_RETURN);
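/* Sketch of one path through the code above (not an exhaustive
   description): a "complex double" return value on the 64-bit-FPR
   ABIs has MODE_COMPLEX_FLOAT class and fits in two FPRs, so it is
   split by mips_return_fpr_pair with the real part at offset 0 and
   the imaginary part at offset GET_MODE_SIZE (mode) / 2; anything
   that falls through the special cases ends up in GP_RETURN.  */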
7599 /* Return nonzero when an argument must be passed by reference. */
7601 static bool
7602 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7603 enum machine_mode mode, tree type,
7604 bool named ATTRIBUTE_UNUSED)
7606 if (mips_abi == ABI_EABI)
7608 int size;
7610 /* ??? How should SCmode be handled? */
7611 if (mode == DImode || mode == DFmode)
7612 return 0;
7614 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
7615 return size == -1 || size > UNITS_PER_WORD;
7617 else
7619 /* If we have a variable-sized parameter, we have no choice. */
7620 return targetm.calls.must_pass_in_stack (mode, type);
7624 static bool
7625 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7626 enum machine_mode mode ATTRIBUTE_UNUSED,
7627 tree type ATTRIBUTE_UNUSED, bool named)
7629 return mips_abi == ABI_EABI && named;
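/* EABI example (types hypothetical): a 16-byte "struct { int a[4]; }"
   has int_size_in_bytes > UNITS_PER_WORD and is therefore passed by
   reference, with the callee making its own copy for named arguments
   (mips_callee_copies above); a DFmode "double" is explicitly exempted
   in mips_pass_by_reference and is passed by value.  */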
7632 /* Return true if registers of class CLASS cannot change from mode FROM
7633 to mode TO. */
7635 bool
7636 mips_cannot_change_mode_class (enum machine_mode from,
7637 enum machine_mode to, enum reg_class class)
7639 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7640 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7642 if (TARGET_BIG_ENDIAN)
7644 /* When a multi-word value is stored in paired floating-point
7645 registers, the first register always holds the low word.
7646 We therefore can't allow FPRs to change between single-word
7647 and multi-word modes. */
7648 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7649 return true;
7651 else
7653 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7654 in LO and HI, the high word always comes first. We therefore
7655 can't allow values stored in HI to change between single-word
7656 and multi-word modes.
7657 This rule applies to both the original HI/LO pair and the new
7658 DSP accumulators. */
7659 if (reg_classes_intersect_p (ACC_REGS, class))
7660 return true;
7663 /* Loading a 32-bit value into a 64-bit floating-point register
7664 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7665 We can't allow 64-bit float registers to change from SImode to
7666 a wider mode.  */
7667 if (TARGET_FLOAT64
7668 && from == SImode
7669 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7670 && reg_classes_intersect_p (FP_REGS, class))
7671 return true;
7672 return false;
7675 /* Return true if X should not be moved directly into register $25.
7676 We need this because many versions of GAS will treat "la $25,foo" as
7677 part of a call sequence and so allow a global "foo" to be lazily bound. */
7679 bool
7680 mips_dangerous_for_la25_p (rtx x)
7682 HOST_WIDE_INT offset;
7684 if (TARGET_EXPLICIT_RELOCS)
7685 return false;
7687 mips_split_const (x, &x, &offset);
7688 return global_got_operand (x, VOIDmode);
7691 /* Implement PREFERRED_RELOAD_CLASS. */
7693 enum reg_class
7694 mips_preferred_reload_class (rtx x, enum reg_class class)
7696 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7697 return LEA_REGS;
7699 if (TARGET_HARD_FLOAT
7700 && FLOAT_MODE_P (GET_MODE (x))
7701 && reg_class_subset_p (FP_REGS, class))
7702 return FP_REGS;
7704 if (reg_class_subset_p (GR_REGS, class))
7705 class = GR_REGS;
7707 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7708 class = M16_REGS;
7710 return class;
7713 /* This function returns the register class required for a secondary
7714 register when copying between one of the registers in CLASS, and X,
7715 using MODE. If IN_P is nonzero, the copy is going from X to the
7716 register, otherwise the register is the source. A return value of
7717 NO_REGS means that no secondary register is required. */
7719 enum reg_class
7720 mips_secondary_reload_class (enum reg_class class,
7721 enum machine_mode mode, rtx x, int in_p)
7723 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7724 int regno = -1;
7725 int gp_reg_p;
7727 if (REG_P (x) || GET_CODE (x) == SUBREG)
7728 regno = true_regnum (x);
7730 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7732 if (mips_dangerous_for_la25_p (x))
7734 gr_regs = LEA_REGS;
7735 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7736 return gr_regs;
7739 /* Copying from HI or LO to anywhere other than a general register
7740 requires a general register.
7741 This rule applies to both the original HI/LO pair and the new
7742 DSP accumulators. */
7743 if (reg_class_subset_p (class, ACC_REGS))
7745 if (TARGET_MIPS16 && in_p)
7747 /* We can't really copy to HI or LO at all in mips16 mode. */
7748 return M16_REGS;
7750 return gp_reg_p ? NO_REGS : gr_regs;
7752 if (ACC_REG_P (regno))
7754 if (TARGET_MIPS16 && ! in_p)
7756 /* We can't really copy to HI or LO at all in mips16 mode. */
7757 return M16_REGS;
7759 return class == gr_regs ? NO_REGS : gr_regs;
7762 /* We can only copy a value to a condition code register from a
7763 floating point register, and even then we require a scratch
7764 floating point register. We can only copy a value out of a
7765 condition code register into a general register. */
7766 if (class == ST_REGS)
7768 if (in_p)
7769 return FP_REGS;
7770 return gp_reg_p ? NO_REGS : gr_regs;
7772 if (ST_REG_P (regno))
7774 if (! in_p)
7775 return FP_REGS;
7776 return class == gr_regs ? NO_REGS : gr_regs;
7779 if (class == FP_REGS)
7781 if (MEM_P (x))
7783 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7784 return NO_REGS;
7786 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7788 /* We can use the l.s and l.d macros to load floating-point
7789 constants. ??? For l.s, we could probably get better
7790 code by returning GR_REGS here. */
7791 return NO_REGS;
7793 else if (gp_reg_p || x == CONST0_RTX (mode))
7795 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7796 return NO_REGS;
7798 else if (FP_REG_P (regno))
7800 /* In this case we can use mov.s or mov.d. */
7801 return NO_REGS;
7803 else
7805 /* Otherwise, we need to reload through an integer register. */
7806 return gr_regs;
7810 /* In mips16 mode, going between memory and anything but M16_REGS
7811 requires an M16_REG. */
7812 if (TARGET_MIPS16)
7814 if (class != M16_REGS && class != M16_NA_REGS)
7816 if (gp_reg_p)
7817 return NO_REGS;
7818 return M16_REGS;
7820 if (! gp_reg_p)
7822 if (class == M16_REGS || class == M16_NA_REGS)
7823 return NO_REGS;
7824 return M16_REGS;
7828 return NO_REGS;
7831 /* Implement CLASS_MAX_NREGS.
7833 Usually all registers are word-sized. The only supported exception
7834 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7835 registers. A word-based calculation is correct even in that case,
7836 since -msingle-float disallows multi-FPR values.
7838 The FP status registers are an exception to this rule. They are always
7839 4 bytes wide as they only hold condition code modes, and CCmode is always
7840 considered to be 4 bytes wide. */
7843 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7844 enum machine_mode mode)
7846 if (class == ST_REGS)
7847 return (GET_MODE_SIZE (mode) + 3) / 4;
7848 else
7849 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
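/* Example values: CCmode (4 bytes) needs a single ST_REG; DFmode needs
   (8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD registers, i.e. 2 on a
   32-bit-word target and 1 on a 64-bit-word one.  */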
7852 static bool
7853 mips_valid_pointer_mode (enum machine_mode mode)
7855 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7858 /* Target hook for vector_mode_supported_p. */
7860 static bool
7861 mips_vector_mode_supported_p (enum machine_mode mode)
7863 switch (mode)
7865 case V2SFmode:
7866 return TARGET_PAIRED_SINGLE_FLOAT;
7868 case V2HImode:
7869 case V4QImode:
7870 return TARGET_DSP;
7872 default:
7873 return false;
7877 /* If we can access small data directly (using gp-relative relocation
7878 operators) return the small data pointer, otherwise return null.
7880 For each mips16 function which refers to GP relative symbols, we
7881 use a pseudo register, initialized at the start of the function, to
7882 hold the $gp value. */
7884 static rtx
7885 mips16_gp_pseudo_reg (void)
7887 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7889 rtx unspec;
7890 rtx insn, scan;
7892 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7894 /* We want to initialize this to a value which gcc will believe
7895 is constant. */
7896 start_sequence ();
7897 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7898 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7899 gen_rtx_CONST (Pmode, unspec));
7900 insn = get_insns ();
7901 end_sequence ();
7903 push_topmost_sequence ();
7904 /* We need to emit the initialization after the FUNCTION_BEG
7905 note, so that it will be integrated. */
7906 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7907 if (NOTE_P (scan)
7908 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7909 break;
7910 if (scan == NULL_RTX)
7911 scan = get_insns ();
7912 insn = emit_insn_after (insn, scan);
7913 pop_topmost_sequence ();
7916 return cfun->machine->mips16_gp_pseudo_rtx;
7919 /* Write out code to move floating point arguments in or out of
7920 general registers. Output the instructions to FILE. FP_CODE is
7921 the code describing which arguments are present (see the comment at
7922 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7923 we are copying from the floating point registers. */
7925 static void
7926 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7928 const char *s;
7929 int gparg, fparg;
7930 unsigned int f;
7932 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7933 gcc_assert (TARGET_OLDABI);
7935 if (from_fp_p)
7936 s = "mfc1";
7937 else
7938 s = "mtc1";
7939 gparg = GP_ARG_FIRST;
7940 fparg = FP_ARG_FIRST;
7941 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7943 if ((f & 3) == 1)
7945 if ((fparg & 1) != 0)
7946 ++fparg;
7947 fprintf (file, "\t%s\t%s,%s\n", s,
7948 reg_names[gparg], reg_names[fparg]);
7950 else if ((f & 3) == 2)
7952 if (TARGET_64BIT)
7953 fprintf (file, "\td%s\t%s,%s\n", s,
7954 reg_names[gparg], reg_names[fparg]);
7955 else
7957 if ((fparg & 1) != 0)
7958 ++fparg;
7959 if (TARGET_BIG_ENDIAN)
7960 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7961 reg_names[gparg], reg_names[fparg + 1], s,
7962 reg_names[gparg + 1], reg_names[fparg]);
7963 else
7964 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7965 reg_names[gparg], reg_names[fparg], s,
7966 reg_names[gparg + 1], reg_names[fparg + 1]);
7967 ++gparg;
7968 ++fparg;
7971 else
7972 gcc_unreachable ();
7974 ++gparg;
7975 ++fparg;
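/* Illustrative output (o32, register names assumed to be the usual
   "$4"/"$f12" spellings, not re-verified here): for FP_CODE == 1,
   a single float argument, and FROM_FP_P == 0 this emits just

	mtc1	$4,$f12

   while FROM_FP_P == 1 emits the corresponding mfc1.  */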
7979 /* Build a mips16 function stub. This is used for functions which
7980 take arguments in the floating point registers. It is 32 bit code
7981 that moves the floating point args into the general registers, and
7982 then jumps to the 16 bit code. */
7984 static void
7985 build_mips16_function_stub (FILE *file)
7987 const char *fnname;
7988 char *secname, *stubname;
7989 tree stubid, stubdecl;
7990 int need_comma;
7991 unsigned int f;
7993 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7994 secname = (char *) alloca (strlen (fnname) + 20);
7995 sprintf (secname, ".mips16.fn.%s", fnname);
7996 stubname = (char *) alloca (strlen (fnname) + 20);
7997 sprintf (stubname, "__fn_stub_%s", fnname);
7998 stubid = get_identifier (stubname);
7999 stubdecl = build_decl (FUNCTION_DECL, stubid,
8000 build_function_type (void_type_node, NULL_TREE));
8001 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8003 fprintf (file, "\t# Stub function for %s (", current_function_name ());
8004 need_comma = 0;
8005 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
8007 fprintf (file, "%s%s",
8008 need_comma ? ", " : "",
8009 (f & 3) == 1 ? "float" : "double");
8010 need_comma = 1;
8012 fprintf (file, ")\n");
8014 fprintf (file, "\t.set\tnomips16\n");
8015 switch_to_section (function_section (stubdecl));
8016 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8018 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8019 within a .ent, and we cannot emit another .ent. */
8020 if (!FUNCTION_NAME_ALREADY_DECLARED)
8022 fputs ("\t.ent\t", file);
8023 assemble_name (file, stubname);
8024 fputs ("\n", file);
8027 assemble_name (file, stubname);
8028 fputs (":\n", file);
8030 /* We don't want the assembler to insert any nops here. */
8031 fprintf (file, "\t.set\tnoreorder\n");
8033 mips16_fp_args (file, current_function_args_info.fp_code, 1);
8035 fprintf (asm_out_file, "\t.set\tnoat\n");
8036 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8037 assemble_name (file, fnname);
8038 fprintf (file, "\n");
8039 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8040 fprintf (asm_out_file, "\t.set\tat\n");
8042 /* Unfortunately, we can't fill the jump delay slot. We can't fill it
8043 with one of the mfc1 instructions, because the result is not
8044 available for one instruction, so if the very first instruction
8045 in the function refers to the register, it will see the wrong
8046 value. */
8047 fprintf (file, "\tnop\n");
8049 fprintf (file, "\t.set\treorder\n");
8051 if (!FUNCTION_NAME_ALREADY_DECLARED)
8053 fputs ("\t.end\t", file);
8054 assemble_name (file, stubname);
8055 fputs ("\n", file);
8058 fprintf (file, "\t.set\tmips16\n");
8060 switch_to_section (function_section (current_function_decl));
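/* Rough shape of the stub emitted for a hypothetical mips16 function
   "float f (float)" on o32 (alignment, .ent and the leading comment
   are elided, and the exact output is not re-verified here):

	.section .mips16.fn.f
   __fn_stub_f:
	.set	noreorder
	mfc1	$4,$f12
	.set	noat
	la	$1,f
	jr	$1
	.set	at
	nop
	.set	reorder
 */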
8063 /* We keep a list of functions for which we have already built stubs
8064 in build_mips16_call_stub. */
8066 struct mips16_stub
8068 struct mips16_stub *next;
8069 char *name;
8070 int fpret;
8073 static struct mips16_stub *mips16_stubs;
8075 /* Build a call stub for a mips16 call. A stub is needed if we are
8076 passing any floating point values which should go into the floating
8077 point registers. If we are, and the call turns out to be to a 32
8078 bit function, the stub will be used to move the values into the
8079 floating point registers before calling the 32 bit function. The
8080 linker will magically adjust the function call to either the 16 bit
8081 function or the 32 bit stub, depending upon where the function call
8082 is actually defined.
8084 Similarly, we need a stub if the return value might come back in a
8085 floating point register.
8087 RETVAL is the location of the return value, or null if this is
8088 a call rather than a call_value. FN is the address of the
8089 function and ARG_SIZE is the size of the arguments. FP_CODE
8090 is the code built by function_arg. This function returns a nonzero
8091 value if it builds the call instruction itself. */
8094 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
8096 int fpret;
8097 const char *fnname;
8098 char *secname, *stubname;
8099 struct mips16_stub *l;
8100 tree stubid, stubdecl;
8101 int need_comma;
8102 unsigned int f;
8104 /* We don't need to do anything if we aren't in mips16 mode, or if
8105 we were invoked with the -msoft-float option. */
8106 if (! TARGET_MIPS16 || ! mips16_hard_float)
8107 return 0;
8109 /* Figure out whether the value might come back in a floating point
8110 register. */
8111 fpret = (retval != 0
8112 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
8113 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
8115 /* We don't need to do anything if there were no floating point
8116 arguments and the value will not be returned in a floating point
8117 register. */
8118 if (fp_code == 0 && ! fpret)
8119 return 0;
8121 /* We don't need to do anything if this is a call to a special
8122 mips16 support function. */
8123 if (GET_CODE (fn) == SYMBOL_REF
8124 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
8125 return 0;
8127 /* This code will only work for the o32 and o64 ABIs. The other ABIs
8128 require more sophisticated support. */
8129 gcc_assert (TARGET_OLDABI);
8131 /* We can only handle SFmode and DFmode floating point return
8132 values. */
8133 if (fpret)
8134 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8136 /* If we're calling via a function pointer, then we must always call
8137 via a stub. There are magic stubs provided in libgcc.a for each
8138 of the required cases. Each of them expects the function address
8139 to arrive in register $2. */
8141 if (GET_CODE (fn) != SYMBOL_REF)
8143 char buf[30];
8144 tree id;
8145 rtx stub_fn, insn;
8147 /* ??? If this code is modified to support other ABI's, we need
8148 to handle PARALLEL return values here. */
8150 sprintf (buf, "__mips16_call_stub_%s%d",
8151 (fpret
8152 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8153 : ""),
8154 fp_code);
8155 id = get_identifier (buf);
8156 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8158 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8160 if (retval == NULL_RTX)
8161 insn = gen_call_internal (stub_fn, arg_size);
8162 else
8163 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8164 insn = emit_call_insn (insn);
8166 /* Put the register usage information on the CALL. */
8167 CALL_INSN_FUNCTION_USAGE (insn) =
8168 gen_rtx_EXPR_LIST (VOIDmode,
8169 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8170 CALL_INSN_FUNCTION_USAGE (insn));
8172 /* If we are handling a floating point return value, we need to
8173 save $18 in the function prologue. Putting a note on the
8174 call will mean that regs_ever_live[$18] will be true if the
8175 call is not eliminated, and we can check that in the prologue
8176 code. */
8177 if (fpret)
8178 CALL_INSN_FUNCTION_USAGE (insn) =
8179 gen_rtx_EXPR_LIST (VOIDmode,
8180 gen_rtx_USE (VOIDmode,
8181 gen_rtx_REG (word_mode, 18)),
8182 CALL_INSN_FUNCTION_USAGE (insn));
8184 /* Return 1 to tell the caller that we've generated the call
8185 insn. */
8186 return 1;
8189 /* We know the function we are going to call. If we have already
8190 built a stub, we don't need to do anything further. */
8192 fnname = XSTR (fn, 0);
8193 for (l = mips16_stubs; l != NULL; l = l->next)
8194 if (strcmp (l->name, fnname) == 0)
8195 break;
8197 if (l == NULL)
8199 /* Build a special purpose stub. When the linker sees a
8200 function call in mips16 code, it will check where the target
8201 is defined. If the target is a 32 bit call, the linker will
8202 search for the section defined here. It can tell which
8203 symbol this section is associated with by looking at the
8204 relocation information (the name is unreliable, since this
8205 might be a static function). If such a section is found, the
8206 linker will redirect the call to the start of the magic
8207 section.
8209 If the function does not return a floating point value, the
8210 special stub section is named
8211 .mips16.call.FNNAME
8213 If the function does return a floating point value, the stub
8214 section is named
8215 .mips16.call.fp.FNNAME
8218 secname = (char *) alloca (strlen (fnname) + 40);
8219 sprintf (secname, ".mips16.call.%s%s",
8220 fpret ? "fp." : "",
8221 fnname);
8222 stubname = (char *) alloca (strlen (fnname) + 20);
8223 sprintf (stubname, "__call_stub_%s%s",
8224 fpret ? "fp_" : "",
8225 fnname);
8226 stubid = get_identifier (stubname);
8227 stubdecl = build_decl (FUNCTION_DECL, stubid,
8228 build_function_type (void_type_node, NULL_TREE));
8229 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8231 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8232 (fpret
8233 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
8234 : ""),
8235 fnname);
8236 need_comma = 0;
8237 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8239 fprintf (asm_out_file, "%s%s",
8240 need_comma ? ", " : "",
8241 (f & 3) == 1 ? "float" : "double");
8242 need_comma = 1;
8244 fprintf (asm_out_file, ")\n");
8246 fprintf (asm_out_file, "\t.set\tnomips16\n");
8247 assemble_start_function (stubdecl, stubname);
8249 if (!FUNCTION_NAME_ALREADY_DECLARED)
8251 fputs ("\t.ent\t", asm_out_file);
8252 assemble_name (asm_out_file, stubname);
8253 fputs ("\n", asm_out_file);
8255 assemble_name (asm_out_file, stubname);
8256 fputs (":\n", asm_out_file);
8259 /* We build the stub code by hand. That's the only way we can
8260 do it, since we can't generate 32 bit code during a 16 bit
8261 compilation. */
8263 /* We don't want the assembler to insert any nops here. */
8264 fprintf (asm_out_file, "\t.set\tnoreorder\n");
8266 mips16_fp_args (asm_out_file, fp_code, 0);
8268 if (! fpret)
8270 fprintf (asm_out_file, "\t.set\tnoat\n");
8271 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8272 fnname);
8273 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8274 fprintf (asm_out_file, "\t.set\tat\n");
8275 /* Unfortunately, we can't fill the jump delay slot. We
8276 can't fill it with one of the mtc1 instructions, because the
8277 result is not available for one instruction, so if the
8278 very first instruction in the function refers to the
8279 register, it will see the wrong value. */
8280 fprintf (asm_out_file, "\tnop\n");
8282 else
8284 fprintf (asm_out_file, "\tmove\t%s,%s\n",
8285 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8286 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8287 /* As above, we can't fill the delay slot. */
8288 fprintf (asm_out_file, "\tnop\n");
8289 if (GET_MODE (retval) == SFmode)
8290 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8291 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8292 else
8294 if (TARGET_BIG_ENDIAN)
8296 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8297 reg_names[GP_REG_FIRST + 2],
8298 reg_names[FP_REG_FIRST + 1]);
8299 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8300 reg_names[GP_REG_FIRST + 3],
8301 reg_names[FP_REG_FIRST + 0]);
8303 else
8305 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8306 reg_names[GP_REG_FIRST + 2],
8307 reg_names[FP_REG_FIRST + 0]);
8308 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8309 reg_names[GP_REG_FIRST + 3],
8310 reg_names[FP_REG_FIRST + 1]);
8313 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8314 /* As above, we can't fill the delay slot. */
8315 fprintf (asm_out_file, "\tnop\n");
8318 fprintf (asm_out_file, "\t.set\treorder\n");
8320 #ifdef ASM_DECLARE_FUNCTION_SIZE
8321 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8322 #endif
8324 if (!FUNCTION_NAME_ALREADY_DECLARED)
8326 fputs ("\t.end\t", asm_out_file);
8327 assemble_name (asm_out_file, stubname);
8328 fputs ("\n", asm_out_file);
8331 fprintf (asm_out_file, "\t.set\tmips16\n");
8333 /* Record this stub. */
8334 l = (struct mips16_stub *) xmalloc (sizeof *l);
8335 l->name = xstrdup (fnname);
8336 l->fpret = fpret;
8337 l->next = mips16_stubs;
8338 mips16_stubs = l;
8341 /* If we expect a floating point return value, but we've built a
8342 stub which does not expect one, then we're in trouble. We can't
8343 use the existing stub, because it won't handle the floating point
8344 value. We can't build a new stub, because the linker won't know
8345 which stub to use for the various calls in this object file.
8346 Fortunately, this case is illegal, since it means that a function
8347 was declared in two different ways in a single compilation. */
8348 if (fpret && ! l->fpret)
8349 error ("cannot handle inconsistent calls to %qs", fnname);
8351 /* If we are calling a stub which handles a floating point return
8352 value, we need to arrange to save $18 in the prologue. We do
8353 this by marking the function call as using the register. The
8354 prologue will later see that it is used, and emit code to save
8355 it. */
8357 if (l->fpret)
8359 rtx insn;
8361 if (retval == NULL_RTX)
8362 insn = gen_call_internal (fn, arg_size);
8363 else
8364 insn = gen_call_value_internal (retval, fn, arg_size);
8365 insn = emit_call_insn (insn);
8367 CALL_INSN_FUNCTION_USAGE (insn) =
8368 gen_rtx_EXPR_LIST (VOIDmode,
8369 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8370 CALL_INSN_FUNCTION_USAGE (insn));
8372 /* Return 1 to tell the caller that we've generated the call
8373 insn. */
8374 return 1;
8377 /* Return 0 to let the caller generate the call insn. */
8378 return 0;
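/* Rough sketch of the body of the stub built for a hypothetical
   "double g (void)" called from mips16 code, on a little-endian o32
   target (surrounding directives omitted, output not re-verified):

	move	$18,$31
	jal	g
	nop
	mfc1	$2,$f0
	mfc1	$3,$f1
	j	$18
	nop

   i.e. the return address is parked in $18, the 32-bit callee is
   invoked directly, and the DFmode result is copied from $f0/$f1
   into $2/$3 before returning to the mips16 caller.  */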
8381 /* An entry in the mips16 constant pool. VALUE is the pool constant,
8382 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
8384 struct mips16_constant {
8385 struct mips16_constant *next;
8386 rtx value;
8387 rtx label;
8388 enum machine_mode mode;
8391 /* Information about an incomplete mips16 constant pool. FIRST is the
8392 first constant, HIGHEST_ADDRESS is the highest address that the first
8393 byte of the pool can have, and INSN_ADDRESS is the current instruction
8394 address. */
8396 struct mips16_constant_pool {
8397 struct mips16_constant *first;
8398 int highest_address;
8399 int insn_address;
8402 /* Add constant VALUE to POOL and return its label. MODE is the
8403 value's mode (used for CONST_INTs, etc.). */
8405 static rtx
8406 add_constant (struct mips16_constant_pool *pool,
8407 rtx value, enum machine_mode mode)
8409 struct mips16_constant **p, *c;
8410 bool first_of_size_p;
8412 /* See whether the constant is already in the pool. If so, return the
8413 existing label, otherwise leave P pointing to the place where the
8414 constant should be added.
8416 Keep the pool sorted in increasing order of mode size so that we can
8417 reduce the number of alignments needed. */
8418 first_of_size_p = true;
8419 for (p = &pool->first; *p != 0; p = &(*p)->next)
8421 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8422 return (*p)->label;
8423 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8424 break;
8425 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8426 first_of_size_p = false;
8429 /* In the worst case, the constant needed by the earliest instruction
8430 will end up at the end of the pool. The entire pool must then be
8431 accessible from that instruction.
8433 When adding the first constant, set the pool's highest address to
8434 the address of the first out-of-range byte. Adjust this address
8435 downwards each time a new constant is added. */
8436 if (pool->first == 0)
8437 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8438 is the address of the instruction with the lowest two bits clear.
8439 The base PC value for ld has the lowest three bits clear. Assume
8440 the worst case here. */
8441 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8442 pool->highest_address -= GET_MODE_SIZE (mode);
8443 if (first_of_size_p)
8444 /* Take into account the worst possible padding due to alignment. */
8445 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
8447 /* Create a new entry. */
8448 c = (struct mips16_constant *) xmalloc (sizeof *c);
8449 c->value = value;
8450 c->mode = mode;
8451 c->label = gen_label_rtx ();
8452 c->next = *p;
8453 *p = c;
8455 return c->label;
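/* Ordering example: adding an SImode constant, then a DFmode one, then
   a second (distinct) SImode one leaves the pool as { SI, SI, DF }, so
   the 8-byte alignment padding is only needed once, in front of the
   first DFmode entry, and duplicate values simply reuse their existing
   labels.  */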
8458 /* Output constant VALUE after instruction INSN and return the last
8459 instruction emitted. MODE is the mode of the constant. */
8461 static rtx
8462 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8464 switch (GET_MODE_CLASS (mode))
8466 case MODE_INT:
8468 rtx size = GEN_INT (GET_MODE_SIZE (mode));
8469 return emit_insn_after (gen_consttable_int (value, size), insn);
8472 case MODE_FLOAT:
8473 return emit_insn_after (gen_consttable_float (value), insn);
8475 case MODE_VECTOR_FLOAT:
8476 case MODE_VECTOR_INT:
8478 int i;
8479 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8480 insn = dump_constants_1 (GET_MODE_INNER (mode),
8481 CONST_VECTOR_ELT (value, i), insn);
8482 return insn;
8485 default:
8486 gcc_unreachable ();
8491 /* Dump out the constants in CONSTANTS after INSN. */
8493 static void
8494 dump_constants (struct mips16_constant *constants, rtx insn)
8496 struct mips16_constant *c, *next;
8497 int align;
8499 align = 0;
8500 for (c = constants; c != NULL; c = next)
8502 /* If necessary, increase the alignment of PC. */
8503 if (align < GET_MODE_SIZE (c->mode))
8505 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8506 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8508 align = GET_MODE_SIZE (c->mode);
8510 insn = emit_label_after (c->label, insn);
8511 insn = dump_constants_1 (c->mode, c->value, insn);
8513 next = c->next;
8514 free (c);
8517 emit_barrier_after (insn);
8520 /* Return the length of instruction INSN. */
8522 static int
8523 mips16_insn_length (rtx insn)
8525 if (JUMP_P (insn))
8527 rtx body = PATTERN (insn);
8528 if (GET_CODE (body) == ADDR_VEC)
8529 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8530 if (GET_CODE (body) == ADDR_DIFF_VEC)
8531 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8533 return get_attr_length (insn);
8536 /* Rewrite *X so that constant pool references refer to the constant's
8537 label instead. DATA points to the constant pool structure. */
8539 static int
8540 mips16_rewrite_pool_refs (rtx *x, void *data)
8542 struct mips16_constant_pool *pool = data;
8543 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8544 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8545 get_pool_constant (*x),
8546 get_pool_mode (*x)));
8547 return 0;
8550 /* Build MIPS16 constant pools. */
8552 static void
8553 mips16_lay_out_constants (void)
8555 struct mips16_constant_pool pool;
8556 rtx insn, barrier;
8558 barrier = 0;
8559 memset (&pool, 0, sizeof (pool));
8560 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8562 /* Rewrite constant pool references in INSN. */
8563 if (INSN_P (insn))
8564 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8566 pool.insn_address += mips16_insn_length (insn);
8568 if (pool.first != NULL)
8570 /* If there are no natural barriers between the first user of
8571 the pool and the highest acceptable address, we'll need to
8572 create a new instruction to jump around the constant pool.
8573 In the worst case, this instruction will be 4 bytes long.
8575 If it's too late to do this transformation after INSN,
8576 do it immediately before INSN. */
8577 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8579 rtx label, jump;
8581 label = gen_label_rtx ();
8583 jump = emit_jump_insn_before (gen_jump (label), insn);
8584 JUMP_LABEL (jump) = label;
8585 LABEL_NUSES (label) = 1;
8586 barrier = emit_barrier_after (jump);
8588 emit_label_after (label, barrier);
8589 pool.insn_address += 4;
8592 /* See whether the constant pool is now out of range of the first
8593 user. If so, output the constants after the previous barrier.
8594 Note that any instructions between BARRIER and INSN (inclusive)
8595 will use negative offsets to refer to the pool. */
8596 if (pool.insn_address > pool.highest_address)
8598 dump_constants (pool.first, barrier);
8599 pool.first = NULL;
8600 barrier = 0;
8602 else if (BARRIER_P (insn))
8603 barrier = insn;
8606 dump_constants (pool.first, get_last_insn ());
8609 /* A temporary variable used by for_each_rtx callbacks, etc. */
8610 static rtx mips_sim_insn;
8612 /* A structure representing the state of the processor pipeline.
8613 Used by the mips_sim_* family of functions. */
8614 struct mips_sim {
8615 /* The maximum number of instructions that can be issued in a cycle.
8616 (Caches mips_issue_rate.) */
8617 unsigned int issue_rate;
8619 /* The current simulation time. */
8620 unsigned int time;
8622 /* How many more instructions can be issued in the current cycle. */
8623 unsigned int insns_left;
8625 /* LAST_SET[X].INSN is the last instruction to set register X.
8626 LAST_SET[X].TIME is the time at which that instruction was issued.
8627 INSN is null if no instruction has yet set register X. */
8628 struct {
8629 rtx insn;
8630 unsigned int time;
8631 } last_set[FIRST_PSEUDO_REGISTER];
8633 /* The pipeline's current DFA state. */
8634 state_t dfa_state;
8637 /* Reset STATE to the initial simulation state. */
8639 static void
8640 mips_sim_reset (struct mips_sim *state)
8642 state->time = 0;
8643 state->insns_left = state->issue_rate;
8644 memset (&state->last_set, 0, sizeof (state->last_set));
8645 state_reset (state->dfa_state);
8648 /* Initialize STATE before its first use. DFA_STATE points to an
8649 allocated but uninitialized DFA state. */
8651 static void
8652 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8654 state->issue_rate = mips_issue_rate ();
8655 state->dfa_state = dfa_state;
8656 mips_sim_reset (state);
8659 /* Advance STATE by one clock cycle. */
8661 static void
8662 mips_sim_next_cycle (struct mips_sim *state)
8664 state->time++;
8665 state->insns_left = state->issue_rate;
8666 state_transition (state->dfa_state, 0);
8669 /* Advance simulation state STATE until instruction INSN can read
8670 register REG. */
8672 static void
8673 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8675 unsigned int i;
8677 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8678 if (state->last_set[REGNO (reg) + i].insn != 0)
8680 unsigned int t;
8682 t = state->last_set[REGNO (reg) + i].time;
8683 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8684 while (state->time < t)
8685 mips_sim_next_cycle (state);
8689 /* A for_each_rtx callback. If *X is a register, advance simulation state
8690 DATA until mips_sim_insn can read the register's value. */
8692 static int
8693 mips_sim_wait_regs_2 (rtx *x, void *data)
8695 if (REG_P (*x))
8696 mips_sim_wait_reg (data, mips_sim_insn, *x);
8697 return 0;
8700 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8702 static void
8703 mips_sim_wait_regs_1 (rtx *x, void *data)
8705 for_each_rtx (x, mips_sim_wait_regs_2, data);
8708 /* Advance simulation state STATE until all of INSN's register
8709 dependencies are satisfied. */
8711 static void
8712 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8714 mips_sim_insn = insn;
8715 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8718 /* Advance simulation state STATE until the units required by
8719 instruction INSN are available. */
8721 static void
8722 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8724 state_t tmp_state;
8726 tmp_state = alloca (state_size ());
8727 while (state->insns_left == 0
8728 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8729 state_transition (tmp_state, insn) >= 0))
8730 mips_sim_next_cycle (state);
8733 /* Advance simulation state STATE until INSN is ready to issue. */
8735 static void
8736 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8738 mips_sim_wait_regs (state, insn);
8739 mips_sim_wait_units (state, insn);
8742 /* mips_sim_insn has just set X. Update the LAST_SET array
8743 in simulation state DATA. */
8745 static void
8746 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8748 struct mips_sim *state;
8749 unsigned int i;
8751 state = data;
8752 if (REG_P (x))
8753 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8755 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8756 state->last_set[REGNO (x) + i].time = state->time;
8760 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8761 can issue immediately (i.e., that mips_sim_wait_insn has already
8762 been called). */
8764 static void
8765 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8767 state_transition (state->dfa_state, insn);
8768 state->insns_left--;
8770 mips_sim_insn = insn;
8771 note_stores (PATTERN (insn), mips_sim_record_set, state);
8774 /* Simulate issuing a NOP in state STATE. */
8776 static void
8777 mips_sim_issue_nop (struct mips_sim *state)
8779 if (state->insns_left == 0)
8780 mips_sim_next_cycle (state);
8781 state->insns_left--;
8784 /* Update simulation state STATE so that it's ready to accept the instruction
8785 after INSN. INSN should be part of the main rtl chain, not a member of a
8786 SEQUENCE. */
8788 static void
8789 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8791 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8792 if (JUMP_P (insn))
8793 mips_sim_issue_nop (state);
8795 switch (GET_CODE (SEQ_BEGIN (insn)))
8797 case CODE_LABEL:
8798 case CALL_INSN:
8799 /* We can't predict the processor state after a call or label. */
8800 mips_sim_reset (state);
8801 break;
8803 case JUMP_INSN:
8804 /* The delay slots of branch likely instructions are only executed
8805 when the branch is taken. Therefore, if the caller has simulated
8806 the delay slot instruction, STATE does not really reflect the state
8807 of the pipeline for the instruction after the delay slot. Also,
8808 branch likely instructions tend to incur a penalty when not taken,
8809 so there will probably be an extra delay between the branch and
8810 the instruction after the delay slot. */
8811 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8812 mips_sim_reset (state);
8813 break;
8815 default:
8816 break;
8820 /* The VR4130 pipeline issues aligned pairs of instructions together,
8821 but it stalls the second instruction if it depends on the first.
8822 In order to cut down the amount of logic required, this dependence
8823 check is not based on a full instruction decode. Instead, any non-SPECIAL
8824 instruction is assumed to modify the register specified by bits 20-16
8825 (which is usually the "rt" field).
8827 In beq, beql, bne and bnel instructions, the rt field is actually an
8828 input, so we can end up with a false dependence between the branch
8829 and its delay slot. If this situation occurs in instruction INSN,
8830 try to avoid it by swapping rs and rt. */
8832 static void
8833 vr4130_avoid_branch_rt_conflict (rtx insn)
8835 rtx first, second;
8837 first = SEQ_BEGIN (insn);
8838 second = SEQ_END (insn);
8839 if (JUMP_P (first)
8840 && NONJUMP_INSN_P (second)
8841 && GET_CODE (PATTERN (first)) == SET
8842 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8843 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8845 /* Check for the right kind of condition. */
8846 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8847 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8848 && REG_P (XEXP (cond, 0))
8849 && REG_P (XEXP (cond, 1))
8850 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8851 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8853 /* SECOND mentions the rt register but not the rs register. */
8854 rtx tmp = XEXP (cond, 0);
8855 XEXP (cond, 0) = XEXP (cond, 1);
8856 XEXP (cond, 1) = tmp;
8861 /* Implement -mvr4130-align. Go through each basic block and simulate the
8862 processor pipeline. If we find that a pair of instructions could execute
8863 in parallel, and the first of those instructions is not 8-byte aligned,
8864 insert a nop to make it aligned. */
8866 static void
8867 vr4130_align_insns (void)
8869 struct mips_sim state;
8870 rtx insn, subinsn, last, last2, next;
8871 bool aligned_p;
8873 dfa_start ();
8875 /* LAST is the last instruction before INSN to have a nonzero length.
8876 LAST2 is the last such instruction before LAST. */
8877 last = 0;
8878 last2 = 0;
8880 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8881 aligned_p = true;
8883 mips_sim_init (&state, alloca (state_size ()));
8884 for (insn = get_insns (); insn != 0; insn = next)
8886 unsigned int length;
8888 next = NEXT_INSN (insn);
8890 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8891 This isn't really related to the alignment pass, but we do it on
8892 the fly to avoid a separate instruction walk. */
8893 vr4130_avoid_branch_rt_conflict (insn);
8895 if (USEFUL_INSN_P (insn))
8896 FOR_EACH_SUBINSN (subinsn, insn)
8898 mips_sim_wait_insn (&state, subinsn);
8900 /* If we want this instruction to issue in parallel with the
8901 previous one, make sure that the previous instruction is
8902 aligned. There are several reasons why this isn't worthwhile
8903 when the second instruction is a call:
8905 - Calls are less likely to be performance critical,
8906 - There's a good chance that the delay slot can execute
8907 in parallel with the call.
8908 - The return address would then be unaligned.
8910 In general, if we're going to insert a nop between instructions
8911 X and Y, it's better to insert it immediately after X. That
8912 way, if the nop makes Y aligned, it will also align any labels
8913 between X and Y. */
8914 if (state.insns_left != state.issue_rate
8915 && !CALL_P (subinsn))
8917 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8919 /* SUBINSN is the first instruction in INSN and INSN is
8920 aligned. We want to align the previous instruction
8921 instead, so insert a nop between LAST2 and LAST.
8923 Note that LAST could be either a single instruction
8924 or a branch with a delay slot. In the latter case,
8925 LAST, like INSN, is already aligned, but the delay
8926 slot must have some extra delay that stops it from
8927 issuing at the same time as the branch. We therefore
8928 insert a nop before the branch in order to align its
8929 delay slot. */
8930 emit_insn_after (gen_nop (), last2);
8931 aligned_p = false;
8933 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8935 /* SUBINSN is the delay slot of INSN, but INSN is
8936 currently unaligned. Insert a nop between
8937 LAST and INSN to align it. */
8938 emit_insn_after (gen_nop (), last);
8939 aligned_p = true;
8942 mips_sim_issue_insn (&state, subinsn);
8944 mips_sim_finish_insn (&state, insn);
8946 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8947 length = get_attr_length (insn);
8948 if (length > 0)
8950 /* If the instruction is an asm statement or multi-instruction
8951 mips.md pattern, the length is only an estimate. Insert an
8952 8 byte alignment after it so that the following instructions
8953 can be handled correctly. */
8954 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8955 && (recog_memoized (insn) < 0 || length >= 8))
8957 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8958 next = NEXT_INSN (next);
8959 mips_sim_next_cycle (&state);
8960 aligned_p = true;
8962 else if (length & 4)
8963 aligned_p = !aligned_p;
8964 last2 = last;
8965 last = insn;
8968 /* See whether INSN is an aligned label. */
8969 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8970 aligned_p = true;
8972 dfa_finish ();
8975 /* Subroutine of mips_reorg. If there is a hazard between INSN
8976 and a previous instruction, avoid it by inserting nops after
8977 instruction AFTER.
8979 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8980 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8981 before using the value of that register. *HILO_DELAY counts the
8982 number of instructions since the last hilo hazard (that is,
8983 the number of instructions since the last mflo or mfhi).
8985 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8986 for the next instruction.
8988 LO_REG is an rtx for the LO register, used in dependence checking. */
8990 static void
8991 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8992 rtx *delayed_reg, rtx lo_reg)
8994 rtx pattern, set;
8995 int nops, ninsns;
8997 if (!INSN_P (insn))
8998 return;
9000 pattern = PATTERN (insn);
9002 /* Do not put the whole function in .set noreorder if it contains
9003 an asm statement. We don't know whether there will be hazards
9004 between the asm statement and the gcc-generated code. */
9005 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
9006 cfun->machine->all_noreorder_p = false;
9008 /* Ignore zero-length instructions (barriers and the like). */
9009 ninsns = get_attr_length (insn) / 4;
9010 if (ninsns == 0)
9011 return;
9013 /* Work out how many nops are needed. Note that we only care about
9014 registers that are explicitly mentioned in the instruction's pattern.
9015 It doesn't matter that calls use the argument registers or that they
9016 clobber hi and lo. */
9017 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
9018 nops = 2 - *hilo_delay;
9019 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
9020 nops = 1;
9021 else
9022 nops = 0;
9024 /* Insert the nops between this instruction and the previous one.
9025 Each new nop takes us further from the last hilo hazard. */
9026 *hilo_delay += nops;
9027 while (nops-- > 0)
9028 emit_insn_after (gen_hazard_nop (), after);
9030 /* Set up the state for the next instruction. */
9031 *hilo_delay += ninsns;
9032 *delayed_reg = 0;
9033 if (INSN_CODE (insn) >= 0)
9034 switch (get_attr_hazard (insn))
9036 case HAZARD_NONE:
9037 break;
9039 case HAZARD_HILO:
9040 *hilo_delay = 0;
9041 break;
9043 case HAZARD_DELAY:
9044 set = single_set (insn);
9045 gcc_assert (set != 0);
9046 *delayed_reg = SET_DEST (set);
9047 break;
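/* Worked example: an "mflo $2" has the HAZARD_HILO attribute and
   resets *HILO_DELAY to 0, so if the very next instruction is a
   multiply that sets LO, the reg_set_p test above fires and two
   hazard nops are inserted between them; an intervening unrelated
   instruction would reduce that to one nop.  */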
9052 /* Go through the instruction stream and insert nops where necessary.
9053 See if the whole function can then be put into .set noreorder &
9054 .set nomacro. */
9056 static void
9057 mips_avoid_hazards (void)
9059 rtx insn, last_insn, lo_reg, delayed_reg;
9060 int hilo_delay, i;
9062 /* Force all instructions to be split into their final form. */
9063 split_all_insns_noflow ();
9065 /* Recalculate instruction lengths without taking nops into account. */
9066 cfun->machine->ignore_hazard_length_p = true;
9067 shorten_branches (get_insns ());
9069 cfun->machine->all_noreorder_p = true;
9071 /* Profiled functions can't be all noreorder because the profiler
9072 support uses assembler macros. */
9073 if (current_function_profile)
9074 cfun->machine->all_noreorder_p = false;
9076 /* Code compiled with -mfix-vr4120 can't be all noreorder because
9077 we rely on the assembler to work around some errata. */
9078 if (TARGET_FIX_VR4120)
9079 cfun->machine->all_noreorder_p = false;
9081 /* The same is true for -mfix-vr4130 if we might generate mflo or
9082 mfhi instructions. Note that we avoid using mflo and mfhi if
9083 the VR4130 macc and dmacc instructions are available instead;
9084 see the *mfhilo_{si,di}_macc patterns. */
9085 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
9086 cfun->machine->all_noreorder_p = false;
9088 last_insn = 0;
9089 hilo_delay = 2;
9090 delayed_reg = 0;
9091 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
9093 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
9094 if (INSN_P (insn))
9096 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
9097 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9098 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
9099 &hilo_delay, &delayed_reg, lo_reg);
9100 else
9101 mips_avoid_hazard (last_insn, insn, &hilo_delay,
9102 &delayed_reg, lo_reg);
9104 last_insn = insn;
9109 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
9111 static void
9112 mips_reorg (void)
9114 if (TARGET_MIPS16)
9115 mips16_lay_out_constants ();
9116 else if (TARGET_EXPLICIT_RELOCS)
9118 if (mips_flag_delayed_branch)
9119 dbr_schedule (get_insns ());
9120 mips_avoid_hazards ();
9121 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
9122 vr4130_align_insns ();
9126 /* This function does three things:
9128 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9129 - Register the mips16 hardware floating point stubs.
9130 - Register the gofast functions if selected using --enable-gofast. */
9132 #include "config/gofast.h"
9134 static void
9135 mips_init_libfuncs (void)
9137 if (TARGET_FIX_VR4120)
9139 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9140 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9143 if (TARGET_MIPS16 && mips16_hard_float)
9145 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9146 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9147 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9148 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9150 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9151 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9152 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9153 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9154 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9155 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9157 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9158 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9160 if (TARGET_DOUBLE_FLOAT)
9162 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9163 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9164 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9165 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9167 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9168 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9169 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9170 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9171 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9172 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9174 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9175 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9177 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9178 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9181 else
9182 gofast_maybe_init_libfuncs ();
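/* Net effect, as an example: compiling "a + b" on floats with -mips16
   and hard float produces a call to __mips16_addsf3 instead of an
   inline add.s, and with -mfix-vr4120 a 32-bit signed division becomes
   a call to __vr4120_divsi3.  */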
9185 /* Return a number assessing the cost of moving a register in class
9186 FROM to class TO. The classes are expressed using the enumeration
9187 values such as `GENERAL_REGS'. A value of 2 is the default; other
9188 values are interpreted relative to that.
9190 It is not required that the cost always equal 2 when FROM is the
9191 same as TO; on some machines it is expensive to move between
9192 registers if they are not general registers.
9194 If reload sees an insn consisting of a single `set' between two
9195 hard registers, and if `REGISTER_MOVE_COST' applied to their
9196 classes returns a value of 2, reload does not check to ensure that
9197 the constraints of the insn are met. Setting a cost of other than
9198 2 will allow reload to verify that the constraints are met. You
9199 should do this if the `movM' pattern's constraints do not allow
9200 such copying.
9202 ??? We make the cost of moving from HI/LO into general
9203 registers the same as for one of moving general registers to
9204 HI/LO for TARGET_MIPS16 in order to prevent allocating a
9205 pseudo to HI/LO. This might hurt optimizations, though; it
9206 isn't clear if it is wise. And it might not work in all cases. We
9207 could solve the DImode LO reg problem by using a multiply, just
9208 like reload_{in,out}si. We could solve the SImode/HImode HI reg
9209 problem by using divide instructions. divu puts the remainder in
9210 the HI reg, so doing a divide by -1 will move the value in the HI
9211 reg for all values except -1. We could handle that case by using a
9212 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
9213 a compare/branch to test the input value to see which instruction
9214 we need to use. This gets pretty messy, but it is feasible. */
9217 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9218 enum reg_class to, enum reg_class from)
9220 if (from == M16_REGS && GR_REG_CLASS_P (to))
9221 return 2;
9222 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9223 return 2;
9224 else if (GR_REG_CLASS_P (from))
9226 if (to == M16_REGS)
9227 return 2;
9228 else if (to == M16_NA_REGS)
9229 return 2;
9230 else if (GR_REG_CLASS_P (to))
9232 if (TARGET_MIPS16)
9233 return 4;
9234 else
9235 return 2;
9237 else if (to == FP_REGS)
9238 return 4;
9239 else if (reg_class_subset_p (to, ACC_REGS))
9241 if (TARGET_MIPS16)
9242 return 12;
9243 else
9244 return 6;
9246 else if (COP_REG_CLASS_P (to))
9248 return 5;
9251 else if (from == FP_REGS)
9253 if (GR_REG_CLASS_P (to))
9254 return 4;
9255 else if (to == FP_REGS)
9256 return 2;
9257 else if (to == ST_REGS)
9258 return 8;
9260 else if (reg_class_subset_p (from, ACC_REGS))
9262 if (GR_REG_CLASS_P (to))
9264 if (TARGET_MIPS16)
9265 return 12;
9266 else
9267 return 6;
9270 else if (from == ST_REGS && GR_REG_CLASS_P (to))
9271 return 4;
9272 else if (COP_REG_CLASS_P (from))
9274 return 5;
9277 /* Fall through.
9278 ??? What cases are these? Shouldn't we return 2 here? */
9280 return 12;
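/* Worked example of the costs above (illustrative): a move between two
   general registers costs 2 (4 under MIPS16), a move between a general
   register and an FP register costs 4, and a move between a general
   register and the HI/LO accumulators costs 6 (12 under MIPS16).  */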
9283 /* Return the length of INSN. LENGTH is the initial length computed by
9284 attributes in the machine-description file. */
9287 mips_adjust_insn_length (rtx insn, int length)
9289 /* An unconditional jump has an unfilled delay slot if it is not part
9290 of a sequence. A conditional jump normally has a delay slot, but
9291 does not on MIPS16. */
9292 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9293 length += 4;
9295 /* See how many nops might be needed to avoid hardware hazards. */
9296 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9297 switch (get_attr_hazard (insn))
9299 case HAZARD_NONE:
9300 break;
9302 case HAZARD_DELAY:
9303 length += 4;
9304 break;
9306 case HAZARD_HILO:
9307 length += 8;
9308 break;
9311 /* All MIPS16 instructions are a measly two bytes. */
9312 if (TARGET_MIPS16)
9313 length /= 2;
9315 return length;
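/* Illustrative example: a branch or call whose delay slot may need a
   nop gains 4 bytes here, and an instruction classified as HAZARD_HILO
   gains a further 8 bytes (two nops); the final halving reflects the
   fact that MIPS16 instructions are 2 bytes rather than the 4 assumed
   by the mips.md length attributes.  */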
9319 /* Return an asm sequence to start a noat block and load the address
9320 of a label into $1. */
9322 const char *
9323 mips_output_load_label (void)
9325 if (TARGET_EXPLICIT_RELOCS)
9326 switch (mips_abi)
9328 case ABI_N32:
9329 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9331 case ABI_64:
9332 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9334 default:
9335 if (ISA_HAS_LOAD_DELAY)
9336 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9337 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9339 else
9341 if (Pmode == DImode)
9342 return "%[dla\t%@,%0";
9343 else
9344 return "%[la\t%@,%0";
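/* Illustrative expansion of the templates above: for ABI_N32 with
   explicit relocs and a (hypothetical) label operand $L42, the
   returned string assembles to something along the lines of

	.set	noat
	lw	$1,%got_page($L42)($28)
	addiu	$1,$1,%got_ofst($L42)

   where "%[" opens the noat block, "%@" is the $1 temporary mentioned
   above and "%+" is the GOT pointer; the matching "%]" that closes the
   block is emitted by the caller, e.g. the "jr\t%@%]" sequence in
   mips_output_conditional_branch.  */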
9348 /* Return the assembly code for INSN, which has the operands given by
9349 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9350 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9351 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9352 version of BRANCH_IF_TRUE. */
9354 const char *
9355 mips_output_conditional_branch (rtx insn, rtx *operands,
9356 const char *branch_if_true,
9357 const char *branch_if_false)
9359 unsigned int length;
9360 rtx taken, not_taken;
9362 length = get_attr_length (insn);
9363 if (length <= 8)
9365 /* Just a simple conditional branch. */
9366 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9367 return branch_if_true;
9370 /* Generate a reversed branch around a direct jump. This fallback does
9371 not use branch-likely instructions. */
9372 mips_branch_likely = false;
9373 not_taken = gen_label_rtx ();
9374 taken = operands[1];
9376 /* Generate the reversed branch to NOT_TAKEN. */
9377 operands[1] = not_taken;
9378 output_asm_insn (branch_if_false, operands);
9380 /* If INSN has a delay slot, we must provide delay slots for both the
9381 branch to NOT_TAKEN and the conditional jump. We must also ensure
9382 that INSN's delay slot is executed in the appropriate cases. */
9383 if (final_sequence)
9385 /* This first delay slot will always be executed, so use INSN's
9386 delay slot if it is not annulled.  */
9387 if (!INSN_ANNULLED_BRANCH_P (insn))
9389 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9390 asm_out_file, optimize, 1, NULL);
9391 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9393 else
9394 output_asm_insn ("nop", 0);
9395 fprintf (asm_out_file, "\n");
9398 /* Output the unconditional branch to TAKEN. */
9399 if (length <= 16)
9400 output_asm_insn ("j\t%0%/", &taken);
9401 else
9403 output_asm_insn (mips_output_load_label (), &taken);
9404 output_asm_insn ("jr\t%@%]%/", 0);
9407 /* Now deal with its delay slot; see above. */
9408 if (final_sequence)
9410 /* This delay slot will only be executed if the branch is taken.
9411 Use INSN's delay slot if it is annulled.  */
9412 if (INSN_ANNULLED_BRANCH_P (insn))
9414 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9415 asm_out_file, optimize, 1, NULL);
9416 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9418 else
9419 output_asm_insn ("nop", 0);
9420 fprintf (asm_out_file, "\n");
9423 /* Output NOT_TAKEN. */
9424 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9425 CODE_LABEL_NUMBER (not_taken));
9426 return "";
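/* Illustrative sketch of the long-branch fallback above: an
   out-of-range "beq <a>,<b>,<target>" ends up as roughly

	bne	<a>,<b>,1f	# reversed branch to NOT_TAKEN
	nop			# or INSN's delay slot, if not annulled
	j	<target>	# or a label load plus "jr", if needed
	nop			# or INSN's delay slot, if annulled
   1:

   so control still reaches <target> under the original condition while
   only short-range branches and jumps are emitted.  */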
9429 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9430 if some ordered condition is true. The condition is given by
9431 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9432 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9433 its second is always zero. */
9435 const char *
9436 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9438 const char *branch[2];
9440 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9441 Make BRANCH[0] branch on the inverse condition. */
9442 switch (GET_CODE (operands[0]))
9444 /* These cases are equivalent to comparisons against zero. */
9445 case LEU:
9446 inverted_p = !inverted_p;
9447 /* Fall through. */
9448 case GTU:
9449 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9450 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9451 break;
9453 /* These cases are always true or always false. */
9454 case LTU:
9455 inverted_p = !inverted_p;
9456 /* Fall through. */
9457 case GEU:
9458 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9459 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9460 break;
9462 default:
9463 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9464 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9465 break;
9467 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
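/* For example (illustrative): a GTU comparison against zero becomes
   "bne <reg>,$0,<label>", its LEU inverse becomes "beq <reg>,$0,<label>",
   and a GEU comparison, which is always true, becomes the unconditional
   "beq $0,$0,<label>".  */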
9470 /* Used to output div or ddiv instruction DIVISION, which has the operands
9471 given by OPERANDS. Add in a divide-by-zero check if needed.
9473 When working around R4000 and R4400 errata, we need to make sure that
9474 the division is not immediately followed by a shift[1][2]. We also
9475 need to stop the division from being put into a branch delay slot[3].
9476 The easiest way to avoid both problems is to add a nop after the
9477 division. When a divide-by-zero check is needed, this nop can be
9478 used to fill the branch delay slot.
9480 [1] If a double-word or a variable shift executes immediately
9481 after starting an integer division, the shift may give an
9482 incorrect result. See quotations of errata #16 and #28 from
9483 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9484 in mips.md for details.
9486 [2] A similar bug to [1] exists for all revisions of the
9487 R4000 and the R4400 when run in an MC configuration.
9488 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9490 "19. In this following sequence:
9492 ddiv (or ddivu or div or divu)
9493 dsll32 (or dsrl32, dsra32)
9495 if an MPT stall occurs, while the divide is slipping the cpu
9496 pipeline, then the following double shift would end up with an
9497 incorrect result.
9499 Workaround: The compiler needs to avoid generating any
9500 sequence with divide followed by extended double shift."
9502 This erratum is also present in "MIPS R4400MC Errata, Processor
9503 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9504 & 3.0" as errata #10 and #4, respectively.
9506 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9507 (also valid for MIPS R4000MC processors):
9509 "52. R4000SC: This bug does not apply for the R4000PC.
9511 There are two flavors of this bug:
9513 1) If the instruction just after divide takes an RF exception
9514 (tlb-refill, tlb-invalid) and gets an instruction cache
9515 miss (both primary and secondary) and the line which is
9516 currently in secondary cache at this index had the first
9517 data word, where the bits 5..2 are set, then R4000 would
9518 get a wrong result for the div.
9522 div r8, r9
9523 ------------------- # end-of page. -tlb-refill
9527 div r8, r9
9528 ------------------- # end-of page. -tlb-invalid
9531 2) If the divide is in the taken branch delay slot, where the
9532 target takes RF exception and gets an I-cache miss for the
9533 exception vector or where I-cache miss occurs for the
9534 target address, under the above mentioned scenarios, the
9535 div would get wrong results.
9538 j r2 # to next page mapped or unmapped
9539 div r8,r9 # this bug would be there as long
9540 # as there is an ICache miss and
9541 nop # the "data pattern" is present
9544 beq r0, r0, NextPage # to Next page
9545 div r8,r9
9548 This bug is present for div, divu, ddiv, and ddivu
9549 instructions.
9551 Workaround: For item 1), OS could make sure that the next page
9552 after the divide instruction is also mapped. For item 2), the
9553 compiler could make sure that the divide instruction is not in
9554 the branch delay slot."
9556 These processors have PRId values of 0x00004220 and 0x00004300 for
9557 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9559 const char *
9560 mips_output_division (const char *division, rtx *operands)
9562 const char *s;
9564 s = division;
9565 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9567 output_asm_insn (s, operands);
9568 s = "nop";
9570 if (TARGET_CHECK_ZERO_DIV)
9572 if (TARGET_MIPS16)
9574 output_asm_insn (s, operands);
9575 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9577 else if (GENERATE_DIVIDE_TRAPS)
9579 output_asm_insn (s, operands);
9580 s = "teq\t%2,%.,7";
9582 else
9584 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9585 output_asm_insn (s, operands);
9586 s = "break\t7%)\n1:";
9589 return s;
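/* Illustrative example: with -mcheck-zero-division on a target that
   supports conditional traps, the emitted sequence is the division
   followed by "teq <divisor>,$0,7"; without trap support it is a
   "bne <divisor>,$0,1f ... break 7 ... 1:" wrapper instead, and
   -mfix-r4000/-mfix-r4400 add a nop after the division (reused as the
   branch delay slot when a zero check is emitted).  */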
9592 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9593 with a final "000" replaced by "k". Ignore case.
9595 Note: this function is shared between GCC and GAS. */
9597 static bool
9598 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9600 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9601 given++, canonical++;
9603 return ((*given == 0 && *canonical == 0)
9604 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9608 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9609 CPU name. We've traditionally allowed a lot of variation here.
9611 Note: this function is shared between GCC and GAS. */
9613 static bool
9614 mips_matching_cpu_name_p (const char *canonical, const char *given)
9616 /* First see if the name matches exactly, or with a final "000"
9617 turned into "k". */
9618 if (mips_strict_matching_cpu_name_p (canonical, given))
9619 return true;
9621 /* If not, try comparing based on numerical designation alone.
9622 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9623 if (TOLOWER (*given) == 'r')
9624 given++;
9625 if (!ISDIGIT (*given))
9626 return false;
9628 /* Skip over some well-known prefixes in the canonical name,
9629 hoping to find a number there too. */
9630 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9631 canonical += 2;
9632 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9633 canonical += 2;
9634 else if (TOLOWER (canonical[0]) == 'r')
9635 canonical += 1;
9637 return mips_strict_matching_cpu_name_p (canonical, given);
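/* For example (illustrative): "r4k", "4000" and "R4000" would all be
   accepted as matches for the canonical name "r4000" (the last with a
   lower-case warning from mips_parse_cpu), and "4300" would match a
   canonical "vr4300" once the "vr" prefix has been skipped.  */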
9641 /* Return the mips_cpu_info entry for the processor or ISA given
9642 by CPU_STRING. Return null if the string isn't recognized.
9644 A similar function exists in GAS. */
9646 static const struct mips_cpu_info *
9647 mips_parse_cpu (const char *cpu_string)
9649 const struct mips_cpu_info *p;
9650 const char *s;
9652 /* In the past, we allowed upper-case CPU names, but it doesn't
9653 work well with the multilib machinery. */
9654 for (s = cpu_string; *s != 0; s++)
9655 if (ISUPPER (*s))
9657 warning (0, "the cpu name must be lower case");
9658 break;
9661 /* 'from-abi' selects the most compatible architecture for the given
9662 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9663 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9664 version. Look first at the -mgp options, if given, otherwise base
9665 the choice on MASK_64BIT in TARGET_DEFAULT. */
9666 if (strcasecmp (cpu_string, "from-abi") == 0)
9667 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9668 : ABI_NEEDS_64BIT_REGS ? 3
9669 : (TARGET_64BIT ? 3 : 1));
9671 /* 'default' has traditionally been a no-op. Probably not very useful. */
9672 if (strcasecmp (cpu_string, "default") == 0)
9673 return 0;
9675 for (p = mips_cpu_info_table; p->name != 0; p++)
9676 if (mips_matching_cpu_name_p (p->name, cpu_string))
9677 return p;
9679 return 0;
9683 /* Return the processor associated with the given ISA level, or null
9684 if the ISA isn't valid. */
9686 static const struct mips_cpu_info *
9687 mips_cpu_info_from_isa (int isa)
9689 const struct mips_cpu_info *p;
9691 for (p = mips_cpu_info_table; p->name != 0; p++)
9692 if (p->isa == isa)
9693 return p;
9695 return 0;
9698 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9699 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9700 they only hold condition code modes, and CCmode is always considered to
9701 be 4 bytes wide. All other registers are word sized. */
9703 unsigned int
9704 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9706 if (ST_REG_P (regno))
9707 return ((GET_MODE_SIZE (mode) + 3) / 4);
9708 else if (! FP_REG_P (regno))
9709 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9710 else
9711 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
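/* For example (illustrative): a DFmode value needs two 32-bit FP
   registers but only one 64-bit FP register, a DImode value needs two
   general registers on a 32-bit target and one on a 64-bit target, and
   any CCmode value occupies a single status register.  */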
9714 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9715 all BLKmode objects are returned in memory. Under the new (N32 and
9716 64-bit MIPS ABIs) small structures are returned in a register.
9717 Objects with varying size must still be returned in memory, of
9718 course. */
9720 static bool
9721 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9723 if (TARGET_OLDABI)
9724 return (TYPE_MODE (type) == BLKmode);
9725 else
9726 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9727 || (int_size_in_bytes (type) == -1));
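/* For example (illustrative): under the N32/64 ABIs, where
   2 * UNITS_PER_WORD is 16 bytes, a 16-byte structure is returned in
   registers while a 24-byte structure, or one of variable size
   (int_size_in_bytes == -1), is returned in memory; under the old ABIs
   every BLKmode aggregate is returned in memory regardless of size.  */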
9730 static bool
9731 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9733 return !TARGET_OLDABI;
9736 /* Return true if INSN is a multiply-add or multiply-subtract
9737 instruction and PREV assigns to the accumulator operand. */
9739 bool
9740 mips_linked_madd_p (rtx prev, rtx insn)
9742 rtx x;
9744 x = single_set (insn);
9745 if (x == 0)
9746 return false;
9748 x = SET_SRC (x);
9750 if (GET_CODE (x) == PLUS
9751 && GET_CODE (XEXP (x, 0)) == MULT
9752 && reg_set_p (XEXP (x, 1), prev))
9753 return true;
9755 if (GET_CODE (x) == MINUS
9756 && GET_CODE (XEXP (x, 1)) == MULT
9757 && reg_set_p (XEXP (x, 0), prev))
9758 return true;
9760 return false;
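/* Illustrative RTL shapes accepted above: INSN may be
   (set (reg A) (plus (mult (reg X) (reg Y)) (reg ACC))) or
   (set (reg A) (minus (reg ACC) (mult (reg X) (reg Y)))), and in either
   case PREV must set the accumulator operand ACC.  */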
9763 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9764 that may clobber hi or lo. */
9766 static rtx mips_macc_chains_last_hilo;
9768 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9769 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9771 static void
9772 mips_macc_chains_record (rtx insn)
9774 if (get_attr_may_clobber_hilo (insn))
9775 mips_macc_chains_last_hilo = insn;
9778 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9779 has NREADY elements, looking for a multiply-add or multiply-subtract
9780 instruction that is cumulative with mips_macc_chains_last_hilo.
9781 If there is one, promote it ahead of anything else that might
9782 clobber hi or lo. */
9784 static void
9785 mips_macc_chains_reorder (rtx *ready, int nready)
9787 int i, j;
9789 if (mips_macc_chains_last_hilo != 0)
9790 for (i = nready - 1; i >= 0; i--)
9791 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9793 for (j = nready - 1; j > i; j--)
9794 if (recog_memoized (ready[j]) >= 0
9795 && get_attr_may_clobber_hilo (ready[j]))
9797 mips_promote_ready (ready, i, j);
9798 break;
9800 break;
9804 /* The last instruction to be scheduled. */
9806 static rtx vr4130_last_insn;
9808 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9809 points to an rtx that is initially an instruction. Nullify the rtx
9810 if the instruction uses the value of register X. */
9812 static void
9813 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9815 rtx *insn_ptr = data;
9816 if (REG_P (x)
9817 && *insn_ptr != 0
9818 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9819 *insn_ptr = 0;
9822 /* Return true if there is true register dependence between vr4130_last_insn
9823 and INSN. */
9825 static bool
9826 vr4130_true_reg_dependence_p (rtx insn)
9828 note_stores (PATTERN (vr4130_last_insn),
9829 vr4130_true_reg_dependence_p_1, &insn);
9830 return insn == 0;
9833 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9834 the ready queue and that INSN2 is the instruction after it, return
9835 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9836 in which INSN1 and INSN2 can probably issue in parallel, but for
9837 which (INSN2, INSN1) should be less sensitive to instruction
9838 alignment than (INSN1, INSN2). See 4130.md for more details. */
9840 static bool
9841 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9843 rtx dep;
9845 /* Check for the following case:
9847 1) there is some other instruction X with an anti dependence on INSN1;
9848 2) X has a higher priority than INSN2; and
9849 3) X is an arithmetic instruction (and thus has no unit restrictions).
9851 If INSN1 is the last instruction blocking X, it would be better to
9852 choose (INSN1, X) over (INSN2, INSN1). */
9853 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9854 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9855 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9856 && recog_memoized (XEXP (dep, 0)) >= 0
9857 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9858 return false;
9860 if (vr4130_last_insn != 0
9861 && recog_memoized (insn1) >= 0
9862 && recog_memoized (insn2) >= 0)
9864 /* See whether INSN1 and INSN2 use different execution units,
9865 or if they are both ALU-type instructions. If so, they can
9866 probably execute in parallel. */
9867 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9868 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9869 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9871 /* If only one of the instructions has a dependence on
9872 vr4130_last_insn, prefer to schedule the other one first. */
9873 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9874 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9875 if (dep1 != dep2)
9876 return dep1;
9878 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9879 is not an ALU-type instruction and if INSN1 uses the same
9880 execution unit. (Note that if this condition holds, we already
9881 know that INSN2 uses a different execution unit.) */
9882 if (class1 != VR4130_CLASS_ALU
9883 && recog_memoized (vr4130_last_insn) >= 0
9884 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9885 return true;
9888 return false;
9891 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9892 queue with at least two instructions. Swap the first two if
9893 vr4130_swap_insns_p says that it could be worthwhile. */
9895 static void
9896 vr4130_reorder (rtx *ready, int nready)
9898 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9899 mips_promote_ready (ready, nready - 2, nready - 1);
9902 /* Remove the instruction at index LOWER from ready queue READY and
9903 reinsert it in front of the instruction at index HIGHER. LOWER must
9904 be <= HIGHER. */
9906 static void
9907 mips_promote_ready (rtx *ready, int lower, int higher)
9909 rtx new_head;
9910 int i;
9912 new_head = ready[lower];
9913 for (i = lower; i < higher; i++)
9914 ready[i] = ready[i + 1];
9915 ready[i] = new_head;
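/* For example (illustrative): with READY = {I0, I1, I2, I3},
   mips_promote_ready (ready, 1, 3) yields {I0, I2, I3, I1}; since the
   scheduler issues from the high end of the queue (see vr4130_reorder,
   which treats ready[nready - 1] as the head), the promoted instruction
   is now considered first.  */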
9918 /* Implement TARGET_SCHED_REORDER. */
9920 static int
9921 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9922 rtx *ready, int *nreadyp, int cycle)
9924 if (!reload_completed && TUNE_MACC_CHAINS)
9926 if (cycle == 0)
9927 mips_macc_chains_last_hilo = 0;
9928 if (*nreadyp > 0)
9929 mips_macc_chains_reorder (ready, *nreadyp);
9931 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9933 if (cycle == 0)
9934 vr4130_last_insn = 0;
9935 if (*nreadyp > 1)
9936 vr4130_reorder (ready, *nreadyp);
9938 return mips_issue_rate ();
9941 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9943 static int
9944 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9945 rtx insn, int more)
9947 switch (GET_CODE (PATTERN (insn)))
9949 case USE:
9950 case CLOBBER:
9951 /* Don't count USEs and CLOBBERs against the issue rate. */
9952 break;
9954 default:
9955 more--;
9956 if (!reload_completed && TUNE_MACC_CHAINS)
9957 mips_macc_chains_record (insn);
9958 vr4130_last_insn = insn;
9959 break;
9961 return more;
9964 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9965 dependencies have no cost. */
9967 static int
9968 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9969 rtx dep ATTRIBUTE_UNUSED, int cost)
9971 if (REG_NOTE_KIND (link) != 0)
9972 return 0;
9973 return cost;
9976 /* Return the number of instructions that can be issued per cycle. */
9978 static int
9979 mips_issue_rate (void)
9981 switch (mips_tune)
9983 case PROCESSOR_R4130:
9984 case PROCESSOR_R5400:
9985 case PROCESSOR_R5500:
9986 case PROCESSOR_R7000:
9987 case PROCESSOR_R9000:
9988 return 2;
9990 case PROCESSOR_SB1:
9991 case PROCESSOR_SB1A:
9992 /* This is actually 4, but we get better performance if we claim 3.
9993 This is partly because of unwanted speculative code motion with the
9994 larger number, and partly because in most common cases we can't
9995 reach the theoretical max of 4. */
9996 return 3;
9998 default:
9999 return 1;
10003 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10004 be as wide as the scheduling freedom in the DFA. */
10006 static int
10007 mips_multipass_dfa_lookahead (void)
10009 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10010 if (TUNE_SB1)
10011 return 4;
10013 return 0;
10016 /* Implements a store data bypass check. We need this because the cprestore
10017 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
10018 default routine to abort. We just return false for that case. */
10019 /* ??? Should try to give a better result here than assuming false. */
10022 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10024 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10025 return false;
10027 return ! store_data_bypass_p (out_insn, in_insn);
10030 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10031 return the first operand of the associated "pref" or "prefx" insn. */
10034 mips_prefetch_cookie (rtx write, rtx locality)
10036 /* store_streamed / load_streamed. */
10037 if (INTVAL (locality) <= 0)
10038 return GEN_INT (INTVAL (write) + 4);
10040 /* store / load. */
10041 if (INTVAL (locality) <= 2)
10042 return write;
10044 /* store_retained / load_retained. */
10045 return GEN_INT (INTVAL (write) + 6);
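/* Illustrative mapping (the values follow the "pref" hint encoding):
   (write 0, locality 0) yields 4 (load_streamed), (write 1, locality 2)
   returns the WRITE operand itself, i.e. 1 (store), and (write 0,
   locality 3) yields 6 (load_retained).  */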
10048 /* MIPS builtin function support. */
10050 struct builtin_description
10052 /* The code of the main .md file instruction. See mips_builtin_type
10053 for more information. */
10054 enum insn_code icode;
10056 /* The floating-point comparison code to use with ICODE, if any. */
10057 enum mips_fp_condition cond;
10059 /* The name of the builtin function. */
10060 const char *name;
10062 /* Specifies how the function should be expanded. */
10063 enum mips_builtin_type builtin_type;
10065 /* The function's prototype. */
10066 enum mips_function_type function_type;
10068 /* The target flags required for this function. */
10069 int target_flags;
10072 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
10073 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
10074 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10075 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10076 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
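/* For example, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   MASK_PAIRED_SINGLE_FLOAT) expands to the initializer

   { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
     MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
     MASK_PAIRED_SINGLE_FLOAT }

   describing a builtin that maps directly onto the mips_pll_ps pattern
   in mips.md.  */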
10078 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10079 TARGET_FLAGS. */
10080 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
10081 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
10082 "__builtin_mips_" #INSN "_" #COND "_s", \
10083 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
10084 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
10085 "__builtin_mips_" #INSN "_" #COND "_d", \
10086 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10088 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10089 The lower and upper forms require TARGET_FLAGS while the any and all
10090 forms require MASK_MIPS3D. */
10091 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
10092 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10093 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
10094 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10095 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10096 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
10097 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10098 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10099 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
10100 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
10101 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10102 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
10103 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10105 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10106 require MASK_MIPS3D. */
10107 #define CMP_4S_BUILTINS(INSN, COND) \
10108 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10109 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
10110 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10111 MASK_MIPS3D }, \
10112 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10113 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
10114 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10115 MASK_MIPS3D }
10117 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10118 instruction requires TARGET_FLAGS. */
10119 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
10120 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10121 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
10122 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10123 TARGET_FLAGS }, \
10124 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10125 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
10126 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10127 TARGET_FLAGS }
10129 /* Define all the builtins related to c.cond.fmt condition COND. */
10130 #define CMP_BUILTINS(COND) \
10131 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10132 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
10133 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
10134 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10135 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10136 CMP_4S_BUILTINS (c, COND), \
10137 CMP_4S_BUILTINS (cabs, COND)
10139 static const struct builtin_description mips_bdesc[] =
10141 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10142 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10143 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10144 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10145 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10146 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10147 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10148 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10150 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10151 MASK_PAIRED_SINGLE_FLOAT),
10152 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10153 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10154 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10155 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10157 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10158 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10159 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10160 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10161 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10162 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10164 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10165 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10166 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10167 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10168 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10169 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10171 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10174 /* Builtin functions for the SB-1 processor. */
10176 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10178 static const struct builtin_description sb1_bdesc[] =
10180 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10183 /* Builtin functions for DSP ASE. */
10185 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10186 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10187 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10188 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10190 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10191 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10192 builtin_description fields. */
10193 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10194 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10195 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10197 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10198 branch instruction. TARGET_FLAGS is a builtin_description field. */
10199 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10200 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10201 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10203 static const struct builtin_description dsp_bdesc[] =
10205 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10206 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10207 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10208 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10209 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10210 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10211 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10212 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10213 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10214 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10215 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10216 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10217 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10218 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10219 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10220 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10221 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10222 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10223 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10224 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10225 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10226 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10227 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10228 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10229 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10230 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10231 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10232 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10233 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10234 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10235 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10236 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10237 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10238 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10239 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10240 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10241 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10242 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10243 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10244 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10245 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10246 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10247 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10248 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10249 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10250 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10251 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10252 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10253 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10254 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10255 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10256 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10257 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10258 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10259 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10260 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10261 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10262 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10263 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10264 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10265 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10266 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10267 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10268 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10269 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10270 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10271 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10272 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10273 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10274 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10275 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10276 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10277 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10278 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10279 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10280 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10281 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10282 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10283 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10284 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10285 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10286 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10287 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10288 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10289 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10290 BPOSGE_BUILTIN (32, MASK_DSP)
10293 /* This helps provide a mapping from builtin function codes to bdesc
10294 arrays. */
10296 struct bdesc_map
10298 /* The builtin function table that this entry describes. */
10299 const struct builtin_description *bdesc;
10301 /* The number of entries in the builtin function table. */
10302 unsigned int size;
10304 /* The target processor that supports these builtin functions.
10305 PROCESSOR_MAX means we enable them for all processors. */
10306 enum processor_type proc;
10309 static const struct bdesc_map bdesc_arrays[] =
10311 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
10312 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
10313 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
10316 /* Take the head of argument list *ARGLIST and convert it into a form
10317 suitable for input operand OP of instruction ICODE. Return the value
10318 and point *ARGLIST at the next element of the list. */
10320 static rtx
10321 mips_prepare_builtin_arg (enum insn_code icode,
10322 unsigned int op, tree *arglist)
10324 rtx value;
10325 enum machine_mode mode;
10327 value = expand_normal (TREE_VALUE (*arglist));
10328 mode = insn_data[icode].operand[op].mode;
10329 if (!insn_data[icode].operand[op].predicate (value, mode))
10331 value = copy_to_mode_reg (mode, value);
10332 /* Check the predicate again. */
10333 if (!insn_data[icode].operand[op].predicate (value, mode))
10335 error ("invalid argument to builtin function");
10336 return const0_rtx;
10340 *arglist = TREE_CHAIN (*arglist);
10341 return value;
10344 /* Return an rtx suitable for output operand OP of instruction ICODE.
10345 If TARGET is non-null, try to use it where possible. */
10347 static rtx
10348 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10350 enum machine_mode mode;
10352 mode = insn_data[icode].operand[op].mode;
10353 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10354 target = gen_reg_rtx (mode);
10356 return target;
10359 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10362 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10363 enum machine_mode mode ATTRIBUTE_UNUSED,
10364 int ignore ATTRIBUTE_UNUSED)
10366 enum insn_code icode;
10367 enum mips_builtin_type type;
10368 tree fndecl, arglist;
10369 unsigned int fcode;
10370 const struct builtin_description *bdesc;
10371 const struct bdesc_map *m;
10373 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
10374 arglist = TREE_OPERAND (exp, 1);
10375 fcode = DECL_FUNCTION_CODE (fndecl);
10377 bdesc = NULL;
10378 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10380 if (fcode < m->size)
10382 bdesc = m->bdesc;
10383 icode = bdesc[fcode].icode;
10384 type = bdesc[fcode].builtin_type;
10385 break;
10387 fcode -= m->size;
10389 if (bdesc == NULL)
10390 return 0;
10392 switch (type)
10394 case MIPS_BUILTIN_DIRECT:
10395 return mips_expand_builtin_direct (icode, target, arglist, true);
10397 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10398 return mips_expand_builtin_direct (icode, target, arglist, false);
10400 case MIPS_BUILTIN_MOVT:
10401 case MIPS_BUILTIN_MOVF:
10402 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10403 target, arglist);
10405 case MIPS_BUILTIN_CMP_ANY:
10406 case MIPS_BUILTIN_CMP_ALL:
10407 case MIPS_BUILTIN_CMP_UPPER:
10408 case MIPS_BUILTIN_CMP_LOWER:
10409 case MIPS_BUILTIN_CMP_SINGLE:
10410 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10411 target, arglist);
10413 case MIPS_BUILTIN_BPOSGE32:
10414 return mips_expand_builtin_bposge (type, target);
10416 default:
10417 return 0;
10421 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10423 void
10424 mips_init_builtins (void)
10426 const struct builtin_description *d;
10427 const struct bdesc_map *m;
10428 tree types[(int) MIPS_MAX_FTYPE_MAX];
10429 tree V2SF_type_node;
10430 tree V2HI_type_node;
10431 tree V4QI_type_node;
10432 unsigned int offset;
10434 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
10435 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10436 return;
10438 if (TARGET_PAIRED_SINGLE_FLOAT)
10440 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10442 types[MIPS_V2SF_FTYPE_V2SF]
10443 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10445 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10446 = build_function_type_list (V2SF_type_node,
10447 V2SF_type_node, V2SF_type_node, NULL_TREE);
10449 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10450 = build_function_type_list (V2SF_type_node,
10451 V2SF_type_node, V2SF_type_node,
10452 integer_type_node, NULL_TREE);
10454 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10455 = build_function_type_list (V2SF_type_node,
10456 V2SF_type_node, V2SF_type_node,
10457 V2SF_type_node, V2SF_type_node, NULL_TREE);
10459 types[MIPS_V2SF_FTYPE_SF_SF]
10460 = build_function_type_list (V2SF_type_node,
10461 float_type_node, float_type_node, NULL_TREE);
10463 types[MIPS_INT_FTYPE_V2SF_V2SF]
10464 = build_function_type_list (integer_type_node,
10465 V2SF_type_node, V2SF_type_node, NULL_TREE);
10467 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10468 = build_function_type_list (integer_type_node,
10469 V2SF_type_node, V2SF_type_node,
10470 V2SF_type_node, V2SF_type_node, NULL_TREE);
10472 types[MIPS_INT_FTYPE_SF_SF]
10473 = build_function_type_list (integer_type_node,
10474 float_type_node, float_type_node, NULL_TREE);
10476 types[MIPS_INT_FTYPE_DF_DF]
10477 = build_function_type_list (integer_type_node,
10478 double_type_node, double_type_node, NULL_TREE);
10480 types[MIPS_SF_FTYPE_V2SF]
10481 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10483 types[MIPS_SF_FTYPE_SF]
10484 = build_function_type_list (float_type_node,
10485 float_type_node, NULL_TREE);
10487 types[MIPS_SF_FTYPE_SF_SF]
10488 = build_function_type_list (float_type_node,
10489 float_type_node, float_type_node, NULL_TREE);
10491 types[MIPS_DF_FTYPE_DF]
10492 = build_function_type_list (double_type_node,
10493 double_type_node, NULL_TREE);
10495 types[MIPS_DF_FTYPE_DF_DF]
10496 = build_function_type_list (double_type_node,
10497 double_type_node, double_type_node, NULL_TREE);
10500 if (TARGET_DSP)
10502 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10503 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10505 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10506 = build_function_type_list (V2HI_type_node,
10507 V2HI_type_node, V2HI_type_node,
10508 NULL_TREE);
10510 types[MIPS_SI_FTYPE_SI_SI]
10511 = build_function_type_list (intSI_type_node,
10512 intSI_type_node, intSI_type_node,
10513 NULL_TREE);
10515 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10516 = build_function_type_list (V4QI_type_node,
10517 V4QI_type_node, V4QI_type_node,
10518 NULL_TREE);
10520 types[MIPS_SI_FTYPE_V4QI]
10521 = build_function_type_list (intSI_type_node,
10522 V4QI_type_node,
10523 NULL_TREE);
10525 types[MIPS_V2HI_FTYPE_V2HI]
10526 = build_function_type_list (V2HI_type_node,
10527 V2HI_type_node,
10528 NULL_TREE);
10530 types[MIPS_SI_FTYPE_SI]
10531 = build_function_type_list (intSI_type_node,
10532 intSI_type_node,
10533 NULL_TREE);
10535 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10536 = build_function_type_list (V4QI_type_node,
10537 V2HI_type_node, V2HI_type_node,
10538 NULL_TREE);
10540 types[MIPS_V2HI_FTYPE_SI_SI]
10541 = build_function_type_list (V2HI_type_node,
10542 intSI_type_node, intSI_type_node,
10543 NULL_TREE);
10545 types[MIPS_SI_FTYPE_V2HI]
10546 = build_function_type_list (intSI_type_node,
10547 V2HI_type_node,
10548 NULL_TREE);
10550 types[MIPS_V2HI_FTYPE_V4QI]
10551 = build_function_type_list (V2HI_type_node,
10552 V4QI_type_node,
10553 NULL_TREE);
10555 types[MIPS_V4QI_FTYPE_V4QI_SI]
10556 = build_function_type_list (V4QI_type_node,
10557 V4QI_type_node, intSI_type_node,
10558 NULL_TREE);
10560 types[MIPS_V2HI_FTYPE_V2HI_SI]
10561 = build_function_type_list (V2HI_type_node,
10562 V2HI_type_node, intSI_type_node,
10563 NULL_TREE);
10565 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10566 = build_function_type_list (V2HI_type_node,
10567 V4QI_type_node, V2HI_type_node,
10568 NULL_TREE);
10570 types[MIPS_SI_FTYPE_V2HI_V2HI]
10571 = build_function_type_list (intSI_type_node,
10572 V2HI_type_node, V2HI_type_node,
10573 NULL_TREE);
10575 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10576 = build_function_type_list (intDI_type_node,
10577 intDI_type_node, V4QI_type_node, V4QI_type_node,
10578 NULL_TREE);
10580 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10581 = build_function_type_list (intDI_type_node,
10582 intDI_type_node, V2HI_type_node, V2HI_type_node,
10583 NULL_TREE);
10585 types[MIPS_DI_FTYPE_DI_SI_SI]
10586 = build_function_type_list (intDI_type_node,
10587 intDI_type_node, intSI_type_node, intSI_type_node,
10588 NULL_TREE);
10590 types[MIPS_V4QI_FTYPE_SI]
10591 = build_function_type_list (V4QI_type_node,
10592 intSI_type_node,
10593 NULL_TREE);
10595 types[MIPS_V2HI_FTYPE_SI]
10596 = build_function_type_list (V2HI_type_node,
10597 intSI_type_node,
10598 NULL_TREE);
10600 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10601 = build_function_type_list (void_type_node,
10602 V4QI_type_node, V4QI_type_node,
10603 NULL_TREE);
10605 types[MIPS_SI_FTYPE_V4QI_V4QI]
10606 = build_function_type_list (intSI_type_node,
10607 V4QI_type_node, V4QI_type_node,
10608 NULL_TREE);
10610 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10611 = build_function_type_list (void_type_node,
10612 V2HI_type_node, V2HI_type_node,
10613 NULL_TREE);
10615 types[MIPS_SI_FTYPE_DI_SI]
10616 = build_function_type_list (intSI_type_node,
10617 intDI_type_node, intSI_type_node,
10618 NULL_TREE);
10620 types[MIPS_DI_FTYPE_DI_SI]
10621 = build_function_type_list (intDI_type_node,
10622 intDI_type_node, intSI_type_node,
10623 NULL_TREE);
10625 types[MIPS_VOID_FTYPE_SI_SI]
10626 = build_function_type_list (void_type_node,
10627 intSI_type_node, intSI_type_node,
10628 NULL_TREE);
10630 types[MIPS_SI_FTYPE_PTR_SI]
10631 = build_function_type_list (intSI_type_node,
10632 ptr_type_node, intSI_type_node,
10633 NULL_TREE);
10635 types[MIPS_SI_FTYPE_VOID]
10636 = build_function_type (intSI_type_node, void_list_node);
10639 /* Iterate through all of the bdesc arrays, initializing all of the
10640 builtin functions. */
10642 offset = 0;
10643 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10645 if (m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10646 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10647 if ((d->target_flags & target_flags) == d->target_flags)
10648 add_builtin_function (d->name, types[d->function_type],
10649 d - m->bdesc + offset,
10650 BUILT_IN_MD, NULL, NULL);
10651 offset += m->size;
10655 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10656 .md pattern and ARGLIST is the list of function arguments. TARGET,
10657 if nonnull, suggests a good place to put the result.
10658 HAS_TARGET indicates the function must return something. */
10660 static rtx
10661 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
10662 bool has_target)
10664 rtx ops[MAX_RECOG_OPERANDS];
10665 int i = 0;
10667 if (has_target)
10669 /* We save target to ops[0]. */
10670 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10671 i = 1;
10674 /* We need to check that ARGLIST is nonzero, because some instructions
10675 have extra clobber registers and thus more operands than arguments.  */
10676 for (; i < insn_data[icode].n_operands && arglist != 0; i++)
10677 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10679 switch (i)
10681 case 2:
10682 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10683 break;
10685 case 3:
10686 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10687 break;
10689 case 4:
10690 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10691 break;
10693 default:
10694 gcc_unreachable ();
10696 return target;
10699 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10700 function (TYPE says which). ARGLIST is the list of arguments to the
10701 function, ICODE is the instruction that should be used to compare
10702 the first two arguments, and COND is the condition it should test.
10703 TARGET, if nonnull, suggests a good place to put the result. */
10705 static rtx
10706 mips_expand_builtin_movtf (enum mips_builtin_type type,
10707 enum insn_code icode, enum mips_fp_condition cond,
10708 rtx target, tree arglist)
10710 rtx cmp_result, op0, op1;
10712 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10713 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10714 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10715 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10717 icode = CODE_FOR_mips_cond_move_tf_ps;
10718 target = mips_prepare_builtin_target (icode, 0, target);
10719 if (type == MIPS_BUILTIN_MOVT)
10721 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10722 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10724 else
10726 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10727 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10729 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10730 return target;
10733 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10734 into TARGET otherwise. Return TARGET. */
10736 static rtx
10737 mips_builtin_branch_and_move (rtx condition, rtx target,
10738 rtx value_if_true, rtx value_if_false)
10740 rtx true_label, done_label;
10742 true_label = gen_label_rtx ();
10743 done_label = gen_label_rtx ();
10745 /* First assume that CONDITION is false. */
10746 emit_move_insn (target, value_if_false);
10748 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10749 emit_jump_insn (gen_condjump (condition, true_label));
10750 emit_jump_insn (gen_jump (done_label));
10751 emit_barrier ();
10753 /* Fix TARGET if CONDITION is true. */
10754 emit_label (true_label);
10755 emit_move_insn (target, value_if_true);
10757 emit_label (done_label);
10758 return target;
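/* Sketch of the structure emitted above (illustrative):

	target = VALUE_IF_FALSE;
	if (CONDITION) goto true_label;
	goto done_label;
      true_label:
	target = VALUE_IF_TRUE;
      done_label:

   so TARGET ends up holding whichever value matches CONDITION.  */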
10761 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10762 of the comparison instruction and COND is the condition it should test.
10763 ARGLIST is the list of function arguments and TARGET, if nonnull,
10764 suggests a good place to put the boolean result. */
10766 static rtx
10767 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10768 enum insn_code icode, enum mips_fp_condition cond,
10769 rtx target, tree arglist)
10771 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10772 int i;
10774 if (target == 0 || GET_MODE (target) != SImode)
10775 target = gen_reg_rtx (SImode);
10777 /* Prepare the operands to the comparison. */
10778 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10779 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10780 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10782 switch (insn_data[icode].n_operands)
10784 case 4:
10785 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10786 break;
10788 case 6:
10789 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10790 ops[3], ops[4], GEN_INT (cond)));
10791 break;
10793 default:
10794 gcc_unreachable ();
10797 /* If the comparison sets more than one register, we define the result
10798 to be 0 if all registers are false and -1 if all registers are true.
10799 The value of the complete result is indeterminate otherwise. */
10800 switch (builtin_type)
10802 case MIPS_BUILTIN_CMP_ALL:
10803 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10804 return mips_builtin_branch_and_move (condition, target,
10805 const0_rtx, const1_rtx);
10807 case MIPS_BUILTIN_CMP_UPPER:
10808 case MIPS_BUILTIN_CMP_LOWER:
10809 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10810 condition = gen_single_cc (cmp_result, offset);
10811 return mips_builtin_branch_and_move (condition, target,
10812 const1_rtx, const0_rtx);
10814 default:
10815 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10816 return mips_builtin_branch_and_move (condition, target,
10817 const1_rtx, const0_rtx);
10821 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10822 suggests a good place to put the boolean result. */
10824 static rtx
10825 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10827 rtx condition, cmp_result;
10828 int cmp_value;
10830 if (target == 0 || GET_MODE (target) != SImode)
10831 target = gen_reg_rtx (SImode);
10833 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10835 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10836 cmp_value = 32;
10837 else
10838 gcc_assert (0);
10840 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10841 return mips_builtin_branch_and_move (condition, target,
10842 const1_rtx, const0_rtx);
10845 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10846 FIRST is true if this is the first time handling this decl. */
10848 static void
10849 mips_encode_section_info (tree decl, rtx rtl, int first)
10851 default_encode_section_info (decl, rtl, first);
10853 if (TREE_CODE (decl) == FUNCTION_DECL
10854 && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10856 rtx symbol = XEXP (rtl, 0);
10857 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10861 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. PIC_FUNCTION_ADDR_REGNUM is live
10862 on entry to a function when generating -mshared abicalls code. */
10864 static void
10865 mips_extra_live_on_entry (bitmap regs)
10867 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
10868 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10871 /* SImode values are represented as sign-extended to DImode. */
10874 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10876 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10877 return SIGN_EXTEND;
10879 return UNKNOWN;
10882 #include "gt-mips.h"