/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)                                     \
  (GET_CODE (X) == UNSPEC                                       \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST                       \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
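
/* For example (illustrative only), a wrapper of the form

       (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)

   marks "foo" as a GP-relative reference: UNSPEC_ADDRESS extracts the
   SYMBOL_REF and UNSPEC_ADDRESS_TYPE recovers SYMBOL_GP_RELATIVE.  */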
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP                       \
  (!TARGET_MIPS16 ? 0x7ff0                              \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8              \
   : TARGET_64BIT ? 0x100 : 0x400)
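
/* Example (illustrative): in normal mode, a function with a 0x9000-byte
   frame does not drop $sp all the way down before saving registers; the
   first $sp adjustment is limited to 0x7ff0 bytes so that every
   save/restore offset is still a SMALL_OPERAND, and the remaining
   adjustment is made separately.  */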
/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN)                                     \
  (INSN_P (INSN)                                                \
   && GET_CODE (PATTERN (INSN)) != USE                          \
   && GET_CODE (PATTERN (INSN)) != CLOBBER                      \
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC                     \
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)                                         \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE       \
   ? XVECEXP (PATTERN (INSN), 0, 0)                             \
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)                                                   \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE               \
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)       \
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)                         \
  for ((SUBINSN) = SEQ_BEGIN (INSN);                            \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));                 \
       (SUBINSN) = NEXT_INSN (SUBINSN))
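
/* Typical use (sketch, not taken verbatim from this file): walk every
   instruction, including those hidden inside delay-slot SEQUENCEs:

       for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
         FOR_EACH_SUBINSN (subinsn, insn)
           if (USEFUL_INSN_P (subinsn))
             ...;
*/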
/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
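
/* For example, BITSET_P (0x31, 4) is true (0x31 is 110001 in binary,
   so bit 4 is set), while BITSET_P (0x31, 3) is false.  */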
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address (equivalent to CONSTANT_SYMBOLIC).  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
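
/* Illustrative examples: (plus (reg $sp) (const_int 16)) classifies as
   ADDRESS_REG, and (lo_sum (reg R) (symbol_ref "foo")) classifies as
   ADDRESS_LO_SUM when R is a valid base register and "foo"'s symbol type
   has a %lo relocation; see mips_classify_address below.  */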
/* Classifies the prototype of a builtin function.  */
enum mips_function_type
{
  MIPS_V2SF_FTYPE_V2SF,
  MIPS_V2SF_FTYPE_V2SF_V2SF,
  MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
  MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
  MIPS_V2SF_FTYPE_SF_SF,
  MIPS_INT_FTYPE_V2SF_V2SF,
  MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
  MIPS_INT_FTYPE_SF_SF,
  MIPS_INT_FTYPE_DF_DF,

  /* For MIPS DSP ASE */
  MIPS_DI_FTYPE_DI_SI_SI,
  MIPS_DI_FTYPE_DI_V2HI_V2HI,
  MIPS_DI_FTYPE_DI_V4QI_V4QI,
  MIPS_SI_FTYPE_PTR_SI,
  MIPS_SI_FTYPE_V2HI_V2HI,
  MIPS_SI_FTYPE_V4QI_V4QI,
  MIPS_V2HI_FTYPE_SI_SI,
  MIPS_V2HI_FTYPE_V2HI,
  MIPS_V2HI_FTYPE_V2HI_SI,
  MIPS_V2HI_FTYPE_V2HI_V2HI,
  MIPS_V2HI_FTYPE_V4QI,
  MIPS_V2HI_FTYPE_V4QI_V2HI,
  MIPS_V4QI_FTYPE_V2HI_V2HI,
  MIPS_V4QI_FTYPE_V4QI_SI,
  MIPS_V4QI_FTYPE_V4QI_V4QI,
  MIPS_VOID_FTYPE_SI_SI,
  MIPS_VOID_FTYPE_V2HI_V2HI,
  MIPS_VOID_FTYPE_V4QI_V4QI,

  /* For MIPS DSP REV 2 ASE.  */
  MIPS_V4QI_FTYPE_V4QI,
  MIPS_SI_FTYPE_SI_SI_SI,
  MIPS_DI_FTYPE_DI_USI_USI,
  MIPS_DI_FTYPE_USI_USI,
  MIPS_V2HI_FTYPE_SI_SI_SI
};
/* Specifies how a builtin function should be converted into rtl.  */
enum mips_builtin_type
{
  /* The builtin corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The builtin corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The builtin corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */

  /* The builtin corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};
/* Invokes MACRO (COND) for each c.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition
{
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);

struct mips16_constant;
struct mips_arg_info;
struct mips_address_info;
struct mips_integer_op;
static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
static bool mips_classify_address (struct mips_address_info *, rtx,
                                   enum machine_mode, int);
static bool mips_cannot_force_const_mem (rtx);
static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
static rtx mips_force_temporary (rtx, rtx);
static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
static unsigned int mips_build_lower (struct mips_integer_op *,
                                      unsigned HOST_WIDE_INT);
static unsigned int mips_build_integer (struct mips_integer_op *,
                                        unsigned HOST_WIDE_INT);
static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
static int m16_check_op (rtx, int, int, int);
static bool mips_rtx_costs (rtx, int, int, int *);
static int mips_address_cost (rtx);
static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
static void mips_load_call_address (rtx, rtx, int);
static bool mips_function_ok_for_sibcall (tree, tree);
static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
                           tree, int, struct mips_arg_info *);
static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
static void mips_set_architecture (const struct mips_cpu_info *);
static void mips_set_tune (const struct mips_cpu_info *);
static bool mips_handle_option (size_t, const char *, int);
static struct machine_function *mips_init_machine_status (void);
static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
                                 const char **);
static void mips_file_start (void);
static int mips_small_data_pattern_1 (rtx *, void *);
static int mips_rewrite_small_data_1 (rtx *, void *);
static bool mips_function_has_gp_insn (void);
static unsigned int mips_global_pointer (void);
static bool mips_save_reg_p (unsigned int);
static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
                                   mips_save_restore_fn);
static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
static void mips_output_cplocal (void);
static void mips_emit_loadgp (void);
static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
static void mips_set_frame_expr (rtx);
static rtx mips_frame_set (rtx, rtx);
static void mips_save_reg (rtx, rtx);
static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void mips_restore_reg (rtx, rtx);
static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                  HOST_WIDE_INT, tree);
static int symbolic_expression_p (rtx);
static section *mips_select_rtx_section (enum machine_mode, rtx,
                                         unsigned HOST_WIDE_INT);
static section *mips_function_rodata_section (tree);
static bool mips_in_small_data_p (tree);
static bool mips_use_anchors_for_symbol_p (rtx);
static int mips_fpr_return_fields (tree, tree *);
static bool mips_return_in_msb (tree);
static rtx mips_return_fpr_pair (enum machine_mode mode,
                                 enum machine_mode mode1, HOST_WIDE_INT,
                                 enum machine_mode mode2, HOST_WIDE_INT);
static rtx mips16_gp_pseudo_reg (void);
static void mips16_fp_args (FILE *, int, int);
static void build_mips16_function_stub (FILE *);
static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
static void dump_constants (struct mips16_constant *, rtx);
static int mips16_insn_length (rtx);
static int mips16_rewrite_pool_refs (rtx *, void *);
static void mips16_lay_out_constants (void);
static void mips_sim_reset (struct mips_sim *);
static void mips_sim_init (struct mips_sim *, state_t);
static void mips_sim_next_cycle (struct mips_sim *);
static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
static int mips_sim_wait_regs_2 (rtx *, void *);
static void mips_sim_wait_regs_1 (rtx *, void *);
static void mips_sim_wait_regs (struct mips_sim *, rtx);
static void mips_sim_wait_units (struct mips_sim *, rtx);
static void mips_sim_wait_insn (struct mips_sim *, rtx);
static void mips_sim_record_set (rtx, const_rtx, void *);
static void mips_sim_issue_insn (struct mips_sim *, rtx);
static void mips_sim_issue_nop (struct mips_sim *);
static void mips_sim_finish_insn (struct mips_sim *, rtx);
static void vr4130_avoid_branch_rt_conflict (rtx);
static void vr4130_align_insns (void);
static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
static void mips_avoid_hazards (void);
static void mips_reorg (void);
static bool mips_strict_matching_cpu_name_p (const char *, const char *);
static bool mips_matching_cpu_name_p (const char *, const char *);
static const struct mips_cpu_info *mips_parse_cpu (const char *);
static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
static bool mips_return_in_memory (tree, tree);
static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
static void mips_macc_chains_record (rtx);
static void mips_macc_chains_reorder (rtx *, int);
static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
static bool vr4130_true_reg_dependence_p (rtx);
static bool vr4130_swap_insns_p (rtx, rtx);
static void vr4130_reorder (rtx *, int);
static void mips_promote_ready (rtx *, int, int);
static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
static int mips_variable_issue (FILE *, int, rtx, int);
static int mips_adjust_cost (rtx, rtx, rtx, int);
static int mips_issue_rate (void);
static int mips_multipass_dfa_lookahead (void);
static void mips_init_libfuncs (void);
static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                         tree, int *, int);
static tree mips_build_builtin_va_list (void);
static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
                                    tree, bool);
static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
                                tree, bool);
static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
                                   tree, bool);
static bool mips_valid_pointer_mode (enum machine_mode);
static bool mips_vector_mode_supported_p (enum machine_mode);
static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree,
                                     unsigned int);
static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void mips_init_builtins (void);
static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
                                      enum insn_code, enum mips_fp_condition,
                                      rtx, tree);
static rtx mips_expand_builtin_compare (enum mips_builtin_type,
                                        enum insn_code, enum mips_fp_condition,
                                        rtx, tree);
static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
static void mips_encode_section_info (tree, rtx, int);
static void mips_extra_live_on_entry (bitmap);
static int mips_comp_type_attributes (tree, tree);
static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
/* Structure to be filled in by compute_frame_size with register
   save masks, and offsets for the current function.  */

struct mips_frame_info GTY(())
{
  HOST_WIDE_INT total_size;     /* # bytes that the entire frame takes up */
  HOST_WIDE_INT var_size;       /* # bytes that variables take up */
  HOST_WIDE_INT args_size;      /* # bytes that outgoing arguments take up */
  HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
  HOST_WIDE_INT gp_reg_size;    /* # bytes needed to store gp regs */
  HOST_WIDE_INT fp_reg_size;    /* # bytes needed to store fp regs */
  unsigned int mask;            /* mask of saved gp registers */
  unsigned int fmask;           /* mask of saved fp registers */
  HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
  HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
  HOST_WIDE_INT gp_sp_offset;   /* offset from new sp to store gp registers */
  HOST_WIDE_INT fp_sp_offset;   /* offset from new sp to store fp registers */
  bool initialized;             /* true if frame size already calculated */
  int num_gp;                   /* number of gp registers saved */
  int num_fp;                   /* number of fp registers saved */
};
struct machine_function GTY(()) {
  /* Pseudo-reg holding the value of $28 in a mips16 function which
     refers to GP relative global variables.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */

  /* Current frame information, calculated by compute_frame_size.  */
  struct mips_frame_info frame;

  /* The register to use as the global pointer within this function.  */
  unsigned int global_pointer;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard length.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function is known to have an instruction that needs $gp.  */

  /* True if we have emitted an instruction to initialize
     mips16_gp_pseudo_rtx.  */
  bool initialized_mips16_gp_pseudo_p;
};
/* Information about a single argument.  */
struct mips_arg_info
{
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};
/* Information about an address described by mips_address_type.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG is the register that contains the high part of the address,
       OFFSET is the symbolic address being referenced and SYMBOL_TYPE
       is the type of OFFSET's symbol.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol being referenced.  */

struct mips_address_info
{
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};
/* One stage in a constant building sequence.  These sequences have
   the form:

       A = VALUE[0]
       A = A CODE[1] VALUE[1]
       A = A CODE[2] VALUE[2]
       ...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
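
/* Worked example (illustrative): the 32-bit constant 0x12345678 is built
   in two operations,

       A = 0x12340000          (LUI)
       A = A IOR 0x5678        (ORI)

   whereas the worst accepted 64-bit case described above needs six.  */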
/* Information about a MIPS16e SAVE or RESTORE instruction.  */
struct mips16e_save_restore_info
{
  /* The number of argument registers saved by a SAVE instruction.
     0 for RESTORE instructions.  */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X.  */
  unsigned int mask;

  /* The total number of bytes to allocate.  */
  HOST_WIDE_INT size;
};
/* Global variables for machine-dependent things.  */

/* Threshold for data being put into the small data/bss area, instead
   of the normal data area.  */
int mips_section_threshold = -1;

/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Count the number of sdb related labels are generated (to find block
   start and end boundaries).  */
int sdb_label_count = 0;

/* Next label # for each statement for Silicon Graphics IRIS systems.  */

/* Name of the file containing the current function.  */
const char *current_function_file = "";

/* Number of nested .set noreorder, noat, nomacro, and volatile requests.  */

/* The next branch instruction is a branch likely, not branch normal.  */
int mips_branch_likely;

/* The operands passed to the last cmpMM expander.  */

/* The target cpu for code generation.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The target cpu for optimization and scheduling.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* Which instruction set architecture to use.  */

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* The -mtext-loads setting.  */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;

/* The architecture selected by -mipsN.  */
static const struct mips_cpu_info *mips_isa_info;

/* If TRUE, we split addresses into their high and low parts in the RTL.  */
int mips_split_addresses;

/* Mode used for saving/restoring general purpose registers.  */
static enum machine_mode gpr_mode;

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* List of all MIPS punctuation characters used by print_operand.  */
char mips_print_operand_punct[256];

/* Map GCC register number to debugger register number.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* A copy of the original flag_delayed_branch: see override_options.  */
static int mips_flag_delayed_branch;

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol().  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
/* Map hard register number to register class */
const enum reg_class mips_regno_to_class[] =
{
  LEA_REGS,     LEA_REGS,       M16_NA_REGS,    V1_REG,
  M16_REGS,     M16_REGS,       M16_REGS,       M16_REGS,
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
  M16_NA_REGS,  M16_NA_REGS,    LEA_REGS,       LEA_REGS,
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
  T_REG,        PIC_FN_ADDR_REG, LEA_REGS,      LEA_REGS,
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
  MD0_REG,      MD1_REG,        NO_REGS,        ST_REGS,
  ST_REGS,      ST_REGS,        ST_REGS,        ST_REGS,
  ST_REGS,      ST_REGS,        ST_REGS,        NO_REGS,
  NO_REGS,      ALL_REGS,       ALL_REGS,       NO_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS,   DSP_ACC_REGS,   DSP_ACC_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS,   ALL_REGS,       ALL_REGS,
  ALL_REGS,     ALL_REGS,       ALL_REGS,       ALL_REGS
};
/* Table of machine dependent attributes.  */
const struct attribute_spec mips_attribute_table[] =
{
  { "long_call", 0, 0, false, true,  true,  NULL },
  { "far",       0, 0, false, true,  true,  NULL },
  { "near",      0, 0, false, true,  true,  NULL },
  { NULL,        0, 0, false, false, false, NULL }
};
/* A table describing all the processors gcc knows about.  Names are
   matched in the order listed.  The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order as
   gas's mips_cpu_info_table[].  Please also make sure that
   MIPS_ISA_LEVEL_SPEC handles all -march options correctly.  */
const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs */
  { "mips1", PROCESSOR_R3000, 1 },
  { "mips2", PROCESSOR_R6000, 2 },
  { "mips3", PROCESSOR_R4000, 3 },
  { "mips4", PROCESSOR_R8000, 4 },
  { "mips32", PROCESSOR_4KC, 32 },
  { "mips32r2", PROCESSOR_M4K, 33 },
  { "mips64", PROCESSOR_5KC, 64 },

  { "r3000", PROCESSOR_R3000, 1 },
  { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
  { "r3900", PROCESSOR_R3900, 1 },

  { "r6000", PROCESSOR_R6000, 2 },

  { "r4000", PROCESSOR_R4000, 3 },
  { "vr4100", PROCESSOR_R4100, 3 },
  { "vr4111", PROCESSOR_R4111, 3 },
  { "vr4120", PROCESSOR_R4120, 3 },
  { "vr4130", PROCESSOR_R4130, 3 },
  { "vr4300", PROCESSOR_R4300, 3 },
  { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
  { "r4600", PROCESSOR_R4600, 3 },
  { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
  { "r4650", PROCESSOR_R4650, 3 },

  { "r8000", PROCESSOR_R8000, 4 },
  { "vr5000", PROCESSOR_R5000, 4 },
  { "vr5400", PROCESSOR_R5400, 4 },
  { "vr5500", PROCESSOR_R5500, 4 },
  { "rm7000", PROCESSOR_R7000, 4 },
  { "rm9000", PROCESSOR_R9000, 4 },

  { "4kc", PROCESSOR_4KC, 32 },
  { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
  { "4kp", PROCESSOR_4KP, 32 },
  { "4ksc", PROCESSOR_4KC, 32 },

  /* MIPS32 Release 2 */
  { "m4k", PROCESSOR_M4K, 33 },
  { "4kec", PROCESSOR_4KC, 33 },
  { "4kem", PROCESSOR_4KC, 33 },
  { "4kep", PROCESSOR_4KP, 33 },
  { "4ksd", PROCESSOR_4KC, 33 },

  { "24kc", PROCESSOR_24KC, 33 },
  { "24kf2_1", PROCESSOR_24KF2_1, 33 },
  { "24kf", PROCESSOR_24KF2_1, 33 },
  { "24kf1_1", PROCESSOR_24KF1_1, 33 },
  { "24kfx", PROCESSOR_24KF1_1, 33 },
  { "24kx", PROCESSOR_24KF1_1, 33 },

  { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
  { "24kef2_1", PROCESSOR_24KF2_1, 33 },
  { "24kef", PROCESSOR_24KF2_1, 33 },
  { "24kef1_1", PROCESSOR_24KF1_1, 33 },
  { "24kefx", PROCESSOR_24KF1_1, 33 },
  { "24kex", PROCESSOR_24KF1_1, 33 },

  { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
  { "34kf2_1", PROCESSOR_24KF2_1, 33 },
  { "34kf", PROCESSOR_24KF2_1, 33 },
  { "34kf1_1", PROCESSOR_24KF1_1, 33 },
  { "34kfx", PROCESSOR_24KF1_1, 33 },
  { "34kx", PROCESSOR_24KF1_1, 33 },

  { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
  { "74kf2_1", PROCESSOR_74KF2_1, 33 },
  { "74kf", PROCESSOR_74KF2_1, 33 },
  { "74kf1_1", PROCESSOR_74KF1_1, 33 },
  { "74kfx", PROCESSOR_74KF1_1, 33 },
  { "74kx", PROCESSOR_74KF1_1, 33 },
  { "74kf3_2", PROCESSOR_74KF3_2, 33 },

  { "5kc", PROCESSOR_5KC, 64 },
  { "5kf", PROCESSOR_5KF, 64 },
  { "20kc", PROCESSOR_20KC, 64 },
  { "sb1", PROCESSOR_SB1, 64 },
  { "sb1a", PROCESSOR_SB1A, 64 },
  { "sr71000", PROCESSOR_SR71000, 64 },
};
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */        \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */    \
                      COSTS_N_INSNS (8),  /* fp_mult_df */    \
                      COSTS_N_INSNS (23), /* fp_div_sf */     \
                      COSTS_N_INSNS (36), /* fp_div_df */     \
                      COSTS_N_INSNS (10), /* int_mult_si */   \
                      COSTS_N_INSNS (10), /* int_mult_di */   \
                      COSTS_N_INSNS (69), /* int_div_si */    \
                      COSTS_N_INSNS (69), /* int_div_di */    \
                                       2, /* branch_cost */   \
                                       4  /* memory_latency */

/* Need to replace these with the costs of calling the appropriate
   library functions.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */
static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
843 COSTS_N_INSNS (1), /* fp_add */
844 COSTS_N_INSNS (1), /* fp_mult_sf */
845 COSTS_N_INSNS (1), /* fp_mult_df */
846 COSTS_N_INSNS (1), /* fp_div_sf */
847 COSTS_N_INSNS (1), /* fp_div_df */
848 COSTS_N_INSNS (1), /* int_mult_si */
849 COSTS_N_INSNS (1), /* int_mult_di */
850 COSTS_N_INSNS (1), /* int_div_si */
851 COSTS_N_INSNS (1), /* int_div_di */
853 4 /* memory_latency */
static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
859 COSTS_N_INSNS (2), /* fp_add */
860 COSTS_N_INSNS (4), /* fp_mult_sf */
861 COSTS_N_INSNS (5), /* fp_mult_df */
862 COSTS_N_INSNS (12), /* fp_div_sf */
863 COSTS_N_INSNS (19), /* fp_div_df */
864 COSTS_N_INSNS (12), /* int_mult_si */
865 COSTS_N_INSNS (12), /* int_mult_di */
866 COSTS_N_INSNS (35), /* int_div_si */
867 COSTS_N_INSNS (35), /* int_div_di */
869 4 /* memory_latency */
874 COSTS_N_INSNS (6), /* int_mult_si */
875 COSTS_N_INSNS (6), /* int_mult_di */
876 COSTS_N_INSNS (36), /* int_div_si */
877 COSTS_N_INSNS (36), /* int_div_di */
879 4 /* memory_latency */
883 COSTS_N_INSNS (36), /* int_mult_si */
884 COSTS_N_INSNS (36), /* int_mult_di */
885 COSTS_N_INSNS (37), /* int_div_si */
886 COSTS_N_INSNS (37), /* int_div_di */
888 4 /* memory_latency */
892 COSTS_N_INSNS (4), /* int_mult_si */
893 COSTS_N_INSNS (11), /* int_mult_di */
894 COSTS_N_INSNS (36), /* int_div_si */
895 COSTS_N_INSNS (68), /* int_div_di */
897 4 /* memory_latency */
900 COSTS_N_INSNS (4), /* fp_add */
901 COSTS_N_INSNS (4), /* fp_mult_sf */
902 COSTS_N_INSNS (5), /* fp_mult_df */
903 COSTS_N_INSNS (17), /* fp_div_sf */
904 COSTS_N_INSNS (32), /* fp_div_df */
905 COSTS_N_INSNS (4), /* int_mult_si */
906 COSTS_N_INSNS (11), /* int_mult_di */
907 COSTS_N_INSNS (36), /* int_div_si */
908 COSTS_N_INSNS (68), /* int_div_di */
910 4 /* memory_latency */
913 COSTS_N_INSNS (4), /* fp_add */
914 COSTS_N_INSNS (4), /* fp_mult_sf */
915 COSTS_N_INSNS (5), /* fp_mult_df */
916 COSTS_N_INSNS (17), /* fp_div_sf */
917 COSTS_N_INSNS (32), /* fp_div_df */
918 COSTS_N_INSNS (4), /* int_mult_si */
919 COSTS_N_INSNS (7), /* int_mult_di */
920 COSTS_N_INSNS (42), /* int_div_si */
921 COSTS_N_INSNS (72), /* int_div_di */
923 4 /* memory_latency */
927 COSTS_N_INSNS (5), /* int_mult_si */
928 COSTS_N_INSNS (5), /* int_mult_di */
929 COSTS_N_INSNS (41), /* int_div_si */
930 COSTS_N_INSNS (41), /* int_div_di */
932 4 /* memory_latency */
935 COSTS_N_INSNS (8), /* fp_add */
936 COSTS_N_INSNS (8), /* fp_mult_sf */
937 COSTS_N_INSNS (10), /* fp_mult_df */
938 COSTS_N_INSNS (34), /* fp_div_sf */
939 COSTS_N_INSNS (64), /* fp_div_df */
940 COSTS_N_INSNS (5), /* int_mult_si */
941 COSTS_N_INSNS (5), /* int_mult_di */
942 COSTS_N_INSNS (41), /* int_div_si */
943 COSTS_N_INSNS (41), /* int_div_di */
945 4 /* memory_latency */
948 COSTS_N_INSNS (4), /* fp_add */
949 COSTS_N_INSNS (4), /* fp_mult_sf */
950 COSTS_N_INSNS (5), /* fp_mult_df */
951 COSTS_N_INSNS (17), /* fp_div_sf */
952 COSTS_N_INSNS (32), /* fp_div_df */
953 COSTS_N_INSNS (5), /* int_mult_si */
954 COSTS_N_INSNS (5), /* int_mult_di */
955 COSTS_N_INSNS (41), /* int_div_si */
956 COSTS_N_INSNS (41), /* int_div_di */
958 4 /* memory_latency */
962 COSTS_N_INSNS (5), /* int_mult_si */
963 COSTS_N_INSNS (5), /* int_mult_di */
964 COSTS_N_INSNS (41), /* int_div_si */
965 COSTS_N_INSNS (41), /* int_div_di */
967 4 /* memory_latency */
970 COSTS_N_INSNS (8), /* fp_add */
971 COSTS_N_INSNS (8), /* fp_mult_sf */
972 COSTS_N_INSNS (10), /* fp_mult_df */
973 COSTS_N_INSNS (34), /* fp_div_sf */
974 COSTS_N_INSNS (64), /* fp_div_df */
975 COSTS_N_INSNS (5), /* int_mult_si */
976 COSTS_N_INSNS (5), /* int_mult_di */
977 COSTS_N_INSNS (41), /* int_div_si */
978 COSTS_N_INSNS (41), /* int_div_di */
980 4 /* memory_latency */
983 COSTS_N_INSNS (4), /* fp_add */
984 COSTS_N_INSNS (4), /* fp_mult_sf */
985 COSTS_N_INSNS (5), /* fp_mult_df */
986 COSTS_N_INSNS (17), /* fp_div_sf */
987 COSTS_N_INSNS (32), /* fp_div_df */
988 COSTS_N_INSNS (5), /* int_mult_si */
989 COSTS_N_INSNS (5), /* int_mult_di */
990 COSTS_N_INSNS (41), /* int_div_si */
991 COSTS_N_INSNS (41), /* int_div_di */
993 4 /* memory_latency */
996 COSTS_N_INSNS (6), /* fp_add */
997 COSTS_N_INSNS (6), /* fp_mult_sf */
998 COSTS_N_INSNS (7), /* fp_mult_df */
999 COSTS_N_INSNS (25), /* fp_div_sf */
1000 COSTS_N_INSNS (48), /* fp_div_df */
1001 COSTS_N_INSNS (5), /* int_mult_si */
1002 COSTS_N_INSNS (5), /* int_mult_di */
1003 COSTS_N_INSNS (41), /* int_div_si */
1004 COSTS_N_INSNS (41), /* int_div_di */
1005 1, /* branch_cost */
1006 4 /* memory_latency */
1012 COSTS_N_INSNS (2), /* fp_add */
1013 COSTS_N_INSNS (4), /* fp_mult_sf */
1014 COSTS_N_INSNS (5), /* fp_mult_df */
1015 COSTS_N_INSNS (12), /* fp_div_sf */
1016 COSTS_N_INSNS (19), /* fp_div_df */
1017 COSTS_N_INSNS (2), /* int_mult_si */
1018 COSTS_N_INSNS (2), /* int_mult_di */
1019 COSTS_N_INSNS (35), /* int_div_si */
1020 COSTS_N_INSNS (35), /* int_div_di */
1021 1, /* branch_cost */
1022 4 /* memory_latency */
1025 COSTS_N_INSNS (3), /* fp_add */
1026 COSTS_N_INSNS (5), /* fp_mult_sf */
1027 COSTS_N_INSNS (6), /* fp_mult_df */
1028 COSTS_N_INSNS (15), /* fp_div_sf */
1029 COSTS_N_INSNS (16), /* fp_div_df */
1030 COSTS_N_INSNS (17), /* int_mult_si */
1031 COSTS_N_INSNS (17), /* int_mult_di */
1032 COSTS_N_INSNS (38), /* int_div_si */
1033 COSTS_N_INSNS (38), /* int_div_di */
1034 2, /* branch_cost */
1035 6 /* memory_latency */
1038 COSTS_N_INSNS (6), /* fp_add */
1039 COSTS_N_INSNS (7), /* fp_mult_sf */
1040 COSTS_N_INSNS (8), /* fp_mult_df */
1041 COSTS_N_INSNS (23), /* fp_div_sf */
1042 COSTS_N_INSNS (36), /* fp_div_df */
1043 COSTS_N_INSNS (10), /* int_mult_si */
1044 COSTS_N_INSNS (10), /* int_mult_di */
1045 COSTS_N_INSNS (69), /* int_div_si */
1046 COSTS_N_INSNS (69), /* int_div_di */
1047 2, /* branch_cost */
1048 6 /* memory_latency */
1060 /* The only costs that appear to be updated here are
1061 integer multiplication. */
1063 COSTS_N_INSNS (4), /* int_mult_si */
1064 COSTS_N_INSNS (6), /* int_mult_di */
1065 COSTS_N_INSNS (69), /* int_div_si */
1066 COSTS_N_INSNS (69), /* int_div_di */
1067 1, /* branch_cost */
1068 4 /* memory_latency */
1080 COSTS_N_INSNS (6), /* fp_add */
1081 COSTS_N_INSNS (4), /* fp_mult_sf */
1082 COSTS_N_INSNS (5), /* fp_mult_df */
1083 COSTS_N_INSNS (23), /* fp_div_sf */
1084 COSTS_N_INSNS (36), /* fp_div_df */
1085 COSTS_N_INSNS (5), /* int_mult_si */
1086 COSTS_N_INSNS (5), /* int_mult_di */
1087 COSTS_N_INSNS (36), /* int_div_si */
1088 COSTS_N_INSNS (36), /* int_div_di */
1089 1, /* branch_cost */
1090 4 /* memory_latency */
1093 COSTS_N_INSNS (6), /* fp_add */
1094 COSTS_N_INSNS (5), /* fp_mult_sf */
1095 COSTS_N_INSNS (6), /* fp_mult_df */
1096 COSTS_N_INSNS (30), /* fp_div_sf */
1097 COSTS_N_INSNS (59), /* fp_div_df */
1098 COSTS_N_INSNS (3), /* int_mult_si */
1099 COSTS_N_INSNS (4), /* int_mult_di */
1100 COSTS_N_INSNS (42), /* int_div_si */
1101 COSTS_N_INSNS (74), /* int_div_di */
1102 1, /* branch_cost */
1103 4 /* memory_latency */
1106 COSTS_N_INSNS (6), /* fp_add */
1107 COSTS_N_INSNS (5), /* fp_mult_sf */
1108 COSTS_N_INSNS (6), /* fp_mult_df */
1109 COSTS_N_INSNS (30), /* fp_div_sf */
1110 COSTS_N_INSNS (59), /* fp_div_df */
1111 COSTS_N_INSNS (5), /* int_mult_si */
1112 COSTS_N_INSNS (9), /* int_mult_di */
1113 COSTS_N_INSNS (42), /* int_div_si */
1114 COSTS_N_INSNS (74), /* int_div_di */
1115 1, /* branch_cost */
1116 4 /* memory_latency */
1119 /* The only costs that are changed here are
1120 integer multiplication. */
1121 COSTS_N_INSNS (6), /* fp_add */
1122 COSTS_N_INSNS (7), /* fp_mult_sf */
1123 COSTS_N_INSNS (8), /* fp_mult_df */
1124 COSTS_N_INSNS (23), /* fp_div_sf */
1125 COSTS_N_INSNS (36), /* fp_div_df */
1126 COSTS_N_INSNS (5), /* int_mult_si */
1127 COSTS_N_INSNS (9), /* int_mult_di */
1128 COSTS_N_INSNS (69), /* int_div_si */
1129 COSTS_N_INSNS (69), /* int_div_di */
1130 1, /* branch_cost */
1131 4 /* memory_latency */
1137 /* The only costs that are changed here are
1138 integer multiplication. */
1139 COSTS_N_INSNS (6), /* fp_add */
1140 COSTS_N_INSNS (7), /* fp_mult_sf */
1141 COSTS_N_INSNS (8), /* fp_mult_df */
1142 COSTS_N_INSNS (23), /* fp_div_sf */
1143 COSTS_N_INSNS (36), /* fp_div_df */
1144 COSTS_N_INSNS (3), /* int_mult_si */
1145 COSTS_N_INSNS (8), /* int_mult_di */
1146 COSTS_N_INSNS (69), /* int_div_si */
1147 COSTS_N_INSNS (69), /* int_div_di */
1148 1, /* branch_cost */
1149 4 /* memory_latency */
1152 /* These costs are the same as the SB-1A below. */
1153 COSTS_N_INSNS (4), /* fp_add */
1154 COSTS_N_INSNS (4), /* fp_mult_sf */
1155 COSTS_N_INSNS (4), /* fp_mult_df */
1156 COSTS_N_INSNS (24), /* fp_div_sf */
1157 COSTS_N_INSNS (32), /* fp_div_df */
1158 COSTS_N_INSNS (3), /* int_mult_si */
1159 COSTS_N_INSNS (4), /* int_mult_di */
1160 COSTS_N_INSNS (36), /* int_div_si */
1161 COSTS_N_INSNS (68), /* int_div_di */
1162 1, /* branch_cost */
1163 4 /* memory_latency */
1166 /* These costs are the same as the SB-1 above. */
1167 COSTS_N_INSNS (4), /* fp_add */
1168 COSTS_N_INSNS (4), /* fp_mult_sf */
1169 COSTS_N_INSNS (4), /* fp_mult_df */
1170 COSTS_N_INSNS (24), /* fp_div_sf */
1171 COSTS_N_INSNS (32), /* fp_div_df */
1172 COSTS_N_INSNS (3), /* int_mult_si */
1173 COSTS_N_INSNS (4), /* int_mult_di */
1174 COSTS_N_INSNS (36), /* int_div_si */
1175 COSTS_N_INSNS (68), /* int_div_di */
1176 1, /* branch_cost */
1177 4 /* memory_latency */
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
static const unsigned char mips16e_a0_a3_regs[] = {
  7, 6, 5, 4
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
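
/* Worked example from the arrays above: if a SAVE instruction saves
   mips16e_s2_s8_regs[3] (GPR 21, $s5), it must also save the registers at
   indexes 4 onwards, i.e. GPRs 20, 19 and 18 ($s4, $s3, $s2).  */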
/* Nonzero if -march should decide the default value of
   MASK_SOFT_FLOAT_ABI.  */
#ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
#define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
#endif
/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  | TARGET_CPU_DEFAULT \
  | TARGET_ENDIAN_DEFAULT \
  | TARGET_FP_EXCEPTIONS_DEFAULT \
  | MASK_CHECK_ZERO_DIV

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION mips_handle_option

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -32768
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 32767
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE.  */

static bool
mips_near_type_p (tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
          || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}
/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

static int
mips_comp_type_attributes (tree type1, tree type2)
{
  /* Check for mismatch of non-default calling convention.  */
  if (TREE_CODE (type1) != FUNCTION_TYPE)
    return 1;

  /* Disallow mixed near/far attributes.  */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;

  return 1;
}
/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */

static void
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      *base_ptr = x;
      *offset_ptr = 0;
    }
}
/* Return true if SYMBOL_REF X is associated with a global symbol
   (in the STB_GLOBAL sense).  */

static bool
mips_global_symbol_p (rtx x)
{
  tree decl = SYMBOL_REF_DECL (x);

  if (!decl)
    return !SYMBOL_REF_LOCAL_P (x);

  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
     or weak symbols.  Relocations in the object file will be against
     the target symbol, so it's that symbol's binding that matters here.  */
  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
}
1424 /* Return true if SYMBOL_REF X binds locally. */
1427 mips_symbol_binds_local_p (rtx x
)
1429 return (SYMBOL_REF_DECL (x
)
1430 ? targetm
.binds_local_p (SYMBOL_REF_DECL (x
))
1431 : SYMBOL_REF_LOCAL_P (x
));
1434 /* Return the method that should be used to access SYMBOL_REF or
1435 LABEL_REF X in context CONTEXT. */
1437 static enum mips_symbol_type
1438 mips_classify_symbol (rtx x
, enum mips_symbol_context context
)
1441 return SYMBOL_GOT_DISP
;
1443 if (GET_CODE (x
) == LABEL_REF
)
1445 /* LABEL_REFs are used for jump tables as well as text labels.
1446 Only return SYMBOL_PC_RELATIVE if we know the label is in
1447 the text section. */
1448 if (TARGET_MIPS16_SHORT_JUMP_TABLES
)
1449 return SYMBOL_PC_RELATIVE
;
1450 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
1451 return SYMBOL_GOT_PAGE_OFST
;
1452 return SYMBOL_ABSOLUTE
;
1455 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
1457 if (SYMBOL_REF_TLS_MODEL (x
))
1460 if (CONSTANT_POOL_ADDRESS_P (x
))
1462 if (TARGET_MIPS16_TEXT_LOADS
)
1463 return SYMBOL_PC_RELATIVE
;
1465 if (TARGET_MIPS16_PCREL_LOADS
&& context
== SYMBOL_CONTEXT_MEM
)
1466 return SYMBOL_PC_RELATIVE
;
1468 if (!TARGET_EMBEDDED_DATA
1469 && GET_MODE_SIZE (get_pool_mode (x
)) <= mips_section_threshold
)
1470 return SYMBOL_GP_RELATIVE
;
1473 /* Do not use small-data accesses for weak symbols; they may end up
1475 if (SYMBOL_REF_SMALL_P (x
)
1476 && !SYMBOL_REF_WEAK (x
))
1477 return SYMBOL_GP_RELATIVE
;
1479 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1482 && !(TARGET_ABSOLUTE_ABICALLS
&& mips_symbol_binds_local_p (x
)))
1484 /* There are three cases to consider:
1486 - o32 PIC (either with or without explicit relocs)
1487 - n32/n64 PIC without explicit relocs
1488 - n32/n64 PIC with explicit relocs
1490 In the first case, both local and global accesses will use an
1491 R_MIPS_GOT16 relocation. We must correctly predict which of
1492 the two semantics (local or global) the assembler and linker
1493 will apply. The choice depends on the symbol's binding rather
1494 than its visibility.
1496 In the second case, the assembler will not use R_MIPS_GOT16
1497 relocations, but it chooses between local and global accesses
1498 in the same way as for o32 PIC.
1500 In the third case we have more freedom since both forms of
1501 access will work for any kind of symbol. However, there seems
1502 little point in doing things differently. */
1503 if (mips_global_symbol_p (x
))
1504 return SYMBOL_GOT_DISP
;
1506 return SYMBOL_GOT_PAGE_OFST
;
1509 if (TARGET_MIPS16_PCREL_LOADS
&& context
!= SYMBOL_CONTEXT_CALL
)
1510 return SYMBOL_FORCE_TO_MEM
;
1511 return SYMBOL_ABSOLUTE
;
1514 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1515 is the alignment (in bytes) of SYMBOL_REF X. */
1518 mips_offset_within_alignment_p (rtx x
, HOST_WIDE_INT offset
)
1520 /* If for some reason we can't get the alignment for the
1521 symbol, initializing this to one means we will only accept
1523 HOST_WIDE_INT align
= 1;
1526 /* Get the alignment of the symbol we're referring to. */
1527 t
= SYMBOL_REF_DECL (x
);
1529 align
= DECL_ALIGN_UNIT (t
);
1531 return offset
>= 0 && offset
< align
;
1534 /* Return true if X is a symbolic constant that can be used in context
1535 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1538 mips_symbolic_constant_p (rtx x
, enum mips_symbol_context context
,
1539 enum mips_symbol_type
*symbol_type
)
1543 split_const (x
, &x
, &offset
);
1544 if (UNSPEC_ADDRESS_P (x
))
1546 *symbol_type
= UNSPEC_ADDRESS_TYPE (x
);
1547 x
= UNSPEC_ADDRESS (x
);
1549 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
1551 *symbol_type
= mips_classify_symbol (x
, context
);
1552 if (*symbol_type
== SYMBOL_TLS
)
1558 if (offset
== const0_rtx
)
1561 /* Check whether a nonzero offset is valid for the underlying
1563 switch (*symbol_type
)
1565 case SYMBOL_ABSOLUTE
:
1566 case SYMBOL_FORCE_TO_MEM
:
1567 case SYMBOL_32_HIGH
:
1568 case SYMBOL_64_HIGH
:
1571 /* If the target has 64-bit pointers and the object file only
1572 supports 32-bit symbols, the values of those symbols will be
1573 sign-extended. In this case we can't allow an arbitrary offset
1574 in case the 32-bit value X + OFFSET has a different sign from X. */
1575 if (Pmode
== DImode
&& !ABI_HAS_64BIT_SYMBOLS
)
1576 return offset_within_block_p (x
, INTVAL (offset
));
1578 /* In other cases the relocations can handle any offset. */
1581 case SYMBOL_PC_RELATIVE
:
1582 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1583 In this case, we no longer have access to the underlying constant,
1584 but the original symbol-based access was known to be valid. */
1585 if (GET_CODE (x
) == LABEL_REF
)
1590 case SYMBOL_GP_RELATIVE
:
1591 /* Make sure that the offset refers to something within the
1592 same object block. This should guarantee that the final
1593 PC- or GP-relative offset is within the 16-bit limit. */
1594 return offset_within_block_p (x
, INTVAL (offset
));
1596 case SYMBOL_GOT_PAGE_OFST
:
1597 case SYMBOL_GOTOFF_PAGE
:
1598 /* If the symbol is global, the GOT entry will contain the symbol's
1599 address, and we will apply a 16-bit offset after loading it.
1600 If the symbol is local, the linker should provide enough local
1601 GOT entries for a 16-bit offset, but larger offsets may lead
1603 return SMALL_INT (offset
);
1607 /* There is no carry between the HI and LO REL relocations, so the
1608 offset is only valid if we know it won't lead to such a carry. */
1609 return mips_offset_within_alignment_p (x
, INTVAL (offset
));
1611 case SYMBOL_GOT_DISP
:
1612 case SYMBOL_GOTOFF_DISP
:
1613 case SYMBOL_GOTOFF_CALL
:
1614 case SYMBOL_GOTOFF_LOADGP
:
1617 case SYMBOL_GOTTPREL
:
1626 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1629 mips_regno_mode_ok_for_base_p (int regno
, enum machine_mode mode
, int strict
)
1631 if (!HARD_REGISTER_NUM_P (regno
))
1635 regno
= reg_renumber
[regno
];
1638 /* These fake registers will be eliminated to either the stack or
1639 hard frame pointer, both of which are usually valid base registers.
1640 Reload deals with the cases where the eliminated form isn't valid. */
1641 if (regno
== ARG_POINTER_REGNUM
|| regno
== FRAME_POINTER_REGNUM
)
1644 /* In mips16 mode, the stack pointer can only address word and doubleword
1645 values, nothing smaller. There are two problems here:
1647 (a) Instantiating virtual registers can introduce new uses of the
1648 stack pointer. If these virtual registers are valid addresses,
1649 the stack pointer should be too.
1651 (b) Most uses of the stack pointer are not made explicit until
1652 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1653 We don't know until that stage whether we'll be eliminating to the
1654 stack pointer (which needs the restriction) or the hard frame
1655 pointer (which doesn't).
1657 All in all, it seems more consistent to only enforce this restriction
1658 during and after reload. */
1659 if (TARGET_MIPS16
&& regno
== STACK_POINTER_REGNUM
)
1660 return !strict
|| GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8;
1662 return TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
1666 /* Return true if X is a valid base register for the given mode.
1667 Allow only hard registers if STRICT. */
1670 mips_valid_base_register_p (rtx x
, enum machine_mode mode
, int strict
)
1672 if (!strict
&& GET_CODE (x
) == SUBREG
)
1676 && mips_regno_mode_ok_for_base_p (REGNO (x
), mode
, strict
));
1680 /* Return true if X is a valid address for machine mode MODE. If it is,
1681 fill in INFO appropriately. STRICT is true if we should only accept
1682 hard base registers. */
1685 mips_classify_address (struct mips_address_info
*info
, rtx x
,
1686 enum machine_mode mode
, int strict
)
1688 switch (GET_CODE (x
))
1692 info
->type
= ADDRESS_REG
;
1694 info
->offset
= const0_rtx
;
1695 return mips_valid_base_register_p (info
->reg
, mode
, strict
);
1698 info
->type
= ADDRESS_REG
;
1699 info
->reg
= XEXP (x
, 0);
1700 info
->offset
= XEXP (x
, 1);
1701 return (mips_valid_base_register_p (info
->reg
, mode
, strict
)
1702 && const_arith_operand (info
->offset
, VOIDmode
));
1705 info
->type
= ADDRESS_LO_SUM
;
1706 info
->reg
= XEXP (x
, 0);
1707 info
->offset
= XEXP (x
, 1);
1708 return (mips_valid_base_register_p (info
->reg
, mode
, strict
)
1709 && mips_symbolic_constant_p (info
->offset
, SYMBOL_CONTEXT_MEM
,
1711 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
1712 && mips_lo_relocs
[info
->symbol_type
] != 0);
1715 /* Small-integer addresses don't occur very often, but they
1716 are legitimate if $0 is a valid base register. */
1717 info
->type
= ADDRESS_CONST_INT
;
1718 return !TARGET_MIPS16
&& SMALL_INT (x
);
1723 info
->type
= ADDRESS_SYMBOLIC
;
1724 return (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_MEM
,
1726 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
1727 && !mips_split_p
[info
->symbol_type
]);
/* Return true if X is a thread-local symbol.  */

static bool
mips_tls_operand_p (rtx x)
{
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if X can not be forced into a constant pool.  */

static int
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return mips_tls_operand_p (*x);
}

/* Return true if X can not be forced into a constant pool.  */

static bool
mips_cannot_force_const_mem (rtx x)
{
  rtx base, offset;

  if (!TARGET_MIPS16)
    {
      /* As an optimization, reject constants that mips_legitimize_move
         can expand inline.

         Suppose we have a multi-instruction sequence that loads constant C
         into register R.  If R does not get allocated a hard register, and
         R is used in an operand that allows both registers and memory
         references, reload will consider forcing C into memory and using
         one of the instruction's memory alternatives.  Returning false
         here will force it to use an input reload instead.  */
      if (GET_CODE (x) == CONST_INT)
        return true;

      split_const (x, &base, &offset);
      if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
        return true;
    }

  if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
    return true;

  return false;
}

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
   constants when we're using a per-function constant pool.  */

static bool
mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
                                rtx x ATTRIBUTE_UNUSED)
{
  return !TARGET_MIPS16_PCREL_LOADS;
}
1792 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1793 single instruction. We rely on the fact that, in the worst case,
1794 all instructions involved in a MIPS16 address calculation are usually
1798 mips_symbol_insns_1 (enum mips_symbol_type type
, enum machine_mode mode
)
1802 case SYMBOL_ABSOLUTE
:
1803 /* When using 64-bit symbols, we need 5 preparatory instructions,
1806 lui $at,%highest(symbol)
1807 daddiu $at,$at,%higher(symbol)
1809 daddiu $at,$at,%hi(symbol)
1812 The final address is then $at + %lo(symbol). With 32-bit
1813 symbols we just need a preparatory lui for normal mode and
1814 a preparatory "li; sll" for MIPS16. */
1815 return ABI_HAS_64BIT_SYMBOLS
? 6 : TARGET_MIPS16
? 3 : 2;
1817 case SYMBOL_GP_RELATIVE
:
1818 /* Treat GP-relative accesses as taking a single instruction on
1819 MIPS16 too; the copy of $gp can often be shared. */
1822 case SYMBOL_PC_RELATIVE
:
      /* PC-relative constants can only be used with addiupc,
1825 if (mode
== MAX_MACHINE_MODE
1826 || GET_MODE_SIZE (mode
) == 4
1827 || GET_MODE_SIZE (mode
) == 8)
1830 /* The constant must be loaded using addiupc first. */
1833 case SYMBOL_FORCE_TO_MEM
:
1834 /* The constant must be loaded from the constant pool. */
1837 case SYMBOL_GOT_DISP
:
1838 /* The constant will have to be loaded from the GOT before it
1839 is used in an address. */
1840 if (mode
!= MAX_MACHINE_MODE
)
1845 case SYMBOL_GOT_PAGE_OFST
:
1846 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1847 the local/global classification is accurate. See override_options
1850 The worst cases are:
1852 (1) For local symbols when generating o32 or o64 code. The assembler
1858 ...and the final address will be $at + %lo(symbol).
1860 (2) For global symbols when -mxgot. The assembler will use:
1862 lui $at,%got_hi(symbol)
1865 ...and the final address will be $at + %got_lo(symbol). */
1868 case SYMBOL_GOTOFF_PAGE
:
1869 case SYMBOL_GOTOFF_DISP
:
1870 case SYMBOL_GOTOFF_CALL
:
1871 case SYMBOL_GOTOFF_LOADGP
:
1872 case SYMBOL_32_HIGH
:
1873 case SYMBOL_64_HIGH
:
1879 case SYMBOL_GOTTPREL
:
1882 /* A 16-bit constant formed by a single relocation, or a 32-bit
1883 constant formed from a high 16-bit relocation and a low 16-bit
1884 relocation. Use mips_split_p to determine which. */
1885 return !mips_split_p
[type
] ? 1 : TARGET_MIPS16
? 3 : 2;
1888 /* We don't treat a bare TLS symbol as a constant. */
/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
   to load symbols of type TYPE into a register.  Return 0 if the given
   type of symbol cannot be used as an immediate operand.

   Otherwise, return the number of instructions needed to load or store
   values of mode MODE to or from addresses of type TYPE.  Return 0 if
   the given type of symbol is not valid in addresses.

   In both cases, treat extended MIPS16 instructions as two instructions.  */

int
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
{
  return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}

/* Return true if X is a legitimate $sp-based address for mode MODE.  */

bool
mips_stack_address_p (rtx x, enum machine_mode mode)
{
  struct mips_address_info addr;

  return (mips_classify_address (&addr, x, mode, false)
          && addr.type == ADDRESS_REG
          && addr.reg == stack_pointer_rtx);
}

/* Return true if a value at OFFSET bytes from BASE can be accessed
   using an unextended mips16 instruction.  MODE is the mode of the
   value.

   Usually the offset in an unextended instruction is a 5-bit field.
   The offset is unsigned and shifted left once for HIs, twice
   for SIs, and so on.  An exception is SImode accesses off the
   stack pointer, which have an 8-bit immediate field.  */

static bool
mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
{
  if (TARGET_MIPS16
      && GET_CODE (offset) == CONST_INT
      && INTVAL (offset) >= 0
      && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
    {
      if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
        return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
      return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
    }
  return false;
}
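
/* For example, with MODE == SImode the offset is scaled by 4, so a base
   other than $sp accepts unextended offsets 0, 4, ..., 124 (below 32 * 4),
   while an $sp base accepts offsets up to 1020 (below 256 * 4).  Larger
   offsets require an extended instruction.  */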
1947 /* Return the number of instructions needed to load or store a value
1948 of mode MODE at X. Return 0 if X isn't valid for MODE.
1950 For mips16 code, count extended instructions as two instructions. */
1953 mips_address_insns (rtx x
, enum machine_mode mode
)
1955 struct mips_address_info addr
;
1958 if (mode
== BLKmode
)
1959 /* BLKmode is used for single unaligned loads and stores. */
1962 /* Each word of a multi-word value will be accessed individually. */
1963 factor
= (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
1965 if (mips_classify_address (&addr
, x
, mode
, false))
1970 && !mips16_unextended_reference_p (mode
, addr
.reg
, addr
.offset
))
1974 case ADDRESS_LO_SUM
:
1975 return (TARGET_MIPS16
? factor
* 2 : factor
);
1977 case ADDRESS_CONST_INT
:
1980 case ADDRESS_SYMBOLIC
:
1981 return factor
* mips_symbol_insns (addr
.symbol_type
, mode
);
1987 /* Likewise for constant X. */
1990 mips_const_insns (rtx x
)
1992 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
1993 enum mips_symbol_type symbol_type
;
1996 switch (GET_CODE (x
))
1999 if (!mips_symbolic_constant_p (XEXP (x
, 0), SYMBOL_CONTEXT_LEA
,
2001 || !mips_split_p
[symbol_type
])
2004 /* This is simply an lui for normal mode. It is an extended
2005 "li" followed by an extended "sll" for MIPS16. */
2006 return TARGET_MIPS16
? 4 : 1;
2010 /* Unsigned 8-bit constants can be loaded using an unextended
2011 LI instruction. Unsigned 16-bit constants can be loaded
2012 using an extended LI. Negative constants must be loaded
2013 using LI and then negated. */
2014 return (INTVAL (x
) >= 0 && INTVAL (x
) < 256 ? 1
2015 : SMALL_OPERAND_UNSIGNED (INTVAL (x
)) ? 2
2016 : INTVAL (x
) > -256 && INTVAL (x
) < 0 ? 2
2017 : SMALL_OPERAND_UNSIGNED (-INTVAL (x
)) ? 3
2020 return mips_build_integer (codes
, INTVAL (x
));
2024 return (!TARGET_MIPS16
&& x
== CONST0_RTX (GET_MODE (x
)) ? 1 : 0);
2030 /* See if we can refer to X directly. */
2031 if (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_LEA
, &symbol_type
))
2032 return mips_symbol_insns (symbol_type
, MAX_MACHINE_MODE
);
2034 /* Otherwise try splitting the constant into a base and offset.
2035 16-bit offsets can be added using an extra addiu. Larger offsets
2036 must be calculated separately and then added to the base. */
2037 split_const (x
, &x
, &offset
);
2040 int n
= mips_const_insns (x
);
2043 if (SMALL_INT (offset
))
2046 return n
+ 1 + mips_build_integer (codes
, INTVAL (offset
));
2053 return mips_symbol_insns (mips_classify_symbol (x
, SYMBOL_CONTEXT_LEA
),
/* Return the number of instructions needed for memory reference X.
   Count extended mips16 instructions as two instructions.  */

int
mips_fetch_insns (rtx x)
{
  gcc_assert (MEM_P (x));
  return mips_address_insns (XEXP (x, 0), GET_MODE (x));
}

/* Return the number of instructions needed for an integer division.  */

int
mips_idiv_insns (void)
{
  int count;

  count = 1;
  if (TARGET_CHECK_ZERO_DIV)
    {
      if (GENERATE_DIVIDE_TRAPS)
        count++;
      else
        count += 2;
    }

  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
    count++;

  return count;
}

/* This function is used to implement GO_IF_LEGITIMATE_ADDRESS.  It
   returns a nonzero value if X is a legitimate address for a memory
   operand of the indicated MODE.  STRICT is nonzero if this function
   is called during reload.  */

bool
mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  struct mips_address_info addr;

  return mips_classify_address (&addr, x, mode, strict);
}
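
/* A minimal usage sketch (hypothetical caller, shown only for illustration):
   testing whether $sp + 16 is a legitimate SImode address in the
   non-strict sense:

     rtx addr = plus_constant (stack_pointer_rtx, 16);
     if (mips_legitimate_address_p (SImode, addr, 0))
       ...

   The GO_IF_LEGITIMATE_ADDRESS macro in mips.h is implemented in terms of
   this function, as noted in the comment above.  */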
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */

static rtx
mips_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
          ? emit_move_insn (dest, src)
          : emit_move_insn_1 (dest, src));
}

/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.  */

static rtx
mips_force_temporary (rtx dest, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (Pmode, value);
  else
    {
      mips_emit_move (copy_rtx (dest), value);
      return dest;
    }
}
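
/* Usage sketch: mips_split_symbol and mips_unspec_offset_high below use
   this helper to materialize the HIGH part of an address, for example:

     high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
     high = mips_force_temporary (temp, high);

   after which the caller builds a LO_SUM of HIGH and the original
   address.  */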
2137 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2138 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2139 constant in that context and can be split into a high part and a LO_SUM.
2140 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2141 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2143 TEMP is as for mips_force_temporary and is used to load the high
2144 part into a register. */
2147 mips_split_symbol (rtx temp
, rtx addr
, enum machine_mode mode
, rtx
*lo_sum_out
)
2149 enum mips_symbol_context context
;
2150 enum mips_symbol_type symbol_type
;
2153 context
= (mode
== MAX_MACHINE_MODE
2154 ? SYMBOL_CONTEXT_LEA
2155 : SYMBOL_CONTEXT_MEM
);
2156 if (!mips_symbolic_constant_p (addr
, context
, &symbol_type
)
2157 || mips_symbol_insns (symbol_type
, mode
) == 0
2158 || !mips_split_p
[symbol_type
])
2163 if (symbol_type
== SYMBOL_GP_RELATIVE
)
2165 if (!can_create_pseudo_p ())
2167 emit_insn (gen_load_const_gp (copy_rtx (temp
)));
2171 high
= mips16_gp_pseudo_reg ();
2175 high
= gen_rtx_HIGH (Pmode
, copy_rtx (addr
));
2176 high
= mips_force_temporary (temp
, high
);
2178 *lo_sum_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
/* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
   and add CONST_INT OFFSET to the result.  */

static rtx
mips_unspec_address_offset (rtx base, rtx offset,
                            enum mips_symbol_type symbol_type)
{
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
                         UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}

/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

rtx
mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
{
  rtx base, offset;

  split_const (address, &base, &offset);
  return mips_unspec_address_offset (base, offset, symbol_type);
}

/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is available as a temporary register if needed.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
                         enum mips_symbol_type symbol_type)
{
  if (mips_split_p[symbol_type])
    {
      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
      addr = mips_force_temporary (temp, addr);
      return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
    }
  return base;
}
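
/* Illustrative example (a sketch, not authoritative): for the local-exec
   TLS model, mips_legitimize_tls_address below passes SYMBOL_TPREL here,
   which with 32-bit symbols typically expands to something like

       lui    rX,%tprel_hi(symbol)
       addu   rX,rX,$3

   where $3 ($v1) holds the thread pointer and the caller then uses
   %tprel_lo(symbol) in a LO_SUM with rX.  The exact relocation operators
   depend on mips_split_p[] and the configured ABI.  */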
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   mips_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;
      if (TARGET_MIPS16)
        {
          /* Load the full offset into a register so that we can use
             an unextended instruction for the address itself.  */
          high = GEN_INT (offset);
          offset = 0;
        }
      else
        {
          /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.  */
          high = GEN_INT (CONST_HIGH_PART (offset));
          offset = CONST_LOW_PART (offset);
        }
      high = mips_force_temporary (temp, high);
      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (reg, offset);
}
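
/* Worked example: for OFFSET == 0x12345 in non-MIPS16 code,
   CONST_HIGH_PART (0x12345) is 0x10000 and CONST_LOW_PART (0x12345) is
   0x2345 (assuming the usual round-to-nearest-0x10000 definitions in
   mips.h), so HIGH is loaded with a single lui, added to REG, and the
   returned address is that sum plus the 16-bit remainder 0x2345.  */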
2260 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2261 referencing, and TYPE is the symbol type to use (either global
2262 dynamic or local dynamic). V0 is an RTX for the return value
2263 location. The entire insn sequence is returned. */
2265 static GTY(()) rtx mips_tls_symbol
;
2268 mips_call_tls_get_addr (rtx sym
, enum mips_symbol_type type
, rtx v0
)
2270 rtx insn
, loc
, tga
, a0
;
2272 a0
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
2274 if (!mips_tls_symbol
)
2275 mips_tls_symbol
= init_one_libfunc ("__tls_get_addr");
2277 loc
= mips_unspec_address (sym
, type
);
2281 emit_insn (gen_rtx_SET (Pmode
, a0
,
2282 gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, loc
)));
2283 tga
= gen_rtx_MEM (Pmode
, mips_tls_symbol
);
2284 insn
= emit_call_insn (gen_call_value (v0
, tga
, const0_rtx
, const0_rtx
));
2285 CONST_OR_PURE_CALL_P (insn
) = 1;
2286 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), v0
);
2287 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), a0
);
2288 insn
= get_insns ();
2295 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2296 return value will be a valid address and move_operand (either a REG
2300 mips_legitimize_tls_address (rtx loc
)
2302 rtx dest
, insn
, v0
, v1
, tmp1
, tmp2
, eqv
;
2303 enum tls_model model
;
2305 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
2306 v1
= gen_rtx_REG (Pmode
, GP_RETURN
+ 1);
  model = SYMBOL_REF_TLS_MODEL (loc);
  /* Only TARGET_ABICALLS code can have more than one module; other
     code must be static and should not use a GOT.  All TLS models
     reduce to local exec in this situation.  */
  if (!TARGET_ABICALLS)
    model = TLS_MODEL_LOCAL_EXEC;
2317 case TLS_MODEL_GLOBAL_DYNAMIC
:
2318 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSGD
, v0
);
2319 dest
= gen_reg_rtx (Pmode
);
2320 emit_libcall_block (insn
, dest
, v0
, loc
);
2323 case TLS_MODEL_LOCAL_DYNAMIC
:
2324 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSLDM
, v0
);
2325 tmp1
= gen_reg_rtx (Pmode
);
2327 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2328 share the LDM result with other LD model accesses. */
2329 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2331 emit_libcall_block (insn
, tmp1
, v0
, eqv
);
2333 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_DTPREL
);
2334 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
,
2335 mips_unspec_address (loc
, SYMBOL_DTPREL
));
2338 case TLS_MODEL_INITIAL_EXEC
:
2339 tmp1
= gen_reg_rtx (Pmode
);
2340 tmp2
= mips_unspec_address (loc
, SYMBOL_GOTTPREL
);
2341 if (Pmode
== DImode
)
2343 emit_insn (gen_tls_get_tp_di (v1
));
2344 emit_insn (gen_load_gotdi (tmp1
, pic_offset_table_rtx
, tmp2
));
2348 emit_insn (gen_tls_get_tp_si (v1
));
2349 emit_insn (gen_load_gotsi (tmp1
, pic_offset_table_rtx
, tmp2
));
2351 dest
= gen_reg_rtx (Pmode
);
2352 emit_insn (gen_add3_insn (dest
, tmp1
, v1
));
2355 case TLS_MODEL_LOCAL_EXEC
:
2356 if (Pmode
== DImode
)
2357 emit_insn (gen_tls_get_tp_di (v1
));
2359 emit_insn (gen_tls_get_tp_si (v1
));
2361 tmp1
= mips_unspec_offset_high (NULL
, v1
, loc
, SYMBOL_TPREL
);
2362 dest
= gen_rtx_LO_SUM (Pmode
, tmp1
,
2363 mips_unspec_address (loc
, SYMBOL_TPREL
));
2373 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2374 be legitimized in a way that the generic machinery might not expect,
2375 put the new address in *XLOC and return true. MODE is the mode of
2376 the memory being accessed. */
2379 mips_legitimize_address (rtx
*xloc
, enum machine_mode mode
)
2381 if (mips_tls_operand_p (*xloc
))
2383 *xloc
= mips_legitimize_tls_address (*xloc
);
2387 /* See if the address can split into a high part and a LO_SUM. */
2388 if (mips_split_symbol (NULL
, *xloc
, mode
, xloc
))
2391 if (GET_CODE (*xloc
) == PLUS
&& GET_CODE (XEXP (*xloc
, 1)) == CONST_INT
)
2393 /* Handle REG + CONSTANT using mips_add_offset. */
2396 reg
= XEXP (*xloc
, 0);
2397 if (!mips_valid_base_register_p (reg
, mode
, 0))
2398 reg
= copy_to_mode_reg (Pmode
, reg
);
2399 *xloc
= mips_add_offset (0, reg
, INTVAL (XEXP (*xloc
, 1)));
2407 /* Subroutine of mips_build_integer (with the same interface).
2408 Assume that the final action in the sequence should be a left shift. */
2411 mips_build_shift (struct mips_integer_op
*codes
, HOST_WIDE_INT value
)
2413 unsigned int i
, shift
;
2415 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2416 since signed numbers are easier to load than unsigned ones. */
2418 while ((value
& 1) == 0)
2419 value
/= 2, shift
++;
2421 i
= mips_build_integer (codes
, value
);
2422 codes
[i
].code
= ASHIFT
;
2423 codes
[i
].value
= shift
;
2428 /* As for mips_build_shift, but assume that the final action will be
2429 an IOR or PLUS operation. */
2432 mips_build_lower (struct mips_integer_op
*codes
, unsigned HOST_WIDE_INT value
)
2434 unsigned HOST_WIDE_INT high
;
2437 high
= value
& ~(unsigned HOST_WIDE_INT
) 0xffff;
2438 if (!LUI_OPERAND (high
) && (value
& 0x18000) == 0x18000)
2440 /* The constant is too complex to load with a simple lui/ori pair
2441 so our goal is to clear as many trailing zeros as possible.
2442 In this case, we know bit 16 is set and that the low 16 bits
2443 form a negative number. If we subtract that number from VALUE,
2444 we will clear at least the lowest 17 bits, maybe more. */
2445 i
= mips_build_integer (codes
, CONST_HIGH_PART (value
));
2446 codes
[i
].code
= PLUS
;
2447 codes
[i
].value
= CONST_LOW_PART (value
);
2451 i
= mips_build_integer (codes
, high
);
2452 codes
[i
].code
= IOR
;
2453 codes
[i
].value
= value
& 0xffff;
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static unsigned int
mips_build_integer (struct mips_integer_op *codes,
                    unsigned HOST_WIDE_INT value)
{
  if (SMALL_OPERAND (value)
      || SMALL_OPERAND_UNSIGNED (value)
      || LUI_OPERAND (value))
    {
      /* The value can be loaded with a single instruction.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
    {
      /* Either the constant is a simple LUI/ORI combination or its
         lowest bit is set.  We don't want to shift in this case.  */
      return mips_build_lower (codes, value);
    }
  else if ((value & 0xffff) == 0)
    {
      /* The constant will need at least three actions.  The lowest
         16 bits are clear, so the final action will be a shift.  */
      return mips_build_shift (codes, value);
    }
  else
    {
      /* The final action could be a shift, add or inclusive OR.
         Rather than use a complex condition to select the best
         approach, try both mips_build_shift and mips_build_lower
         and pick the one that gives the shortest sequence.
         Note that this case is only used once per constant.  */
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
      unsigned int cost, alt_cost;

      cost = mips_build_shift (codes, value);
      alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
        {
          memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
          cost = alt_cost;
        }
      return cost;
    }
}
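
/* Worked example: for VALUE == 0x1234abcd none of the single-instruction
   tests match and the low bit is set, so mips_build_lower is used.  Its
   high part 0x12340000 is a LUI operand, giving the sequence

       codes[0] = { UNKNOWN, 0x12340000 }    (lui)
       codes[1] = { IOR,     0xabcd }        (ori)

   i.e. the familiar lui/ori pair.  */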
/* Load VALUE into DEST, using TEMP as a temporary register if need be.  */

void
mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum machine_mode mode;
  unsigned int i, cost;
  rtx x;

  mode = GET_MODE (dest);
  cost = mips_build_integer (codes, value);

  /* Apply each binary operation to X.  Invariant: X is a legitimate
     source operand for a SET pattern.  */
  x = GEN_INT (codes[0].value);
  for (i = 1; i < cost; i++)
    {
      if (!can_create_pseudo_p ())
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp, x));
          x = temp;
        }
      else
        x = force_reg (mode, x);
      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}
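
/* Usage note: mips_legitimize_const_move below calls this function as

     mips_move_integer (dest, dest, INTVAL (src));

   when SRC satisfies splittable_const_int_operand, reusing DEST itself
   as the temporary.  */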
2541 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2542 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2546 mips_legitimize_const_move (enum machine_mode mode
, rtx dest
, rtx src
)
2550 /* Split moves of big integers into smaller pieces. */
2551 if (splittable_const_int_operand (src
, mode
))
2553 mips_move_integer (dest
, dest
, INTVAL (src
));
2557 /* Split moves of symbolic constants into high/low pairs. */
2558 if (mips_split_symbol (dest
, src
, MAX_MACHINE_MODE
, &src
))
2560 emit_insn (gen_rtx_SET (VOIDmode
, dest
, src
));
2564 if (mips_tls_operand_p (src
))
2566 mips_emit_move (dest
, mips_legitimize_tls_address (src
));
2570 /* If we have (const (plus symbol offset)), load the symbol first
2571 and then add in the offset. This is usually better than forcing
2572 the constant into memory, at least in non-mips16 code. */
2573 split_const (src
, &base
, &offset
);
2575 && offset
!= const0_rtx
2576 && (can_create_pseudo_p () || SMALL_INT (offset
)))
2578 base
= mips_force_temporary (dest
, base
);
2579 mips_emit_move (dest
, mips_add_offset (0, base
, INTVAL (offset
)));
2583 src
= force_const_mem (mode
, src
);
2585 /* When using explicit relocs, constant pool references are sometimes
2586 not legitimate addresses. */
2587 mips_split_symbol (dest
, XEXP (src
, 0), mode
, &XEXP (src
, 0));
2588 mips_emit_move (dest
, src
);
2592 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2593 sequence that is valid. */
2596 mips_legitimize_move (enum machine_mode mode
, rtx dest
, rtx src
)
2598 if (!register_operand (dest
, mode
) && !reg_or_0_operand (src
, mode
))
2600 mips_emit_move (dest
, force_reg (mode
, src
));
2604 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2605 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
2606 && REG_P (src
) && MD_REG_P (REGNO (src
))
2607 && REG_P (dest
) && GP_REG_P (REGNO (dest
)))
2609 int other_regno
= REGNO (src
) == HI_REGNUM
? LO_REGNUM
: HI_REGNUM
;
2610 if (GET_MODE_SIZE (mode
) <= 4)
2611 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode
, REGNO (dest
)),
2612 gen_rtx_REG (SImode
, REGNO (src
)),
2613 gen_rtx_REG (SImode
, other_regno
)));
2615 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode
, REGNO (dest
)),
2616 gen_rtx_REG (DImode
, REGNO (src
)),
2617 gen_rtx_REG (DImode
, other_regno
)));
2621 /* We need to deal with constants that would be legitimate
2622 immediate_operands but not legitimate move_operands. */
2623 if (CONSTANT_P (src
) && !move_operand (src
, mode
))
2625 mips_legitimize_const_move (mode
, dest
, src
);
2626 set_unique_reg_note (get_last_insn (), REG_EQUAL
, copy_rtx (src
));
/* We need a lot of little routines to check constant values on the
   mips16.  These are used to figure out how long the instruction will
   be.  It would be much better to do this using constraints, but
   there aren't nearly enough letters available.  */

static int
m16_check_op (rtx op, int low, int high, int mask)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) >= low
          && INTVAL (op) <= high
          && (INTVAL (op) & mask) == 0);
}
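
/* For example, the m16_uimm8_4 predicate below accepts exactly the
   multiples of 4 in [0, 0x3fc], the range of a scaled unsigned 8-bit
   field, while m16_simm5_1 accepts the plain signed 5-bit range
   [-0x10, 0xf].  */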
2647 m16_uimm3_b (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2649 return m16_check_op (op
, 0x1, 0x8, 0);
2653 m16_simm4_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2655 return m16_check_op (op
, - 0x8, 0x7, 0);
2659 m16_nsimm4_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2661 return m16_check_op (op
, - 0x7, 0x8, 0);
2665 m16_simm5_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2667 return m16_check_op (op
, - 0x10, 0xf, 0);
2671 m16_nsimm5_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2673 return m16_check_op (op
, - 0xf, 0x10, 0);
2677 m16_uimm5_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2679 return m16_check_op (op
, (- 0x10) << 2, 0xf << 2, 3);
2683 m16_nuimm5_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2685 return m16_check_op (op
, (- 0xf) << 2, 0x10 << 2, 3);
2689 m16_simm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2691 return m16_check_op (op
, - 0x80, 0x7f, 0);
2695 m16_nsimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2697 return m16_check_op (op
, - 0x7f, 0x80, 0);
2701 m16_uimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2703 return m16_check_op (op
, 0x0, 0xff, 0);
2707 m16_nuimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2709 return m16_check_op (op
, - 0xff, 0x0, 0);
2713 m16_uimm8_m1_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2715 return m16_check_op (op
, - 0x1, 0xfe, 0);
2719 m16_uimm8_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2721 return m16_check_op (op
, 0x0, 0xff << 2, 3);
2725 m16_nuimm8_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2727 return m16_check_op (op
, (- 0xff) << 2, 0x0, 3);
2731 m16_simm8_8 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2733 return m16_check_op (op
, (- 0x80) << 3, 0x7f << 3, 7);
2737 m16_nsimm8_8 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2739 return m16_check_op (op
, (- 0x7f) << 3, 0x80 << 3, 7);
2742 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2743 address instruction. */
2746 mips_lwxs_address_p (rtx addr
)
2749 && GET_CODE (addr
) == PLUS
2750 && REG_P (XEXP (addr
, 1)))
2752 rtx offset
= XEXP (addr
, 0);
2753 if (GET_CODE (offset
) == MULT
2754 && REG_P (XEXP (offset
, 0))
2755 && GET_CODE (XEXP (offset
, 1)) == CONST_INT
2756 && INTVAL (XEXP (offset
, 1)) == 4)
2763 mips_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
2765 enum machine_mode mode
= GET_MODE (x
);
2766 bool float_mode_p
= FLOAT_MODE_P (mode
);
2773 /* A number between 1 and 8 inclusive is efficient for a shift.
2774 Otherwise, we will need an extended instruction. */
2775 if ((outer_code
) == ASHIFT
|| (outer_code
) == ASHIFTRT
2776 || (outer_code
) == LSHIFTRT
)
2778 if (INTVAL (x
) >= 1 && INTVAL (x
) <= 8)
2781 *total
= COSTS_N_INSNS (1);
2785 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2786 if ((outer_code
) == XOR
2787 && INTVAL (x
) >= 0 && INTVAL (x
) < 0x10000)
2793 /* We may be able to use slt or sltu for a comparison with a
2794 signed 16-bit value. (The boundary conditions aren't quite
2795 right, but this is just a heuristic anyhow.) */
2796 if (((outer_code
) == LT
|| (outer_code
) == LE
2797 || (outer_code
) == GE
|| (outer_code
) == GT
2798 || (outer_code
) == LTU
|| (outer_code
) == LEU
2799 || (outer_code
) == GEU
|| (outer_code
) == GTU
)
2800 && INTVAL (x
) >= -0x8000 && INTVAL (x
) < 0x8000)
2806 /* Equality comparisons with 0 are cheap. */
2807 if (((outer_code
) == EQ
|| (outer_code
) == NE
)
2814 /* Constants in the range 0...255 can be loaded with an unextended
2815 instruction. They are therefore as cheap as a register move.
2817 Given the choice between "li R1,0...255" and "move R1,R2"
2818 (where R2 is a known constant), it is usually better to use "li",
2819 since we do not want to unnecessarily extend the lifetime
2821 if (outer_code
== SET
2823 && INTVAL (x
) < 256)
2831 /* These can be used anywhere. */
2836 /* Otherwise fall through to the handling below because
2837 we'll need to construct the constant. */
2843 if (LEGITIMATE_CONSTANT_P (x
))
2845 *total
= COSTS_N_INSNS (1);
2850 /* The value will need to be fetched from the constant pool. */
2851 *total
= CONSTANT_POOL_COST
;
2857 /* If the address is legitimate, return the number of
2858 instructions it needs. */
2859 rtx addr
= XEXP (x
, 0);
2860 int n
= mips_address_insns (addr
, GET_MODE (x
));
2863 *total
= COSTS_N_INSNS (n
+ 1);
2866 /* Check for scaled indexed address. */
2867 if (mips_lwxs_address_p (addr
))
2869 *total
= COSTS_N_INSNS (2);
2872 /* Otherwise use the default handling. */
2877 *total
= COSTS_N_INSNS (6);
2881 *total
= COSTS_N_INSNS ((mode
== DImode
&& !TARGET_64BIT
) ? 2 : 1);
2887 if (mode
== DImode
&& !TARGET_64BIT
)
2889 *total
= COSTS_N_INSNS (2);
2897 if (mode
== DImode
&& !TARGET_64BIT
)
2899 *total
= COSTS_N_INSNS ((GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2907 *total
= COSTS_N_INSNS (1);
2909 *total
= COSTS_N_INSNS (4);
2913 *total
= COSTS_N_INSNS (1);
2920 *total
= mips_cost
->fp_add
;
2924 else if (mode
== DImode
&& !TARGET_64BIT
)
2926 *total
= COSTS_N_INSNS (4);
2932 if (mode
== DImode
&& !TARGET_64BIT
)
2934 *total
= COSTS_N_INSNS (4);
2941 *total
= mips_cost
->fp_mult_sf
;
2943 else if (mode
== DFmode
)
2944 *total
= mips_cost
->fp_mult_df
;
2946 else if (mode
== SImode
)
2947 *total
= mips_cost
->int_mult_si
;
2950 *total
= mips_cost
->int_mult_di
;
2959 *total
= mips_cost
->fp_div_sf
;
2961 *total
= mips_cost
->fp_div_df
;
2970 *total
= mips_cost
->int_div_di
;
2972 *total
= mips_cost
->int_div_si
;
2977 /* A sign extend from SImode to DImode in 64-bit mode is often
2978 zero instructions, because the result can often be used
2979 directly by another instruction; we'll call it one. */
2980 if (TARGET_64BIT
&& mode
== DImode
2981 && GET_MODE (XEXP (x
, 0)) == SImode
)
2982 *total
= COSTS_N_INSNS (1);
2984 *total
= COSTS_N_INSNS (2);
2988 if (TARGET_64BIT
&& mode
== DImode
2989 && GET_MODE (XEXP (x
, 0)) == SImode
)
2990 *total
= COSTS_N_INSNS (2);
2992 *total
= COSTS_N_INSNS (1);
2996 case UNSIGNED_FLOAT
:
2999 case FLOAT_TRUNCATE
:
3001 *total
= mips_cost
->fp_add
;
3009 /* Provide the costs of an addressing mode that contains ADDR.
3010 If ADDR is not a valid address, its cost is irrelevant. */
3013 mips_address_cost (rtx addr
)
3015 return mips_address_insns (addr
, SImode
);
3018 /* Return one word of double-word value OP, taking into account the fixed
3019 endianness of certain registers. HIGH_P is true to select the high part,
3020 false to select the low part. */
3023 mips_subword (rtx op
, int high_p
)
3026 enum machine_mode mode
;
3028 mode
= GET_MODE (op
);
3029 if (mode
== VOIDmode
)
3032 if (TARGET_BIG_ENDIAN
? !high_p
: high_p
)
3033 byte
= UNITS_PER_WORD
;
3037 if (FP_REG_RTX_P (op
))
3038 return gen_rtx_REG (word_mode
, high_p
? REGNO (op
) + 1 : REGNO (op
));
3041 return mips_rewrite_small_data (adjust_address (op
, word_mode
, byte
));
3043 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
3047 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3050 mips_split_64bit_move_p (rtx dest
, rtx src
)
3055 /* FP->FP moves can be done in a single instruction. */
3056 if (FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
3059 /* Check for floating-point loads and stores. They can be done using
3060 ldc1 and sdc1 on MIPS II and above. */
3063 if (FP_REG_RTX_P (dest
) && MEM_P (src
))
3065 if (FP_REG_RTX_P (src
) && MEM_P (dest
))
3072 /* Split a 64-bit move from SRC to DEST assuming that
3073 mips_split_64bit_move_p holds.
3075 Moves into and out of FPRs cause some difficulty here. Such moves
3076 will always be DFmode, since paired FPRs are not allowed to store
3077 DImode values. The most natural representation would be two separate
3078 32-bit moves, such as:
3080 (set (reg:SI $f0) (mem:SI ...))
3081 (set (reg:SI $f1) (mem:SI ...))
3083 However, the second insn is invalid because odd-numbered FPRs are
3084 not allowed to store independent values. Use the patterns load_df_low,
3085 load_df_high and store_df_high instead. */
3088 mips_split_64bit_move (rtx dest
, rtx src
)
3090 if (FP_REG_RTX_P (dest
))
3092 /* Loading an FPR from memory or from GPRs. */
3095 dest
= gen_lowpart (DFmode
, dest
);
3096 emit_insn (gen_load_df_low (dest
, mips_subword (src
, 0)));
3097 emit_insn (gen_mthc1 (dest
, mips_subword (src
, 1),
3102 emit_insn (gen_load_df_low (copy_rtx (dest
),
3103 mips_subword (src
, 0)));
3104 emit_insn (gen_load_df_high (dest
, mips_subword (src
, 1),
3108 else if (FP_REG_RTX_P (src
))
3110 /* Storing an FPR into memory or GPRs. */
3113 src
= gen_lowpart (DFmode
, src
);
3114 mips_emit_move (mips_subword (dest
, 0), mips_subword (src
, 0));
3115 emit_insn (gen_mfhc1 (mips_subword (dest
, 1), src
));
3119 mips_emit_move (mips_subword (dest
, 0), mips_subword (src
, 0));
3120 emit_insn (gen_store_df_high (mips_subword (dest
, 1), src
));
3125 /* The operation can be split into two normal moves. Decide in
3126 which order to do them. */
3129 low_dest
= mips_subword (dest
, 0);
3130 if (REG_P (low_dest
)
3131 && reg_overlap_mentioned_p (low_dest
, src
))
3133 mips_emit_move (mips_subword (dest
, 1), mips_subword (src
, 1));
3134 mips_emit_move (low_dest
, mips_subword (src
, 0));
3138 mips_emit_move (low_dest
, mips_subword (src
, 0));
3139 mips_emit_move (mips_subword (dest
, 1), mips_subword (src
, 1));
3144 /* Return the appropriate instructions to move SRC into DEST. Assume
3145 that SRC is operand 1 and DEST is operand 0. */
3148 mips_output_move (rtx dest
, rtx src
)
3150 enum rtx_code dest_code
, src_code
;
3151 enum mips_symbol_type symbol_type
;
3154 dest_code
= GET_CODE (dest
);
3155 src_code
= GET_CODE (src
);
3156 dbl_p
= (GET_MODE_SIZE (GET_MODE (dest
)) == 8);
3158 if (dbl_p
&& mips_split_64bit_move_p (dest
, src
))
3161 if ((src_code
== REG
&& GP_REG_P (REGNO (src
)))
3162 || (!TARGET_MIPS16
&& src
== CONST0_RTX (GET_MODE (dest
))))
3164 if (dest_code
== REG
)
3166 if (GP_REG_P (REGNO (dest
)))
3167 return "move\t%0,%z1";
3169 if (MD_REG_P (REGNO (dest
)))
3172 if (DSP_ACC_REG_P (REGNO (dest
)))
3174 static char retval
[] = "mt__\t%z1,%q0";
3175 retval
[2] = reg_names
[REGNO (dest
)][4];
3176 retval
[3] = reg_names
[REGNO (dest
)][5];
3180 if (FP_REG_P (REGNO (dest
)))
3181 return (dbl_p
? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3183 if (ALL_COP_REG_P (REGNO (dest
)))
3185 static char retval
[] = "dmtc_\t%z1,%0";
3187 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
3188 return (dbl_p
? retval
: retval
+ 1);
3191 if (dest_code
== MEM
)
3192 return (dbl_p
? "sd\t%z1,%0" : "sw\t%z1,%0");
3194 if (dest_code
== REG
&& GP_REG_P (REGNO (dest
)))
3196 if (src_code
== REG
)
3198 if (DSP_ACC_REG_P (REGNO (src
)))
3200 static char retval
[] = "mf__\t%0,%q1";
3201 retval
[2] = reg_names
[REGNO (src
)][4];
3202 retval
[3] = reg_names
[REGNO (src
)][5];
3206 if (ST_REG_P (REGNO (src
)) && ISA_HAS_8CC
)
3207 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3209 if (FP_REG_P (REGNO (src
)))
3210 return (dbl_p
? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3212 if (ALL_COP_REG_P (REGNO (src
)))
3214 static char retval
[] = "dmfc_\t%0,%1";
3216 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
3217 return (dbl_p
? retval
: retval
+ 1);
3221 if (src_code
== MEM
)
3222 return (dbl_p
? "ld\t%0,%1" : "lw\t%0,%1");
3224 if (src_code
== CONST_INT
)
3226 /* Don't use the X format, because that will give out of
3227 range numbers for 64-bit hosts and 32-bit targets. */
3229 return "li\t%0,%1\t\t\t# %X1";
3231 if (INTVAL (src
) >= 0 && INTVAL (src
) <= 0xffff)
3234 if (INTVAL (src
) < 0 && INTVAL (src
) >= -0xffff)
3238 if (src_code
== HIGH
)
3239 return TARGET_MIPS16
? "#" : "lui\t%0,%h1";
3241 if (CONST_GP_P (src
))
3242 return "move\t%0,%1";
3244 if (mips_symbolic_constant_p (src
, SYMBOL_CONTEXT_LEA
, &symbol_type
)
3245 && mips_lo_relocs
[symbol_type
] != 0)
3247 /* A signed 16-bit constant formed by applying a relocation
3248 operator to a symbolic address. */
3249 gcc_assert (!mips_split_p
[symbol_type
]);
3250 return "li\t%0,%R1";
3253 if (symbolic_operand (src
, VOIDmode
))
3255 gcc_assert (TARGET_MIPS16
3256 ? TARGET_MIPS16_TEXT_LOADS
3257 : !TARGET_EXPLICIT_RELOCS
);
3258 return (dbl_p
? "dla\t%0,%1" : "la\t%0,%1");
3261 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
3263 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
3265 if (GET_MODE (dest
) == V2SFmode
)
3266 return "mov.ps\t%0,%1";
3268 return (dbl_p
? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3271 if (dest_code
== MEM
)
3272 return (dbl_p
? "sdc1\t%1,%0" : "swc1\t%1,%0");
3274 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
3276 if (src_code
== MEM
)
3277 return (dbl_p
? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3279 if (dest_code
== REG
&& ALL_COP_REG_P (REGNO (dest
)) && src_code
== MEM
)
3281 static char retval
[] = "l_c_\t%0,%1";
3283 retval
[1] = (dbl_p
? 'd' : 'w');
3284 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
3287 if (dest_code
== MEM
&& src_code
== REG
&& ALL_COP_REG_P (REGNO (src
)))
3289 static char retval
[] = "s_c_\t%1,%0";
3291 retval
[1] = (dbl_p
? 'd' : 'w');
3292 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
3298 /* Restore $gp from its save slot. Valid only when using o32 or
3302 mips_restore_gp (void)
3306 gcc_assert (TARGET_ABICALLS
&& TARGET_OLDABI
);
3308 address
= mips_add_offset (pic_offset_table_rtx
,
3309 frame_pointer_needed
3310 ? hard_frame_pointer_rtx
3311 : stack_pointer_rtx
,
3312 current_function_outgoing_args_size
);
3313 slot
= gen_rtx_MEM (Pmode
, address
);
3315 mips_emit_move (pic_offset_table_rtx
, slot
);
3316 if (!TARGET_EXPLICIT_RELOCS
)
3317 emit_insn (gen_blockage ());
3320 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3323 mips_emit_binary (enum rtx_code code
, rtx target
, rtx op0
, rtx op1
)
3325 emit_insn (gen_rtx_SET (VOIDmode
, target
,
3326 gen_rtx_fmt_ee (code
, GET_MODE (target
), op0
, op1
)));
3329 /* Return true if CMP1 is a suitable second operand for relational
3330 operator CODE. See also the *sCC patterns in mips.md. */
3333 mips_relational_operand_ok_p (enum rtx_code code
, rtx cmp1
)
3339 return reg_or_0_operand (cmp1
, VOIDmode
);
3343 return !TARGET_MIPS16
&& cmp1
== const1_rtx
;
3347 return arith_operand (cmp1
, VOIDmode
);
3350 return sle_operand (cmp1
, VOIDmode
);
3353 return sleu_operand (cmp1
, VOIDmode
);
3360 /* Canonicalize LE or LEU comparisons into LT comparisons when
3361 possible to avoid extra instructions or inverting the
3365 mips_canonicalize_comparison (enum rtx_code
*code
, rtx
*cmp1
,
3366 enum machine_mode mode
)
3368 HOST_WIDE_INT original
, plus_one
;
3370 if (GET_CODE (*cmp1
) != CONST_INT
)
3373 original
= INTVAL (*cmp1
);
3374 plus_one
= trunc_int_for_mode ((unsigned HOST_WIDE_INT
) original
+ 1, mode
);
3379 if (original
< plus_one
)
3382 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
3391 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
3404 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3405 result in TARGET. CMP0 and TARGET are register_operands that have
3406 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3407 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3410 mips_emit_int_relational (enum rtx_code code
, bool *invert_ptr
,
3411 rtx target
, rtx cmp0
, rtx cmp1
)
3413 /* First see if there is a MIPS instruction that can do this operation
3414 with CMP1 in its current form. If not, try to canonicalize the
3415 comparison to LT. If that fails, try doing the same for the
3416 inverse operation. If that also fails, force CMP1 into a register
3418 if (mips_relational_operand_ok_p (code
, cmp1
))
3419 mips_emit_binary (code
, target
, cmp0
, cmp1
);
3420 else if (mips_canonicalize_comparison (&code
, &cmp1
, GET_MODE (target
)))
3421 mips_emit_binary (code
, target
, cmp0
, cmp1
);
3424 enum rtx_code inv_code
= reverse_condition (code
);
3425 if (!mips_relational_operand_ok_p (inv_code
, cmp1
))
3427 cmp1
= force_reg (GET_MODE (cmp0
), cmp1
);
3428 mips_emit_int_relational (code
, invert_ptr
, target
, cmp0
, cmp1
);
3430 else if (invert_ptr
== 0)
3432 rtx inv_target
= gen_reg_rtx (GET_MODE (target
));
3433 mips_emit_binary (inv_code
, inv_target
, cmp0
, cmp1
);
3434 mips_emit_binary (XOR
, target
, inv_target
, const1_rtx
);
3438 *invert_ptr
= !*invert_ptr
;
3439 mips_emit_binary (inv_code
, target
, cmp0
, cmp1
);
3444 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3445 The register will have the same mode as CMP0. */
3448 mips_zero_if_equal (rtx cmp0
, rtx cmp1
)
3450 if (cmp1
== const0_rtx
)
3453 if (uns_arith_operand (cmp1
, VOIDmode
))
3454 return expand_binop (GET_MODE (cmp0
), xor_optab
,
3455 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
3457 return expand_binop (GET_MODE (cmp0
), sub_optab
,
3458 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
3461 /* Convert *CODE into a code that can be used in a floating-point
3462 scc instruction (c.<cond>.<fmt>). Return true if the values of
3463 the condition code registers will be inverted, with 0 indicating
3464 that the condition holds. */
3467 mips_reverse_fp_cond_p (enum rtx_code
*code
)
3474 *code
= reverse_condition_maybe_unordered (*code
);
3482 /* Convert a comparison into something that can be used in a branch or
3483 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3484 being compared and *CODE is the code used to compare them.
3486 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3487 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3488 otherwise any standard branch condition can be used. The standard branch
3491 - EQ/NE between two registers.
3492 - any comparison between a register and zero. */
3495 mips_emit_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
, bool need_eq_ne_p
)
3497 if (GET_MODE_CLASS (GET_MODE (cmp_operands
[0])) == MODE_INT
)
3499 if (!need_eq_ne_p
&& cmp_operands
[1] == const0_rtx
)
3501 *op0
= cmp_operands
[0];
3502 *op1
= cmp_operands
[1];
3504 else if (*code
== EQ
|| *code
== NE
)
3508 *op0
= mips_zero_if_equal (cmp_operands
[0], cmp_operands
[1]);
3513 *op0
= cmp_operands
[0];
3514 *op1
= force_reg (GET_MODE (*op0
), cmp_operands
[1]);
3519 /* The comparison needs a separate scc instruction. Store the
3520 result of the scc in *OP0 and compare it against zero. */
3521 bool invert
= false;
3522 *op0
= gen_reg_rtx (GET_MODE (cmp_operands
[0]));
3524 mips_emit_int_relational (*code
, &invert
, *op0
,
3525 cmp_operands
[0], cmp_operands
[1]);
3526 *code
= (invert
? EQ
: NE
);
3531 enum rtx_code cmp_code
;
3533 /* Floating-point tests use a separate c.cond.fmt comparison to
3534 set a condition code register. The branch or conditional move
3535 will then compare that register against zero.
3537 Set CMP_CODE to the code of the comparison instruction and
3538 *CODE to the code that the branch or move should use. */
3540 *code
= mips_reverse_fp_cond_p (&cmp_code
) ? EQ
: NE
;
3542 ? gen_reg_rtx (CCmode
)
3543 : gen_rtx_REG (CCmode
, FPSW_REGNUM
));
3545 mips_emit_binary (cmp_code
, *op0
, cmp_operands
[0], cmp_operands
[1]);
3549 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3550 Store the result in TARGET and return true if successful.
3552 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3555 mips_emit_scc (enum rtx_code code
, rtx target
)
3557 if (GET_MODE_CLASS (GET_MODE (cmp_operands
[0])) != MODE_INT
)
3560 target
= gen_lowpart (GET_MODE (cmp_operands
[0]), target
);
3561 if (code
== EQ
|| code
== NE
)
3563 rtx zie
= mips_zero_if_equal (cmp_operands
[0], cmp_operands
[1]);
3564 mips_emit_binary (code
, target
, zie
, const0_rtx
);
3567 mips_emit_int_relational (code
, 0, target
,
3568 cmp_operands
[0], cmp_operands
[1]);
3572 /* Emit the common code for doing conditional branches.
3573 operand[0] is the label to jump to.
3574 The comparison operands are saved away by cmp{si,di,sf,df}. */
3577 gen_conditional_branch (rtx
*operands
, enum rtx_code code
)
3579 rtx op0
, op1
, condition
;
3581 mips_emit_compare (&code
, &op0
, &op1
, TARGET_MIPS16
);
3582 condition
= gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
3583 emit_jump_insn (gen_condjump (condition
, operands
[0]));
3588 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3589 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3592 mips_expand_vcondv2sf (rtx dest
, rtx true_src
, rtx false_src
,
3593 enum rtx_code cond
, rtx cmp_op0
, rtx cmp_op1
)
3598 reversed_p
= mips_reverse_fp_cond_p (&cond
);
3599 cmp_result
= gen_reg_rtx (CCV2mode
);
3600 emit_insn (gen_scc_ps (cmp_result
,
3601 gen_rtx_fmt_ee (cond
, VOIDmode
, cmp_op0
, cmp_op1
)));
3603 emit_insn (gen_mips_cond_move_tf_ps (dest
, false_src
, true_src
,
3606 emit_insn (gen_mips_cond_move_tf_ps (dest
, true_src
, false_src
,
3610 /* Emit the common code for conditional moves. OPERANDS is the array
3611 of operands passed to the conditional move define_expand. */
3614 gen_conditional_move (rtx
*operands
)
3619 code
= GET_CODE (operands
[1]);
3620 mips_emit_compare (&code
, &op0
, &op1
, true);
3621 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
3622 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3623 gen_rtx_fmt_ee (code
,
3626 operands
[2], operands
[3])));
3629 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3630 the conditional_trap expander. */
3633 mips_gen_conditional_trap (rtx
*operands
)
3636 enum rtx_code cmp_code
= GET_CODE (operands
[0]);
3637 enum machine_mode mode
= GET_MODE (cmp_operands
[0]);
3639 /* MIPS conditional trap machine instructions don't have GT or LE
3640 flavors, so we must invert the comparison and convert to LT and
3641 GE, respectively. */
3644 case GT
: cmp_code
= LT
; break;
3645 case LE
: cmp_code
= GE
; break;
3646 case GTU
: cmp_code
= LTU
; break;
3647 case LEU
: cmp_code
= GEU
; break;
3650 if (cmp_code
== GET_CODE (operands
[0]))
3652 op0
= cmp_operands
[0];
3653 op1
= cmp_operands
[1];
3657 op0
= cmp_operands
[1];
3658 op1
= cmp_operands
[0];
3660 op0
= force_reg (mode
, op0
);
3661 if (!arith_operand (op1
, mode
))
3662 op1
= force_reg (mode
, op1
);
3664 emit_insn (gen_rtx_TRAP_IF (VOIDmode
,
3665 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
),
3669 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3672 mips_ok_for_lazy_binding_p (rtx x
)
3674 return (TARGET_USE_GOT
3675 && GET_CODE (x
) == SYMBOL_REF
3676 && !mips_symbol_binds_local_p (x
));
3679 /* Load function address ADDR into register DEST. SIBCALL_P is true
3680 if the address is needed for a sibling call. */
3683 mips_load_call_address (rtx dest
, rtx addr
, int sibcall_p
)
3685 /* If we're generating PIC, and this call is to a global function,
3686 try to allow its address to be resolved lazily. This isn't
3687 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3688 to the stub would be our caller's gp, not ours. */
3689 if (TARGET_EXPLICIT_RELOCS
3690 && !(sibcall_p
&& TARGET_CALL_SAVED_GP
)
3691 && mips_ok_for_lazy_binding_p (addr
))
3693 rtx high
, lo_sum_symbol
;
3695 high
= mips_unspec_offset_high (dest
, pic_offset_table_rtx
,
3696 addr
, SYMBOL_GOTOFF_CALL
);
3697 lo_sum_symbol
= mips_unspec_address (addr
, SYMBOL_GOTOFF_CALL
);
3698 if (Pmode
== SImode
)
3699 emit_insn (gen_load_callsi (dest
, high
, lo_sum_symbol
));
3701 emit_insn (gen_load_calldi (dest
, high
, lo_sum_symbol
));
3704 mips_emit_move (dest
, addr
);
3708 /* Expand a call or call_value instruction. RESULT is where the
3709 result will go (null for calls), ADDR is the address of the
3710 function, ARGS_SIZE is the size of the arguments and AUX is
3711 the value passed to us by mips_function_arg. SIBCALL_P is true
3712 if we are expanding a sibling call, false if we're expanding
3716 mips_expand_call (rtx result
, rtx addr
, rtx args_size
, rtx aux
, int sibcall_p
)
3718 rtx orig_addr
, pattern
, insn
;
3721 if (!call_insn_operand (addr
, VOIDmode
))
3723 addr
= gen_reg_rtx (Pmode
);
3724 mips_load_call_address (addr
, orig_addr
, sibcall_p
);
3728 && TARGET_HARD_FLOAT_ABI
3729 && build_mips16_call_stub (result
, addr
, args_size
,
3730 aux
== 0 ? 0 : (int) GET_MODE (aux
)))
3734 pattern
= (sibcall_p
3735 ? gen_sibcall_internal (addr
, args_size
)
3736 : gen_call_internal (addr
, args_size
));
3737 else if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 2)
3741 reg1
= XEXP (XVECEXP (result
, 0, 0), 0);
3742 reg2
= XEXP (XVECEXP (result
, 0, 1), 0);
3745 ? gen_sibcall_value_multiple_internal (reg1
, addr
, args_size
, reg2
)
3746 : gen_call_value_multiple_internal (reg1
, addr
, args_size
, reg2
));
3749 pattern
= (sibcall_p
3750 ? gen_sibcall_value_internal (result
, addr
, args_size
)
3751 : gen_call_value_internal (result
, addr
, args_size
));
3753 insn
= emit_call_insn (pattern
);
3755 /* Lazy-binding stubs require $gp to be valid on entry. */
3756 if (mips_ok_for_lazy_binding_p (orig_addr
))
3757 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), pic_offset_table_rtx
);
3761 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3764 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED
,
3765 tree exp ATTRIBUTE_UNUSED
)
3767 return TARGET_SIBCALLS
;
3770 /* Emit code to move general operand SRC into condition-code
3771 register DEST. SCRATCH is a scratch TFmode float register.
3778 where FP1 and FP2 are single-precision float registers
3779 taken from SCRATCH. */
3782 mips_emit_fcc_reload (rtx dest
, rtx src
, rtx scratch
)
3786 /* Change the source to SFmode. */
3788 src
= adjust_address (src
, SFmode
, 0);
3789 else if (REG_P (src
) || GET_CODE (src
) == SUBREG
)
3790 src
= gen_rtx_REG (SFmode
, true_regnum (src
));
3792 fp1
= gen_rtx_REG (SFmode
, REGNO (scratch
));
3793 fp2
= gen_rtx_REG (SFmode
, REGNO (scratch
) + MAX_FPRS_PER_FMT
);
3795 mips_emit_move (copy_rtx (fp1
), src
);
3796 mips_emit_move (copy_rtx (fp2
), CONST0_RTX (SFmode
));
3797 emit_insn (gen_slt_sf (dest
, fp2
, fp1
));
3800 /* Emit code to change the current function's return address to
3801 ADDRESS. SCRATCH is available as a scratch register, if needed.
3802 ADDRESS and SCRATCH are both word-mode GPRs. */
3805 mips_set_return_address (rtx address
, rtx scratch
)
3809 compute_frame_size (get_frame_size ());
3810 gcc_assert ((cfun
->machine
->frame
.mask
>> 31) & 1);
3811 slot_address
= mips_add_offset (scratch
, stack_pointer_rtx
,
3812 cfun
->machine
->frame
.gp_sp_offset
);
3814 mips_emit_move (gen_rtx_MEM (GET_MODE (address
), slot_address
), address
);
3817 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3818 Assume that the areas do not overlap. */
3821 mips_block_move_straight (rtx dest
, rtx src
, HOST_WIDE_INT length
)
3823 HOST_WIDE_INT offset
, delta
;
3824 unsigned HOST_WIDE_INT bits
;
3826 enum machine_mode mode
;
3829 /* Work out how many bits to move at a time. If both operands have
3830 half-word alignment, it is usually better to move in half words.
3831 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3832 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3833 Otherwise move word-sized chunks. */
3834 if (MEM_ALIGN (src
) == BITS_PER_WORD
/ 2
3835 && MEM_ALIGN (dest
) == BITS_PER_WORD
/ 2)
3836 bits
= BITS_PER_WORD
/ 2;
3838 bits
= BITS_PER_WORD
;
3840 mode
= mode_for_size (bits
, MODE_INT
, 0);
3841 delta
= bits
/ BITS_PER_UNIT
;
3843 /* Allocate a buffer for the temporary registers. */
3844 regs
= alloca (sizeof (rtx
) * length
/ delta
);
3846 /* Load as many BITS-sized chunks as possible. Use a normal load if
3847 the source has enough alignment, otherwise use left/right pairs. */
3848 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
3850 regs
[i
] = gen_reg_rtx (mode
);
3851 if (MEM_ALIGN (src
) >= bits
)
3852 mips_emit_move (regs
[i
], adjust_address (src
, mode
, offset
));
3855 rtx part
= adjust_address (src
, BLKmode
, offset
);
3856 if (!mips_expand_unaligned_load (regs
[i
], part
, bits
, 0))
3861 /* Copy the chunks to the destination. */
3862 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
3863 if (MEM_ALIGN (dest
) >= bits
)
3864 mips_emit_move (adjust_address (dest
, mode
, offset
), regs
[i
]);
3867 rtx part
= adjust_address (dest
, BLKmode
, offset
);
3868 if (!mips_expand_unaligned_store (part
, regs
[i
], bits
, 0))
3872 /* Mop up any left-over bytes. */
3873 if (offset
< length
)
3875 src
= adjust_address (src
, BLKmode
, offset
);
3876 dest
= adjust_address (dest
, BLKmode
, offset
);
3877 move_by_pieces (dest
, src
, length
- offset
,
3878 MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), 0);
3882 #define MAX_MOVE_REGS 4
3883 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3886 /* Helper function for doing a loop-based block operation on memory
3887 reference MEM. Each iteration of the loop will operate on LENGTH
3890 Create a new base register for use within the loop and point it to
3891 the start of MEM. Create a new memory reference that uses this
3892 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3895 mips_adjust_block_mem (rtx mem
, HOST_WIDE_INT length
,
3896 rtx
*loop_reg
, rtx
*loop_mem
)
3898 *loop_reg
= copy_addr_to_reg (XEXP (mem
, 0));
3900 /* Although the new mem does not refer to a known location,
3901 it does keep up to LENGTH bytes of alignment. */
3902 *loop_mem
= change_address (mem
, BLKmode
, *loop_reg
);
3903 set_mem_align (*loop_mem
, MIN (MEM_ALIGN (mem
), length
* BITS_PER_UNIT
));
3907 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3908 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3909 memory regions do not overlap. */
3912 mips_block_move_loop (rtx dest
, rtx src
, HOST_WIDE_INT length
)
3914 rtx label
, src_reg
, dest_reg
, final_src
;
3915 HOST_WIDE_INT leftover
;
3917 leftover
= length
% MAX_MOVE_BYTES
;
3920 /* Create registers and memory references for use within the loop. */
3921 mips_adjust_block_mem (src
, MAX_MOVE_BYTES
, &src_reg
, &src
);
3922 mips_adjust_block_mem (dest
, MAX_MOVE_BYTES
, &dest_reg
, &dest
);
3924 /* Calculate the value that SRC_REG should have after the last iteration
3926 final_src
= expand_simple_binop (Pmode
, PLUS
, src_reg
, GEN_INT (length
),
3929 /* Emit the start of the loop. */
3930 label
= gen_label_rtx ();
3933 /* Emit the loop body. */
3934 mips_block_move_straight (dest
, src
, MAX_MOVE_BYTES
);
3936 /* Move on to the next block. */
3937 mips_emit_move (src_reg
, plus_constant (src_reg
, MAX_MOVE_BYTES
));
3938 mips_emit_move (dest_reg
, plus_constant (dest_reg
, MAX_MOVE_BYTES
));
3940 /* Emit the loop condition. */
3941 if (Pmode
== DImode
)
3942 emit_insn (gen_cmpdi (src_reg
, final_src
));
3944 emit_insn (gen_cmpsi (src_reg
, final_src
));
3945 emit_jump_insn (gen_bne (label
));
3947 /* Mop up any left-over bytes. */
3949 mips_block_move_straight (dest
, src
, leftover
);
3953 /* Expand a loop of synci insns for the address range [BEGIN, END). */
3956 mips_expand_synci_loop (rtx begin
, rtx end
)
3958 rtx inc
, label
, cmp
, cmp_result
;
3960 /* Load INC with the cache line size (rdhwr INC,$1). */
3961 inc
= gen_reg_rtx (SImode
);
3962 emit_insn (gen_rdhwr (inc
, const1_rtx
));
3964 /* Loop back to here. */
3965 label
= gen_label_rtx ();
3968 emit_insn (gen_synci (begin
));
3970 cmp
= gen_reg_rtx (Pmode
);
3971 mips_emit_binary (GTU
, cmp
, begin
, end
);
3973 mips_emit_binary (PLUS
, begin
, begin
, inc
);
3975 cmp_result
= gen_rtx_EQ (VOIDmode
, cmp
, const0_rtx
);
3976 emit_jump_insn (gen_condjump (cmp_result
, label
));
3979 /* Expand a movmemsi instruction. */
3982 mips_expand_block_move (rtx dest
, rtx src
, rtx length
)
3984 if (GET_CODE (length
) == CONST_INT
)
3986 if (INTVAL (length
) <= 2 * MAX_MOVE_BYTES
)
3988 mips_block_move_straight (dest
, src
, INTVAL (length
));
3993 mips_block_move_loop (dest
, src
, INTVAL (length
));
4000 /* Argument support functions. */
4002 /* Initialize CUMULATIVE_ARGS for a function. */
4005 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
4006 rtx libname ATTRIBUTE_UNUSED
)
4008 static CUMULATIVE_ARGS zero_cum
;
4009 tree param
, next_param
;
4012 cum
->prototype
= (fntype
&& TYPE_ARG_TYPES (fntype
));
4014 /* Determine if this function has variable arguments. This is
4015 indicated by the last argument being 'void_type_mode' if there
4016 are no variable arguments. The standard MIPS calling sequence
4017 passes all arguments in the general purpose registers in this case. */
4019 for (param
= fntype
? TYPE_ARG_TYPES (fntype
) : 0;
4020 param
!= 0; param
= next_param
)
4022 next_param
= TREE_CHAIN (param
);
4023 if (next_param
== 0 && TREE_VALUE (param
) != void_type_node
)
4024 cum
->gp_reg_found
= 1;
4029 /* Fill INFO with information about a single argument. CUM is the
4030 cumulative state for earlier arguments. MODE is the mode of this
4031 argument and TYPE is its type (if known). NAMED is true if this
4032 is a named (fixed) argument rather than a variable one. */
4035 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4036 tree type, int named, struct mips_arg_info *info)
4038 bool doubleword_aligned_p;
4039 unsigned int num_bytes, num_words, max_regs;
4041 /* Work out the size of the argument. */
4042 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4043 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4045 /* Decide whether it should go in a floating-point register, assuming
4046 one is free. Later code checks for availability.
4048 The checks against UNITS_PER_FPVALUE handle the soft-float and
4049 single-float cases. */
4053 /* The EABI conventions have traditionally been defined in terms
4054 of TYPE_MODE, regardless of the actual type. */
4055 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4056 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4057 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4062 /* Only leading floating-point scalars are passed in
4063 floating-point registers. We also handle vector floats the same
4064 way, which is OK because they are not covered by the standard ABI. */
4065 info->fpr_p = (!cum->gp_reg_found
4066 && cum->arg_number < 2
4067 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4068 || VECTOR_FLOAT_TYPE_P (type))
4069 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4070 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4071 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4076 /* Scalar and complex floating-point types are passed in
4077 floating-point registers. */
4078 info->fpr_p = (named
4079 && (type == 0 || FLOAT_TYPE_P (type))
4080 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4081 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4082 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4083 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4085 /* ??? According to the ABI documentation, the real and imaginary
4086 parts of complex floats should be passed in individual registers.
4087 The real and imaginary parts of stack arguments are supposed
4088 to be contiguous and there should be an extra word of padding
4091 This has two problems. First, it makes it impossible to use a
4092 single "void *" va_list type, since register and stack arguments
4093 are passed differently. (At the time of writing, MIPSpro cannot
4094 handle complex float varargs correctly.) Second, it's unclear
4095 what should happen when there is only one register free.
4097 For now, we assume that named complex floats should go into FPRs
4098 if there are two FPRs free, otherwise they should be passed in the
4099 same way as a struct containing two floats. */
4101 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4102 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4104 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4105 info->fpr_p = false;
4115 /* See whether the argument has doubleword alignment. */
4116 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4118 /* Set REG_OFFSET to the register count we're interested in.
4119 The EABI allocates the floating-point registers separately,
4120 but the other ABIs allocate them like integer registers. */
4121 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4125 /* Advance to an even register if the argument is doubleword-aligned. */
4126 if (doubleword_aligned_p)
4127 info->reg_offset += info->reg_offset & 1;
4129 /* Work out the offset of a stack argument. */
4130 info->stack_offset = cum->stack_words;
4131 if (doubleword_aligned_p)
4132 info->stack_offset += info->stack_offset & 1;
4134 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4136 /* Partition the argument between registers and stack. */
4137 info->reg_words = MIN (num_words, max_regs);
4138 info->stack_words = num_words - info->reg_words;
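/* Editor's note: illustrative sketch, not part of mips.c.  It reproduces
   the size/partition arithmetic at the end of mips_arg_info in plain C:
   round the argument size up to whole words, optionally advance to an
   even register for doubleword alignment, then split the words between
   the remaining argument registers and the stack.  MAX_ARG_REGS_SKETCH
   and UNITS_PER_WORD_SKETCH are illustrative stand-ins.  */
#define MAX_ARG_REGS_SKETCH 8     /* stand-in for MAX_ARGS_IN_REGISTERS */
#define UNITS_PER_WORD_SKETCH 4   /* stand-in for UNITS_PER_WORD */

struct arg_split_sketch { unsigned reg_words, stack_words, reg_offset; };

static struct arg_split_sketch
partition_arg_sketch (unsigned num_bytes, unsigned reg_offset,
                      int doubleword_aligned_p)
{
  struct arg_split_sketch s;
  unsigned num_words, max_regs;

  num_words = (num_bytes + UNITS_PER_WORD_SKETCH - 1) / UNITS_PER_WORD_SKETCH;
  if (doubleword_aligned_p)
    reg_offset += reg_offset & 1;          /* advance to an even register */

  max_regs = MAX_ARG_REGS_SKETCH - reg_offset;
  s.reg_offset = reg_offset;
  s.reg_words = num_words < max_regs ? num_words : max_regs;
  s.stack_words = num_words - s.reg_words;
  return s;
}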
4142 /* INFO describes an argument that is passed in a single-register value.
4143 Return the register it uses, assuming that FPRs are available if
4147 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4149 if (!info->fpr_p || !hard_float_p)
4150 return GP_ARG_FIRST + info->reg_offset;
4151 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4152 /* In o32, the second argument is always passed in $f14
4153 for TARGET_DOUBLE_FLOAT, regardless of whether the
4154 first argument was a word or doubleword. */
4155 return FP_ARG_FIRST + 2;
4157 return FP_ARG_FIRST + info->reg_offset;
4160 /* Implement FUNCTION_ARG_ADVANCE. */
4163 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4164 tree type, int named)
4166 struct mips_arg_info info;
4168 mips_arg_info (cum, mode, type, named, &info);
4171 cum->gp_reg_found = true;
4173 /* See the comment above the cumulative args structure in mips.h
4174 for an explanation of what this code does. It assumes the O32
4175 ABI, which passes at most 2 arguments in float registers. */
4176 if (cum->arg_number < 2 && info.fpr_p)
4177 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4179 if (mips_abi != ABI_EABI || !info.fpr_p)
4180 cum->num_gprs = info.reg_offset + info.reg_words;
4181 else if (info.reg_words > 0)
4182 cum->num_fprs += MAX_FPRS_PER_FMT;
4184 if (info.stack_words > 0)
4185 cum->stack_words = info.stack_offset + info.stack_words;
4190 /* Implement FUNCTION_ARG. */
4193 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4194 tree type, int named)
4196 struct mips_arg_info info;
4198 /* We will be called with a mode of VOIDmode after the last argument
4199 has been seen. Whatever we return will be passed to the call
4200 insn. If we need a mips16 fp_code, return a REG with the code
4201 stored as the mode. */
4202 if (mode == VOIDmode)
4204 if (TARGET_MIPS16 && cum->fp_code != 0)
4205 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4211 mips_arg_info (cum, mode, type, named, &info);
4213 /* Return straight away if the whole argument is passed on the stack. */
4214 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4218 && TREE_CODE (type) == RECORD_TYPE
4220 && TYPE_SIZE_UNIT (type)
4221 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4224 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4225 structure contains a double in its entirety, then that 64-bit
4226 chunk is passed in a floating point register. */
4229 /* First check to see if there is any such field. */
4230 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4231 if (TREE_CODE (field) == FIELD_DECL
4232 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4233 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4234 && host_integerp (bit_position (field), 0)
4235 && int_bit_position (field) % BITS_PER_WORD == 0)
4240 /* Now handle the special case by returning a PARALLEL
4241 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4242 chunks are passed in registers. */
4244 HOST_WIDE_INT bitpos;
4247 /* assign_parms checks the mode of ENTRY_PARM, so we must
4248 use the actual mode here. */
4249 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4252 field = TYPE_FIELDS (type);
4253 for (i = 0; i < info.reg_words; i++)
4257 for (; field; field = TREE_CHAIN (field))
4258 if (TREE_CODE (field) == FIELD_DECL
4259 && int_bit_position (field) >= bitpos)
4263 && int_bit_position (field) == bitpos
4264 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4265 && !TARGET_SOFT_FLOAT
4266 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4267 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4269 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4272 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4273 GEN_INT (bitpos / BITS_PER_UNIT));
4275 bitpos += BITS_PER_WORD;
4281 /* Handle the n32/n64 conventions for passing complex floating-point
4282 arguments in FPR pairs. The real part goes in the lower register
4283 and the imaginary part goes in the upper register. */
4286 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4289 enum machine_mode inner;
4292 inner = GET_MODE_INNER (mode);
4293 reg = FP_ARG_FIRST + info.reg_offset;
4294 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4296 /* Real part in registers, imaginary part on stack. */
4297 gcc_assert (info.stack_words == info.reg_words);
4298 return gen_rtx_REG (inner, reg);
4302 gcc_assert (info.stack_words == 0);
4303 real = gen_rtx_EXPR_LIST (VOIDmode,
4304 gen_rtx_REG (inner, reg),
4306 imag = gen_rtx_EXPR_LIST (VOIDmode,
4308 reg + info.reg_words / 2),
4309 GEN_INT (GET_MODE_SIZE (inner)));
4310 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4314 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4318 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4321 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4322 enum machine_mode mode, tree type, bool named)
4324 struct mips_arg_info info;
4326 mips_arg_info (cum, mode, type, named, &info);
4327 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4331 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4332 PARM_BOUNDARY bits of alignment, but will be given anything up
4333 to STACK_BOUNDARY bits if the type requires it. */
4336 function_arg_boundary (enum machine_mode mode, tree type)
4338 unsigned int alignment;
4340 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4341 if (alignment < PARM_BOUNDARY)
4342 alignment = PARM_BOUNDARY;
4343 if (alignment > STACK_BOUNDARY)
4344 alignment = STACK_BOUNDARY;
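/* Editor's note: illustrative sketch, not part of mips.c.  The boundary
   computation above is a simple clamp of the type/mode alignment into
   [PARM_BOUNDARY, STACK_BOUNDARY]; for example, with PARM_BOUNDARY == 32
   and STACK_BOUNDARY == 64, an alignment of 8 becomes 32 and one of 128
   becomes 64.  The parameters here are stand-ins for those macros.  */
static unsigned
clamp_alignment_sketch (unsigned alignment, unsigned parm_boundary,
                        unsigned stack_boundary)
{
  if (alignment < parm_boundary)
    alignment = parm_boundary;
  if (alignment > stack_boundary)
    alignment = stack_boundary;
  return alignment;
}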
4348 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4349 upward rather than downward. In other words, return true if the
4350 first byte of the stack slot has useful data, false if the last
4354 mips_pad_arg_upward (enum machine_mode mode, tree type)
4356 /* On little-endian targets, the first byte of every stack argument
4357 is passed in the first byte of the stack slot. */
4358 if (!BYTES_BIG_ENDIAN)
4361 /* Otherwise, integral types are padded downward: the last byte of a
4362 stack argument is passed in the last byte of the stack slot. */
4364 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4365 : GET_MODE_CLASS (mode) == MODE_INT)
4368 /* Big-endian o64 pads floating-point arguments downward. */
4369 if (mips_abi == ABI_O64)
4370 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4373 /* Other types are padded upward for o32, o64, n32 and n64. */
4374 if (mips_abi != ABI_EABI)
4377 /* Arguments smaller than a stack slot are padded downward. */
4378 if (mode != BLKmode)
4379 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4381 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
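/* Editor's note: illustrative sketch, not part of mips.c.  A compact
   restatement of the big-endian padding rules above for the non-EABI
   case only: integral and pointer arguments are padded downward, o64
   also pads floats downward, and everything else is padded upward.
   The enum and flags are stand-ins for the real ABI and type queries;
   the EABI small-argument rule is deliberately omitted here.  */
enum pad_abi_sketch { PAD_ABI_O64_SKETCH, PAD_ABI_OTHER_SKETCH };

static int
pad_arg_upward_sketch (int big_endian_p, int integral_p, int float_p,
                       enum pad_abi_sketch abi)
{
  if (!big_endian_p)
    return 1;                     /* little endian: always pad upward */
  if (integral_p)
    return 0;                     /* integral types pad downward */
  if (abi == PAD_ABI_O64_SKETCH && float_p)
    return 0;                     /* big-endian o64 pads floats downward */
  return 1;                       /* other types pad upward */
}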
4385 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4386 if the least significant byte of the register has useful data. Return
4387 the opposite if the most significant byte does. */
4390 mips_pad_reg_upward (enum machine_mode mode, tree type)
4392 /* No shifting is required for floating-point arguments. */
4393 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4394 return !BYTES_BIG_ENDIAN;
4396 /* Otherwise, apply the same padding to register arguments as we do
4397 to stack arguments. */
4398 return mips_pad_arg_upward (mode, type);
4402 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4403 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4406 CUMULATIVE_ARGS local_cum;
4407 int gp_saved, fp_saved;
4409 /* The caller has advanced CUM up to, but not beyond, the last named
4410 argument. Advance a local copy of CUM past the last "real" named
4411 argument, to find out how many registers are left over. */
4414 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4416 /* Find out how many registers we need to save. */
4417 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4418 fp_saved = (EABI_FLOAT_VARARGS_P
4419 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4428 ptr = plus_constant (virtual_incoming_args_rtx,
4429 REG_PARM_STACK_SPACE (cfun->decl)
4430 - gp_saved * UNITS_PER_WORD);
4431 mem = gen_rtx_MEM (BLKmode, ptr);
4432 set_mem_alias_set (mem, get_varargs_alias_set ());
4434 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4439 /* We can't use move_block_from_reg, because it will use
4441 enum machine_mode mode;
4444 /* Set OFF to the offset from virtual_incoming_args_rtx of
4445 the first float register. The FP save area lies below
4446 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4447 off = -gp_saved * UNITS_PER_WORD;
4448 off &= ~(UNITS_PER_FPVALUE - 1);
4449 off -= fp_saved * UNITS_PER_FPREG;
4451 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4453 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4454 i += MAX_FPRS_PER_FMT)
4458 ptr = plus_constant (virtual_incoming_args_rtx, off);
4459 mem = gen_rtx_MEM (mode, ptr);
4460 set_mem_alias_set (mem, get_varargs_alias_set ());
4461 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4462 off += UNITS_PER_HWFPVALUE;
4466 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4467 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4468 + fp_saved * UNITS_PER_FPREG);
4471 /* Create the va_list data type.
4472 We keep 3 pointers, and two offsets.
4473 Two pointers are to the overflow area, which starts at the CFA.
4474 One of these is constant, for addressing into the GPR save area below it.
4475 The other is advanced up the stack through the overflow region.
4476 The third pointer is to the GPR save area. Since the FPR save area
4477 is just below it, we can address FPR slots off this pointer.
4478 We also keep two one-byte offsets, which are to be subtracted from the
4479 constant pointers to yield addresses in the GPR and FPR save areas.
4480 These are downcounted as float or non-float arguments are used,
4481 and when they get to zero, the argument must be obtained from the
4483 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4484 pointer is enough. It's started at the GPR save area, and is
4486 Note that the GPR save area is not constant size, due to optimization
4487 in the prologue. Hence, we can't use a design with two pointers
4488 and two offsets, although we could have designed this with two pointers
4489 and three offsets. */
4492 mips_build_builtin_va_list (void)
4494 if (EABI_FLOAT_VARARGS_P)
4496 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4499 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4501 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4503 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4505 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4507 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4508 unsigned_char_type_node);
4509 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4510 unsigned_char_type_node);
4511 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4512 warn on every user file. */
4513 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4514 array = build_array_type (unsigned_char_type_node,
4515 build_index_type (index));
4516 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4518 DECL_FIELD_CONTEXT (f_ovfl) = record;
4519 DECL_FIELD_CONTEXT (f_gtop) = record;
4520 DECL_FIELD_CONTEXT (f_ftop) = record;
4521 DECL_FIELD_CONTEXT (f_goff) = record;
4522 DECL_FIELD_CONTEXT (f_foff) = record;
4523 DECL_FIELD_CONTEXT (f_res) = record;
4525 TYPE_FIELDS (record) = f_ovfl;
4526 TREE_CHAIN (f_ovfl) = f_gtop;
4527 TREE_CHAIN (f_gtop) = f_ftop;
4528 TREE_CHAIN (f_ftop) = f_goff;
4529 TREE_CHAIN (f_goff) = f_foff;
4530 TREE_CHAIN (f_foff) = f_res;
4532 layout_type (record);
4535 else if (TARGET_IRIX && TARGET_IRIX6)
4536 /* On IRIX 6, this type is 'char *'. */
4537 return build_pointer_type (char_type_node);
4539 /* Otherwise, we use 'void *'. */
4540 return ptr_type_node;
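/* Editor's note: illustrative sketch, not part of mips.c.  For the
   EABI_FLOAT_VARARGS_P case, the record built above is roughly
   equivalent to the following C structure (field names match the
   get_identifier calls; the __reserved array is the explicit padding
   that keeps -Wpadded quiet, as the comment above notes).  */
struct mips_eabi_va_list_sketch
{
  void *__overflow_argptr;    /* next overflow (stack) argument */
  void *__gpr_top;            /* top of the GPR save area */
  void *__fpr_top;            /* top of the FPR save area */
  unsigned char __gpr_offset; /* bytes remaining in the GPR save area */
  unsigned char __fpr_offset; /* bytes remaining in the FPR save area */
  unsigned char __reserved[sizeof (void *) - 2]; /* explicit padding */
};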
4543 /* Implement va_start. */
4546 mips_va_start (tree valist, rtx nextarg)
4548 if (EABI_FLOAT_VARARGS_P)
4550 const CUMULATIVE_ARGS *cum;
4551 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4552 tree ovfl, gtop, ftop, goff, foff;
4554 int gpr_save_area_size;
4555 int fpr_save_area_size;
4558 cum = &current_function_args_info;
4560 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4562 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4564 f_ovfl = TYPE_FIELDS (va_list_type_node);
4565 f_gtop = TREE_CHAIN (f_ovfl);
4566 f_ftop = TREE_CHAIN (f_gtop);
4567 f_goff = TREE_CHAIN (f_ftop);
4568 f_foff = TREE_CHAIN (f_goff);
4570 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4572 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4574 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4576 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4578 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4581 /* Emit code to initialize OVFL, which points to the next varargs
4582 stack argument. CUM->STACK_WORDS gives the number of stack
4583 words used by named arguments. */
4584 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4585 if (cum->stack_words > 0)
4586 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4587 size_int (cum->stack_words * UNITS_PER_WORD));
4588 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4589 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4591 /* Emit code to initialize GTOP, the top of the GPR save area. */
4592 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4593 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4594 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4596 /* Emit code to initialize FTOP, the top of the FPR save area.
4597 This address is gpr_save_area_bytes below GTOP, rounded
4598 down to the next fp-aligned boundary. */
4599 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4600 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4601 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4603 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4604 size_int (-fpr_offset));
4605 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4606 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4608 /* Emit code to initialize GOFF, the offset from GTOP of the
4609 next GPR argument. */
4610 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4611 build_int_cst (NULL_TREE, gpr_save_area_size));
4612 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4614 /* Likewise emit code to initialize FOFF, the offset from FTOP
4615 of the next FPR argument. */
4616 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4617 build_int_cst (NULL_TREE, fpr_save_area_size));
4618 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4622 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4623 std_expand_builtin_va_start (valist, nextarg);
4627 /* Implement va_arg. */
4630 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4632 HOST_WIDE_INT size, rsize;
4636 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4639 type = build_pointer_type (type);
4641 size = int_size_in_bytes (type);
4642 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4644 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4645 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4648 /* Not a simple merged stack. */
4650 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4651 tree ovfl, top, off, align;
4652 HOST_WIDE_INT osize;
4655 f_ovfl = TYPE_FIELDS (va_list_type_node);
4656 f_gtop = TREE_CHAIN (f_ovfl);
4657 f_ftop = TREE_CHAIN (f_gtop);
4658 f_goff = TREE_CHAIN (f_ftop);
4659 f_foff = TREE_CHAIN (f_goff);
4661 /* We maintain separate pointers and offsets for floating-point
4662 and integer arguments, but we need similar code in both cases.
4665 TOP be the top of the register save area;
4666 OFF be the offset from TOP of the next register;
4667 ADDR_RTX be the address of the argument;
4668 RSIZE be the number of bytes used to store the argument
4669 when it's in the register save area;
4670 OSIZE be the number of bytes used to store it when it's
4671 in the stack overflow area; and
4672 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4674 The code we want is:
4676 1: off &= -rsize; // round down
4679 4: addr_rtx = top - off;
4684 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4685 10: addr_rtx = ovfl + PADDING;
4689 [1] and [9] can sometimes be optimized away. */
4691 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4694 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4695 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4697 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4699 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4702 /* When floating-point registers are saved to the stack,
4703 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4704 of the float's precision. */
4705 rsize = UNITS_PER_HWFPVALUE;
4707 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4708 (= PARM_BOUNDARY bits). This can be different from RSIZE
4711 (1) On 32-bit targets when TYPE is a structure such as:
4713 struct s { float f; };
4715 Such structures are passed in paired FPRs, so RSIZE
4716 will be 8 bytes. However, the structure only takes
4717 up 4 bytes of memory, so OSIZE will only be 4.
4719 (2) In combinations such as -mgp64 -msingle-float
4720 -fshort-double. Doubles passed in registers
4721 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4722 but those passed on the stack take up
4723 UNITS_PER_WORD bytes. */
4724 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4728 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4730 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4732 if (rsize > UNITS_PER_WORD)
4734 /* [1] Emit code for: off &= -rsize. */
4735 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4736 build_int_cst (NULL_TREE, -rsize));
4737 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4738 gimplify_and_add (t, pre_p);
4743 /* [2] Emit code to branch if off == 0. */
4744 t = build2 (NE_EXPR, boolean_type_node, off,
4745 build_int_cst (TREE_TYPE (off), 0));
4746 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4748 /* [5] Emit code for: off -= rsize. We do this as a form of
4749 post-increment not available to C. Also widen for the
4750 coming pointer arithmetic. */
4751 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4752 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4753 t = fold_convert (sizetype, t);
4754 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4756 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4757 the argument has RSIZE - SIZE bytes of leading padding. */
4758 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4759 if (BYTES_BIG_ENDIAN && rsize > size)
4761 u = size_int (rsize - size);
4762 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4764 COND_EXPR_THEN (addr) = t;
4766 if (osize > UNITS_PER_WORD)
4768 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4769 u = size_int (osize - 1);
4770 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4771 t = fold_convert (sizetype, t);
4772 u = size_int (-osize);
4773 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4774 t = fold_convert (TREE_TYPE (ovfl), t);
4775 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4780 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4781 post-increment ovfl by osize. On big-endian machines,
4782 the argument has OSIZE - SIZE bytes of leading padding. */
4783 u = fold_convert (TREE_TYPE (ovfl),
4784 build_int_cst (NULL_TREE, osize));
4785 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4786 if (BYTES_BIG_ENDIAN && osize > size)
4788 u = size_int (osize - size);
4789 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4792 /* String [9] and [10,11] together. */
4794 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4795 COND_EXPR_ELSE (addr) = t;
4797 addr = fold_convert (build_pointer_type (type), addr);
4798 addr = build_va_arg_indirect_ref (addr);
4802 addr = build_va_arg_indirect_ref (addr);
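/* Editor's note: illustrative sketch, not part of mips.c.  It spells out,
   in plain C, the EABI va_arg algorithm described by the numbered comment
   above ([1]-[11]): take the argument from the register save area while
   the per-kind offset is nonzero, otherwise take it from the overflow
   area, honouring big-endian leading padding in both cases.  All names
   are local to the sketch, and the offsets are assumed to already be
   multiples of rsize.  */
#include <stdint.h>

static void *
eabi_va_arg_sketch (unsigned char *top, unsigned char *goff_or_foff,
                    unsigned char **ovfl, int big_endian_p,
                    unsigned rsize, unsigned osize, unsigned size)
{
  unsigned char *addr;

  if (*goff_or_foff != 0)
    {
      /* [1]/[4]/[5]: round OFF down, address the slot, post-decrement.  */
      unsigned off = *goff_or_foff & -rsize;
      addr = top - off;
      if (big_endian_p && rsize > size)
        addr += rsize - size;               /* leading padding */
      *goff_or_foff = (unsigned char) (off - rsize);
    }
  else
    {
      /* [9]/[10]/[11]: align OVFL, take the slot, post-increment.  */
      uintptr_t p = ((uintptr_t) *ovfl + osize - 1) & -(uintptr_t) osize;
      addr = (unsigned char *) p;
      if (big_endian_p && osize > size)
        addr += osize - size;               /* leading padding */
      *ovfl = (unsigned char *) p + osize;
    }
  return addr;
}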
4807 /* Return true if it is possible to use left/right accesses for a
4808 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4809 returning true, update *OP, *LEFT and *RIGHT as follows:
4811 *OP is a BLKmode reference to the whole field.
4813 *LEFT is a QImode reference to the first byte if big endian or
4814 the last byte if little endian. This address can be used in the
4815 left-side instructions (lwl, swl, ldl, sdl).
4817 *RIGHT is a QImode reference to the opposite end of the field and
4818 can be used in the patterning right-side instruction. */
4821 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4822 rtx *left, rtx *right)
4826 /* Check that the operand really is a MEM. Not all the extv and
4827 extzv predicates are checked. */
4831 /* Check that the size is valid. */
4832 if (width != 32 && (!TARGET_64BIT || width != 64))
4835 /* We can only access byte-aligned values. Since we are always passed
4836 a reference to the first byte of the field, it is not necessary to
4837 do anything with BITPOS after this check. */
4838 if (bitpos % BITS_PER_UNIT != 0)
4841 /* Reject aligned bitfields: we want to use a normal load or store
4842 instead of a left/right pair. */
4843 if (MEM_ALIGN (*op) >= width)
4846 /* Adjust *OP to refer to the whole field. This also has the effect
4847 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4848 *op = adjust_address (*op, BLKmode, 0);
4849 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4851 /* Get references to both ends of the field. We deliberately don't
4852 use the original QImode *OP for FIRST since the new BLKmode one
4853 might have a simpler address. */
4854 first = adjust_address (*op, QImode, 0);
4855 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4857 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4858 be the upper word and RIGHT the lower word. */
4859 if (TARGET_BIG_ENDIAN)
4860 *left = first, *right = last;
4862 *left = last, *right = first;
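/* Editor's note: illustrative sketch, not part of mips.c.  Given the first
   byte address of an unaligned WIDTH-bit field, the two QImode references
   built above simply address the two ends of the field; which end feeds
   the "left" (lwl/swl/ldl/sdl) access and which the "right" one depends
   on endianness, as the final if/else shows.  Names here are local to
   the sketch.  */
static void
unaligned_ends_sketch (unsigned char *field, unsigned width_bits,
                       int big_endian_p,
                       unsigned char **left, unsigned char **right)
{
  unsigned char *first = field;
  unsigned char *last = field + width_bits / 8 - 1;

  if (big_endian_p)
    {
      *left = first;
      *right = last;
    }
  else
    {
      *left = last;
      *right = first;
    }
}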
4868 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4869 Return true on success. We only handle cases where zero_extract is
4870 equivalent to sign_extract. */
4873 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4875 rtx left, right, temp;
4877 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4878 paradoxical word_mode subreg. This is the only case in which
4879 we allow the destination to be larger than the source. */
4880 if (GET_CODE (dest) == SUBREG
4881 && GET_MODE (dest) == DImode
4882 && SUBREG_BYTE (dest) == 0
4883 && GET_MODE (SUBREG_REG (dest)) == SImode)
4884 dest = SUBREG_REG (dest);
4886 /* After the above adjustment, the destination must be the same
4887 width as the source. */
4888 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4891 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4894 temp = gen_reg_rtx (GET_MODE (dest));
4895 if (GET_MODE (dest) == DImode)
4897 emit_insn (gen_mov_ldl (temp, src, left));
4898 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4902 emit_insn (gen_mov_lwl (temp, src, left));
4903 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4909 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4913 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4916 enum machine_mode mode;
4918 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4921 mode = mode_for_size (width, MODE_INT, 0);
4922 src = gen_lowpart (mode, src);
4926 emit_insn (gen_mov_sdl (dest, src, left));
4927 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4931 emit_insn (gen_mov_swl (dest, src, left));
4932 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4937 /* Return true if X is a MEM with the same size as MODE. */
4940 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4947 size = MEM_SIZE (x);
4948 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4951 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4952 source of an "ext" instruction or the destination of an "ins"
4953 instruction. OP must be a register operand and the following
4954 conditions must hold:
4956 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4957 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4958 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4960 Also reject lengths equal to a word as they are better handled
4961 by the move patterns. */
4964 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4966 HOST_WIDE_INT len, pos;
4968 if (!ISA_HAS_EXT_INS
4969 || !register_operand (op, VOIDmode)
4970 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4973 len = INTVAL (size);
4974 pos = INTVAL (position);
4976 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4977 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
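/* Editor's note: illustrative sketch, not part of mips.c.  The predicate
   above boils down to a bounds check on the (position, length) pair
   within the operand's mode, with whole-word lengths rejected in favour
   of the move patterns; this is the same check in plain C.  */
static int
ins_ext_field_ok_sketch (int pos, int len, int mode_bits)
{
  if (len <= 0 || len >= mode_bits)      /* reject empty and full-width */
    return 0;
  if (pos < 0 || pos + len > mode_bits)  /* must lie inside the operand */
    return 0;
  return 1;
}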
4983 /* Set up globals to generate code for the ISA or processor
4984 described by INFO. */
4987 mips_set_architecture (const struct mips_cpu_info *info)
4991 mips_arch_info = info;
4992 mips_arch = info->cpu;
4993 mips_isa = info->isa;
4998 /* Likewise for tuning. */
5001 mips_set_tune (const struct mips_cpu_info *info)
5005 mips_tune_info = info;
5006 mips_tune = info->cpu;
5010 /* Implement TARGET_HANDLE_OPTION. */
5013 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5018 if (strcmp (arg, "32") == 0)
5020 else if (strcmp (arg, "o64") == 0)
5022 else if (strcmp (arg, "n32") == 0)
5024 else if (strcmp (arg, "64") == 0)
5026 else if (strcmp (arg, "eabi") == 0)
5027 mips_abi = ABI_EABI;
5034 return mips_parse_cpu (arg) != 0;
5037 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5038 return mips_isa_info != 0;
5040 case OPT_mno_flush_func:
5041 mips_cache_flush_func = NULL;
5044 case OPT_mcode_readable_:
5045 if (strcmp (arg, "yes") == 0)
5046 mips_code_readable = CODE_READABLE_YES;
5047 else if (strcmp (arg, "pcrel") == 0)
5048 mips_code_readable = CODE_READABLE_PCREL;
5049 else if (strcmp (arg, "no") == 0)
5050 mips_code_readable = CODE_READABLE_NO;
5060 /* Set up the threshold for data to go into the small data area, instead
5061 of the normal data area, and detect any conflicts in the switches. */
5064 override_options (void)
5066 int i, start, regno;
5067 enum machine_mode mode;
5069 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5070 SUBTARGET_OVERRIDE_OPTIONS;
5073 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5075 /* The following code determines the architecture and register size.
5076 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5077 The GAS and GCC code should be kept in sync as much as possible. */
5079 if (mips_arch_string != 0)
5080 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5082 if (mips_isa_info != 0)
5084 if (mips_arch_info == 0)
5085 mips_set_architecture (mips_isa_info);
5086 else if (mips_arch_info->isa != mips_isa_info->isa)
5087 error ("-%s conflicts with the other architecture options, "
5088 "which specify a %s processor",
5089 mips_isa_info->name,
5090 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5093 if (mips_arch_info == 0)
5095 #ifdef MIPS_CPU_STRING_DEFAULT
5096 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5098 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5102 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5103 error ("-march=%s is not compatible with the selected ABI",
5104 mips_arch_info->name);
5106 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5107 if (mips_tune_string != 0)
5108 mips_set_tune (mips_parse_cpu (mips_tune_string));
5110 if (mips_tune_info == 0)
5111 mips_set_tune (mips_arch_info);
5113 /* Set cost structure for the processor. */
5115 mips_cost = &mips_rtx_cost_optimize_size;
5117 mips_cost = &mips_rtx_cost_data[mips_tune];
5119 /* If the user hasn't specified a branch cost, use the processor's
5121 if (mips_branch_cost == 0)
5122 mips_branch_cost = mips_cost->branch_cost;
5124 if ((target_flags_explicit & MASK_64BIT) != 0)
5126 /* The user specified the size of the integer registers. Make sure
5127 it agrees with the ABI and ISA. */
5128 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5129 error ("-mgp64 used with a 32-bit processor");
5130 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5131 error ("-mgp32 used with a 64-bit ABI");
5132 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5133 error ("-mgp64 used with a 32-bit ABI");
5137 /* Infer the integer register size from the ABI and processor.
5138 Restrict ourselves to 32-bit registers if that's all the
5139 processor has, or if the ABI cannot handle 64-bit registers. */
5140 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5141 target_flags &= ~MASK_64BIT;
5143 target_flags |= MASK_64BIT;
5146 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5148 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5149 only one right answer here. */
5150 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5151 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5152 else if (!TARGET_64BIT && TARGET_FLOAT64
5153 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5154 error ("-mgp32 and -mfp64 can only be combined if the target"
5155 " supports the mfhc1 and mthc1 instructions");
5156 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5157 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5161 /* -msingle-float selects 32-bit float registers. Otherwise the
5162 float registers should be the same size as the integer ones. */
5163 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5164 target_flags |= MASK_FLOAT64;
5166 target_flags &= ~MASK_FLOAT64;
5169 /* End of code shared with GAS. */
5171 if ((target_flags_explicit & MASK_LONG64) == 0)
5173 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5174 target_flags |= MASK_LONG64;
5176 target_flags &= ~MASK_LONG64;
5179 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5180 && (target_flags_explicit & MASK_SOFT_FLOAT_ABI) == 0)
5182 /* For some configurations, it is useful to have -march control
5183 the default setting of MASK_SOFT_FLOAT_ABI. */
5184 switch ((int) mips_arch)
5186 case PROCESSOR_R4100:
5187 case PROCESSOR_R4111:
5188 case PROCESSOR_R4120:
5189 case PROCESSOR_R4130:
5190 target_flags |= MASK_SOFT_FLOAT_ABI;
5194 target_flags &= ~MASK_SOFT_FLOAT_ABI;
5200 flag_pcc_struct_return = 0;
5202 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5204 /* If neither -mbranch-likely nor -mno-branch-likely was given
5205 on the command line, set MASK_BRANCHLIKELY based on the target
5208 By default, we enable use of Branch Likely instructions on
5209 all architectures which support them with the following
5210 exceptions: when creating MIPS32 or MIPS64 code, and when
5211 tuning for architectures where their use tends to hurt
5214 The MIPS32 and MIPS64 architecture specifications say "Software
5215 is strongly encouraged to avoid use of Branch Likely
5216 instructions, as they will be removed from a future revision
5217 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5218 issue those instructions unless instructed to do so by
5220 if (ISA_HAS_BRANCHLIKELY
5221 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5222 && !(TUNE_MIPS5500 || TUNE_SB1))
5223 target_flags |= MASK_BRANCHLIKELY;
5225 target_flags &= ~MASK_BRANCHLIKELY;
5227 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5228 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5230 /* The effect of -mabicalls isn't defined for the EABI. */
5231 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5233 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5234 target_flags &= ~MASK_ABICALLS;
5237 if (TARGET_ABICALLS)
5239 /* We need to set flag_pic for executables as well as DSOs
5240 because we may reference symbols that are not defined in
5241 the final executable. (MIPS does not use things like
5242 copy relocs, for example.)
5244 Also, there is a body of code that uses __PIC__ to distinguish
5245 between -mabicalls and -mno-abicalls code. */
5247 if (mips_section_threshold > 0)
5248 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5251 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5252 warning (0, "-G and -mrtp are incompatible");
5254 /* mips_split_addresses is a half-way house between explicit
5255 relocations and the traditional assembler macros. It can
5256 split absolute 32-bit symbolic constants into a high/lo_sum
5257 pair but uses macros for other sorts of access.
5259 Like explicit relocation support for REL targets, it relies
5260 on GNU extensions in the assembler and the linker.
5262 Although this code should work for -O0, it has traditionally
5263 been treated as an optimization. */
5264 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5265 && optimize && !flag_pic
5266 && !ABI_HAS_64BIT_SYMBOLS)
5267 mips_split_addresses = 1;
5269 mips_split_addresses = 0;
5271 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5272 faster code, but at the expense of more nops. Enable it at -O3 and
5274 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5275 target_flags |= MASK_VR4130_ALIGN;
5279 /* Don't run the scheduler before reload, since it tends to
5280 increase register pressure. */
5281 flag_schedule_insns = 0;
5283 /* Don't do hot/cold partitioning. The constant layout code expects
5284 the whole function to be in a single section. */
5285 flag_reorder_blocks_and_partition = 0;
5287 /* Silently disable -mexplicit-relocs since it doesn't apply
5288 to mips16 code. Even so, it would be overly pedantic to warn
5289 about "-mips16 -mexplicit-relocs", especially given that
5290 we use a %gprel() operator. */
5291 target_flags &= ~MASK_EXPLICIT_RELOCS;
5294 /* When using explicit relocs, we call dbr_schedule from within
5296 if (TARGET_EXPLICIT_RELOCS)
5298 mips_flag_delayed_branch = flag_delayed_branch;
5299 flag_delayed_branch = 0;
5302 #ifdef MIPS_TFMODE_FORMAT
5303 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5306 /* Make sure that the user didn't turn off paired single support when
5307 MIPS-3D support is requested. */
5308 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5309 && !TARGET_PAIRED_SINGLE_FLOAT)
5310 error ("-mips3d requires -mpaired-single");
5312 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5314 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5316 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5317 and TARGET_HARD_FLOAT are both true. */
5318 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5319 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5321 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5323 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5324 error ("-mips3d/-mpaired-single must be used with -mips64");
5326 /* If TARGET_DSPR2, enable MASK_DSP. */
5328 target_flags |= MASK_DSP;
5330 if (TARGET_MIPS16 && TARGET_DSP)
5331 error ("-mips16 and -mdsp cannot be used together");
5333 mips_print_operand_punct['?'] = 1;
5334 mips_print_operand_punct['#'] = 1;
5335 mips_print_operand_punct['/'] = 1;
5336 mips_print_operand_punct['&'] = 1;
5337 mips_print_operand_punct['!'] = 1;
5338 mips_print_operand_punct['*'] = 1;
5339 mips_print_operand_punct['@'] = 1;
5340 mips_print_operand_punct['.'] = 1;
5341 mips_print_operand_punct['('] = 1;
5342 mips_print_operand_punct[')'] = 1;
5343 mips_print_operand_punct['['] = 1;
5344 mips_print_operand_punct[']'] = 1;
5345 mips_print_operand_punct['<'] = 1;
5346 mips_print_operand_punct['>'] = 1;
5347 mips_print_operand_punct['{'] = 1;
5348 mips_print_operand_punct['}'] = 1;
5349 mips_print_operand_punct['^'] = 1;
5350 mips_print_operand_punct['$'] = 1;
5351 mips_print_operand_punct['+'] = 1;
5352 mips_print_operand_punct['~'] = 1;
5354 /* Set up array to map GCC register number to debug register number.
5355 Ignore the special purpose register numbers. */
5357 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5359 mips_dbx_regno[i] = INVALID_REGNUM;
5360 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5361 mips_dwarf_regno[i] = i;
5363 mips_dwarf_regno[i] = INVALID_REGNUM;
5366 start = GP_DBX_FIRST - GP_REG_FIRST;
5367 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5368 mips_dbx_regno[i] = i + start;
5370 start = FP_DBX_FIRST - FP_REG_FIRST;
5371 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5372 mips_dbx_regno[i] = i + start;
5374 /* HI and LO debug registers use big-endian ordering. */
5375 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5376 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5377 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5378 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5379 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5381 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5382 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5385 /* Set up array giving whether a given register can hold a given mode. */
5387 for (mode = VOIDmode;
5388 mode != MAX_MACHINE_MODE;
5389 mode = (enum machine_mode) ((int)mode + 1))
5391 register int size = GET_MODE_SIZE (mode);
5392 register enum mode_class class = GET_MODE_CLASS (mode);
5394 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5398 if (mode == CCV2mode)
5401 && (regno - ST_REG_FIRST) % 2 == 0);
5403 else if (mode == CCV4mode)
5406 && (regno - ST_REG_FIRST) % 4 == 0);
5408 else if (mode == CCmode)
5411 temp = (regno == FPSW_REGNUM);
5413 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5414 || FP_REG_P (regno));
5417 else if (GP_REG_P (regno))
5418 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5420 else if (FP_REG_P (regno))
5421 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5422 || (MIN_FPRS_PER_FMT == 1
5423 && size <= UNITS_PER_FPREG))
5424 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5425 || class == MODE_VECTOR_FLOAT)
5426 && size <= UNITS_PER_FPVALUE)
5427 /* Allow integer modes that fit into a single
5428 register. We need to put integers into FPRs
5429 when using instructions like cvt and trunc.
5430 We can't allow sizes smaller than a word,
5431 the FPU has no appropriate load/store
5432 instructions for those. */
5433 || (class == MODE_INT
5434 && size >= MIN_UNITS_PER_WORD
5435 && size <= UNITS_PER_FPREG)
5436 /* Allow TFmode for CCmode reloads. */
5437 || (ISA_HAS_8CC && mode == TFmode)));
5439 else if (ACC_REG_P (regno))
5440 temp = (INTEGRAL_MODE_P (mode)
5441 && size <= UNITS_PER_WORD * 2
5442 && (size <= UNITS_PER_WORD
5443 || regno == MD_REG_FIRST
5444 || (DSP_ACC_REG_P (regno)
5445 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5447 else if (ALL_COP_REG_P (regno))
5448 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5452 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5456 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5457 initialized yet, so we can't use that here. */
5458 gpr_mode = TARGET_64BIT ? DImode : SImode;
5460 /* Provide default values for align_* for 64-bit targets. */
5461 if (TARGET_64BIT && !TARGET_MIPS16)
5463 if (align_loops == 0)
5465 if (align_jumps == 0)
5467 if (align_functions == 0)
5468 align_functions = 8;
5471 /* Function to allocate machine-dependent function status. */
5472 init_machine_status = &mips_init_machine_status;
5474 if (ABI_HAS_64BIT_SYMBOLS)
5476 if (TARGET_EXPLICIT_RELOCS)
5478 mips_split_p[SYMBOL_64_HIGH] = true;
5479 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5480 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5482 mips_split_p[SYMBOL_64_MID] = true;
5483 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5484 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5486 mips_split_p[SYMBOL_64_LOW] = true;
5487 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5488 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5490 mips_split_p[SYMBOL_ABSOLUTE] = true;
5491 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5496 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5498 mips_split_p[SYMBOL_ABSOLUTE] = true;
5499 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5500 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5502 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5508 /* The high part is provided by a pseudo copy of $gp. */
5509 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5510 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5513 if (TARGET_EXPLICIT_RELOCS)
5515 /* Small data constants are kept whole until after reload,
5516 then lowered by mips_rewrite_small_data. */
5517 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5519 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5522 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5523 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5527 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5528 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5533 /* The HIGH and LO_SUM are matched by special .md patterns. */
5534 mips_split_p[SYMBOL_GOT_DISP] = true;
5536 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5537 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5538 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5540 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5541 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5542 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5547 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5549 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5550 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5556 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5557 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5558 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5561 /* Thread-local relocation operators. */
5562 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5563 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5564 mips_split_p[SYMBOL_DTPREL] = 1;
5565 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5566 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5567 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5568 mips_split_p[SYMBOL_TPREL] = 1;
5569 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5570 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5572 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5574 /* We don't have a thread pointer access instruction on MIPS16, or
5575 appropriate TLS relocations. */
5577 targetm.have_tls = false;
5579 /* Default to working around R4000 errata only if the processor
5580 was selected explicitly. */
5581 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5582 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5583 target_flags |= MASK_FIX_R4000;
5585 /* Default to working around R4400 errata only if the processor
5586 was selected explicitly. */
5587 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5588 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5589 target_flags |= MASK_FIX_R4400;
5592 /* Swap the register information for registers I and I + 1, which
5593 currently have the wrong endianness. Note that the registers'
5594 fixedness and call-clobberedness might have been set on the
5598 mips_swap_registers (unsigned int i)
5603 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5604 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5606 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5607 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5608 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5609 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5615 /* Implement CONDITIONAL_REGISTER_USAGE. */
5618 mips_conditional_register_usage (void)
5624 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5625 fixed_regs[regno] = call_used_regs[regno] = 1;
5627 if (!TARGET_HARD_FLOAT)
5631 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5632 fixed_regs[regno] = call_used_regs[regno] = 1;
5633 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5634 fixed_regs[regno] = call_used_regs[regno] = 1;
5636 else if (! ISA_HAS_8CC)
5640 /* We only have a single condition code register. We
5641 implement this by hiding all the condition code registers,
5642 and generating RTL that refers directly to ST_REG_FIRST. */
5643 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5644 fixed_regs[regno] = call_used_regs[regno] = 1;
5646 /* In mips16 mode, we permit the $t temporary registers to be used
5647 for reload. We prohibit the unused $s registers, since they
5648 are caller saved, and saving them via a mips16 register would
5649 probably waste more time than just reloading the value. */
5652 fixed_regs[18] = call_used_regs[18] = 1;
5653 fixed_regs[19] = call_used_regs[19] = 1;
5654 fixed_regs[20] = call_used_regs[20] = 1;
5655 fixed_regs[21] = call_used_regs[21] = 1;
5656 fixed_regs[22] = call_used_regs[22] = 1;
5657 fixed_regs[23] = call_used_regs[23] = 1;
5658 fixed_regs[26] = call_used_regs[26] = 1;
5659 fixed_regs[27] = call_used_regs[27] = 1;
5660 fixed_regs[30] = call_used_regs[30] = 1;
5662 /* fp20-23 are now caller saved. */
5663 if (mips_abi == ABI_64)
5666 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5667 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5669 /* Odd registers from fp21 to fp31 are now caller saved. */
5670 if (mips_abi == ABI_N32)
5673 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno +=2)
5674 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5676 /* Make sure that double-register accumulator values are correctly
5677 ordered for the current endianness. */
5678 if (TARGET_LITTLE_ENDIAN)
5681 mips_swap_registers (MD_REG_FIRST);
5682 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5683 mips_swap_registers (regno);
5687 /* Allocate a chunk of memory for per-function machine-dependent data. */
5688 static struct machine_function *
5689 mips_init_machine_status (void)
5691 return ((struct machine_function *)
5692 ggc_alloc_cleared (sizeof (struct machine_function)));
5695 /* On the mips16, we want to allocate $24 (T_REG) before other
5696 registers for instructions for which it is possible. This helps
5697 avoid shuffling registers around in order to set up for an xor,
5698 encouraging the compiler to use a cmp instead. */
5701 mips_order_regs_for_local_alloc (void)
5705 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5706 reg_alloc_order[i] = i;
5710 /* It really doesn't matter where we put register 0, since it is
5711 a fixed register anyhow. */
5712 reg_alloc_order[0] = 24;
5713 reg_alloc_order[24] = 0;
5718 /* The MIPS debug format wants all automatic variables and arguments
5719 to be in terms of the virtual frame pointer (stack pointer before
5720 any adjustment in the function), while the MIPS 3.0 linker wants
5721 the frame pointer to be the stack pointer after the initial
5722 adjustment. So, we do the adjustment here. The arg pointer (which
5723 is eliminated) points to the virtual frame pointer, while the frame
5724 pointer (which may be eliminated) points to the stack pointer after
5725 the initial adjustments. */
5728 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5730 rtx offset2 = const0_rtx;
5731 rtx reg = eliminate_constant_term (addr, &offset2);
5734 offset = INTVAL (offset2);
5736 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5737 || reg == hard_frame_pointer_rtx)
5739 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5740 ? compute_frame_size (get_frame_size ())
5741 : cfun->machine->frame.total_size;
5743 /* MIPS16 frame is smaller */
5744 if (frame_pointer_needed && TARGET_MIPS16)
5745 frame_size -= cfun->machine->frame.args_size;
5747 offset = offset - frame_size;
5750 /* sdbout_parms does not want this to crash for unrecognized cases. */
5752 else if (reg != arg_pointer_rtx)
5753 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5760 /* If OP is an UNSPEC address, return the address to which it refers,
5761 otherwise return OP itself. */
5764 mips_strip_unspec_address (rtx op)
5768 split_const (op, &base, &offset);
5769 if (UNSPEC_ADDRESS_P (base))
5770 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
5774 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5776 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5777 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5778 'h' OP is HIGH, prints %hi(X),
5779 'd' output integer constant in decimal,
5780 'z' if the operand is 0, use $0 instead of normal operand.
5781 'D' print second part of double-word register or memory operand.
5782 'L' print low-order register of double-word register operand.
5783 'M' print high-order register of double-word register operand.
5784 'C' print part of opcode for a branch condition.
5785 'F' print part of opcode for a floating-point branch condition.
5786 'N' print part of opcode for a branch condition, inverted.
5787 'W' print part of opcode for a floating-point branch condition, inverted.
5788 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5789 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5790 't' like 'T', but with the EQ/NE cases reversed
5791 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5792 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5793 'R' print the reloc associated with LO_SUM
5794 'q' print DSP accumulator registers
5796 The punctuation characters are:
5798 '(' Turn on .set noreorder
5799 ')' Turn on .set reorder
5800 '[' Turn on .set noat
5802 '<' Turn on .set nomacro
5803 '>' Turn on .set macro
5804 '{' Turn on .set volatile (not GAS)
5805 '}' Turn on .set novolatile (not GAS)
5806 '&' Turn on .set noreorder if filling delay slots
5807 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5808 '!' Turn on .set nomacro if filling delay slots
5809 '#' Print nop if in a .set noreorder section.
5810 '/' Like '#', but does nothing within a delayed branch sequence
5811 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5812 '@' Print the name of the assembler temporary register (at or $1).
5813 '.' Print the name of the register with a hard-wired zero (zero or $0).
5814 '^' Print the name of the pic call-through register (t9 or $25).
5815 '$' Print the name of the stack pointer register (sp or $29).
5816 '+' Print the name of the gp register (usually gp or $28).
5817 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
print_operand (FILE *file, rtx op, int letter)
  register enum rtx_code code;

  if (PRINT_OPERAND_PUNCT_VALID_P (letter))
      if (mips_branch_likely)

      fputs (reg_names[GP_REG_FIRST + 1], file);

      fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);

      fputs (reg_names[GP_REG_FIRST + 0], file);

      fputs (reg_names[STACK_POINTER_REGNUM], file);

      fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);

      if (final_sequence != 0 && set_noreorder++ == 0)
        fputs (".set\tnoreorder\n\t", file);

      if (final_sequence != 0)
          if (set_noreorder++ == 0)
            fputs (".set\tnoreorder\n\t", file);
          if (set_nomacro++ == 0)
            fputs (".set\tnomacro\n\t", file);

      if (final_sequence != 0 && set_nomacro++ == 0)
        fputs ("\n\t.set\tnomacro", file);

      if (set_noreorder != 0)
        fputs ("\n\tnop", file);

      /* Print an extra newline so that the delayed insn is separated
         from the following ones.  This looks neater and is consistent
         with non-nop delayed sequences.  */
      if (set_noreorder != 0 && final_sequence == 0)
        fputs ("\n\tnop\n", file);

      if (set_noreorder++ == 0)
        fputs (".set\tnoreorder\n\t", file);

      if (set_noreorder == 0)
        error ("internal error: %%) found without a %%( in assembler pattern");
      else if (--set_noreorder == 0)
        fputs ("\n\t.set\treorder", file);

      if (set_noat++ == 0)
        fputs (".set\tnoat\n\t", file);

        error ("internal error: %%] found without a %%[ in assembler pattern");
      else if (--set_noat == 0)
        fputs ("\n\t.set\tat", file);

      if (set_nomacro++ == 0)
        fputs (".set\tnomacro\n\t", file);

      if (set_nomacro == 0)
        error ("internal error: %%> found without a %%< in assembler pattern");
      else if (--set_nomacro == 0)
        fputs ("\n\t.set\tmacro", file);

      if (set_volatile++ == 0)
        fputs ("#.set\tvolatile\n\t", file);

      if (set_volatile == 0)
        error ("internal error: %%} found without a %%{ in assembler pattern");
      else if (--set_volatile == 0)
        fputs ("\n\t#.set\tnovolatile", file);

      if (align_labels_log > 0)
        ASM_OUTPUT_ALIGN (file, align_labels_log);

      error ("PRINT_OPERAND: unknown punctuation '%c'", letter);

    error ("PRINT_OPERAND null pointer");

  code = GET_CODE (op);

      case EQ:  fputs ("eq",  file); break;
      case NE:  fputs ("ne",  file); break;
      case GT:  fputs ("gt",  file); break;
      case GE:  fputs ("ge",  file); break;
      case LT:  fputs ("lt",  file); break;
      case LE:  fputs ("le",  file); break;
      case GTU: fputs ("gtu", file); break;
      case GEU: fputs ("geu", file); break;
      case LTU: fputs ("ltu", file); break;
      case LEU: fputs ("leu", file); break;
        fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);

  else if (letter == 'N')
      case EQ:  fputs ("ne",  file); break;
      case NE:  fputs ("eq",  file); break;
      case GT:  fputs ("le",  file); break;
      case GE:  fputs ("lt",  file); break;
      case LT:  fputs ("ge",  file); break;
      case LE:  fputs ("gt",  file); break;
      case GTU: fputs ("leu", file); break;
      case GEU: fputs ("ltu", file); break;
      case LTU: fputs ("geu", file); break;
      case LEU: fputs ("gtu", file); break;
        fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);

  else if (letter == 'F')
      case EQ: fputs ("c1f", file); break;
      case NE: fputs ("c1t", file); break;
        fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);

  else if (letter == 'W')
      case EQ: fputs ("c1t", file); break;
      case NE: fputs ("c1f", file); break;
        fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);

  else if (letter == 'h')
      if (GET_CODE (op) == HIGH)
      print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);

  else if (letter == 'R')
    print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);

  else if (letter == 'Y')
      if (GET_CODE (op) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (op)
              < ARRAY_SIZE (mips_fp_conditions)))
        fputs (mips_fp_conditions[INTVAL (op)], file);
        output_operand_lossage ("invalid %%Y value");

  else if (letter == 'Z')
      print_operand (file, op, 0);

  else if (letter == 'q')
        fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
      regnum = REGNO (op);
      if (MD_REG_P (regnum))
        fprintf (file, "$ac0");
      else if (DSP_ACC_REG_P (regnum))
        fprintf (file, "$ac%c", reg_names[regnum][3]);
        fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);

  else if (code == REG || code == SUBREG)
      register int regnum;

        regnum = REGNO (op);
        regnum = true_regnum (op);

      if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
          || (letter == 'L' && WORDS_BIG_ENDIAN)

      fprintf (file, "%s", reg_names[regnum]);

  else if (code == MEM)
        output_address (plus_constant (XEXP (op, 0), 4));
        output_address (XEXP (op, 0));

  else if (letter == 'x' && GET_CODE (op) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL (op));

  else if (letter == 'X' && GET_CODE (op) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));

  else if (letter == 'd' && GET_CODE (op) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL (op)));

  else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
    fputs (reg_names[GP_REG_FIRST], file);

  else if (letter == 'd' || letter == 'x' || letter == 'X')
    output_operand_lossage ("invalid use of %%d, %%x, or %%X");

  else if (letter == 'T' || letter == 't')
      int truth = (code == NE) == (letter == 'T');
      fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);

  else if (CONST_GP_P (op))
    fputs (reg_names[GLOBAL_POINTER_REGNUM], file);

    output_addr_const (file, mips_strip_unspec_address (op));
/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
   in context CONTEXT.  RELOCS is the array of relocations to use.  */

print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
                     const char **relocs)
  enum mips_symbol_type symbol_type;

  if (!mips_symbolic_constant_p (op, context, &symbol_type)
      || relocs[symbol_type] == 0)
    fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);

  fputs (relocs[symbol_type], file);
  output_addr_const (file, mips_strip_unspec_address (op));
  for (p = relocs[symbol_type]; *p != 0; p++)
/* Output address operand X to FILE.  */

print_operand_address (FILE *file, rtx x)
  struct mips_address_info addr;

  if (mips_classify_address (&addr, x, word_mode, true))

      print_operand (file, addr.offset, 0);
      fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);

    case ADDRESS_LO_SUM:
      print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
      fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);

    case ADDRESS_CONST_INT:
      output_addr_const (file, x);
      fprintf (file, "(%s)", reg_names[0]);

    case ADDRESS_SYMBOLIC:
      output_addr_const (file, mips_strip_unspec_address (x));
/* When using assembler macros, keep track of all of small-data externs
   so that mips_file_end can emit the appropriate declarations for them.

   In most cases it would be safe (though pointless) to emit .externs
   for other symbols too.  One exception is when an object is within
   the -G limit but declared by the user to be in a section other
   than .sbss or .sdata.  */

mips_output_external (FILE *file, tree decl, const char *name)
  default_elf_asm_output_external (file, decl, name);

  /* We output the name if and only if TREE_SYMBOL_REFERENCED is
     set in order to avoid putting out names that are never really
     used.  */
  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
      if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
          fputs ("\t.extern\t", file);
          assemble_name (file, name);
          fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
                   int_size_in_bytes (TREE_TYPE (decl)));
      else if (TARGET_IRIX
               && mips_abi == ABI_32
               && TREE_CODE (decl) == FUNCTION_DECL)
          /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
             `.global name .text' directive for every used but
             undefined function.  If we don't, the linker may perform
             an optimization (skipping over the insns that set $gp)
             when it is unsafe.  */
          fputs ("\t.globl ", file);
          assemble_name (file, name);
          fputs (" .text\n", file);
/* Emit a new filename to a stream.  If we are smuggling stabs, try to
   put out a MIPS ECOFF file and a stab.  */

mips_output_filename (FILE *stream, const char *name)
  /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
     directives.  */
  if (write_symbols == DWARF2_DEBUG)

  else if (mips_output_filename_first_time)
      mips_output_filename_first_time = 0;
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);

  /* If we are emitting stabs, let dbxout.c handle this (except for
     the mips_output_filename_first_time case).  */
  else if (write_symbols == DBX_DEBUG)

  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
/* Output an ASCII string, in a space-saving way.  PREFIX is the string
   that should be written before the opening quote, such as "\t.ascii\t"
   for real string data or "\t# " for a comment.  */

mips_output_ascii (FILE *stream, const char *string_param, size_t len,
  register const unsigned char *string =
    (const unsigned char *) string_param;

  fprintf (stream, "%s\"", prefix);
  for (i = 0; i < len; i++)
      register int c = string[i];

      if (c == '\\' || c == '\"')
          putc ('\\', stream);

        fprintf (stream, "\\%03o", c);

      if (cur_pos > 72 && i+1 < len)
          fprintf (stream, "\"\n%s\"", prefix);

  fprintf (stream, "\"\n");
/* Implement TARGET_ASM_FILE_START.  */

mips_file_start (void)
  default_file_start ();

      /* Generate a special section to describe the ABI switches used to
         produce the resultant binary.  This used to be done by the assembler
         setting bits in the ELF header's flags field, but we have run out of
         bits.  GDB needs this information in order to be able to correctly
         debug these binaries.  See the function mips_gdbarch_init() in
         gdb/mips-tdep.c.  This is unnecessary for the IRIX 5/6 ABIs and
         causes unnecessary IRIX 6 ld warnings.  */
      const char * abi_string = NULL;

        case ABI_32:   abi_string = "abi32"; break;
        case ABI_N32:  abi_string = "abiN32"; break;
        case ABI_64:   abi_string = "abi64"; break;
        case ABI_O64:  abi_string = "abiO64"; break;
        case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;

      /* Note - we use fprintf directly rather than calling switch_to_section
         because in this way we can avoid creating an allocated section.  We
         do not want this section to take up any space in the running
         executable.  */
      fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);

      /* There is no ELF header flag to distinguish long32 forms of the
         EABI from long64 forms.  Emit a special section to help tools
         such as GDB.  Do the same for o64, which is sometimes used with
         -mlong64.  */
      if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
        fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
                 TARGET_LONG64 ? 64 : 32);

      /* Restore the default section.  */
      fprintf (asm_out_file, "\t.previous\n");

#ifdef HAVE_AS_GNU_ATTRIBUTE
      fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
               TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);

  /* Generate the pseudo ops that System V.4 wants.  */
  if (TARGET_ABICALLS)
    fprintf (asm_out_file, "\t.abicalls\n");

    fprintf (asm_out_file, "\t.set\tmips16\n");

  if (flag_verbose_asm)
    fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
             mips_section_threshold, mips_arch_info->name, mips_isa);
#ifdef BSS_SECTION_ASM_OP
/* Implement ASM_OUTPUT_ALIGNED_BSS.  This differs from the default only
   in the use of sbss.  */

mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
                         unsigned HOST_WIDE_INT size, int align)
  extern tree last_assemble_variable_decl;

  if (mips_in_small_data_p (decl))
    switch_to_section (get_named_section (NULL, ".sbss", 0));
    switch_to_section (bss_section);
  ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
  last_assemble_variable_decl = decl;
  ASM_DECLARE_OBJECT_NAME (stream, name, decl);
  ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
   elfos.h version, but we also need to handle -muninit-const-in-rodata.  */

mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
                                 unsigned HOST_WIDE_INT size,
  /* If the target wants uninitialized const declarations in
     .rdata then don't put them in .comm.  */
  if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
      && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
      && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
      if (TREE_PUBLIC (decl) && DECL_NAME (decl))
        targetm.asm_out.globalize_label (stream, name);

      switch_to_section (readonly_data_section);
      ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
      mips_declare_object (stream, name, "",
                           ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
    mips_declare_common_object (stream, name, "\n\t.comm\t",
/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
   NAME is the name of the object and ALIGN is the required alignment
   in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
   alignment argument.  */

mips_declare_common_object (FILE *stream, const char *name,
                            const char *init_string,
                            unsigned HOST_WIDE_INT size,
                            unsigned int align, bool takes_alignment_p)
  if (!takes_alignment_p)
      size += (align / BITS_PER_UNIT) - 1;
      size -= size % (align / BITS_PER_UNIT);
      mips_declare_object (stream, name, init_string,
                           "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
    mips_declare_object (stream, name, init_string,
                         "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
                         size, align / BITS_PER_UNIT);
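
/* Illustrative sketch only (not part of the compiler proper): the rounding
   applied above when the .comm directive takes no alignment argument.  The
   helper name and plain C types here are hypothetical; ALIGN_BYTES is an
   alignment in bytes, e.g. a 10-byte object with 8-byte alignment is padded
   to 16 bytes.  */
#if 0
static unsigned long long
mips_round_common_size_sketch (unsigned long long size, unsigned int align_bytes)
{
  /* Pad SIZE up to the next multiple of ALIGN_BYTES, exactly as the
     !takes_alignment_p path of mips_declare_common_object does.  */
  size += align_bytes - 1;
  size -= size % align_bytes;
  return size;
}
#endif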
/* Emit either a label, .comm, or .lcomm directive.  When using assembler
   macros, mark the symbol as written so that mips_file_end won't emit an
   .extern for it.  STREAM is the output file, NAME is the name of the
   symbol, INIT_STRING is the string that should be written before the
   symbol and FINAL_STRING is the string that should be written after it.
   FINAL_STRING is a printf() format that consumes the remaining arguments.  */

mips_declare_object (FILE *stream, const char *name, const char *init_string,
                     const char *final_string, ...)
  fputs (init_string, stream);
  assemble_name (stream, name);
  va_start (ap, final_string);
  vfprintf (stream, final_string, ap);

  if (!TARGET_EXPLICIT_RELOCS)
      tree name_tree = get_identifier (name);
      TREE_ASM_WRITTEN (name_tree) = 1;
#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
extern int size_directive_output;

/* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
   definitions except that it uses mips_declare_object() to emit the label.  */

mips_declare_object_name (FILE *stream, const char *name,
                          tree decl ATTRIBUTE_UNUSED)
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");

  size_directive_output = 0;
  if (!flag_inhibit_size_directive && DECL_SIZE (decl))
      size_directive_output = 1;
      size = int_size_in_bytes (TREE_TYPE (decl));
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);

  mips_declare_object (stream, name, "", ":\n");
/* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */

mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
  name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  if (!flag_inhibit_size_directive
      && DECL_SIZE (decl) != 0
      && !at_end && top_level
      && DECL_INITIAL (decl) == error_mark_node
      && !size_directive_output)
      size_directive_output = 1;
      size = int_size_in_bytes (TREE_TYPE (decl));
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Return true if X in context CONTEXT is a small data address that can
   be rewritten as a LO_SUM.  */

mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
  enum mips_symbol_type symbol_type;

  return (TARGET_EXPLICIT_RELOCS
          && mips_symbolic_constant_p (x, context, &symbol_type)
          && symbol_type == SYMBOL_GP_RELATIVE);
/* A for_each_rtx callback for mips_small_data_pattern_p.  DATA is the
   containing MEM, or null if none.  */

mips_small_data_pattern_1 (rtx *loc, void *data)
  enum mips_symbol_context context;

  if (GET_CODE (*loc) == LO_SUM)

  if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  return mips_rewrite_small_data_p (*loc, context);

/* Return true if OP refers to small data symbols directly, not through
   a LO_SUM.  */

mips_small_data_pattern_p (rtx op)
  return for_each_rtx (&op, mips_small_data_pattern_1, 0);
/* A for_each_rtx callback, used by mips_rewrite_small_data.
   DATA is the containing MEM, or null if none.  */

mips_rewrite_small_data_1 (rtx *loc, void *data)
  enum mips_symbol_context context;

  for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  if (mips_rewrite_small_data_p (*loc, context))
    *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);

  if (GET_CODE (*loc) == LO_SUM)

/* If possible, rewrite OP so that it refers to small data using
   explicit relocations.  */

mips_rewrite_small_data (rtx op)
  op = copy_insn (op);
  for_each_rtx (&op, mips_rewrite_small_data_1, 0);
/* Return true if the current function has an insn that implicitly
   uses $gp.  */

mips_function_has_gp_insn (void)
  /* Don't bother rechecking if we found one last time.  */
  if (!cfun->machine->has_gp_insn_p)
      push_topmost_sequence ();
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
            && GET_CODE (PATTERN (insn)) != USE
            && GET_CODE (PATTERN (insn)) != CLOBBER
            && (get_attr_got (insn) != GOT_UNSET
                || small_data_pattern (PATTERN (insn), VOIDmode)))
      pop_topmost_sequence ();

      cfun->machine->has_gp_insn_p = (insn != 0);
  return cfun->machine->has_gp_insn_p;
/* Return the register that should be used as the global pointer
   within this function.  Return 0 if the function doesn't need
   a global pointer.  */

mips_global_pointer (void)
  /* $gp is always available unless we're using a GOT.  */
  if (!TARGET_USE_GOT)
    return GLOBAL_POINTER_REGNUM;

  /* We must always provide $gp when it is used implicitly.  */
  if (!TARGET_EXPLICIT_RELOCS)
    return GLOBAL_POINTER_REGNUM;

  /* FUNCTION_PROFILER includes a jal macro, so we need to give it
     a valid global pointer.  */
  if (current_function_profile)
    return GLOBAL_POINTER_REGNUM;

  /* If the function has a nonlocal goto, $gp must hold the correct
     global pointer for the target function.  */
  if (current_function_has_nonlocal_goto)
    return GLOBAL_POINTER_REGNUM;

  /* If the gp is never referenced, there's no need to initialize it.
     Note that reload can sometimes introduce constant pool references
     into a function that otherwise didn't need them.  For example,
     suppose we have an instruction like:

        (set (reg:DF R1) (float:DF (reg:SI R2)))

     If R2 turns out to be constant such as 1, the instruction may have a
     REG_EQUAL note saying that R1 == 1.0.  Reload then has the option of
     using this constant if R2 doesn't get allocated to a register.

     In cases like these, reload will have added the constant to the pool
     but no instruction will yet refer to it.  */
  if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
      && !current_function_uses_const_pool
      && !mips_function_has_gp_insn ())

  /* We need a global pointer, but perhaps we can use a call-clobbered
     register instead of $gp.  */
  if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
      if (!df_regs_ever_live_p (regno)
          && call_used_regs[regno]
          && !fixed_regs[regno]
          && regno != PIC_FUNCTION_ADDR_REGNUM)

  return GLOBAL_POINTER_REGNUM;
/* Return true if the function return value MODE will get returned in a
   floating-point register.  */

mips_return_mode_in_fpr_p (enum machine_mode mode)
  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
           || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
          && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);

/* Return a two-character string representing a function floating-point
   return mode, used to name MIPS16 function stubs.  */

mips16_call_stub_mode_suffix (enum machine_mode mode)
  else if (mode == DFmode)
  else if (mode == SCmode)
  else if (mode == DCmode)
  else if (mode == V2SFmode)

/* Return true if the current function returns its value in a floating-point
   register in MIPS16 mode.  */

mips16_cfun_returns_in_fpr_p (void)
  tree return_type = DECL_RESULT (current_function_decl);
  return (TARGET_MIPS16
          && TARGET_HARD_FLOAT_ABI
          && !aggregate_value_p (return_type, current_function_decl)
          && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
/* Return true if the current function must save REGNO.  */

mips_save_reg_p (unsigned int regno)
  /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
     if we have not chosen a call-clobbered substitute.  */
  if (regno == GLOBAL_POINTER_REGNUM)
    return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;

  /* Check call-saved registers.  */
  if (df_regs_ever_live_p (regno) && !call_used_regs[regno])

  /* Save both registers in an FPR pair if either one is used.  This is
     needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
     register to be used without the even register.  */
  if (FP_REG_P (regno)
      && MAX_FPRS_PER_FMT == 2
      && df_regs_ever_live_p (regno + 1)
      && !call_used_regs[regno + 1])

  /* We need to save the old frame pointer before setting up a new one.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)

  /* We need to save the incoming return address if it is ever clobbered
     within the function.  */
  if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))

  /* $18 is a special case in mips16 code.  It may be used to call
     a function which returns a floating point value, but it is
     marked in call_used_regs.  */
  if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))

  /* $31 is also a special case.  It will be used to copy a return
     value into the floating point registers if the return value is
     returned in floating point.  */
  if (regno == GP_REG_FIRST + 31
      && mips16_cfun_returns_in_fpr_p ())
/* Return the index of the lowest X in the range [0, SIZE) for which
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */

mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
  for (i = 0; i < size; i++)
    if (BITSET_P (mask, regs[i]))

/* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
   is the number of bytes that they occupy.  If *MASK_PTR contains REGS[X]
   for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
   the same is true for all indexes (X, SIZE).  */

mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
                        unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
  i = mips16e_find_first_register (*mask_ptr, regs, size);
  for (i++; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
        *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
        *mask_ptr |= 1 << regs[i];
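
/* Illustrative sketch only (not part of the compiler proper): how
   mips16e_mask_registers extends a register mask.  Once the first listed
   register that is already in the mask is found, every later entry of REGS
   is forced into the mask and the save area grows by one word per added
   register.  The names and the hypothetical 4-byte word size are for
   illustration; the real code uses GET_MODE_SIZE (gpr_mode).  */
#if 0
static void
mips16e_mask_registers_sketch (unsigned int *mask_ptr, const unsigned char *regs,
                               unsigned int size, long long *gp_reg_size_ptr)
{
  unsigned int i;

  /* Find the first listed register that is already in the mask.  */
  for (i = 0; i < size; i++)
    if (*mask_ptr & (1u << regs[i]))
      break;

  /* Force every later listed register into the mask as well.  */
  for (i++; i < size; i++)
    if (!(*mask_ptr & (1u << regs[i])))
      {
        *gp_reg_size_ptr += 4;   /* one word per extra register */
        *mask_ptr |= 1u << regs[i];
      }
}
#endif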
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.  SIZE is the size (in bytes) of the local variables.

   MIPS stack frames look like:

             Before call                        After call
        high +-----------------------+          +-----------------------+
        mem  | caller's temps.       |          | caller's temps.       |
             +-----------------------+          +-----------------------+
             | arguments on stack.   |          | arguments on stack.   |
             +-----------------------+          +-----------------------+
             | 4 words to save       |          | 4 words to save       |
             | arguments passed      |          | arguments passed      |
             | in registers, even    |          | in registers, even    |
             | if not passed.        |          | if not passed.        |
         SP->+-----------------------+     VFP->+-----------------------+
              (VFP = SP+fp_sp_offset)           |                       |\
                                                | fp register save      | | fp_reg_size
                                                |                       |/
                              SP+gp_sp_offset-> +-----------------------+
                                               /|                       |\
                                              | | gp register save      | | gp_reg_size
                               gp_reg_rounded | |                       |/
                                              | +-----------------------+
                                               \| alignment padding     |
                                                +-----------------------+
                                                | local variables       |  var_size
                                                +-----------------------+
                                                | alloca allocations    |
                                                +-----------------------+
                              cprestore_size  { | GP save for V.4 abi   |
                                                +-----------------------+
                                                | arguments on stack    |\
                                                +-----------------------+ |
                                                | 4 words to save       | | args_size
                                                | arguments passed      | |
                                                | in registers, even    | |
         low                                    | if not passed.        | |
         memory                                 | (TARGET_OLDABI only)  |/
                                            SP->+-----------------------+
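
/* Illustrative sketch only (not part of the compiler proper): how the
   offsets in the diagram above relate to each other.  The function name,
   plain C types and the hypothetical register sizes (4 bytes per GPR slot,
   8 bytes per FPR save unit) are assumptions; compute_frame_size below uses
   GET_MODE_SIZE (gpr_mode) and MAX_FPRS_PER_FMT * UNITS_PER_FPREG.  */
#if 0
static void
mips_frame_offsets_sketch (long long var_size, long long args_size,
                           long long cprestore_size, long long gp_reg_size,
                           long long gp_reg_rounded, long long fp_reg_size,
                           long long *gp_sp_offset, long long *fp_sp_offset)
{
  /* Highest GPR save slot, measured up from the post-prologue $sp.  */
  *gp_sp_offset = args_size + cprestore_size + var_size + gp_reg_size - 4;

  /* Highest FPR save slot; the FPR save area sits above the GPR area.  */
  *fp_sp_offset = args_size + cprestore_size + var_size
                  + gp_reg_rounded + fp_reg_size - 8;
}
#endif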
compute_frame_size (HOST_WIDE_INT size)
  HOST_WIDE_INT total_size;     /* # bytes that the entire frame takes up */
  HOST_WIDE_INT var_size;       /* # bytes that variables take up */
  HOST_WIDE_INT args_size;      /* # bytes that outgoing arguments take up */
  HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
  HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
  HOST_WIDE_INT gp_reg_size;    /* # bytes needed to store gp regs */
  HOST_WIDE_INT fp_reg_size;    /* # bytes needed to store fp regs */
  unsigned int mask;            /* mask of saved gp registers */
  unsigned int fmask;           /* mask of saved fp registers */

  cfun->machine->global_pointer = mips_global_pointer ();

  var_size = MIPS_STACK_ALIGN (size);
  args_size = current_function_outgoing_args_size;
  cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;

  /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
     functions.  If the function has local variables, we're committed
     to allocating it anyway.  Otherwise reclaim it here.  */
  if (var_size == 0 && current_function_is_leaf)
    cprestore_size = args_size = 0;

  /* The MIPS 3.0 linker does not like functions that dynamically
     allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
     looks like we are trying to create a second frame pointer to the
     function, so allocate some stack space to make it happy.  */
  if (args_size == 0 && current_function_calls_alloca)
    args_size = 4 * UNITS_PER_WORD;

  total_size = var_size + args_size + cprestore_size;

  /* Calculate space needed for gp registers.  */
  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
    if (mips_save_reg_p (regno))
        gp_reg_size += GET_MODE_SIZE (gpr_mode);
        mask |= 1 << (regno - GP_REG_FIRST);

  /* We need to restore these for the handler.  */
  if (current_function_calls_eh_return)
        regno = EH_RETURN_DATA_REGNO (i);
        if (regno == INVALID_REGNUM)
        gp_reg_size += GET_MODE_SIZE (gpr_mode);
        mask |= 1 << (regno - GP_REG_FIRST);

  /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
     $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
     save all later registers too.  */
  if (GENERATE_MIPS16E_SAVE_RESTORE)
      mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
                              ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
      mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
                              ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);

  /* This loop must iterate over the same space as its companion in
     mips_for_each_saved_reg.  */
  for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
      if (mips_save_reg_p (regno))
          fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
          fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);

  gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
  total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);

  /* Add in the space required for saving incoming register arguments.  */
  total_size += current_function_pretend_args_size;
  total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);

  /* Save other computed information.  */
  cfun->machine->frame.total_size = total_size;
  cfun->machine->frame.var_size = var_size;
  cfun->machine->frame.args_size = args_size;
  cfun->machine->frame.cprestore_size = cprestore_size;
  cfun->machine->frame.gp_reg_size = gp_reg_size;
  cfun->machine->frame.fp_reg_size = fp_reg_size;
  cfun->machine->frame.mask = mask;
  cfun->machine->frame.fmask = fmask;
  cfun->machine->frame.initialized = reload_completed;
  cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
  cfun->machine->frame.num_fp = (fp_reg_size
                                 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));

      HOST_WIDE_INT offset;

      if (GENERATE_MIPS16E_SAVE_RESTORE)
        /* MIPS16e SAVE and RESTORE instructions require the GP save area
           to be aligned at the high end with any padding at the low end.
           It is only safe to use this calculation for o32, where we never
           have pretend arguments, and where any varargs will be saved in
           the caller-allocated area rather than at the top of the frame.  */
        offset = (total_size - GET_MODE_SIZE (gpr_mode));
        offset = (args_size + cprestore_size + var_size
                  + gp_reg_size - GET_MODE_SIZE (gpr_mode));
      cfun->machine->frame.gp_sp_offset = offset;
      cfun->machine->frame.gp_save_offset = offset - total_size;

      cfun->machine->frame.gp_sp_offset = 0;
      cfun->machine->frame.gp_save_offset = 0;

      HOST_WIDE_INT offset;

      offset = (args_size + cprestore_size + var_size
                + gp_reg_rounded + fp_reg_size
                - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
      cfun->machine->frame.fp_sp_offset = offset;
      cfun->machine->frame.fp_save_offset = offset - total_size;

      cfun->machine->frame.fp_sp_offset = 0;
      cfun->machine->frame.fp_save_offset = 0;

  /* Ok, we're done.  */
/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame
   pointer or argument pointer.  TO is either the stack pointer or
   hard frame pointer.  */

mips_initial_elimination_offset (int from, int to)
  HOST_WIDE_INT offset;

  compute_frame_size (get_frame_size ());

  /* Set OFFSET to the offset from the stack pointer.  */
    case FRAME_POINTER_REGNUM:

    case ARG_POINTER_REGNUM:
      offset = (cfun->machine->frame.total_size
                - current_function_pretend_args_size);

  if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
    offset -= cfun->machine->frame.args_size;

/* Implement RETURN_ADDR_RTX.  Note, we do not support moving
   back to a previous frame.  */
mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
  return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
/* Use FN to save or restore register REGNO.  MODE is the register's
   mode and OFFSET is the offset of its save slot from the current
   stack pointer.  */

mips_save_restore_reg (enum machine_mode mode, int regno,
                       HOST_WIDE_INT offset, mips_save_restore_fn fn)
  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
  fn (gen_rtx_REG (mode, regno), mem);

/* Call FN for each register that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
  enum machine_mode fpr_mode;
  HOST_WIDE_INT offset;

  /* Save registers starting from high to low.  The debuggers prefer at least
     the return register be stored at func+4, and also it allows us not to
     need a nop in the epilogue if at least one register is reloaded in
     addition to return address.  */
  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
        mips_save_restore_reg (gpr_mode, regno, offset, fn);
        offset -= GET_MODE_SIZE (gpr_mode);

  /* This loop must iterate over the same space as its companion in
     compute_frame_size.  */
  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
  for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
        mips_save_restore_reg (fpr_mode, regno, offset, fn);
        offset -= GET_MODE_SIZE (fpr_mode);
/* If we're generating n32 or n64 abicalls, and the current function
   does not use $28 as its global pointer, emit a cplocal directive.
   Use pic_offset_table_rtx as the argument to the directive.  */

mips_output_cplocal (void)
  if (!TARGET_EXPLICIT_RELOCS
      && cfun->machine->global_pointer > 0
      && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
    output_asm_insn (".cplocal %+", 0);

/* Return the style of GP load sequence that is being used for the
   current function.  */
enum mips_loadgp_style
mips_current_loadgp_style (void)
  if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)

  if (TARGET_ABSOLUTE_ABICALLS)
    return LOADGP_ABSOLUTE;

  return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
/* The __gnu_local_gp symbol.  */

static GTY(()) rtx mips_gnu_local_gp;

/* If we're generating n32 or n64 abicalls, emit instructions
   to set up the global pointer.  */

mips_emit_loadgp (void)
  rtx addr, offset, incoming_address, base, index;

  switch (mips_current_loadgp_style ())
    case LOADGP_ABSOLUTE:
      if (mips_gnu_local_gp == NULL)
          mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
          SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
      emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));

      addr = XEXP (DECL_RTL (current_function_decl), 0);
      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn (gen_loadgp_newabi (offset, incoming_address));
      if (!TARGET_EXPLICIT_RELOCS)
        emit_insn (gen_loadgp_blockage ());

      base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
      index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
      emit_insn (gen_loadgp_rtp (base, index));
      if (!TARGET_EXPLICIT_RELOCS)
        emit_insn (gen_loadgp_blockage ());
/* Set up the stack and frame (if desired) for the function.  */

mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
  HOST_WIDE_INT tsize = cfun->machine->frame.total_size;

#ifdef SDB_DEBUGGING_INFO
  if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
    SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));

  /* In mips16 mode, we may need to generate a 32-bit stub to handle
     floating-point arguments.  The linker will arrange for any 32-bit
     functions to call this stub, which will then jump to the 16-bit
     function proper.  */
      && TARGET_HARD_FLOAT_ABI
      && current_function_args_info.fp_code != 0)
    build_mips16_function_stub (file);

  if (!FUNCTION_NAME_ALREADY_DECLARED)
      /* Get the function name the same way that toplev.c does before calling
         assemble_start_function.  This is needed so that the name used here
         exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
      fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      if (!flag_inhibit_size_directive)
          fputs ("\t.ent\t", file);
          assemble_name (file, fnname);

      assemble_name (file, fnname);
      fputs (":\n", file);

  /* Stop mips_file_end from treating this function as external.  */
  if (TARGET_IRIX && mips_abi == ABI_32)
    TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;

  if (!flag_inhibit_size_directive)
      /* .frame FRAMEREG, FRAMESIZE, RETREG */
               "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
               "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
               ", args= " HOST_WIDE_INT_PRINT_DEC
               ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
               (reg_names[(frame_pointer_needed)
                          ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
               ((frame_pointer_needed && TARGET_MIPS16)
                ? tsize - cfun->machine->frame.args_size
               reg_names[GP_REG_FIRST + 31],
               cfun->machine->frame.var_size,
               cfun->machine->frame.num_gp,
               cfun->machine->frame.num_fp,
               cfun->machine->frame.args_size,
               cfun->machine->frame.cprestore_size);

      /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
      fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               cfun->machine->frame.mask,
               cfun->machine->frame.gp_save_offset);
      fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               cfun->machine->frame.fmask,
               cfun->machine->frame.fp_save_offset);

      /* Require:
         OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
         HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs.  */

  if (mips_current_loadgp_style () == LOADGP_OLDABI)
      /* Handle the initialization of $gp for SVR4 PIC.  */
      if (!cfun->machine->all_noreorder_p)
        output_asm_insn ("%(.cpload\t%^%)", 0);
        output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
  else if (cfun->machine->all_noreorder_p)
    output_asm_insn ("%(%<", 0);

  /* Tell the assembler which register we're using as the global
     pointer.  This is needed for thunks, since they can use either
     explicit relocs or assembler macros.  */
  mips_output_cplocal ();
/* Make the last instruction frame related and note that it performs
   the operation described by FRAME_PATTERN.  */

mips_set_frame_expr (rtx frame_pattern)
  insn = get_last_insn ();
  RTX_FRAME_RELATED_P (insn) = 1;
  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,

/* Return a frame-related rtx that stores REG at MEM.
   REG must be a single register.  */

mips_frame_set (rtx mem, rtx reg)
  /* If we're saving the return address register and the dwarf return
     address column differs from the hard register number, adjust the
     note reg to refer to the former.  */
  if (REGNO (reg) == GP_REG_FIRST + 31
      && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
    reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);

  set = gen_rtx_SET (VOIDmode, mem, reg);
  RTX_FRAME_RELATED_P (set) = 1;
/* Save register REG to MEM.  Make the instruction frame-related.  */

mips_save_reg (rtx reg, rtx mem)
  if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
      if (mips_split_64bit_move_p (mem, reg))
        mips_split_64bit_move (mem, reg);
        mips_emit_move (mem, reg);

      x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
      x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
      mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));

          && REGNO (reg) != GP_REG_FIRST + 31
          && !M16_REG_P (REGNO (reg)))
          /* Save a non-mips16 register by moving it through a temporary.
             We don't need to do this for $31 since there's a special
             instruction for it.  */
          mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
          mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));

        mips_emit_move (mem, reg);

      mips_set_frame_expr (mips_frame_set (mem, reg));
/* Return a move between register REGNO and memory location SP + OFFSET.
   Make the move a load if RESTORE_P, otherwise make it a frame-related
   store.  */

mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
  mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
  reg = gen_rtx_REG (SImode, regno);
          ? gen_rtx_SET (VOIDmode, reg, mem)
          : mips_frame_set (mem, reg));
/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
   The instruction must:

     - Allocate or deallocate SIZE bytes in total; SIZE is known
       to be valid.

     - Save or restore as many registers in *MASK_PTR as possible.
       The instruction saves the first registers at the top of the
       allocated area, with the other registers below it.

     - Save NARGS argument registers above the allocated area.

   (NARGS is always zero if RESTORE_P.)

   The SAVE and RESTORE instructions cannot save and restore all general
   registers, so there may be some registers left over for the caller to
   handle.  Destructively modify *MASK_PTR so that it contains the registers
   that still need to be saved or restored.  The caller can save these
   registers in the memory immediately below *OFFSET_PTR, which is a
   byte offset from the bottom of the allocated stack area.  */

mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
                            HOST_WIDE_INT *offset_ptr, unsigned int nargs,
  HOST_WIDE_INT offset, top_offset;
  unsigned int i, regno;

  gcc_assert (cfun->machine->frame.fp_reg_size == 0);

  /* Calculate the number of elements in the PARALLEL.  We need one element
     for the stack adjustment, one for each argument register save, and one
     for each additional register move.  */
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
    if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))

  /* Create the final PARALLEL.  */
  pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));

  /* Add the stack pointer adjustment.  */
  set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx,
                                    restore_p ? size : -size));
  RTX_FRAME_RELATED_P (set) = 1;
  XVECEXP (pattern, 0, n++) = set;

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = restore_p ? size : 0;

  /* Save the arguments.  */
  for (i = 0; i < nargs; i++)
      offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
      set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
      XVECEXP (pattern, 0, n++) = set;

  /* Then fill in the other register moves.  */
  offset = top_offset;
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
      regno = mips16e_save_restore_regs[i];
      if (BITSET_P (*mask_ptr, regno))
          offset -= UNITS_PER_WORD;
          set = mips16e_save_restore_reg (restore_p, offset, regno);
          XVECEXP (pattern, 0, n++) = set;
          *mask_ptr &= ~(1 << regno);

  /* Tell the caller what offset it should use for the remaining registers.  */
  *offset_ptr = size + (offset - top_offset);

  gcc_assert (n == XVECLEN (pattern, 0));
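
/* Illustrative sketch only (not part of the compiler proper): the offset
   bookkeeping for registers that do not fit into the SAVE/RESTORE
   instruction, as implemented above.  The helper name, plain C types and
   the 4-byte word size are hypothetical.  With a 32-byte allocation and
   three registers handled by the instruction, the caller's first manual
   save lands one word below offset 20.  */
#if 0
static long long
mips16e_leftover_offset_sketch (long long size, unsigned int handled_regs)
{
  long long top_offset = 0;                      /* save case */
  long long offset = top_offset - 4LL * handled_regs;

  /* Byte offset, measured from the bottom of the allocated area, below
     which the caller should store the remaining registers.  */
  return size + (offset - top_offset);           /* e.g. 32 + (-12) = 20 */
}
#endif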
/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
   pointer.  Return true if PATTERN matches the kind of instruction
   generated by mips16e_build_save_restore.  If INFO is nonnull,
   initialize it when returning true.  */

mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
                                struct mips16e_save_restore_info *info)
  unsigned int i, nargs, mask;
  HOST_WIDE_INT top_offset, save_offset, offset, extra;
  rtx set, reg, mem, base;

  if (!GENERATE_MIPS16E_SAVE_RESTORE)

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = adjust > 0 ? adjust : 0;

  /* Interpret all other members of the PARALLEL.  */
  save_offset = top_offset - GET_MODE_SIZE (gpr_mode);

  for (n = 1; n < XVECLEN (pattern, 0); n++)
      /* Check that we have a SET.  */
      set = XVECEXP (pattern, 0, n);
      if (GET_CODE (set) != SET)

      /* Check that the SET is a load (if restoring) or a store
         (if saving).  */
      mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);

      /* Check that the address is the sum of the stack pointer and a
         possibly-zero constant offset.  */
      mips_split_plus (XEXP (mem, 0), &base, &offset);
      if (base != stack_pointer_rtx)

      /* Check that SET's other operand is a register.  */
      reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);

      /* Check for argument saves.  */
      if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
          && REGNO (reg) == GP_ARG_FIRST + nargs)
      else if (offset == save_offset)
          while (mips16e_save_restore_regs[i++] != REGNO (reg))
            if (i == ARRAY_SIZE (mips16e_save_restore_regs))

          mask |= 1 << REGNO (reg);
          save_offset -= GET_MODE_SIZE (gpr_mode);

  /* Check that the restrictions on register ranges are met.  */
  mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
                          ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
  mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
                          ARRAY_SIZE (mips16e_a0_a3_regs), &extra);

  /* Make sure that the topmost argument register is not saved twice.
     The checks above ensure that the same is then true for the other
     argument registers.  */
  if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))

  /* Pass back information, if requested.  */
      info->nargs = nargs;
      info->size = (adjust > 0 ? adjust : -adjust);
/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
   for the register range [MIN_REG, MAX_REG].  Return a pointer to
   the null terminator.  */

mips16e_add_register_range (char *s, unsigned int min_reg,
                            unsigned int max_reg)
  if (min_reg != max_reg)
    s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
    s += sprintf (s, ",%s", reg_names[min_reg]);
/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
   PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */

mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
  static char buffer[300];

  struct mips16e_save_restore_info info;
  unsigned int i, end;

  /* Parse the pattern.  */
  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))

  /* Add the mnemonic.  */
  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");

  /* Save the arguments.  */
    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
                  reg_names[GP_ARG_FIRST + info.nargs - 1]);
  else if (info.nargs == 1)
    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);

  /* Emit the amount of stack space to allocate or deallocate.  */
  s += sprintf (s, "%d", (int) info.size);

  /* Save or restore $16.  */
  if (BITSET_P (info.mask, 16))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);

  /* Save or restore $17.  */
  if (BITSET_P (info.mask, 17))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);

  /* Save or restore registers in the range $s2...$s8, which
     mips16e_s2_s8_regs lists in decreasing order.  Note that this
     is a software register range; the hardware registers are not
     numbered consecutively.  */
  end = ARRAY_SIZE (mips16e_s2_s8_regs);
  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
                                    mips16e_s2_s8_regs[i]);

  /* Save or restore registers in the range $a0...$a3.  */
  end = ARRAY_SIZE (mips16e_a0_a3_regs);
  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
                                    mips16e_a0_a3_regs[end - 1]);

  /* Save or restore $31.  */
  if (BITSET_P (info.mask, 31))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
/* Return a simplified form of X using the register values in REG_VALUES.
   REG_VALUES[R] is the last value assigned to hard register R, or null
   if R has not been modified.

   This function is rather limited, but is good enough for our purposes.  */

mips16e_collect_propagate_value (rtx x, rtx *reg_values)
  x = avoid_constant_pool_reference (x);

      x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
                                 x0, GET_MODE (XEXP (x, 0)));

  if (ARITHMETIC_P (x))
      x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
      return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);

      && reg_values[REGNO (x)]
      && !rtx_unstable_p (reg_values[REGNO (x)]))
    return reg_values[REGNO (x)];
/* Return true if (set DEST SRC) stores an argument register into its
   caller-allocated save slot, storing the number of that argument
   register in *REGNO_PTR if so.  REG_VALUES is as for
   mips16e_collect_propagate_value.  */

mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
                                 unsigned int *regno_ptr)
  unsigned int argno, regno;
  HOST_WIDE_INT offset, required_offset;

  /* Check that this is a word-mode store.  */
  if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)

  /* Check that the register being saved is an unmodified argument
     register.  */
  regno = REGNO (src);
  if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
  argno = regno - GP_ARG_FIRST;

  /* Check whether the address is an appropriate stack pointer or
     frame pointer access.  The frame pointer is offset from the
     stack pointer by the size of the outgoing arguments.  */
  addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
  mips_split_plus (addr, &base, &offset);
  required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
  if (base == hard_frame_pointer_rtx)
    required_offset -= cfun->machine->frame.args_size;
  else if (base != stack_pointer_rtx)
  if (offset != required_offset)
/* A subroutine of mips_expand_prologue, called only when generating
   MIPS16e SAVE instructions.  Search the start of the function for any
   instructions that save argument registers into their caller-allocated
   save slots.  Delete such instructions and return a value N such that
   saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
   instructions redundant.  */

mips16e_collect_argument_saves (void)
  rtx reg_values[FIRST_PSEUDO_REGISTER];
  rtx insn, next, set, dest, src;
  unsigned int nargs, regno;

  push_topmost_sequence ();
  memset (reg_values, 0, sizeof (reg_values));
  for (insn = get_insns (); insn; insn = next)
      next = NEXT_INSN (insn);

      set = PATTERN (insn);
      if (GET_CODE (set) != SET)

      dest = SET_DEST (set);
      src = SET_SRC (set);
      if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
          if (!BITSET_P (cfun->machine->frame.mask, regno))
              nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
      else if (REG_P (dest) && GET_MODE (dest) == word_mode)
        reg_values[REGNO (dest)]
          = mips16e_collect_propagate_value (src, reg_values);

  pop_topmost_sequence ();
/* Expand the prologue into a bunch of separate insns.  */

mips_expand_prologue (void)
  if (cfun->machine->global_pointer > 0)
    SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

  size = compute_frame_size (get_frame_size ());

  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
     bytes beforehand; this is enough to cover the register save area
     without going out of range.  */
  if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
      HOST_WIDE_INT step1;

      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);

      if (GENERATE_MIPS16E_SAVE_RESTORE)
          HOST_WIDE_INT offset;
          unsigned int mask, regno;

          /* Try to merge argument stores into the save instruction.  */
          nargs = mips16e_collect_argument_saves ();

          /* Build the save instruction.  */
          mask = cfun->machine->frame.mask;
          insn = mips16e_build_save_restore (false, &mask, &offset,
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;

          /* Check if we need to save other registers.  */
          for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
            if (BITSET_P (mask, regno - GP_REG_FIRST))
                offset -= GET_MODE_SIZE (gpr_mode);
                mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);

          insn = gen_add3_insn (stack_pointer_rtx,
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;

          mips_for_each_saved_reg (size, mips_save_reg);

  /* Allocate the rest of the frame.  */
      if (SMALL_OPERAND (-size))
        RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
                                                       GEN_INT (-size)))) = 1;
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));

              /* There are no instructions to add or subtract registers
                 from the stack pointer, so use the frame pointer as a
                 temporary.  We should always be using a frame pointer
                 in this case anyway.  */
              gcc_assert (frame_pointer_needed);
              mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
              emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        MIPS_PROLOGUE_TEMP (Pmode)));
              mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);

            emit_insn (gen_sub3_insn (stack_pointer_rtx,
                                      MIPS_PROLOGUE_TEMP (Pmode)));

          /* Describe the combined effect of the previous instructions.  */
            (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                          plus_constant (stack_pointer_rtx, -size)));

  /* Set up the frame pointer, if we're using one.  In mips16 code,
     we point the frame pointer ahead of the outgoing argument area.
     This should allow more variables & incoming arguments to be
     accessed with unextended instructions.  */
  if (frame_pointer_needed)
      if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
          rtx offset = GEN_INT (cfun->machine->frame.args_size);
          if (SMALL_OPERAND (cfun->machine->frame.args_size))
              (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,

              mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
              mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
              emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        MIPS_PROLOGUE_TEMP (Pmode)));
                (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                              plus_constant (stack_pointer_rtx,
                                             cfun->machine->frame.args_size)));

        RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
                                             stack_pointer_rtx)) = 1;

  mips_emit_loadgp ();

  /* If generating o32/o64 abicalls, save $gp on the stack.  */
  if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
    emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (current_function_profile)
    emit_insn (gen_blockage ());
/* Do any necessary cleanup after a function to restore stack, frame,
   and regs.  */

#define RA_MASK BITMASK_HIGH    /* 1 << 31 */

mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
  /* Reinstate the normal $gp.  */
  SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
  mips_output_cplocal ();

  if (cfun->machine->all_noreorder_p)
      /* Avoid using %>%) since it adds excess whitespace.  */
      output_asm_insn (".set\tmacro", 0);
      output_asm_insn (".set\treorder", 0);
      set_noreorder = set_nomacro = 0;

  if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
      /* Get the function name the same way that toplev.c does before calling
         assemble_start_function.  This is needed so that the name used here
         exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
      fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
      fputs ("\t.end\t", file);
      assemble_name (file, fnname);
/* Emit instructions to restore register REG from slot MEM.  */

mips_restore_reg (rtx reg, rtx mem)
  /* There's no mips16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
    reg = gen_rtx_REG (GET_MODE (reg), 7);

  if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
      /* Can't restore directly; move through a temporary.  */
      mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
      mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
    mips_emit_move (reg, mem);
/* Expand the epilogue into a bunch of separate insns.  SIBCALL_P is true
   if this epilogue precedes a sibling call, false if it is for a normal
   "epilogue" pattern.  */

mips_expand_epilogue (int sibcall_p)
  HOST_WIDE_INT step1, step2;

  if (!sibcall_p && mips_can_use_return_insn ())
      emit_jump_insn (gen_return ());

  /* In mips16 mode, if the return value should go into a floating-point
     register, we need to call a helper routine to copy it over.  */
  if (mips16_cfun_returns_in_fpr_p ())
      enum machine_mode return_mode;

      return_type = DECL_RESULT (current_function_decl);
      return_mode = DECL_MODE (return_type);

      name = ACONCAT (("__mips16_ret_",
                       mips16_call_stub_mode_suffix (return_mode),
      id = get_identifier (name);
      func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
      retval = gen_rtx_REG (return_mode, GP_RETURN);
      call = gen_call_value_internal (retval, func, const0_rtx);
      insn = emit_call_insn (call);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);

  /* Split the frame into two.  STEP1 is the amount of stack we should
     deallocate before restoring the registers.  STEP2 is the amount we
     should deallocate afterwards.

     Start off by assuming that no registers need to be restored.  */
  step1 = cfun->machine->frame.total_size;

  /* Work out which register holds the frame address.  Account for the
     frame pointer offset used by mips16 code.  */
  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
      base = hard_frame_pointer_rtx;
        step1 -= cfun->machine->frame.args_size;

  /* If we need to restore registers, deallocate as much stack as
     possible in the second step without going out of range.  */
  if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);

  /* Set TARGET to BASE + STEP1.  */
      /* Get an rtx for STEP1 that we can add to BASE.  */
      adjust = GEN_INT (step1);
      if (!SMALL_OPERAND (step1))
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
          adjust = MIPS_EPILOGUE_TEMP (Pmode);

      /* Normal mode code can copy the result straight into $sp.  */
        target = stack_pointer_rtx;

      emit_insn (gen_add3_insn (target, base, adjust));

  /* Copy TARGET into the stack pointer.  */
  if (target != stack_pointer_rtx)
    mips_emit_move (stack_pointer_rtx, target);

  /* If we're using addressing macros, $gp is implicitly used by all
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
     from its save slot.  */
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());

  if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
      unsigned int regno, mask;
      HOST_WIDE_INT offset;

      /* Generate the restore instruction.  */
      mask = cfun->machine->frame.mask;
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);

      /* Restore any other registers manually.  */
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
        if (BITSET_P (mask, regno - GP_REG_FIRST))
            offset -= GET_MODE_SIZE (gpr_mode);
            mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);

      /* Restore the remaining registers and deallocate the final bit
         of the frame.  */
      emit_insn (restore);

      /* Restore the registers.  */
      mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,

      /* Deallocate the final bit of the frame.  */
      emit_insn (gen_add3_insn (stack_pointer_rtx,

  /* Add in the __builtin_eh_return stack adjustment.  We need to
8077 use a temporary in mips16 code. */
8078 if (current_function_calls_eh_return
)
8082 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), stack_pointer_rtx
);
8083 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode
),
8084 MIPS_EPILOGUE_TEMP (Pmode
),
8085 EH_RETURN_STACKADJ_RTX
));
8086 mips_emit_move (stack_pointer_rtx
, MIPS_EPILOGUE_TEMP (Pmode
));
8089 emit_insn (gen_add3_insn (stack_pointer_rtx
,
8091 EH_RETURN_STACKADJ_RTX
));
8096 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8097 path will restore the return address into $7 rather than $31. */
8099 && !GENERATE_MIPS16E_SAVE_RESTORE
8100 && (cfun
->machine
->frame
.mask
& RA_MASK
) != 0)
8101 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode
,
8102 GP_REG_FIRST
+ 7)));
8104 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode
,
8105 GP_REG_FIRST
+ 31)));
/* Return nonzero if this function is known to have a null epilogue.
   This allows the optimizer to omit jumps to jumps if no stack
   was created.  */

int
mips_can_use_return_insn (void)
{
  if (! reload_completed)
    return 0;

  if (df_regs_ever_live_p (31) || current_function_profile)
    return 0;

  /* In mips16 mode, a function that returns a floating point value
     needs to arrange to copy the return value into the floating point
     registers.  */
  if (mips16_cfun_returns_in_fpr_p ())
    return 0;

  if (cfun->machine->frame.initialized)
    return cfun->machine->frame.total_size == 0;

  return compute_frame_size (get_frame_size ()) == 0;
}
8134 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8135 in order to avoid duplicating too much logic from elsewhere. */
8138 mips_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
8139 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
8142 rtx
this, temp1
, temp2
, insn
, fnaddr
;
8144 /* Pretend to be a post-reload pass while generating rtl. */
8145 reload_completed
= 1;
8147 /* Mark the end of the (empty) prologue. */
8148 emit_note (NOTE_INSN_PROLOGUE_END
);
8150 /* Pick a global pointer. Use a call-clobbered register if
8151 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8154 cfun
->machine
->global_pointer
=
8155 TARGET_CALL_SAVED_GP
? 15 : GLOBAL_POINTER_REGNUM
;
8157 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
8161 /* Set up the global pointer for n32 or n64 abicalls. If
8162 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8163 no need to load it.*/
8164 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8165 || !targetm
.binds_local_p (function
))
8166 mips_emit_loadgp ();
8168 /* We need two temporary registers in some cases. */
8169 temp1
= gen_rtx_REG (Pmode
, 2);
8170 temp2
= gen_rtx_REG (Pmode
, 3);
8172 /* Find out which register contains the "this" pointer. */
8173 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
8174 this = gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
8176 this = gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
8178 /* Add DELTA to THIS. */
8181 rtx offset
= GEN_INT (delta
);
8182 if (!SMALL_OPERAND (delta
))
8184 mips_emit_move (temp1
, offset
);
8187 emit_insn (gen_add3_insn (this, this, offset
));
8190 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8191 if (vcall_offset
!= 0)
8195 /* Set TEMP1 to *THIS. */
8196 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, this));
8198 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8199 addr
= mips_add_offset (temp2
, temp1
, vcall_offset
);
8201 /* Load the offset and add it to THIS. */
8202 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
8203 emit_insn (gen_add3_insn (this, this, temp1
));
8206 /* Jump to the target function. Use a sibcall if direct jumps are
8207 allowed, otherwise load the address into a register first. */
8208 fnaddr
= XEXP (DECL_RTL (function
), 0);
8209 if (TARGET_MIPS16
|| TARGET_USE_GOT
|| SYMBOL_REF_LONG_CALL_P (fnaddr
))
8211 /* This is messy. gas treats "la $25,foo" as part of a call
8212 sequence and may allow a global "foo" to be lazily bound.
8213 The general move patterns therefore reject this combination.
8215 In this context, lazy binding would actually be OK
8216 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8217 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8218 We must therefore load the address via a temporary
8219 register if mips_dangerous_for_la25_p.
8221 If we jump to the temporary register rather than $25, the assembler
8222 can use the move insn to fill the jump's delay slot. */
8223 if (TARGET_USE_PIC_FN_ADDR_REG
8224 && !mips_dangerous_for_la25_p (fnaddr
))
8225 temp1
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
8226 mips_load_call_address (temp1
, fnaddr
, true);
8228 if (TARGET_USE_PIC_FN_ADDR_REG
8229 && REGNO (temp1
) != PIC_FUNCTION_ADDR_REGNUM
)
8230 mips_emit_move (gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
), temp1
);
8231 emit_jump_insn (gen_indirect_jump (temp1
));
8235 insn
= emit_call_insn (gen_sibcall_internal (fnaddr
, const0_rtx
));
8236 SIBLING_CALL_P (insn
) = 1;
8239 /* Run just enough of rest_of_compilation. This sequence was
8240 "borrowed" from alpha.c. */
8241 insn
= get_insns ();
8242 insn_locators_alloc ();
8243 split_all_insns_noflow ();
8244 mips16_lay_out_constants ();
8245 shorten_branches (insn
);
8246 final_start_function (insn
, file
, 1);
8247 final (insn
, file
, 1);
8248 final_end_function ();
8250 /* Clean up the vars set above. Note that final_end_function resets
8251 the global pointer for us. */
8252 reload_completed
= 0;
/* Returns nonzero if X contains a SYMBOL_REF.  */

static int
symbolic_expression_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF)
    return 1;

  if (GET_CODE (x) == CONST)
    return symbolic_expression_p (XEXP (x, 0));

  if (UNARY_P (x))
    return symbolic_expression_p (XEXP (x, 0));

  if (ARITHMETIC_P (x))
    return (symbolic_expression_p (XEXP (x, 0))
	    || symbolic_expression_p (XEXP (x, 1)));

  return 0;
}
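/* Usage note (illustrative, not from the original source): the predicate
   answers questions such as

	(const (plus (symbol_ref "x") (const_int 4)))	-> nonzero
	(const_int 4)					-> zero

   which is what mips_select_rtx_section below relies on when choosing
   between .data.rel.ro and a mergeable constant section under -fpic.  */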
/* Choose the section to use for the constant rtx expression X that has
   mode MODE.  */

static section *
mips_select_rtx_section (enum machine_mode mode, rtx x,
			 unsigned HOST_WIDE_INT align)
{
  if (TARGET_EMBEDDED_DATA)
    {
      /* For embedded applications, always put constants in read-only data,
	 in order to reduce RAM usage.  */
      return mergeable_constant_section (mode, align, 0);
    }
  else
    {
      /* For hosted applications, always put constants in small data if
	 possible, as this gives the best performance.  */
      /* ??? Consider using mergeable small data sections.  */

      if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
	  && mips_section_threshold > 0)
	return get_named_section (NULL, ".sdata", 0);
      else if (flag_pic && symbolic_expression_p (x))
	return get_named_section (NULL, ".data.rel.ro", 3);
      else
	return mergeable_constant_section (mode, align, 0);
    }
}
/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.

   The complication here is that, with the combination TARGET_ABICALLS
   && !TARGET_GPWORD, jump tables will use absolute addresses, and should
   therefore not be included in the read-only part of a DSO.  Handle such
   cases by selecting a normal data section instead of a read-only one.
   The logic apes that in default_function_rodata_section.  */

static section *
mips_function_rodata_section (tree decl)
{
  if (!TARGET_ABICALLS || TARGET_GPWORD)
    return default_function_rodata_section (decl);

  if (decl && DECL_SECTION_NAME (decl))
    {
      const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
	{
	  char *rname = ASTRDUP (name);
	  rname[14] = 'd';
	  return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
	}
      else if (flag_function_sections && flag_data_sections
	       && strncmp (name, ".text.", 6) == 0)
	{
	  char *rname = ASTRDUP (name);
	  memcpy (rname + 1, "data", 4);
	  return get_section (rname, SECTION_WRITE, decl);
	}
    }
  return data_section;
}
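/* Worked example (illustrative): with TARGET_ABICALLS && !TARGET_GPWORD,
   read-only data for a function in ".gnu.linkonce.t.foo" is redirected to
   ".gnu.linkonce.d.foo" (the rname[14] = 'd' rewrite above), and data for a
   function in ".text.foo" under -ffunction-sections -fdata-sections ends up
   in ".data.foo" (the memcpy rewrite).  */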
/* Implement TARGET_IN_SMALL_DATA_P.  This function controls whether
   locally-defined objects go in a small data section.  It also controls
   the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
   mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses.  */

static bool
mips_in_small_data_p (tree decl)
{
  HOST_WIDE_INT size;

  if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  /* We don't yet generate small-data references for -mabicalls or
     VxWorks RTP code.  See the related -G handling in override_options.  */
  if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
    {
      const char *name;

      /* Reject anything that isn't in a known small-data section.  */
      name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
	return false;

      /* If a symbol is defined externally, the assembler will use the
	 usual -G rules when deciding how to implement macros.  */
      if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
	return true;
    }
  else if (TARGET_EMBEDDED_DATA)
    {
      /* Don't put constants into the small data section: we want them
	 to be in ROM rather than RAM.  */
      if (TREE_CODE (decl) != VAR_DECL)
	return false;

      if (TREE_READONLY (decl)
	  && !TREE_SIDE_EFFECTS (decl)
	  && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
	return false;
    }

  size = int_size_in_bytes (TREE_TYPE (decl));
  return (size > 0 && size <= mips_section_threshold);
}
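/* Illustrative example (not from the original source): assuming the usual
   -G 8 threshold, a file-scope "int counter;" passes the final size test
   and is treated as small data, while a 64-byte array is not; both are
   rejected outright when compiling with -mabicalls or for VxWorks RTPs.  */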
/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
   anchors for small data: the GP register acts as an anchor in that
   case.  We also don't want to use them for PC-relative accesses,
   where the PC acts as an anchor.  */

static bool
mips_use_anchors_for_symbol_p (rtx symbol)
{
  switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
    {
    case SYMBOL_PC_RELATIVE:
    case SYMBOL_GP_RELATIVE:
      return false;

    default:
      return default_use_anchors_for_symbol_p (symbol);
    }
}
/* See whether VALTYPE is a record whose fields should be returned in
   floating-point registers.  If so, return the number of fields and
   list them in FIELDS (which should have two elements).  Return 0
   otherwise.

   For n32 & n64, a structure with one or two fields is returned in
   floating-point registers as long as every field has a floating-point
   type.  */

static int
mips_fpr_return_fields (tree valtype, tree *fields)
{
  tree field;
  int i;

  if (!TARGET_NEWABI)
    return 0;

  if (TREE_CODE (valtype) != RECORD_TYPE)
    return 0;

  i = 0;
  for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
	return 0;

      if (i == 2)
	return 0;

      fields[i++] = field;
    }

  return i;
}
/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
   a value in the most significant part of $2/$3 if:

      - the target is big-endian;

      - the value has a structure or union type (we generalize this to
	cover aggregates from other languages too); and

      - the structure is not returned in floating-point registers.  */

static bool
mips_return_in_msb (tree valtype)
{
  tree fields[2];

  return (TARGET_NEWABI
	  && TARGET_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && mips_fpr_return_fields (valtype, fields) == 0);
}
/* Return a composite value in a pair of floating-point registers.
   MODE1 and OFFSET1 are the mode and byte offset for the first value,
   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
   complete value.

   For n32 & n64, $f0 always holds the first value and $f2 the second.
   Otherwise the values are packed together as closely as possible.  */

static rtx
mips_return_fpr_pair (enum machine_mode mode,
		      enum machine_mode mode1, HOST_WIDE_INT offset1,
		      enum machine_mode mode2, HOST_WIDE_INT offset2)
{
  int inc;

  inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
  return gen_rtx_PARALLEL
    (mode,
     gen_rtvec (2,
		gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode1, FP_RETURN),
				   GEN_INT (offset1)),
		gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode2, FP_RETURN + inc),
				   GEN_INT (offset2))));
}
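/* Illustrative example (not from the original source): under n32, a return
   type of "struct { float x; double y; }" satisfies mips_fpr_return_fields,
   and this routine describes it as x in $f0 and y in $f2, with each field's
   byte offset recorded in the corresponding EXPR_LIST.  */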
/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
   VALTYPE is null and MODE is the mode of the return value.  */

rtx
mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
		     enum machine_mode mode)
{
  if (valtype)
    {
      tree fields[2];
      int unsignedp;

      mode = TYPE_MODE (valtype);
      unsignedp = TYPE_UNSIGNED (valtype);

      /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
	 true, we must promote the mode just as PROMOTE_MODE does.  */
      mode = promote_mode (valtype, mode, &unsignedp, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
	{
	case 1:
	  return gen_rtx_REG (mode, FP_RETURN);

	case 2:
	  return mips_return_fpr_pair (mode,
				       TYPE_MODE (TREE_TYPE (fields[0])),
				       int_byte_position (fields[0]),
				       TYPE_MODE (TREE_TYPE (fields[1])),
				       int_byte_position (fields[1]));
	}

      /* If a value is passed in the most significant part of a register, see
	 whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
	{
	  HOST_WIDE_INT size = int_size_in_bytes (valtype);
	  if (size % UNITS_PER_WORD != 0)
	    {
	      size += UNITS_PER_WORD - size % UNITS_PER_WORD;
	      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
	    }
	}

      /* For EABI, the class of return register depends entirely on MODE.
	 For example, "struct { some_type x; }" and "union { some_type x; }"
	 are returned in the same way as a bare "some_type" would be.
	 Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
	return gen_rtx_REG (mode, GP_RETURN);
    }

  /* Handle long doubles for n32 & n64.  */
  if (mode == TFmode)
    return mips_return_fpr_pair (mode,
				 DImode, 0,
				 DImode, GET_MODE_SIZE (mode) / 2);

  if (mips_return_mode_in_fpr_p (mode))
    {
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	return mips_return_fpr_pair (mode,
				     GET_MODE_INNER (mode), 0,
				     GET_MODE_INNER (mode),
				     GET_MODE_SIZE (mode) / 2);
      else
	return gen_rtx_REG (mode, FP_RETURN);
    }

  return gen_rtx_REG (mode, GP_RETURN);
}
/* Return nonzero when an argument must be passed by reference.  */

static bool
mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			enum machine_mode mode, tree type,
			bool named ATTRIBUTE_UNUSED)
{
  if (mips_abi == ABI_EABI)
    {
      int size;

      /* ??? How should SCmode be handled?  */
      if (mode == DImode || mode == DFmode)
	return 0;

      size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
      return size == -1 || size > UNITS_PER_WORD;
    }
  else
    {
      /* If we have a variable-sized parameter, we have no choice.  */
      return targetm.calls.must_pass_in_stack (mode, type);
    }
}
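/* Illustrative example (not from the original source): under EABI with
   32-bit registers, a 16-byte structure argument (size > UNITS_PER_WORD)
   is passed by reference, whereas a plain DImode or DFmode argument is
   explicitly exempted above and passed by value.  */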
static bool
mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED,
		    tree type ATTRIBUTE_UNUSED, bool named)
{
  return mips_abi == ABI_EABI && named;
}
/* Return true if registers of class CLASS cannot change from mode FROM
   to mode TO.  */

bool
mips_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, enum reg_class class)
{
  if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
      && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
    {
      if (TARGET_BIG_ENDIAN)
	{
	  /* When a multi-word value is stored in paired floating-point
	     registers, the first register always holds the low word.
	     We therefore can't allow FPRs to change between single-word
	     and multi-word modes.  */
	  if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
	    return true;
	}
    }

  /* gcc assumes that each word of a multiword register can be accessed
     individually using SUBREGs.  This is not true for floating-point
     registers if they are bigger than a word.  */
  if (UNITS_PER_FPREG > UNITS_PER_WORD
      && GET_MODE_SIZE (from) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) < UNITS_PER_FPREG
      && reg_classes_intersect_p (FP_REGS, class))
    return true;

  /* Loading a 32-bit value into a 64-bit floating-point register
     will not sign-extend the value, despite what LOAD_EXTEND_OP says.
     We can't allow 64-bit float registers to change from SImode to
     a wider mode.  */
  if (TARGET_64BIT
      && TARGET_FLOAT64
      && from == SImode
      && GET_MODE_SIZE (to) >= UNITS_PER_WORD
      && reg_classes_intersect_p (FP_REGS, class))
    return true;

  return false;
}
/* Return true if X should not be moved directly into register $25.
   We need this because many versions of GAS will treat "la $25,foo" as
   part of a call sequence and so allow a global "foo" to be lazily bound.  */

bool
mips_dangerous_for_la25_p (rtx x)
{
  return (!TARGET_EXPLICIT_RELOCS
	  && TARGET_USE_GOT
	  && GET_CODE (x) == SYMBOL_REF
	  && mips_global_symbol_p (x));
}
/* Implement PREFERRED_RELOAD_CLASS.  */

enum reg_class
mips_preferred_reload_class (rtx x, enum reg_class class)
{
  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
    return LEA_REGS;

  if (TARGET_HARD_FLOAT
      && FLOAT_MODE_P (GET_MODE (x))
      && reg_class_subset_p (FP_REGS, class))
    return FP_REGS;

  if (reg_class_subset_p (GR_REGS, class))
    class = GR_REGS;

  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
    class = M16_REGS;

  return class;
}
8686 /* This function returns the register class required for a secondary
8687 register when copying between one of the registers in CLASS, and X,
8688 using MODE. If IN_P is nonzero, the copy is going from X to the
8689 register, otherwise the register is the source. A return value of
8690 NO_REGS means that no secondary register is required. */
8693 mips_secondary_reload_class (enum reg_class
class,
8694 enum machine_mode mode
, rtx x
, int in_p
)
8696 enum reg_class gr_regs
= TARGET_MIPS16
? M16_REGS
: GR_REGS
;
8700 if (REG_P (x
)|| GET_CODE (x
) == SUBREG
)
8701 regno
= true_regnum (x
);
8703 gp_reg_p
= TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
8705 if (mips_dangerous_for_la25_p (x
))
8708 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) class], 25))
8712 /* Copying from HI or LO to anywhere other than a general register
8713 requires a general register.
8714 This rule applies to both the original HI/LO pair and the new
8715 DSP accumulators. */
8716 if (reg_class_subset_p (class, ACC_REGS
))
8718 if (TARGET_MIPS16
&& in_p
)
8720 /* We can't really copy to HI or LO at all in mips16 mode. */
8723 return gp_reg_p
? NO_REGS
: gr_regs
;
8725 if (ACC_REG_P (regno
))
8727 if (TARGET_MIPS16
&& ! in_p
)
8729 /* We can't really copy to HI or LO at all in mips16 mode. */
8732 return class == gr_regs
? NO_REGS
: gr_regs
;
8735 /* We can only copy a value to a condition code register from a
8736 floating point register, and even then we require a scratch
8737 floating point register. We can only copy a value out of a
8738 condition code register into a general register. */
8739 if (class == ST_REGS
)
8743 return gp_reg_p
? NO_REGS
: gr_regs
;
8745 if (ST_REG_P (regno
))
8749 return class == gr_regs
? NO_REGS
: gr_regs
;
8752 if (class == FP_REGS
)
8756 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8759 else if (CONSTANT_P (x
) && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
8761 /* We can use the l.s and l.d macros to load floating-point
8762 constants. ??? For l.s, we could probably get better
8763 code by returning GR_REGS here. */
8766 else if (gp_reg_p
|| x
== CONST0_RTX (mode
))
8768 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8771 else if (FP_REG_P (regno
))
8773 /* In this case we can use mov.s or mov.d. */
8778 /* Otherwise, we need to reload through an integer register. */
8783 /* In mips16 mode, going between memory and anything but M16_REGS
8784 requires an M16_REG. */
8787 if (class != M16_REGS
&& class != M16_NA_REGS
)
8795 if (class == M16_REGS
|| class == M16_NA_REGS
)
/* Implement CLASS_MAX_NREGS.

   - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.

   - ST_REGS always hold CCmode values, and CCmode values are
     considered to be 4 bytes wide.

   All other register classes are covered by UNITS_PER_WORD.  Note that
   this is true even for unions of integer and float registers when the
   latter are smaller than the former.  The only supported combination
   for which this occurs is -mgp64 -msingle-float, which has 64-bit
   words but 32-bit float registers.  A word-based calculation is correct
   in that case since -msingle-float disallows multi-FPR values.  */

int
mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
		      enum machine_mode mode)
{
  if (class == ST_REGS)
    return (GET_MODE_SIZE (mode) + 3) / 4;
  else if (class == FP_REGS)
    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
  else
    return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
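/* Worked example (illustrative): with 32-bit FPRs (UNITS_PER_FPREG == 4),
   a DFmode value needs (8 + 4 - 1) / 4 == 2 floating-point registers,
   whereas with 64-bit FPRs it needs only one.  A CCmode value in ST_REGS
   counts as 4 bytes and therefore always occupies a single condition-code
   register.  */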
static bool
mips_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || (TARGET_64BIT && mode == DImode));
}
8836 /* Target hook for vector_mode_supported_p. */
8839 mips_vector_mode_supported_p (enum machine_mode mode
)
8844 return TARGET_PAIRED_SINGLE_FLOAT
;
8855 /* If we can access small data directly (using gp-relative relocation
8856 operators) return the small data pointer, otherwise return null.
8858 For each mips16 function which refers to GP relative symbols, we
8859 use a pseudo register, initialized at the start of the function, to
8860 hold the $gp value. */
8863 mips16_gp_pseudo_reg (void)
8865 if (cfun
->machine
->mips16_gp_pseudo_rtx
== NULL_RTX
)
8866 cfun
->machine
->mips16_gp_pseudo_rtx
= gen_reg_rtx (Pmode
);
8868 /* Don't initialize the pseudo register if we are being called from
8869 the tree optimizers' cost-calculation routines. */
8870 if (!cfun
->machine
->initialized_mips16_gp_pseudo_p
8871 && (current_ir_type () != IR_GIMPLE
|| currently_expanding_to_rtl
))
8875 /* We want to initialize this to a value which gcc will believe
8877 insn
= gen_load_const_gp (cfun
->machine
->mips16_gp_pseudo_rtx
);
8879 push_topmost_sequence ();
8880 /* We need to emit the initialization after the FUNCTION_BEG
8881 note, so that it will be integrated. */
8882 for (scan
= get_insns (); scan
!= NULL_RTX
; scan
= NEXT_INSN (scan
))
8884 && NOTE_KIND (scan
) == NOTE_INSN_FUNCTION_BEG
)
8886 if (scan
== NULL_RTX
)
8887 scan
= get_insns ();
8888 insn
= emit_insn_after (insn
, scan
);
8889 pop_topmost_sequence ();
8891 cfun
->machine
->initialized_mips16_gp_pseudo_p
= true;
8894 return cfun
->machine
->mips16_gp_pseudo_rtx
;
8897 /* Write out code to move floating point arguments in or out of
8898 general registers. Output the instructions to FILE. FP_CODE is
8899 the code describing which arguments are present (see the comment at
8900 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8901 we are copying from the floating point registers. */
8904 mips16_fp_args (FILE *file
, int fp_code
, int from_fp_p
)
8909 CUMULATIVE_ARGS cum
;
8911 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8912 gcc_assert (TARGET_OLDABI
);
8919 init_cumulative_args (&cum
, NULL
, NULL
);
8921 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
8923 enum machine_mode mode
;
8924 struct mips_arg_info info
;
8928 else if ((f
& 3) == 2)
8933 mips_arg_info (&cum
, mode
, NULL
, true, &info
);
8934 gparg
= mips_arg_regno (&info
, false);
8935 fparg
= mips_arg_regno (&info
, true);
8938 fprintf (file
, "\t%s\t%s,%s\n", s
,
8939 reg_names
[gparg
], reg_names
[fparg
]);
8940 else if (TARGET_64BIT
)
8941 fprintf (file
, "\td%s\t%s,%s\n", s
,
8942 reg_names
[gparg
], reg_names
[fparg
]);
8943 else if (ISA_HAS_MXHC1
)
8944 /* -mips32r2 -mfp64 */
8945 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8947 reg_names
[gparg
+ (WORDS_BIG_ENDIAN
? 1 : 0)],
8949 from_fp_p
? "mfhc1" : "mthc1",
8950 reg_names
[gparg
+ (WORDS_BIG_ENDIAN
? 0 : 1)],
8952 else if (TARGET_BIG_ENDIAN
)
8953 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s
,
8954 reg_names
[gparg
], reg_names
[fparg
+ 1], s
,
8955 reg_names
[gparg
+ 1], reg_names
[fparg
]);
8957 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s
,
8958 reg_names
[gparg
], reg_names
[fparg
], s
,
8959 reg_names
[gparg
+ 1], reg_names
[fparg
+ 1]);
8961 function_arg_advance (&cum
, mode
, NULL
, true);
/* Build a mips16 function stub.  This is used for functions which
   take arguments in the floating point registers.  It is 32-bit code
   that moves the floating point args into the general registers, and
   then jumps to the 16-bit code.  */
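/* Illustrative sketch (not from the original source) of the stub emitted
   below for a MIPS16 function "foo" that takes floating-point arguments.
   The stub lives in section ".mips16.fn.foo" under the label
   "__fn_stub_foo" and looks roughly like

	.set	noreorder
	mfc1	...			# move FP args into GP registers
	.set	noat
	la	$1, foo
	jr	$1
	.set	at
	nop				# the jump's delay slot cannot be filled
	.set	reorder

   The exact mfc1/dmfc1/mfhc1 sequence depends on the argument types and
   on the width of the registers.  */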
8971 build_mips16_function_stub (FILE *file
)
8974 char *secname
, *stubname
;
8975 tree stubid
, stubdecl
;
8979 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
8980 secname
= (char *) alloca (strlen (fnname
) + 20);
8981 sprintf (secname
, ".mips16.fn.%s", fnname
);
8982 stubname
= (char *) alloca (strlen (fnname
) + 20);
8983 sprintf (stubname
, "__fn_stub_%s", fnname
);
8984 stubid
= get_identifier (stubname
);
8985 stubdecl
= build_decl (FUNCTION_DECL
, stubid
,
8986 build_function_type (void_type_node
, NULL_TREE
));
8987 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
8988 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
, void_type_node
);
8990 fprintf (file
, "\t# Stub function for %s (", current_function_name ());
8992 for (f
= (unsigned int) current_function_args_info
.fp_code
; f
!= 0; f
>>= 2)
8994 fprintf (file
, "%s%s",
8995 need_comma
? ", " : "",
8996 (f
& 3) == 1 ? "float" : "double");
8999 fprintf (file
, ")\n");
9001 fprintf (file
, "\t.set\tnomips16\n");
9002 switch_to_section (function_section (stubdecl
));
9003 ASM_OUTPUT_ALIGN (file
, floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
));
9005 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9006 within a .ent, and we cannot emit another .ent. */
9007 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9009 fputs ("\t.ent\t", file
);
9010 assemble_name (file
, stubname
);
9014 assemble_name (file
, stubname
);
9015 fputs (":\n", file
);
9017 /* We don't want the assembler to insert any nops here. */
9018 fprintf (file
, "\t.set\tnoreorder\n");
9020 mips16_fp_args (file
, current_function_args_info
.fp_code
, 1);
9022 fprintf (asm_out_file
, "\t.set\tnoat\n");
9023 fprintf (asm_out_file
, "\tla\t%s,", reg_names
[GP_REG_FIRST
+ 1]);
9024 assemble_name (file
, fnname
);
9025 fprintf (file
, "\n");
9026 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 1]);
9027 fprintf (asm_out_file
, "\t.set\tat\n");
9029 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9030 with one of the mfc1 instructions, because the result is not
9031 available for one instruction, so if the very first instruction
9032 in the function refers to the register, it will see the wrong
9034 fprintf (file
, "\tnop\n");
9036 fprintf (file
, "\t.set\treorder\n");
9038 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9040 fputs ("\t.end\t", file
);
9041 assemble_name (file
, stubname
);
9045 fprintf (file
, "\t.set\tmips16\n");
9047 switch_to_section (function_section (current_function_decl
));
/* We keep a list of functions for which we have already built stubs
   in build_mips16_call_stub.  */

struct mips16_stub
{
  struct mips16_stub *next;
  char *name;
  int fpret;
};

static struct mips16_stub *mips16_stubs;
/* Emit code to return a double value from a mips16 stub.  GPREG is the
   first GP reg to use, FPREG is the first FP reg to use.  */
9066 mips16_fpret_double (int gpreg
, int fpreg
)
9069 fprintf (asm_out_file
, "\tdmfc1\t%s,%s\n",
9070 reg_names
[gpreg
], reg_names
[fpreg
]);
9071 else if (TARGET_FLOAT64
)
9073 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9074 reg_names
[gpreg
+ WORDS_BIG_ENDIAN
],
9076 fprintf (asm_out_file
, "\tmfhc1\t%s,%s\n",
9077 reg_names
[gpreg
+ !WORDS_BIG_ENDIAN
],
9082 if (TARGET_BIG_ENDIAN
)
9084 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9085 reg_names
[gpreg
+ 0],
9086 reg_names
[fpreg
+ 1]);
9087 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9088 reg_names
[gpreg
+ 1],
9089 reg_names
[fpreg
+ 0]);
9093 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9094 reg_names
[gpreg
+ 0],
9095 reg_names
[fpreg
+ 0]);
9096 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9097 reg_names
[gpreg
+ 1],
9098 reg_names
[fpreg
+ 1]);
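/* Illustrative example (not from the original source): with 32-bit FPRs on
   a big-endian o32 target, returning a double held in $f0/$f1 through
   $2/$3 uses the pair of moves emitted above:

	mfc1	$2, $f1
	mfc1	$3, $f0

   On -mfp64 targets the value lives entirely in $f0 and is extracted with
   an mfc1/mfhc1 pair instead.  */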
/* Build a call stub for a mips16 call.  A stub is needed if we are
   passing any floating point values which should go into the floating
   point registers.  If we are, and the call turns out to be to a
   32-bit function, the stub will be used to move the values into the
   floating point registers before calling the 32-bit function.  The
   linker will magically adjust the function call to either the 16-bit
   function or the 32-bit stub, depending upon where the function call
   is actually defined.

   Similarly, we need a stub if the return value might come back in a
   floating point register.

   RETVAL is the location of the return value, or null if this is
   a call rather than a call_value.  FN is the address of the
   function and ARG_SIZE is the size of the arguments.  FP_CODE
   is the code built by function_arg.  This function returns a nonzero
   value if it builds the call instruction itself.  */
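/* Illustrative example (not from the original source): a call through a
   function pointer to a double-returning function is redirected to a libgcc
   helper named along the lines of "__mips16_call_stub_df_<fp_code>", with
   the target address passed in $2; a direct call instead gets a
   "__call_stub_..." stub in a ".mips16.call.FNNAME" (or
   ".mips16.call.fp.FNNAME") section, as built below.  */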
9122 build_mips16_call_stub (rtx retval
, rtx fn
, rtx arg_size
, int fp_code
)
9126 char *secname
, *stubname
;
9127 struct mips16_stub
*l
;
9128 tree stubid
, stubdecl
;
9132 /* We don't need to do anything if we aren't in mips16 mode, or if
9133 we were invoked with the -msoft-float option. */
9134 if (!TARGET_MIPS16
|| TARGET_SOFT_FLOAT_ABI
)
9137 /* Figure out whether the value might come back in a floating point
9140 fpret
= mips_return_mode_in_fpr_p (GET_MODE (retval
));
9142 /* We don't need to do anything if there were no floating point
9143 arguments and the value will not be returned in a floating point
9145 if (fp_code
== 0 && ! fpret
)
9148 /* We don't need to do anything if this is a call to a special
9149 mips16 support function. */
9150 if (GET_CODE (fn
) == SYMBOL_REF
9151 && strncmp (XSTR (fn
, 0), "__mips16_", 9) == 0)
9154 /* This code will only work for o32 and o64 abis. The other ABI's
9155 require more sophisticated support. */
9156 gcc_assert (TARGET_OLDABI
);
9158 /* If we're calling via a function pointer, then we must always call
9159 via a stub. There are magic stubs provided in libgcc.a for each
9160 of the required cases. Each of them expects the function address
9161 to arrive in register $2. */
9163 if (GET_CODE (fn
) != SYMBOL_REF
)
9169 /* ??? If this code is modified to support other ABI's, we need
9170 to handle PARALLEL return values here. */
9173 sprintf (buf
, "__mips16_call_stub_%s_%d",
9174 mips16_call_stub_mode_suffix (GET_MODE (retval
)),
9177 sprintf (buf
, "__mips16_call_stub_%d",
9180 id
= get_identifier (buf
);
9181 stub_fn
= gen_rtx_SYMBOL_REF (Pmode
, IDENTIFIER_POINTER (id
));
9183 mips_emit_move (gen_rtx_REG (Pmode
, 2), fn
);
9185 if (retval
== NULL_RTX
)
9186 insn
= gen_call_internal (stub_fn
, arg_size
);
9188 insn
= gen_call_value_internal (retval
, stub_fn
, arg_size
);
9189 insn
= emit_call_insn (insn
);
9191 /* Put the register usage information on the CALL. */
9192 CALL_INSN_FUNCTION_USAGE (insn
) =
9193 gen_rtx_EXPR_LIST (VOIDmode
,
9194 gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 2)),
9195 CALL_INSN_FUNCTION_USAGE (insn
));
9197 /* If we are handling a floating point return value, we need to
9198 save $18 in the function prologue. Putting a note on the
9199 call will mean that df_regs_ever_live_p ($18) will be true if the
9200 call is not eliminated, and we can check that in the prologue
9203 CALL_INSN_FUNCTION_USAGE (insn
) =
9204 gen_rtx_EXPR_LIST (VOIDmode
,
9205 gen_rtx_USE (VOIDmode
,
9206 gen_rtx_REG (word_mode
, 18)),
9207 CALL_INSN_FUNCTION_USAGE (insn
));
9209 /* Return 1 to tell the caller that we've generated the call
9214 /* We know the function we are going to call. If we have already
9215 built a stub, we don't need to do anything further. */
9217 fnname
= XSTR (fn
, 0);
9218 for (l
= mips16_stubs
; l
!= NULL
; l
= l
->next
)
9219 if (strcmp (l
->name
, fnname
) == 0)
9224 /* Build a special purpose stub. When the linker sees a
9225 function call in mips16 code, it will check where the target
9226 is defined. If the target is a 32-bit call, the linker will
9227 search for the section defined here. It can tell which
9228 symbol this section is associated with by looking at the
9229 relocation information (the name is unreliable, since this
9230 might be a static function). If such a section is found, the
9231 linker will redirect the call to the start of the magic
9234 If the function does not return a floating point value, the
9235 special stub section is named
9238 If the function does return a floating point value, the stub
9240 .mips16.call.fp.FNNAME
9243 secname
= (char *) alloca (strlen (fnname
) + 40);
9244 sprintf (secname
, ".mips16.call.%s%s",
9247 stubname
= (char *) alloca (strlen (fnname
) + 20);
9248 sprintf (stubname
, "__call_stub_%s%s",
9251 stubid
= get_identifier (stubname
);
9252 stubdecl
= build_decl (FUNCTION_DECL
, stubid
,
9253 build_function_type (void_type_node
, NULL_TREE
));
9254 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
9255 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
, void_type_node
);
9257 fprintf (asm_out_file
, "\t# Stub function to call %s%s (",
9259 ? (GET_MODE (retval
) == SFmode
? "float " : "double ")
9263 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
9265 fprintf (asm_out_file
, "%s%s",
9266 need_comma
? ", " : "",
9267 (f
& 3) == 1 ? "float" : "double");
9270 fprintf (asm_out_file
, ")\n");
9272 fprintf (asm_out_file
, "\t.set\tnomips16\n");
9273 assemble_start_function (stubdecl
, stubname
);
9275 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9277 fputs ("\t.ent\t", asm_out_file
);
9278 assemble_name (asm_out_file
, stubname
);
9279 fputs ("\n", asm_out_file
);
9281 assemble_name (asm_out_file
, stubname
);
9282 fputs (":\n", asm_out_file
);
9285 /* We build the stub code by hand. That's the only way we can
9286 do it, since we can't generate 32-bit code during a 16-bit
9289 /* We don't want the assembler to insert any nops here. */
9290 fprintf (asm_out_file
, "\t.set\tnoreorder\n");
9292 mips16_fp_args (asm_out_file
, fp_code
, 0);
9296 fprintf (asm_out_file
, "\t.set\tnoat\n");
9297 fprintf (asm_out_file
, "\tla\t%s,%s\n", reg_names
[GP_REG_FIRST
+ 1],
9299 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 1]);
9300 fprintf (asm_out_file
, "\t.set\tat\n");
9301 /* Unfortunately, we can't fill the jump delay slot. We
9302 can't fill with one of the mtc1 instructions, because the
9303 result is not available for one instruction, so if the
9304 very first instruction in the function refers to the
9305 register, it will see the wrong value. */
9306 fprintf (asm_out_file
, "\tnop\n");
9310 fprintf (asm_out_file
, "\tmove\t%s,%s\n",
9311 reg_names
[GP_REG_FIRST
+ 18], reg_names
[GP_REG_FIRST
+ 31]);
9312 fprintf (asm_out_file
, "\tjal\t%s\n", fnname
);
9313 /* As above, we can't fill the delay slot. */
9314 fprintf (asm_out_file
, "\tnop\n");
9315 if (GET_MODE (retval
) == SFmode
)
9316 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9317 reg_names
[GP_REG_FIRST
+ 2], reg_names
[FP_REG_FIRST
+ 0]);
9318 else if (GET_MODE (retval
) == SCmode
)
9320 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9321 reg_names
[GP_REG_FIRST
+ 2],
9322 reg_names
[FP_REG_FIRST
+ 0]);
9323 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9324 reg_names
[GP_REG_FIRST
+ 3],
9325 reg_names
[FP_REG_FIRST
+ MAX_FPRS_PER_FMT
]);
9327 else if (GET_MODE (retval
) == DFmode
9328 || GET_MODE (retval
) == V2SFmode
)
9330 mips16_fpret_double (GP_REG_FIRST
+ 2, FP_REG_FIRST
+ 0);
9332 else if (GET_MODE (retval
) == DCmode
)
9334 mips16_fpret_double (GP_REG_FIRST
+ 2,
9336 mips16_fpret_double (GP_REG_FIRST
+ 4,
9337 FP_REG_FIRST
+ MAX_FPRS_PER_FMT
);
9341 if (TARGET_BIG_ENDIAN
)
9343 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9344 reg_names
[GP_REG_FIRST
+ 2],
9345 reg_names
[FP_REG_FIRST
+ 1]);
9346 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9347 reg_names
[GP_REG_FIRST
+ 3],
9348 reg_names
[FP_REG_FIRST
+ 0]);
9352 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9353 reg_names
[GP_REG_FIRST
+ 2],
9354 reg_names
[FP_REG_FIRST
+ 0]);
9355 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9356 reg_names
[GP_REG_FIRST
+ 3],
9357 reg_names
[FP_REG_FIRST
+ 1]);
9360 fprintf (asm_out_file
, "\tj\t%s\n", reg_names
[GP_REG_FIRST
+ 18]);
9361 /* As above, we can't fill the delay slot. */
9362 fprintf (asm_out_file
, "\tnop\n");
9365 fprintf (asm_out_file
, "\t.set\treorder\n");
9367 #ifdef ASM_DECLARE_FUNCTION_SIZE
9368 ASM_DECLARE_FUNCTION_SIZE (asm_out_file
, stubname
, stubdecl
);
9371 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9373 fputs ("\t.end\t", asm_out_file
);
9374 assemble_name (asm_out_file
, stubname
);
9375 fputs ("\n", asm_out_file
);
9378 fprintf (asm_out_file
, "\t.set\tmips16\n");
9380 /* Record this stub. */
9381 l
= (struct mips16_stub
*) xmalloc (sizeof *l
);
9382 l
->name
= xstrdup (fnname
);
9384 l
->next
= mips16_stubs
;
9388 /* If we expect a floating point return value, but we've built a
9389 stub which does not expect one, then we're in trouble. We can't
9390 use the existing stub, because it won't handle the floating point
9391 value. We can't build a new stub, because the linker won't know
9392 which stub to use for the various calls in this object file.
9393 Fortunately, this case is illegal, since it means that a function
9394 was declared in two different ways in a single compilation. */
9395 if (fpret
&& ! l
->fpret
)
9396 error ("cannot handle inconsistent calls to %qs", fnname
);
9398 /* If we are calling a stub which handles a floating point return
9399 value, we need to arrange to save $18 in the prologue. We do
9400 this by marking the function call as using the register. The
9401 prologue will later see that it is used, and emit code to save
9408 if (retval
== NULL_RTX
)
9409 insn
= gen_call_internal (fn
, arg_size
);
9411 insn
= gen_call_value_internal (retval
, fn
, arg_size
);
9412 insn
= emit_call_insn (insn
);
9414 CALL_INSN_FUNCTION_USAGE (insn
) =
9415 gen_rtx_EXPR_LIST (VOIDmode
,
9416 gen_rtx_USE (VOIDmode
, gen_rtx_REG (word_mode
, 18)),
9417 CALL_INSN_FUNCTION_USAGE (insn
));
9419 /* Return 1 to tell the caller that we've generated the call
9424 /* Return 0 to let the caller generate the call insn. */
9428 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9429 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9431 struct mips16_constant
{
9432 struct mips16_constant
*next
;
9435 enum machine_mode mode
;
9438 /* Information about an incomplete mips16 constant pool. FIRST is the
9439 first constant, HIGHEST_ADDRESS is the highest address that the first
9440 byte of the pool can have, and INSN_ADDRESS is the current instruction
9443 struct mips16_constant_pool
{
9444 struct mips16_constant
*first
;
9445 int highest_address
;
9449 /* Add constant VALUE to POOL and return its label. MODE is the
9450 value's mode (used for CONST_INTs, etc.). */
9453 add_constant (struct mips16_constant_pool
*pool
,
9454 rtx value
, enum machine_mode mode
)
9456 struct mips16_constant
**p
, *c
;
9457 bool first_of_size_p
;
9459 /* See whether the constant is already in the pool. If so, return the
9460 existing label, otherwise leave P pointing to the place where the
9461 constant should be added.
9463 Keep the pool sorted in increasing order of mode size so that we can
9464 reduce the number of alignments needed. */
9465 first_of_size_p
= true;
9466 for (p
= &pool
->first
; *p
!= 0; p
= &(*p
)->next
)
9468 if (mode
== (*p
)->mode
&& rtx_equal_p (value
, (*p
)->value
))
9470 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE ((*p
)->mode
))
9472 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE ((*p
)->mode
))
9473 first_of_size_p
= false;
9476 /* In the worst case, the constant needed by the earliest instruction
9477 will end up at the end of the pool. The entire pool must then be
9478 accessible from that instruction.
9480 When adding the first constant, set the pool's highest address to
9481 the address of the first out-of-range byte. Adjust this address
9482 downwards each time a new constant is added. */
9483 if (pool
->first
== 0)
9484 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9485 is the address of the instruction with the lowest two bits clear.
9486 The base PC value for ld has the lowest three bits clear. Assume
9487 the worst case here. */
9488 pool
->highest_address
= pool
->insn_address
- (UNITS_PER_WORD
- 2) + 0x8000;
9489 pool
->highest_address
-= GET_MODE_SIZE (mode
);
9490 if (first_of_size_p
)
9491 /* Take into account the worst possible padding due to alignment. */
9492 pool
->highest_address
-= GET_MODE_SIZE (mode
) - 1;
9494 /* Create a new entry. */
9495 c
= (struct mips16_constant
*) xmalloc (sizeof *c
);
9498 c
->label
= gen_label_rtx ();
9505 /* Output constant VALUE after instruction INSN and return the last
9506 instruction emitted. MODE is the mode of the constant. */
9509 dump_constants_1 (enum machine_mode mode
, rtx value
, rtx insn
)
9511 switch (GET_MODE_CLASS (mode
))
9515 rtx size
= GEN_INT (GET_MODE_SIZE (mode
));
9516 return emit_insn_after (gen_consttable_int (value
, size
), insn
);
9520 return emit_insn_after (gen_consttable_float (value
), insn
);
9522 case MODE_VECTOR_FLOAT
:
9523 case MODE_VECTOR_INT
:
9526 for (i
= 0; i
< CONST_VECTOR_NUNITS (value
); i
++)
9527 insn
= dump_constants_1 (GET_MODE_INNER (mode
),
9528 CONST_VECTOR_ELT (value
, i
), insn
);
9538 /* Dump out the constants in CONSTANTS after INSN. */
9541 dump_constants (struct mips16_constant
*constants
, rtx insn
)
9543 struct mips16_constant
*c
, *next
;
9547 for (c
= constants
; c
!= NULL
; c
= next
)
9549 /* If necessary, increase the alignment of PC. */
9550 if (align
< GET_MODE_SIZE (c
->mode
))
9552 int align_log
= floor_log2 (GET_MODE_SIZE (c
->mode
));
9553 insn
= emit_insn_after (gen_align (GEN_INT (align_log
)), insn
);
9555 align
= GET_MODE_SIZE (c
->mode
);
9557 insn
= emit_label_after (c
->label
, insn
);
9558 insn
= dump_constants_1 (c
->mode
, c
->value
, insn
);
9564 emit_barrier_after (insn
);
9567 /* Return the length of instruction INSN. */
9570 mips16_insn_length (rtx insn
)
9574 rtx body
= PATTERN (insn
);
9575 if (GET_CODE (body
) == ADDR_VEC
)
9576 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 0);
9577 if (GET_CODE (body
) == ADDR_DIFF_VEC
)
9578 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 1);
9580 return get_attr_length (insn
);
9583 /* Rewrite *X so that constant pool references refer to the constant's
9584 label instead. DATA points to the constant pool structure. */
9587 mips16_rewrite_pool_refs (rtx
*x
, void *data
)
9589 struct mips16_constant_pool
*pool
= data
;
9590 rtx base
, offset
, label
;
9594 else if (!TARGET_MIPS16_TEXT_LOADS
)
9597 split_const (*x
, &base
, &offset
);
9598 if (GET_CODE (base
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (base
))
9600 label
= add_constant (pool
, get_pool_constant (base
),
9601 get_pool_mode (base
));
9602 base
= gen_rtx_LABEL_REF (Pmode
, label
);
9603 *x
= mips_unspec_address_offset (base
, offset
, SYMBOL_PC_RELATIVE
);
9606 return GET_CODE (*x
) == CONST
? -1 : 0;
9609 /* Build MIPS16 constant pools. */
9612 mips16_lay_out_constants (void)
9614 struct mips16_constant_pool pool
;
9617 if (!TARGET_MIPS16_PCREL_LOADS
)
9621 memset (&pool
, 0, sizeof (pool
));
9622 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
9624 /* Rewrite constant pool references in INSN. */
9626 for_each_rtx (&PATTERN (insn
), mips16_rewrite_pool_refs
, &pool
);
9628 pool
.insn_address
+= mips16_insn_length (insn
);
9630 if (pool
.first
!= NULL
)
9632 /* If there are no natural barriers between the first user of
9633 the pool and the highest acceptable address, we'll need to
9634 create a new instruction to jump around the constant pool.
9635 In the worst case, this instruction will be 4 bytes long.
9637 If it's too late to do this transformation after INSN,
9638 do it immediately before INSN. */
9639 if (barrier
== 0 && pool
.insn_address
+ 4 > pool
.highest_address
)
9643 label
= gen_label_rtx ();
9645 jump
= emit_jump_insn_before (gen_jump (label
), insn
);
9646 JUMP_LABEL (jump
) = label
;
9647 LABEL_NUSES (label
) = 1;
9648 barrier
= emit_barrier_after (jump
);
9650 emit_label_after (label
, barrier
);
9651 pool
.insn_address
+= 4;
9654 /* See whether the constant pool is now out of range of the first
9655 user. If so, output the constants after the previous barrier.
9656 Note that any instructions between BARRIER and INSN (inclusive)
9657 will use negative offsets to refer to the pool. */
9658 if (pool
.insn_address
> pool
.highest_address
)
9660 dump_constants (pool
.first
, barrier
);
9664 else if (BARRIER_P (insn
))
9668 dump_constants (pool
.first
, get_last_insn ());
9671 /* A temporary variable used by for_each_rtx callbacks, etc. */
9672 static rtx mips_sim_insn
;
9674 /* A structure representing the state of the processor pipeline.
9675 Used by the mips_sim_* family of functions. */
9677 /* The maximum number of instructions that can be issued in a cycle.
9678 (Caches mips_issue_rate.) */
9679 unsigned int issue_rate
;
9681 /* The current simulation time. */
9684 /* How many more instructions can be issued in the current cycle. */
9685 unsigned int insns_left
;
9687 /* LAST_SET[X].INSN is the last instruction to set register X.
9688 LAST_SET[X].TIME is the time at which that instruction was issued.
9689 INSN is null if no instruction has yet set register X. */
9693 } last_set
[FIRST_PSEUDO_REGISTER
];
9695 /* The pipeline's current DFA state. */
9699 /* Reset STATE to the initial simulation state. */
9702 mips_sim_reset (struct mips_sim
*state
)
9705 state
->insns_left
= state
->issue_rate
;
9706 memset (&state
->last_set
, 0, sizeof (state
->last_set
));
9707 state_reset (state
->dfa_state
);
9710 /* Initialize STATE before its first use. DFA_STATE points to an
9711 allocated but uninitialized DFA state. */
9714 mips_sim_init (struct mips_sim
*state
, state_t dfa_state
)
9716 state
->issue_rate
= mips_issue_rate ();
9717 state
->dfa_state
= dfa_state
;
9718 mips_sim_reset (state
);
9721 /* Advance STATE by one clock cycle. */
9724 mips_sim_next_cycle (struct mips_sim
*state
)
9727 state
->insns_left
= state
->issue_rate
;
9728 state_transition (state
->dfa_state
, 0);
9731 /* Advance simulation state STATE until instruction INSN can read
9735 mips_sim_wait_reg (struct mips_sim
*state
, rtx insn
, rtx reg
)
9739 for (i
= 0; i
< HARD_REGNO_NREGS (REGNO (reg
), GET_MODE (reg
)); i
++)
9740 if (state
->last_set
[REGNO (reg
) + i
].insn
!= 0)
9744 t
= state
->last_set
[REGNO (reg
) + i
].time
;
9745 t
+= insn_latency (state
->last_set
[REGNO (reg
) + i
].insn
, insn
);
9746 while (state
->time
< t
)
9747 mips_sim_next_cycle (state
);
9751 /* A for_each_rtx callback. If *X is a register, advance simulation state
9752 DATA until mips_sim_insn can read the register's value. */
9755 mips_sim_wait_regs_2 (rtx
*x
, void *data
)
9758 mips_sim_wait_reg (data
, mips_sim_insn
, *x
);
9762 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9765 mips_sim_wait_regs_1 (rtx
*x
, void *data
)
9767 for_each_rtx (x
, mips_sim_wait_regs_2
, data
);
9770 /* Advance simulation state STATE until all of INSN's register
9771 dependencies are satisfied. */
9774 mips_sim_wait_regs (struct mips_sim
*state
, rtx insn
)
9776 mips_sim_insn
= insn
;
9777 note_uses (&PATTERN (insn
), mips_sim_wait_regs_1
, state
);
9780 /* Advance simulation state STATE until the units required by
9781 instruction INSN are available. */
9784 mips_sim_wait_units (struct mips_sim
*state
, rtx insn
)
9788 tmp_state
= alloca (state_size ());
9789 while (state
->insns_left
== 0
9790 || (memcpy (tmp_state
, state
->dfa_state
, state_size ()),
9791 state_transition (tmp_state
, insn
) >= 0))
9792 mips_sim_next_cycle (state
);
9795 /* Advance simulation state STATE until INSN is ready to issue. */
9798 mips_sim_wait_insn (struct mips_sim
*state
, rtx insn
)
9800 mips_sim_wait_regs (state
, insn
);
9801 mips_sim_wait_units (state
, insn
);
9804 /* mips_sim_insn has just set X. Update the LAST_SET array
9805 in simulation state DATA. */
9808 mips_sim_record_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
9810 struct mips_sim
*state
;
9815 for (i
= 0; i
< HARD_REGNO_NREGS (REGNO (x
), GET_MODE (x
)); i
++)
9817 state
->last_set
[REGNO (x
) + i
].insn
= mips_sim_insn
;
9818 state
->last_set
[REGNO (x
) + i
].time
= state
->time
;
9822 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9823 can issue immediately (i.e., that mips_sim_wait_insn has already
9827 mips_sim_issue_insn (struct mips_sim
*state
, rtx insn
)
9829 state_transition (state
->dfa_state
, insn
);
9830 state
->insns_left
--;
9832 mips_sim_insn
= insn
;
9833 note_stores (PATTERN (insn
), mips_sim_record_set
, state
);
9836 /* Simulate issuing a NOP in state STATE. */
9839 mips_sim_issue_nop (struct mips_sim
*state
)
9841 if (state
->insns_left
== 0)
9842 mips_sim_next_cycle (state
);
9843 state
->insns_left
--;
9846 /* Update simulation state STATE so that it's ready to accept the instruction
9847 after INSN. INSN should be part of the main rtl chain, not a member of a
9851 mips_sim_finish_insn (struct mips_sim
*state
, rtx insn
)
9853 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9855 mips_sim_issue_nop (state
);
9857 switch (GET_CODE (SEQ_BEGIN (insn
)))
9861 /* We can't predict the processor state after a call or label. */
9862 mips_sim_reset (state
);
9866 /* The delay slots of branch likely instructions are only executed
9867 when the branch is taken. Therefore, if the caller has simulated
9868 the delay slot instruction, STATE does not really reflect the state
9869 of the pipeline for the instruction after the delay slot. Also,
9870 branch likely instructions tend to incur a penalty when not taken,
9871 so there will probably be an extra delay between the branch and
9872 the instruction after the delay slot. */
9873 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn
)))
9874 mips_sim_reset (state
);
/* The VR4130 pipeline issues aligned pairs of instructions together,
   but it stalls the second instruction if it depends on the first.
   In order to cut down the amount of logic required, this dependence
   check is not based on a full instruction decode.  Instead, any non-SPECIAL
   instruction is assumed to modify the register specified by bits 20-16
   (which is usually the "rt" field).

   In beq, beql, bne and bnel instructions, the rt field is actually an
   input, so we can end up with a false dependence between the branch
   and its delay slot.  If this situation occurs in instruction INSN,
   try to avoid it by swapping rs and rt.  */
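/* Illustrative example (not from the original source): in a sequence such as

	beq	$4, $5, label
	 addiu	$6, $5, 1		# delay slot reads $5, the rt operand

   the hardware heuristic described above sees a false dependence on $5, so
   the code below rewrites the branch as "beq $5, $4, label", which is
   semantically identical but avoids the stall.  */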
9895 vr4130_avoid_branch_rt_conflict (rtx insn
)
9899 first
= SEQ_BEGIN (insn
);
9900 second
= SEQ_END (insn
);
9902 && NONJUMP_INSN_P (second
)
9903 && GET_CODE (PATTERN (first
)) == SET
9904 && GET_CODE (SET_DEST (PATTERN (first
))) == PC
9905 && GET_CODE (SET_SRC (PATTERN (first
))) == IF_THEN_ELSE
)
9907 /* Check for the right kind of condition. */
9908 rtx cond
= XEXP (SET_SRC (PATTERN (first
)), 0);
9909 if ((GET_CODE (cond
) == EQ
|| GET_CODE (cond
) == NE
)
9910 && REG_P (XEXP (cond
, 0))
9911 && REG_P (XEXP (cond
, 1))
9912 && reg_referenced_p (XEXP (cond
, 1), PATTERN (second
))
9913 && !reg_referenced_p (XEXP (cond
, 0), PATTERN (second
)))
9915 /* SECOND mentions the rt register but not the rs register. */
9916 rtx tmp
= XEXP (cond
, 0);
9917 XEXP (cond
, 0) = XEXP (cond
, 1);
9918 XEXP (cond
, 1) = tmp
;
/* Implement -mvr4130-align.  Go through each basic block and simulate the
   processor pipeline.  If we find that a pair of instructions could execute
   in parallel, and the first of those instructions is not 8-byte aligned,
   insert a nop to make it aligned.  */
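/* Illustrative example (not from the original source): if two independent
   ALU instructions could dual-issue but the first sits at an address that
   is 4 modulo 8, the pass below emits a nop in front of it so that the
   pair starts on an 8-byte boundary and can actually issue together.  */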
9929 vr4130_align_insns (void)
9931 struct mips_sim state
;
9932 rtx insn
, subinsn
, last
, last2
, next
;
9937 /* LAST is the last instruction before INSN to have a nonzero length.
9938 LAST2 is the last such instruction before LAST. */
9942 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
9945 mips_sim_init (&state
, alloca (state_size ()));
9946 for (insn
= get_insns (); insn
!= 0; insn
= next
)
9948 unsigned int length
;
9950 next
= NEXT_INSN (insn
);
9952 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9953 This isn't really related to the alignment pass, but we do it on
9954 the fly to avoid a separate instruction walk. */
9955 vr4130_avoid_branch_rt_conflict (insn
);
9957 if (USEFUL_INSN_P (insn
))
9958 FOR_EACH_SUBINSN (subinsn
, insn
)
9960 mips_sim_wait_insn (&state
, subinsn
);
9962 /* If we want this instruction to issue in parallel with the
9963 previous one, make sure that the previous instruction is
9964 aligned. There are several reasons why this isn't worthwhile
9965 when the second instruction is a call:
9967 - Calls are less likely to be performance critical,
9968 - There's a good chance that the delay slot can execute
9969 in parallel with the call.
9970 - The return address would then be unaligned.
9972 In general, if we're going to insert a nop between instructions
9973 X and Y, it's better to insert it immediately after X. That
9974 way, if the nop makes Y aligned, it will also align any labels
9976 if (state
.insns_left
!= state
.issue_rate
9977 && !CALL_P (subinsn
))
9979 if (subinsn
== SEQ_BEGIN (insn
) && aligned_p
)
9981 /* SUBINSN is the first instruction in INSN and INSN is
9982 aligned. We want to align the previous instruction
9983 instead, so insert a nop between LAST2 and LAST.
9985 Note that LAST could be either a single instruction
9986 or a branch with a delay slot. In the latter case,
9987 LAST, like INSN, is already aligned, but the delay
9988 slot must have some extra delay that stops it from
9989 issuing at the same time as the branch. We therefore
9990 insert a nop before the branch in order to align its
9992                     emit_insn_after (gen_nop (), last2);
9995                 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
9997 /* SUBINSN is the delay slot of INSN, but INSN is
9998 currently unaligned. Insert a nop between
9999 LAST and INSN to align it. */
10000                     emit_insn_after (gen_nop (), last);
10004             mips_sim_issue_insn (&state, subinsn);
10006       mips_sim_finish_insn (&state, insn);
10008       /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
10009       length = get_attr_length (insn);
10012 /* If the instruction is an asm statement or multi-instruction
10013 mips.md pattern, the length is only an estimate.  Insert an
10014 8 byte alignment after it so that the following instructions
10015 can be handled correctly. */
10016       if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10017           && (recog_memoized (insn) < 0 || length >= 8))
10019           next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10020           next = NEXT_INSN (next);
10021           mips_sim_next_cycle (&state);
10024       else if (length & 4)
10025         aligned_p = !aligned_p;
10030 /* See whether INSN is an aligned label. */
10031       if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
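/* Sketch of the effect of vr4130_align_insns (assumed instruction names):
   given A, B, C where the simulator decides that B and C could issue
   together but B starts at an address that is 4 mod 8, the pass emits
   "A; nop; B; C" so that B becomes 8-byte aligned and the pair (B, C)
   can issue in the same cycle.  */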
10037 /* Subroutine of mips_reorg. If there is a hazard between INSN
10038 and a previous instruction, avoid it by inserting nops after
10041 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10042 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10043 before using the value of that register. *HILO_DELAY counts the
10044 number of instructions since the last hilo hazard (that is,
10045 the number of instructions since the last mflo or mfhi).
10047 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10048 for the next instruction.
10050 LO_REG is an rtx for the LO register, used in dependence checking. */
10053 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10054                    rtx *delayed_reg, rtx lo_reg)
10059   if (!INSN_P (insn))
10062   pattern = PATTERN (insn);
10064 /* Do not put the whole function in .set noreorder if it contains
10065 an asm statement. We don't know whether there will be hazards
10066 between the asm statement and the gcc-generated code. */
10067   if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10068     cfun->machine->all_noreorder_p = false;
10070   /* Ignore zero-length instructions (barriers and the like).  */
10071   ninsns = get_attr_length (insn) / 4;
10075 /* Work out how many nops are needed. Note that we only care about
10076 registers that are explicitly mentioned in the instruction's pattern.
10077 It doesn't matter that calls use the argument registers or that they
10078 clobber hi and lo. */
10079   if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10080     nops = 2 - *hilo_delay;
10081   else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10086 /* Insert the nops between this instruction and the previous one.
10087 Each new nop takes us further from the last hilo hazard. */
10088   *hilo_delay += nops;
10090     emit_insn_after (gen_hazard_nop (), after);
10092   /* Set up the state for the next instruction.  */
10093   *hilo_delay += ninsns;
10095   if (INSN_CODE (insn) >= 0)
10096     switch (get_attr_hazard (insn))
10106         set = single_set (insn);
10107         gcc_assert (set != 0);
10108         *delayed_reg = SET_DEST (set);
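/* Worked example of the bookkeeping above (assumed instruction sequence):
   immediately after an mflo or mfhi, *HILO_DELAY is 0, so an instruction
   that sets LO within the next two slots gets 2 - *HILO_DELAY nops
   inserted before it; an instruction that reads *DELAYED_REG before the
   required waiting cycle gets a single nop, as described above.  Each
   emitted nop and each real instruction then advances *HILO_DELAY.  */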
10114 /* Go through the instruction stream and insert nops where necessary.
10115 See if the whole function can then be put into .set noreorder &
10119 mips_avoid_hazards (void)
10121   rtx insn, last_insn, lo_reg, delayed_reg;
10124 /* Force all instructions to be split into their final form. */
10125 split_all_insns_noflow ();
10127 /* Recalculate instruction lengths without taking nops into account. */
10128   cfun->machine->ignore_hazard_length_p = true;
10129 shorten_branches (get_insns ());
10131   cfun->machine->all_noreorder_p = true;
10133 /* Profiled functions can't be all noreorder because the profiler
10134 support uses assembler macros. */
10135   if (current_function_profile)
10136     cfun->machine->all_noreorder_p = false;
10138 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10139 we rely on the assembler to work around some errata. */
10140   if (TARGET_FIX_VR4120)
10141     cfun->machine->all_noreorder_p = false;
10143 /* The same is true for -mfix-vr4130 if we might generate mflo or
10144 mfhi instructions. Note that we avoid using mflo and mfhi if
10145 the VR4130 macc and dmacc instructions are available instead;
10146 see the *mfhilo_{si,di}_macc patterns. */
10147   if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10148     cfun->machine->all_noreorder_p = false;
10153   lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10155   for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10158       if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10159         for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10160           mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10161                              &hilo_delay, &delayed_reg, lo_reg);
10163         mips_avoid_hazard (last_insn, insn, &hilo_delay,
10164                            &delayed_reg, lo_reg);
10171 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10176 mips16_lay_out_constants ();
10177   if (TARGET_EXPLICIT_RELOCS)
10179       if (mips_flag_delayed_branch)
10180 dbr_schedule (get_insns ());
10181 mips_avoid_hazards ();
10182       if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10183 vr4130_align_insns ();
10187 /* This function does three things:
10189 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10190 - Register the mips16 hardware floating point stubs.
10191 - Register the gofast functions if selected using --enable-gofast. */
10193 #include "config/gofast.h"
10196 mips_init_libfuncs (void)
10198   if (TARGET_FIX_VR4120)
10200       set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10201       set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10204   if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10206       set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10207       set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10208       set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10209       set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10211       set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10212       set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10213       set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10214       set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10215       set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10216       set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10217       set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10219       set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10220       set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10221       set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10223       if (TARGET_DOUBLE_FLOAT)
10225           set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10226           set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10227           set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10228           set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10230           set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10231           set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10232           set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10233           set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10234           set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10235           set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10236           set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10238           set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10239           set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10241           set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10242           set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10243           set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10247 gofast_maybe_init_libfuncs ();
10250 /* Return a number assessing the cost of moving a register in class
10251 FROM to class TO. The classes are expressed using the enumeration
10252 values such as `GENERAL_REGS'. A value of 2 is the default; other
10253 values are interpreted relative to that.
10255 It is not required that the cost always equal 2 when FROM is the
10256 same as TO; on some machines it is expensive to move between
10257 registers if they are not general registers.
10259 If reload sees an insn consisting of a single `set' between two
10260 hard registers, and if `REGISTER_MOVE_COST' applied to their
10261 classes returns a value of 2, reload does not check to ensure that
10262 the constraints of the insn are met. Setting a cost of other than
10263 2 will allow reload to verify that the constraints are met. You
10264 should do this if the `movM' pattern's constraints do not allow
10267 ??? We make the cost of moving from HI/LO into general
10268 registers the same as for one of moving general registers to
10269 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10270 pseudo to HI/LO.  This might hurt optimizations, though; it
10271 isn't clear whether it is wise.  And it might not work in all cases.  We
10272 could solve the DImode LO reg problem by using a multiply, just
10273 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10274 problem by using divide instructions. divu puts the remainder in
10275 the HI reg, so doing a divide by -1 will move the value in the HI
10276 reg for all values except -1. We could handle that case by using a
10277 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10278 a compare/branch to test the input value to see which instruction
10279 we need to use. This gets pretty messy, but it is feasible. */
10282 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10283                          enum reg_class to, enum reg_class from)
10285   if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10287   else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10289   else if (reg_class_subset_p (from, GENERAL_REGS))
10291       if (to == M16_REGS)
10293       else if (to == M16_NA_REGS)
10295       else if (reg_class_subset_p (to, GENERAL_REGS))
10302       else if (to == FP_REGS)
10304       else if (reg_class_subset_p (to, ACC_REGS))
10311       else if (reg_class_subset_p (to, ALL_COP_REGS))
10316   else if (from == FP_REGS)
10318       if (reg_class_subset_p (to, GENERAL_REGS))
10320       else if (to == FP_REGS)
10322       else if (to == ST_REGS)
10325   else if (reg_class_subset_p (from, ACC_REGS))
10327       if (reg_class_subset_p (to, GENERAL_REGS))
10335   else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10337   else if (reg_class_subset_p (from, ALL_COP_REGS))
10343 ??? What cases are these? Shouldn't we return 2 here? */
10348 /* Return the length of INSN. LENGTH is the initial length computed by
10349 attributes in the machine-description file. */
10352 mips_adjust_insn_length (rtx insn, int length)
10354   /* An unconditional jump has an unfilled delay slot if it is not part
10355 of a sequence. A conditional jump normally has a delay slot, but
10356 does not on MIPS16. */
10357   if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10360 /* See how many nops might be needed to avoid hardware hazards. */
10361   if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10362     switch (get_attr_hazard (insn))
10376 /* All MIPS16 instructions are a measly two bytes. */
10384 /* Return an asm sequence to start a noat block and load the address
10385 of a label into $1. */
10388 mips_output_load_label (void)
10390   if (TARGET_EXPLICIT_RELOCS)
10394 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10397 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10400       if (ISA_HAS_LOAD_DELAY)
10401 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10402 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10406       if (Pmode == DImode)
10407 return "%[dla\t%@,%0";
10409 return "%[la\t%@,%0";
10413 /* Return the assembly code for INSN, which has the operands given by
10414 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10415 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10416 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10417 version of BRANCH_IF_TRUE. */
10420 mips_output_conditional_branch (rtx insn, rtx *operands,
10421                                 const char *branch_if_true,
10422                                 const char *branch_if_false)
10424   unsigned int length;
10425   rtx taken, not_taken;
10427   length = get_attr_length (insn);
10430 /* Just a simple conditional branch. */
10431       mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10432       return branch_if_true;
10435 /* Generate a reversed branch around a direct jump. This fallback does
10436 not use branch-likely instructions. */
10437   mips_branch_likely = false;
10438   not_taken = gen_label_rtx ();
10439   taken = operands[1];
10441   /* Generate the reversed branch to NOT_TAKEN.  */
10442   operands[1] = not_taken;
10443   output_asm_insn (branch_if_false, operands);
10445 /* If INSN has a delay slot, we must provide delay slots for both the
10446 branch to NOT_TAKEN and the conditional jump. We must also ensure
10447 that INSN's delay slot is executed in the appropriate cases. */
10448   if (final_sequence)
10450 /* This first delay slot will always be executed, so use INSN's
10451          delay slot if it is not annulled.  */
10452       if (!INSN_ANNULLED_BRANCH_P (insn))
10454           final_scan_insn (XVECEXP (final_sequence, 0, 1),
10455                            asm_out_file, optimize, 1, NULL);
10456           INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10459 output_asm_insn ("nop", 0);
10460           fprintf (asm_out_file, "\n");
10463 /* Output the unconditional branch to TAKEN. */
10465     output_asm_insn ("j\t%0%/", &taken);
10468       output_asm_insn (mips_output_load_label (), &taken);
10469 output_asm_insn ("jr\t%@%]%/", 0);
10472 /* Now deal with its delay slot; see above. */
10473   if (final_sequence)
10475 /* This delay slot will only be executed if the branch is taken.
10476          Use INSN's delay slot if it is annulled.  */
10477       if (INSN_ANNULLED_BRANCH_P (insn))
10479           final_scan_insn (XVECEXP (final_sequence, 0, 1),
10480                            asm_out_file, optimize, 1, NULL);
10481           INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10484 output_asm_insn ("nop", 0);
10485           fprintf (asm_out_file, "\n");
10488 /* Output NOT_TAKEN. */
10489   (*targetm.asm_out.internal_label) (asm_out_file, "L",
10490                                      CODE_LABEL_NUMBER (not_taken));
10494 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10495 if some ordered condition is true. The condition is given by
10496 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10497 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10498 its second is always zero. */
10501 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10503   const char *branch[2];
10505 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10506 Make BRANCH[0] branch on the inverse condition. */
10507   switch (GET_CODE (operands[0]))
10509 /* These cases are equivalent to comparisons against zero. */
10511       inverted_p = !inverted_p;
10512       /* Fall through.  */
10514       branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10515       branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10518 /* These cases are always true or always false. */
10520       inverted_p = !inverted_p;
10521       /* Fall through.  */
10523       branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10524       branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10528       branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10529       branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10532   return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10535 /* Used to output div or ddiv instruction DIVISION, which has the operands
10536 given by OPERANDS. Add in a divide-by-zero check if needed.
10538 When working around R4000 and R4400 errata, we need to make sure that
10539 the division is not immediately followed by a shift[1][2]. We also
10540 need to stop the division from being put into a branch delay slot[3].
10541 The easiest way to avoid both problems is to add a nop after the
10542 division. When a divide-by-zero check is needed, this nop can be
10543 used to fill the branch delay slot.
10545 [1] If a double-word or a variable shift executes immediately
10546 after starting an integer division, the shift may give an
10547 incorrect result. See quotations of errata #16 and #28 from
10548 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10549 in mips.md for details.
10551 [2] A similar bug to [1] exists for all revisions of the
10552 R4000 and the R4400 when run in an MC configuration.
10553 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10555 "19. In this following sequence:
10557 ddiv (or ddivu or div or divu)
10558 dsll32 (or dsrl32, dsra32)
10560 if an MPT stall occurs, while the divide is slipping the cpu
10561 pipeline, then the following double shift would end up with an
10564 Workaround: The compiler needs to avoid generating any
10565 sequence with divide followed by extended double shift."
10567 This erratum is also present in "MIPS R4400MC Errata, Processor
10568 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10569 & 3.0" as errata #10 and #4, respectively.
10571 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10572 (also valid for MIPS R4000MC processors):
10574 "52. R4000SC: This bug does not apply for the R4000PC.
10576 There are two flavors of this bug:
10578 1) If the instruction just after divide takes an RF exception
10579 (tlb-refill, tlb-invalid) and gets an instruction cache
10580 miss (both primary and secondary) and the line which is
10581 currently in secondary cache at this index had the first
10582 data word, where the bits 5..2 are set, then R4000 would
10583 get a wrong result for the div.
10588 ------------------- # end-of page. -tlb-refill
10593 ------------------- # end-of page. -tlb-invalid
10596 2) If the divide is in the taken branch delay slot, where the
10597 target takes RF exception and gets an I-cache miss for the
10598 exception vector or where I-cache miss occurs for the
10599 target address, under the above mentioned scenarios, the
10600 div would get wrong results.
10603 j r2 # to next page mapped or unmapped
10604 div r8,r9 # this bug would be there as long
10605 # as there is an ICache miss and
10606 nop # the "data pattern" is present
10609 beq r0, r0, NextPage # to Next page
10613 This bug is present for div, divu, ddiv, and ddivu
10616 Workaround: For item 1), OS could make sure that the next page
10617 after the divide instruction is also mapped. For item 2), the
10618 compiler could make sure that the divide instruction is not in
10619 the branch delay slot."
10621 These processors have PRId values of 0x00004220 and 0x00004300 for
10622 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10625 mips_output_division (const char *division, rtx *operands)
10630   if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10632       output_asm_insn (s, operands);
10635   if (TARGET_CHECK_ZERO_DIV)
10639           output_asm_insn (s, operands);
10640           s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10642       else if (GENERATE_DIVIDE_TRAPS)
10644           output_asm_insn (s, operands);
10645           s = "teq\t%2,%.,7";
10649           output_asm_insn ("%(bne\t%2,%.,1f", operands);
10650           output_asm_insn (s, operands);
10651           s = "break\t7%)\n1:";
10657 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10658 with a final "000" replaced by "k". Ignore case.
10660 Note: this function is shared between GCC and GAS. */
10663 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10665   while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10666     given++, canonical++;
10668   return ((*given == 0 && *canonical == 0)
10669           || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10673 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10674 CPU name. We've traditionally allowed a lot of variation here.
10676 Note: this function is shared between GCC and GAS. */
10679 mips_matching_cpu_name_p (const char *canonical, const char *given)
10681 /* First see if the name matches exactly, or with a final "000"
10682 turned into "k". */
10683   if (mips_strict_matching_cpu_name_p (canonical, given))
10686 /* If not, try comparing based on numerical designation alone.
10687 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10688   if (TOLOWER (*given) == 'r')
10690   if (!ISDIGIT (*given))
10693 /* Skip over some well-known prefixes in the canonical name,
10694 hoping to find a number there too. */
10695   if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10697   else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10699   else if (TOLOWER (canonical[0]) == 'r')
10702   return mips_strict_matching_cpu_name_p (canonical, given);
10706 /* Return the mips_cpu_info entry for the processor or ISA given
10707 by CPU_STRING. Return null if the string isn't recognized.
10709 A similar function exists in GAS. */
10711 static const struct mips_cpu_info *
10712 mips_parse_cpu (const char *cpu_string)
10714   const struct mips_cpu_info *p;
10717 /* In the past, we allowed upper-case CPU names, but it doesn't
10718 work well with the multilib machinery. */
10719   for (s = cpu_string; *s != 0; s++)
10722 warning (0, "the cpu name must be lower case");
10726 /* 'from-abi' selects the most compatible architecture for the given
10727 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10728 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10729 version. Look first at the -mgp options, if given, otherwise base
10730 the choice on MASK_64BIT in TARGET_DEFAULT. */
10731   if (strcasecmp (cpu_string, "from-abi") == 0)
10732     return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10733                                    : ABI_NEEDS_64BIT_REGS ? 3
10734                                    : (TARGET_64BIT ? 3 : 1));
10736   /* 'default' has traditionally been a no-op.  Probably not very useful.  */
10737   if (strcasecmp (cpu_string, "default") == 0)
10740   for (p = mips_cpu_info_table; p->name != 0; p++)
10741     if (mips_matching_cpu_name_p (p->name, cpu_string))
10748 /* Return the processor associated with the given ISA level, or null
10749 if the ISA isn't valid. */
10751 static const struct mips_cpu_info *
10752 mips_cpu_info_from_isa (int isa)
10754   const struct mips_cpu_info *p;
10756   for (p = mips_cpu_info_table; p->name != 0; p++)
10763 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10764 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10765 they only hold condition code modes, and CCmode is always considered to
10766 be 4 bytes wide. All other registers are word sized. */
10769 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10771   if (ST_REG_P (regno))
10772     return ((GET_MODE_SIZE (mode) + 3) / 4);
10773   else if (! FP_REG_P (regno))
10774     return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10776     return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10779 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10780 all BLKmode objects are returned in memory. Under the new (N32 and
10781 64-bit MIPS ABIs) small structures are returned in a register.
10782 Objects with varying size must still be returned in memory, of
10786 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
10789     return (TYPE_MODE (type) == BLKmode);
10791     return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10792             || (int_size_in_bytes (type) == -1));
10796 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10798   return !TARGET_OLDABI;
10801 /* Return true if INSN is a multiply-add or multiply-subtract
10802 instruction and PREV assigns to the accumulator operand. */
10805 mips_linked_madd_p (rtx prev, rtx insn)
10809   x = single_set (insn);
10815   if (GET_CODE (x) == PLUS
10816       && GET_CODE (XEXP (x, 0)) == MULT
10817       && reg_set_p (XEXP (x, 1), prev))
10820   if (GET_CODE (x) == MINUS
10821       && GET_CODE (XEXP (x, 1)) == MULT
10822       && reg_set_p (XEXP (x, 0), prev))
10828 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10829 that may clobber hi or lo. */
10831 static rtx mips_macc_chains_last_hilo;
10833 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10834 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10837 mips_macc_chains_record (rtx insn)
10839   if (get_attr_may_clobber_hilo (insn))
10840     mips_macc_chains_last_hilo = insn;
10843 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10844 has NREADY elements, looking for a multiply-add or multiply-subtract
10845 instruction that is cumulative with mips_macc_chains_last_hilo.
10846 If there is one, promote it ahead of anything else that might
10847 clobber hi or lo. */
10850 mips_macc_chains_reorder (rtx *ready, int nready)
10854   if (mips_macc_chains_last_hilo != 0)
10855     for (i = nready - 1; i >= 0; i--)
10856       if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10858           for (j = nready - 1; j > i; j--)
10859             if (recog_memoized (ready[j]) >= 0
10860                 && get_attr_may_clobber_hilo (ready[j]))
10862                 mips_promote_ready (ready, i, j);
10869 /* The last instruction to be scheduled. */
10871 static rtx vr4130_last_insn;
10873 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10874 points to an rtx that is initially an instruction. Nullify the rtx
10875 if the instruction uses the value of register X. */
10878 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10880   rtx *insn_ptr = data;
10883       && reg_referenced_p (x, PATTERN (*insn_ptr)))
10887 /* Return true if there is true register dependence between vr4130_last_insn
10891 vr4130_true_reg_dependence_p (rtx insn)
10893   note_stores (PATTERN (vr4130_last_insn),
10894                vr4130_true_reg_dependence_p_1, &insn);
10898 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10899 the ready queue and that INSN2 is the instruction after it, return
10900 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10901 in which INSN1 and INSN2 can probably issue in parallel, but for
10902 which (INSN2, INSN1) should be less sensitive to instruction
10903 alignment than (INSN1, INSN2). See 4130.md for more details. */
10906 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10910 /* Check for the following case:
10912 1) there is some other instruction X with an anti dependence on INSN1;
10913 2) X has a higher priority than INSN2; and
10914 3) X is an arithmetic instruction (and thus has no unit restrictions).
10916      If INSN1 is the last instruction blocking X, it would be better to
10917 choose (INSN1, X) over (INSN2, INSN1). */
10918   FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
10919     if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
10920         && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
10921         && recog_memoized (DEP_LINK_CON (dep)) >= 0
10922         && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
10925   if (vr4130_last_insn != 0
10926       && recog_memoized (insn1) >= 0
10927       && recog_memoized (insn2) >= 0)
10929 /* See whether INSN1 and INSN2 use different execution units,
10930 or if they are both ALU-type instructions. If so, they can
10931 probably execute in parallel. */
10932       enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10933       enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10934       if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10936 /* If only one of the instructions has a dependence on
10937 vr4130_last_insn, prefer to schedule the other one first. */
10938           bool dep1 = vr4130_true_reg_dependence_p (insn1);
10939           bool dep2 = vr4130_true_reg_dependence_p (insn2);
10943 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10944 is not an ALU-type instruction and if INSN1 uses the same
10945 execution unit. (Note that if this condition holds, we already
10946 know that INSN2 uses a different execution unit.) */
10947           if (class1 != VR4130_CLASS_ALU
10948               && recog_memoized (vr4130_last_insn) >= 0
10949               && class1 == get_attr_vr4130_class (vr4130_last_insn))
10956 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10957 queue with at least two instructions. Swap the first two if
10958 vr4130_swap_insns_p says that it could be worthwhile. */
10961 vr4130_reorder (rtx *ready, int nready)
10963   if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10964     mips_promote_ready (ready, nready - 2, nready - 1);
10967 /* Remove the instruction at index LOWER from ready queue READY and
10968 reinsert it in front of the instruction at index HIGHER. LOWER must
10972 mips_promote_ready (rtx *ready, int lower, int higher)
10977   new_head = ready[lower];
10978   for (i = lower; i < higher; i++)
10979     ready[i] = ready[i + 1];
10980   ready[i] = new_head;
10983 /* Implement TARGET_SCHED_REORDER. */
10986 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10987                     rtx *ready, int *nreadyp, int cycle)
10989   if (!reload_completed && TUNE_MACC_CHAINS)
10992         mips_macc_chains_last_hilo = 0;
10994         mips_macc_chains_reorder (ready, *nreadyp);
10996   if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
10999         vr4130_last_insn = 0;
11001         vr4130_reorder (ready, *nreadyp);
11003   return mips_issue_rate ();
11006 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11009 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11010                      rtx insn, int more)
11012   switch (GET_CODE (PATTERN (insn)))
11016 /* Don't count USEs and CLOBBERs against the issue rate. */
11021       if (!reload_completed && TUNE_MACC_CHAINS)
11022         mips_macc_chains_record (insn);
11023       vr4130_last_insn = insn;
11029 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11030 dependencies have no cost, except on the 20Kc where output-dependence
11031 is treated like input-dependence. */
11034 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11035                   rtx dep ATTRIBUTE_UNUSED, int cost)
11037   if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11040   if (REG_NOTE_KIND (link) != 0)
11045 /* Return the number of instructions that can be issued per cycle. */
11048 mips_issue_rate (void)
11052     case PROCESSOR_74KC:
11053     case PROCESSOR_74KF2_1:
11054     case PROCESSOR_74KF1_1:
11055     case PROCESSOR_74KF3_2:
11056       /* The 74k is not strictly a quad-issue cpu, but can be seen as one
11057          by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11058          but in reality only a maximum of 3 insns can be issued as the
11059          floating point load/stores also require a slot in the AGEN pipe.  */
11062     case PROCESSOR_20KC:
11063     case PROCESSOR_R4130:
11064     case PROCESSOR_R5400:
11065     case PROCESSOR_R5500:
11066     case PROCESSOR_R7000:
11067     case PROCESSOR_R9000:
11070     case PROCESSOR_SB1:
11071     case PROCESSOR_SB1A:
11072       /* This is actually 4, but we get better performance if we claim 3.
11073          This is partly because of unwanted speculative code motion with the
11074          larger number, and partly because in most common cases we can't
11075          reach the theoretical max of 4.  */
11083 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11084 be as wide as the scheduling freedom in the DFA. */
11087 mips_multipass_dfa_lookahead (void)
11089 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11096 /* Implements a store data bypass check. We need this because the cprestore
11097 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11098 default routine to abort. We just return false for that case. */
11099 /* ??? Should try to give a better result here than assuming false. */
11102 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11104   if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11107   return ! store_data_bypass_p (out_insn, in_insn);
11110 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11111 return the first operand of the associated "pref" or "prefx" insn. */
11114 mips_prefetch_cookie (rtx write, rtx locality)
11116   /* store_streamed / load_streamed.  */
11117   if (INTVAL (locality) <= 0)
11118     return GEN_INT (INTVAL (write) + 4);
11120   /* store / load.  */
11121   if (INTVAL (locality) <= 2)
11124   /* store_retained / load_retained.  */
11125   return GEN_INT (INTVAL (write) + 6);
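/* Worked examples of the encoding above (hint numbers follow directly
   from the arithmetic): (write=0, locality=0) gives 4 (load_streamed),
   (write=1, locality=0) gives 5 (store_streamed), and (write=0 or 1,
   locality=3) gives 6 or 7 (load_retained / store_retained).  */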
11128 /* MIPS builtin function support. */
11130 struct builtin_description
11132 /* The code of the main .md file instruction. See mips_builtin_type
11133 for more information. */
11134   enum insn_code icode;
11136 /* The floating-point comparison code to use with ICODE, if any. */
11137   enum mips_fp_condition cond;
11139 /* The name of the builtin function. */
11142 /* Specifies how the function should be expanded. */
11143   enum mips_builtin_type builtin_type;
11145 /* The function's prototype. */
11146   enum mips_function_type function_type;
11148 /* The target flags required for this function. */
11152 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11153 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11154 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11155 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11156 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
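/* For illustration (expansion written out by hand), the table entry
   DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   MASK_PAIRED_SINGLE_FLOAT) used below expands to:

   { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
     MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
     MASK_PAIRED_SINGLE_FLOAT }  */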
11158 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11160 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11161 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11162 "__builtin_mips_" #INSN "_" #COND "_s", \
11163 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11164 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11165 "__builtin_mips_" #INSN "_" #COND "_d", \
11166 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11168 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11169 The lower and upper forms require TARGET_FLAGS while the any and all
11170 forms require MASK_MIPS3D. */
11171 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11172 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11173 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11174 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11175 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11176 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11177 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11178 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11179 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11180 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11181 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11182 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11183 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11185 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11186 require MASK_MIPS3D. */
11187 #define CMP_4S_BUILTINS(INSN, COND) \
11188 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11189 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11190 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11192 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11193 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11194 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11197 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11198 instruction requires TARGET_FLAGS. */
11199 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11200 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11201 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11202 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11204 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11205 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11206 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11209 /* Define all the builtins related to c.cond.fmt condition COND. */
11210 #define CMP_BUILTINS(COND) \
11211 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11212 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11213 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11214 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11215 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11216 CMP_4S_BUILTINS (c, COND), \
11217 CMP_4S_BUILTINS (cabs, COND)
11219 static const struct builtin_description mips_bdesc[] =
11221   DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11222   DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11223   DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11224   DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11225   DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11226   DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11227   DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11228   DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11230   DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11231                   MASK_PAIRED_SINGLE_FLOAT),
11232   DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11233   DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11234   DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11235   DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11237   DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11238   DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11239   DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11240   DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11241   DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11242   DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11244   DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11245   DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11246   DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11247   DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11248   DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11249   DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11251   MIPS_FP_CONDITIONS (CMP_BUILTINS)
11254 /* Builtin functions for the SB-1 processor. */
11256 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11258 static const struct builtin_description sb1_bdesc[] =
11260   DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11263 /* Builtin functions for DSP ASE. */
11265 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11266 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11267 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11268 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11269 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11271 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11272 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11273 builtin_description fields. */
11274 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11275 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11276 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11278 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11279 branch instruction. TARGET_FLAGS is a builtin_description field. */
11280 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11281 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11282 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11284 static const struct builtin_description dsp_bdesc[] =
11286   DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11287   DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11288   DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11289   DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11290   DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11291   DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11292   DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11293   DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11294   DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11295   DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11296   DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11297   DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11298   DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11299   DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11300   DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11301   DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11302   DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11303   DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11304   DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11305   DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11306   DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11307   DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11308   DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11309   DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11310   DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11311   DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11312   DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11313   DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11314   DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11315   DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11316   DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11317   DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11318   DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11319   DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11320   DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11321   DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11322   DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11323   DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11324   DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11325   DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11326   DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11327   DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11328   DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11329   DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11330   DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11331   DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11332   DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11333   DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11334   DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11335   DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11336   DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11337   DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11338   DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11339   DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11340   DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11341   DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11342   DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11343   DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11344   DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11345   DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11346   DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11347   DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11348   DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11349   DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11350   BPOSGE_BUILTIN (32, MASK_DSP),
11352   /* The following are for the MIPS DSP ASE REV 2.  */
11353   DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11354   DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11355   DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11356   DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11357   DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11358   DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11359   DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11360   DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11361   DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11362   DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11363   DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11364   DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11365   DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11366   DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11367   DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11368   DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11369   DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11370   DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11371   DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11372   DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11373   DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11374   DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11375   DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11376   DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11377   DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11378   DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11379   DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11380   DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11381   DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11382   DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11383   DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11384   DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11385   DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11386   DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11389 static const struct builtin_description dsp_32only_bdesc[] =
11391   DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11392   DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11393   DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11394   DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11395   DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11396   DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11397   DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11398   DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11399   DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11400   DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11401   DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11402   DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11403   DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11404   DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11405   DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11406   DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11407   DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11408   DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11409   DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11410   DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11411   DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11413   /* The following are for the MIPS DSP ASE REV 2.  */
11414   DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11415   DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11416   DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11417   DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11418   DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11419   DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11420   DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11421   DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11422   DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11423   DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11424   DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11425   DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11426   DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11427   DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11428   DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11431 /* This helps provide a mapping from builtin function codes to bdesc
11436 /* The builtin function table that this entry describes. */
11437   const struct builtin_description *bdesc;
11439 /* The number of entries in the builtin function table. */
11442 /* The target processor that supports these builtin functions.
11443 PROCESSOR_MAX means we enable them for all processors. */
11444   enum processor_type proc;
11446 /* If the target has these flags, this builtin function table
11447 will not be supported. */
11448   int unsupported_target_flags;
11451 static const struct bdesc_map bdesc_arrays[] =
11453   { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11454   { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11455   { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11456   { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11460 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11461 suitable for input operand OP of instruction ICODE. Return the value. */
11464 mips_prepare_builtin_arg (enum insn_code icode,
11465                           unsigned int op, tree exp, unsigned int argnum)
11468   enum machine_mode mode;
11470   value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11471   mode = insn_data[icode].operand[op].mode;
11472   if (!insn_data[icode].operand[op].predicate (value, mode))
11474       value = copy_to_mode_reg (mode, value);
11475       /* Check the predicate again.  */
11476       if (!insn_data[icode].operand[op].predicate (value, mode))
11478 error ("invalid argument to builtin function");
11486 /* Return an rtx suitable for output operand OP of instruction ICODE.
11487 If TARGET is non-null, try to use it where possible. */
11490 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11492   enum machine_mode mode;
11494   mode = insn_data[icode].operand[op].mode;
11495   if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11496     target = gen_reg_rtx (mode);
11501 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11504 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11505                      enum machine_mode mode ATTRIBUTE_UNUSED,
11506                      int ignore ATTRIBUTE_UNUSED)
11508   enum insn_code icode;
11509   enum mips_builtin_type type;
11511   unsigned int fcode;
11512   const struct builtin_description *bdesc;
11513   const struct bdesc_map *m;
11515   fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11516   fcode = DECL_FUNCTION_CODE (fndecl);
11519   for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11521       if (fcode < m->size)
11524   icode = bdesc[fcode].icode;
11525   type = bdesc[fcode].builtin_type;
11535     case MIPS_BUILTIN_DIRECT:
11536       return mips_expand_builtin_direct (icode, target, exp, true);
11538     case MIPS_BUILTIN_DIRECT_NO_TARGET:
11539       return mips_expand_builtin_direct (icode, target, exp, false);
11541     case MIPS_BUILTIN_MOVT:
11542     case MIPS_BUILTIN_MOVF:
11543       return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11546     case MIPS_BUILTIN_CMP_ANY:
11547     case MIPS_BUILTIN_CMP_ALL:
11548     case MIPS_BUILTIN_CMP_UPPER:
11549     case MIPS_BUILTIN_CMP_LOWER:
11550     case MIPS_BUILTIN_CMP_SINGLE:
11551       return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11554     case MIPS_BUILTIN_BPOSGE32:
11555       return mips_expand_builtin_bposge (type, target);
11562 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11565 mips_init_builtins (void)
11567 const struct builtin_description
*d
;
11568 const struct bdesc_map
*m
;
11569 tree types
[(int) MIPS_MAX_FTYPE_MAX
];
11570 tree V2SF_type_node
;
11571 tree V2HI_type_node
;
11572 tree V4QI_type_node
;
11573 unsigned int offset
;
11575 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11576 if (!TARGET_PAIRED_SINGLE_FLOAT
&& !TARGET_DSP
)
11579 if (TARGET_PAIRED_SINGLE_FLOAT
)
11581 V2SF_type_node
= build_vector_type_for_mode (float_type_node
, V2SFmode
);
11583 types
[MIPS_V2SF_FTYPE_V2SF
]
11584 = build_function_type_list (V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
11586 types
[MIPS_V2SF_FTYPE_V2SF_V2SF
]
11587 = build_function_type_list (V2SF_type_node
,
11588 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
11590 types
[MIPS_V2SF_FTYPE_V2SF_V2SF_INT
]
11591 = build_function_type_list (V2SF_type_node
,
11592 V2SF_type_node
, V2SF_type_node
,
11593 integer_type_node
, NULL_TREE
);
11595 types
[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF
]
11596 = build_function_type_list (V2SF_type_node
,
11597 V2SF_type_node
, V2SF_type_node
,
11598 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
11600 types
[MIPS_V2SF_FTYPE_SF_SF
]
11601 = build_function_type_list (V2SF_type_node
,
11602 float_type_node
, float_type_node
, NULL_TREE
);
11604 types
[MIPS_INT_FTYPE_V2SF_V2SF
]
11605 = build_function_type_list (integer_type_node
,
11606 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
11608 types
[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF
]
11609 = build_function_type_list (integer_type_node
,
11610 V2SF_type_node
, V2SF_type_node
,
11611 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
11613 types
[MIPS_INT_FTYPE_SF_SF
]
11614 = build_function_type_list (integer_type_node
,
11615 float_type_node
, float_type_node
, NULL_TREE
);
11617 types
[MIPS_INT_FTYPE_DF_DF
]
11618 = build_function_type_list (integer_type_node
,
11619 double_type_node
, double_type_node
, NULL_TREE
);
11621 types
[MIPS_SF_FTYPE_V2SF
]
11622 = build_function_type_list (float_type_node
, V2SF_type_node
, NULL_TREE
);
11624 types
[MIPS_SF_FTYPE_SF
]
11625 = build_function_type_list (float_type_node
,
11626 float_type_node
, NULL_TREE
);
11628 types
[MIPS_SF_FTYPE_SF_SF
]
11629 = build_function_type_list (float_type_node
,
11630 float_type_node
, float_type_node
, NULL_TREE
);
11632 types
[MIPS_DF_FTYPE_DF
]
11633 = build_function_type_list (double_type_node
,
11634 double_type_node
, NULL_TREE
);
11636 types
[MIPS_DF_FTYPE_DF_DF
]
11637 = build_function_type_list (double_type_node
,
11638 double_type_node
, double_type_node
, NULL_TREE
);

  if (TARGET_DSP)
    {
      V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
      V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);

      types[MIPS_V2HI_FTYPE_V2HI_V2HI]
        = build_function_type_list (V2HI_type_node,
                                    V2HI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_SI_SI]
        = build_function_type_list (intSI_type_node,
                                    intSI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V4QI_FTYPE_V4QI_V4QI]
        = build_function_type_list (V4QI_type_node,
                                    V4QI_type_node, V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_V4QI]
        = build_function_type_list (intSI_type_node,
                                    V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_V2HI]
        = build_function_type_list (V2HI_type_node,
                                    V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_SI]
        = build_function_type_list (intSI_type_node,
                                    intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V4QI_FTYPE_V2HI_V2HI]
        = build_function_type_list (V4QI_type_node,
                                    V2HI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_SI_SI]
        = build_function_type_list (V2HI_type_node,
                                    intSI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_V2HI]
        = build_function_type_list (intSI_type_node,
                                    V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_V4QI]
        = build_function_type_list (V2HI_type_node,
                                    V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_V4QI_FTYPE_V4QI_SI]
        = build_function_type_list (V4QI_type_node,
                                    V4QI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_V2HI_SI]
        = build_function_type_list (V2HI_type_node,
                                    V2HI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_V4QI_V2HI]
        = build_function_type_list (V2HI_type_node,
                                    V4QI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_V2HI_V2HI]
        = build_function_type_list (intSI_type_node,
                                    V2HI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
        = build_function_type_list (intDI_type_node,
                                    intDI_type_node, V4QI_type_node, V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
        = build_function_type_list (intDI_type_node,
                                    intDI_type_node, V2HI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_DI_FTYPE_DI_SI_SI]
        = build_function_type_list (intDI_type_node,
                                    intDI_type_node, intSI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V4QI_FTYPE_SI]
        = build_function_type_list (V4QI_type_node,
                                    intSI_type_node,
                                    NULL_TREE);

      types[MIPS_V2HI_FTYPE_SI]
        = build_function_type_list (V2HI_type_node,
                                    intSI_type_node,
                                    NULL_TREE);

      types[MIPS_VOID_FTYPE_V4QI_V4QI]
        = build_function_type_list (void_type_node,
                                    V4QI_type_node, V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_V4QI_V4QI]
        = build_function_type_list (intSI_type_node,
                                    V4QI_type_node, V4QI_type_node,
                                    NULL_TREE);

      types[MIPS_VOID_FTYPE_V2HI_V2HI]
        = build_function_type_list (void_type_node,
                                    V2HI_type_node, V2HI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_DI_SI]
        = build_function_type_list (intSI_type_node,
                                    intDI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_DI_FTYPE_DI_SI]
        = build_function_type_list (intDI_type_node,
                                    intDI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_VOID_FTYPE_SI_SI]
        = build_function_type_list (void_type_node,
                                    intSI_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_PTR_SI]
        = build_function_type_list (intSI_type_node,
                                    ptr_type_node, intSI_type_node,
                                    NULL_TREE);

      types[MIPS_SI_FTYPE_VOID]
        = build_function_type (intSI_type_node, void_list_node);

      if (TARGET_DSPR2)
        {
          types[MIPS_V4QI_FTYPE_V4QI]
            = build_function_type_list (V4QI_type_node,
                                        V4QI_type_node,
                                        NULL_TREE);

          types[MIPS_SI_FTYPE_SI_SI_SI]
            = build_function_type_list (intSI_type_node,
                                        intSI_type_node, intSI_type_node,
                                        intSI_type_node, NULL_TREE);

          types[MIPS_DI_FTYPE_DI_USI_USI]
            = build_function_type_list (intDI_type_node,
                                        intDI_type_node,
                                        unsigned_intSI_type_node,
                                        unsigned_intSI_type_node, NULL_TREE);

          types[MIPS_DI_FTYPE_SI_SI]
            = build_function_type_list (intDI_type_node,
                                        intSI_type_node, intSI_type_node,
                                        NULL_TREE);

          types[MIPS_DI_FTYPE_USI_USI]
            = build_function_type_list (intDI_type_node,
                                        unsigned_intSI_type_node,
                                        unsigned_intSI_type_node, NULL_TREE);

          types[MIPS_V2HI_FTYPE_SI_SI_SI]
            = build_function_type_list (V2HI_type_node,
                                        intSI_type_node, intSI_type_node,
                                        intSI_type_node, NULL_TREE);
        }
    }
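
  /* Note (illustrative): each MIPS_<RESULT>_FTYPE_<ARGS> index above encodes
     a builtin prototype; MIPS_V2HI_FTYPE_V2HI_V2HI, for example, stands for
     "v2hi f (v2hi, v2hi)".  The bdesc tables record one of these indices per
     builtin, which selects the matching entry of TYPES when the builtins are
     registered below.  */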

  /* Iterate through all of the bdesc arrays, initializing all of the
     builtin functions.  */

  offset = 0;
  for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
    {
      if ((m->proc == PROCESSOR_MAX || m->proc == mips_arch)
          && (m->unsupported_target_flags & target_flags) == 0)
        for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
          if ((d->target_flags & target_flags) == d->target_flags)
            add_builtin_function (d->name, types[d->function_type],
                                  d - m->bdesc + offset,
                                  BUILT_IN_MD, NULL, NULL);
      offset += m->size;
    }
}
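
/* Illustrative example (builtin name used only as an example): with
   -mpaired-single the loop above registers "__builtin_mips_pll_ps" with the
   prototype recorded in types[MIPS_V2SF_FTYPE_V2SF_V2SF].  The function code
   passed to add_builtin_function is the entry's index within its bdesc array
   plus the running OFFSET, which is what lets mips_expand_builtin map a
   DECL_FUNCTION_CODE back to the right builtin_description.  */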

/* Expand a MIPS_BUILTIN_DIRECT function.  ICODE is the code of the
   .md pattern and EXP is the function expr with arguments.  TARGET,
   if nonnull, suggests a good place to put the result.
   HAS_TARGET says whether the function must return something.  */

static rtx
mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
                            bool has_target)
{
  rtx ops[MAX_RECOG_OPERANDS];
  int i = 0;
  int j = 0;

  if (has_target)
    {
      /* We save TARGET to ops[0].  */
      ops[0] = mips_prepare_builtin_target (icode, 0, target);
      i = 1;
    }

  /* Stop when we run out of arguments; some instructions have extra
     clobber registers beyond the call's operands.  */
  for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp);
       i++, j++)
    ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);

  switch (i)
    {
    case 2:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
      break;

    case 3:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
      break;

    case 4:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
      break;

    default:
      gcc_unreachable ();
    }
  return target;
}
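
/* Sketch (illustrative, operand names invented): for a typical two-argument
   builtin that returns a value, I ends up as 3 above and the whole expansion
   reduces to a single emit_insn (GEN_FCN (icode) (target, arg0, arg1)), i.e.
   one define_insn from mips.md with no additional control flow.  */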

/* Expand a __builtin_mips_movt_*_ps () or __builtin_mips_movf_*_ps ()
   function (TYPE says which).  EXP is the tree for the function call,
   ICODE is the instruction that should be used to compare the first
   two arguments, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the result.  */

static rtx
mips_expand_builtin_movtf (enum mips_builtin_type type,
                           enum insn_code icode, enum mips_fp_condition cond,
                           rtx target, tree exp)
{
  rtx cmp_result, op0, op1;

  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
  op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
  op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
  emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));

  icode = CODE_FOR_mips_cond_move_tf_ps;
  target = mips_prepare_builtin_target (icode, 0, target);
  if (type == MIPS_BUILTIN_MOVT)
    {
      op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
      op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
    }
  else
    {
      op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
      op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
    }
  emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
  return target;
}
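
/* Note (restating the code above): the only difference between the MOVT and
   MOVF cases is which call argument is routed to operand 1 and which to
   operand 2 of mips_cond_move_tf_ps, so a single conditional-move pattern
   handles both the "move on true" and "move on false" builtins.  */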

/* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
   into TARGET otherwise.  Return TARGET.  */

static rtx
mips_builtin_branch_and_move (rtx condition, rtx target,
                              rtx value_if_true, rtx value_if_false)
{
  rtx true_label, done_label;

  true_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  /* First assume that CONDITION is false.  */
  mips_emit_move (target, value_if_false);

  /* Branch to TRUE_LABEL if CONDITION is true and to DONE_LABEL otherwise.  */
  emit_jump_insn (gen_condjump (condition, true_label));
  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();

  /* Fix TARGET if CONDITION is true.  */
  emit_label (true_label);
  mips_emit_move (target, value_if_true);

  emit_label (done_label);
  return target;
}
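
/* The emitted sequence is roughly (pseudo-code, for illustration):

       target = value_if_false;
       if (condition) goto true_label;
       goto done_label;
     true_label:
       target = value_if_true;
     done_label:  */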

/* Expand a comparison builtin of type BUILTIN_TYPE.  ICODE is the code
   of the comparison instruction and COND is the condition it should test.
   EXP is the function call and its arguments, and TARGET, if nonnull,
   suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
                             enum insn_code icode, enum mips_fp_condition cond,
                             rtx target, tree exp)
{
  rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
  int i;
  int j = 0;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  /* Prepare the operands to the comparison.  */
  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
  for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
    ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);

  switch (insn_data[icode].n_operands)
    {
    case 4:
      emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
      break;

    case 6:
      emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
                                  ops[3], ops[4], GEN_INT (cond)));
      break;

    default:
      gcc_unreachable ();
    }

  /* If the comparison sets more than one register, we define the result
     to be 0 if all registers are false and -1 if all registers are true.
     The value of the complete result is indeterminate otherwise.  */
  switch (builtin_type)
    {
    case MIPS_BUILTIN_CMP_ALL:
      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const0_rtx, const1_rtx);

    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
      condition = gen_single_cc (cmp_result, offset);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);

    default:
      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);
    }
}
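
/* Behaviour summary (restating the code above): ALL-type builtins yield 1
   only when every condition code written by the comparison is true
   (CMP_RESULT == -1), ANY- and SINGLE-type builtins yield 1 when any bit of
   CMP_RESULT is set, and UPPER/LOWER test one specific condition code
   selected by OFFSET.  */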

/* Expand a bposge builtin of type BUILTIN_TYPE.  TARGET, if nonnull,
   suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
{
  rtx condition, cmp_result;
  int cmp_value;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);

  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
    cmp_value = 32;
  else
    gcc_unreachable ();

  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
  return mips_builtin_branch_and_move (condition, target,
                                       const1_rtx, const0_rtx);
}
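
/* Illustrative usage (example code, not from this file):

     if (__builtin_mips_bposge32 ())
       ...

   compares the DSP control register's "pos" field (tracked here through
   CCDSP_PO_REGNUM) against 32 and, via the branch-and-move helper, produces
   1 when pos >= 32 and 0 otherwise.  */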

/* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
   FIRST is true if this is the first time handling this decl.  */

static void
mips_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      rtx symbol = XEXP (rtl, 0);

      if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
          || mips_far_type_p (TREE_TYPE (decl)))
        SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
    }
}
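
/* Illustrative example (declaration shown only as an example): compiling with
   -mlong-calls, or declaring

     void f (void) __attribute__ ((long_call));

   marks f's SYMBOL_REF with SYMBOL_FLAG_LONG_CALL, so calls to f are made
   indirectly through a register rather than with a direct jal; the "near"
   attribute suppresses only the -mlong-calls case.  */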

/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  Some code models use the incoming
   value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer.  */

static void
mips_extra_live_on_entry (bitmap regs)
{
  if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
    bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
}
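
/* Background note (illustrative): in SVR4-style PIC code the caller passes
   the callee's address in $25 ($t9, PIC_FUNCTION_ADDR_REGNUM) and the
   callee's prologue derives $gp from it; marking the register live on entry
   keeps the dataflow framework from treating that incoming value as dead.  */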

/* Implement TARGET_MODE_REP_EXTENDED.  On 64-bit targets, SImode values
   are represented as sign-extended to DImode.  */

static int
mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
{
  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
    return SIGN_EXTEND;

  return UNKNOWN;
}
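
/* For example (illustrative), on a MIPS64 target a 32-bit value held in a
   64-bit register is always kept in sign-extended form, so the optimizers can
   treat a later (sign_extend:DI (reg:SI ...)) of such a value as redundant.  */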

/* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  */

static void
mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.dtprelword\t", file);
      break;

    case 8:
      fputs ("\t.dtpreldword\t", file);
      break;

    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("+0x8000", file);
}
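
/* For a 4-byte DTP-relative reference to a symbol "x", the function above
   therefore emits roughly (illustrative):

     .dtprelword  x+0x8000

   the +0x8000 matching the bias the MIPS TLS ABI applies to DTP-relative
   values.  */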

#include "gt-mips.h"