/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "diagnostic.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP			\
  (!TARGET_MIPS16 ? 0x7ff0				\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8		\
   : TARGET_64BIT ? 0x100 : 0x400)
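
/* Illustrative note (not in the original sources): with a normal-mode frame
   of, say, 0x9000 bytes, the register-save step can move $sp by at most
   MIPS_MAX_FIRST_STACK_STEP (0x7ff0) bytes, so the remaining 0x1010 bytes
   of the frame must be allocated by a separate, later stack adjustment.  */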
/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN)					\
  (INSN_P (INSN)						\
   && GET_CODE (PATTERN (INSN)) != USE				\
   && GET_CODE (PATTERN (INSN)) != CLOBBER			\
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC			\
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, 0)					\
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)	\
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
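
/* Usage sketch (assumed, not from the original file): walk every
   instruction, including those wrapped in delayed-branch SEQUENCEs:

       rtx subinsn;

       FOR_EACH_SUBINSN (subinsn, insn)
	 if (USEFUL_INSN_P (subinsn))
	   handle (subinsn);

   where "handle" stands for whatever per-instruction processing is
   wanted.  */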
/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
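
/* For example (illustrative only), BITSET_P (frame->mask, 31) tests whether
   GPR 31, the return-address register, is among the registers that a frame
   saves or restores; see the "mask" field of struct mips_frame_info below.  */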
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
/* Macros to create an enumeration identifier for a function prototype.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E

/* Classifies the prototype of a built-in function.  */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};
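
/* Expansion example (illustrative): an entry such as
   DEF_MIPS_FTYPE (2, (SI, SI, SI)) in mips-ftypes.def becomes
   MIPS_FTYPE_NAME2 (SI, SI, SI), which in turn expands to the
   enumerator MIPS_SI_FTYPE_SI_SI.  */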
/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};
/* Invoke MACRO (COND) for each C.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f),	\
  MACRO (un),	\
  MACRO (eq),	\
  MACRO (ueq),	\
  MACRO (olt),	\
  MACRO (ult),	\
  MACRO (ole),	\
  MACRO (ule),	\
  MACRO (sf),	\
  MACRO (ngle),	\
  MACRO (seq),	\
  MACRO (ngl),	\
  MACRO (lt),	\
  MACRO (nge),	\
  MACRO (le),	\
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
/* Information about a function's frame layout.  */
struct mips_frame_info GTY(()) {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* The number of bytes allocated to variables.  */
  HOST_WIDE_INT var_size;

  /* The number of bytes allocated to outgoing function arguments.  */
  HOST_WIDE_INT args_size;

  /* The number of bytes allocated to the .cprestore slot, or 0 if there
     is no such slot.  */
  HOST_WIDE_INT cprestore_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* The number of GPRs and FPRs saved.  */
  unsigned int num_gp;
  unsigned int num_fp;

  /* The offset of the topmost GPR and FPR save slots from the top of
     the frame, or zero if no such slots are needed.  */
  HOST_WIDE_INT gp_save_offset;
  HOST_WIDE_INT fp_save_offset;

  /* Likewise, but giving offsets from the bottom of the frame.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* The offset of arg_pointer_rtx from frame_pointer_rtx.  */
  HOST_WIDE_INT arg_pointer_offset;

  /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx.  */
  HOST_WIDE_INT hard_frame_pointer_offset;
};
struct machine_function GTY(()) {
  /* The register returned by mips16_gp_pseudo_reg; see there for details.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info.  */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer.  */
  unsigned int global_pointer;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard length.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function is known to have an instruction that needs $gp.  */
  bool has_gp_insn_p;

  /* True if we have emitted an instruction to initialize
     mips16_gp_pseudo_rtx.  */
  bool initialized_mips16_gp_pseudo_p;
};
/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};
/* Information about an address described by mips_address_type.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct mips_address_info {
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};
/* One stage in a constant building sequence.  These sequences have
   the form:

	A = VALUE[0]
	A = A CODE[1] VALUE[1]
	A = A CODE[2] VALUE[2]
	...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
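
/* Worked example (illustrative, not from the original comments): for the
   constant 0x12345678, mips_build_integer below produces two stages,
   codes[0] = { UNKNOWN, 0x12340000 } and codes[1] = { IOR, 0x5678 },
   i.e. an LUI/ORI pair.  A 64-bit constant such as 0x1234567887654321
   needs the full six-operation LUI,ORI,SLL,ORI,SLL,ORI form.  */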
/* Information about a MIPS16e SAVE or RESTORE instruction.  */
struct mips16e_save_restore_info {
  /* The number of argument registers saved by a SAVE instruction.
     0 for RESTORE instructions.  */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X.  */
  unsigned int mask;

  /* The total number of bytes to allocate.  */
  HOST_WIDE_INT size;
};
/* Global variables for machine-dependent things.  */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given.  */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename.  */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet.  */
const char *current_function_file = "";

/* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END.  */
int sdb_label_count;

/* Arrays that map GCC register numbers to debugger register numbers.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */

/* True if we're writing out a branch-likely instruction rather than a
   normal branch.  */
static bool mips_branch_likely;

/* The operands passed to the last cmpMM expander.  */
rtx cmp_operands[2];
/* The current instruction-set architecture.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The processor that we should tune the code for.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* The ISA level associated with mips_arch.  */
int mips_isa;

/* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
static const struct mips_cpu_info *mips_isa_option_info;

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Which cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* The ambient target flags, excluding MASK_MIPS16.  */
static int mips_base_target_flags;

/* True if MIPS16 is the default mode.  */
bool mips_base_mips16;

/* The ambient values of other global variables.  */
static int mips_base_delayed_branch; /* flag_delayed_branch */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */

/* The -mcode-readable setting.  */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character.  */
bool mips_print_operand_punct[256];

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol.  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_split_hi_p[X] is true if the high parts of symbols of type X
   can be split by mips_split_symbol.  */
bool mips_split_hi_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
/* Index R is the smallest register class that contains register R.  */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  LEA_REGS,	LEA_REGS,	M16_REGS,	V1_REG,
  M16_REGS,	M16_REGS,	M16_REGS,	M16_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  M16_REGS,	M16_REGS,	LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  T_REG,	PIC_FN_ADDR_REG, LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  MD0_REG,	MD1_REG,	NO_REGS,	ST_REGS,
  ST_REGS,	ST_REGS,	ST_REGS,	ST_REGS,
  ST_REGS,	ST_REGS,	ST_REGS,	NO_REGS,
  NO_REGS,	ALL_REGS,	ALL_REGS,	NO_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,
  DSP_ACC_REGS,	DSP_ACC_REGS,	ALL_REGS,	ALL_REGS,
  ALL_REGS,	ALL_REGS,	ALL_REGS,	ALL_REGS
};
/* The value of TARGET_ATTRIBUTE_TABLE.  */
const struct attribute_spec mips_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "long_call",   0, 0, false, true,  true,  NULL },
  { "far",         0, 0, false, true,  true,  NULL },
  { "near",        0, 0, false, true,  true,  NULL },
  /* We would really like to treat "mips16" and "nomips16" as type
     attributes, but GCC doesn't provide the hooks we need to support
     the right conversion rules.  As declaration attributes, they affect
     code generation but don't carry other semantics.  */
  { "mips16",      0, 0, true,  false, false, NULL },
  { "nomips16",    0, 0, true,  false, false, NULL },
  { NULL,          0, 0, false, false, false, NULL }
};
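
/* Usage sketch (assumed, not from the original file): these attributes are
   attached to function declarations in user code, for example:

       void fast_path (void) __attribute__ ((mips16));
       void slow_path (void) __attribute__ ((nomips16));
       void external_helper (void) __attribute__ ((long_call));

   The handlers above are NULL; the attributes are merely recorded here and
   acted on elsewhere in this file (e.g. mips_use_mips16_mode_p and
   mips_far_type_p).  */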
/* A table describing all the processors GCC knows about.  Names are
   matched in the order listed.  The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order
   as GAS's mips_cpu_info_table.  Please also make sure that
   MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
   options correctly.  */
static const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs.  */
  { "mips1", PROCESSOR_R3000, 1, 0 },
  { "mips2", PROCESSOR_R6000, 2, 0 },
  { "mips3", PROCESSOR_R4000, 3, 0 },
  { "mips4", PROCESSOR_R8000, 4, 0 },
  /* Prefer not to use branch-likely instructions for generic MIPS32rX
     and MIPS64rX code.  The instructions were officially deprecated
     in revisions 2 and earlier, but revision 3 is likely to downgrade
     that to a recommendation to avoid the instructions in code that
     isn't tuned to a specific processor.  */
  { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
  { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
  { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
  /* ??? For now just tune the generic MIPS64r2 for 5KC as well.  */
  { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },

  /* MIPS I processors.  */
  { "r3000", PROCESSOR_R3000, 1, 0 },
  { "r2000", PROCESSOR_R3000, 1, 0 },
  { "r3900", PROCESSOR_R3900, 1, 0 },

  /* MIPS II processors.  */
  { "r6000", PROCESSOR_R6000, 2, 0 },

  /* MIPS III processors.  */
  { "r4000", PROCESSOR_R4000, 3, 0 },
  { "vr4100", PROCESSOR_R4100, 3, 0 },
  { "vr4111", PROCESSOR_R4111, 3, 0 },
  { "vr4120", PROCESSOR_R4120, 3, 0 },
  { "vr4130", PROCESSOR_R4130, 3, 0 },
  { "vr4300", PROCESSOR_R4300, 3, 0 },
  { "r4400", PROCESSOR_R4000, 3, 0 },
  { "r4600", PROCESSOR_R4600, 3, 0 },
  { "orion", PROCESSOR_R4600, 3, 0 },
  { "r4650", PROCESSOR_R4650, 3, 0 },
  /* ST Loongson 2E/2F processors.  */
  { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
  { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },

  /* MIPS IV processors. */
  { "r8000", PROCESSOR_R8000, 4, 0 },
  { "vr5000", PROCESSOR_R5000, 4, 0 },
  { "vr5400", PROCESSOR_R5400, 4, 0 },
  { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
  { "rm7000", PROCESSOR_R7000, 4, 0 },
  { "rm9000", PROCESSOR_R9000, 4, 0 },

  /* MIPS32 processors.  */
  { "4kc", PROCESSOR_4KC, 32, 0 },
  { "4km", PROCESSOR_4KC, 32, 0 },
  { "4kp", PROCESSOR_4KP, 32, 0 },
  { "4ksc", PROCESSOR_4KC, 32, 0 },

  /* MIPS32 Release 2 processors.  */
  { "m4k", PROCESSOR_M4K, 33, 0 },
  { "4kec", PROCESSOR_4KC, 33, 0 },
  { "4kem", PROCESSOR_4KC, 33, 0 },
  { "4kep", PROCESSOR_4KP, 33, 0 },
  { "4ksd", PROCESSOR_4KC, 33, 0 },

  { "24kc", PROCESSOR_24KC, 33, 0 },
  { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kx", PROCESSOR_24KF1_1, 33, 0 },

  { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP.  */
  { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kex", PROCESSOR_24KF1_1, 33, 0 },

  { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP.  */
  { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "34kx", PROCESSOR_24KF1_1, 33, 0 },

  { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2.  */
  { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
  { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },

  /* MIPS64 processors.  */
  { "5kc", PROCESSOR_5KC, 64, 0 },
  { "5kf", PROCESSOR_5KF, 64, 0 },
  { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
  { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
  { "xlr", PROCESSOR_XLR, 64, 0 },

  /* MIPS64 Release 2 processors.  */
  { "octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY }
};
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                      2, /* branch_cost */                   \
                      4  /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */
/* Costs to use when optimizing for size.  */
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult_sf */
  COSTS_N_INSNS (1),            /* fp_mult_df */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1),            /* fp_div_df */
  COSTS_N_INSNS (1),            /* int_mult_si */
  COSTS_N_INSNS (1),            /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_div_si */
  COSTS_N_INSNS (1),            /* int_div_di */
  2,                            /* branch_cost */
  4                             /* memory_latency */
};
/* Costs to use when optimizing for speed, indexed by processor.  */
static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
    COSTS_N_INSNS (2),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (12),           /* fp_div_sf */
    COSTS_N_INSNS (19),           /* fp_div_df */
    COSTS_N_INSNS (12),           /* int_mult_si */
    COSTS_N_INSNS (12),           /* int_mult_di */
    COSTS_N_INSNS (35),           /* int_div_si */
    COSTS_N_INSNS (35),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (6),            /* int_mult_si */
    COSTS_N_INSNS (6),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (36),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (36),           /* int_mult_si */
    COSTS_N_INSNS (36),           /* int_mult_di */
    COSTS_N_INSNS (37),           /* int_div_si */
    COSTS_N_INSNS (37),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (11),           /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (11),           /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (7),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (8),            /* fp_add */
    COSTS_N_INSNS (8),            /* fp_mult_sf */
    COSTS_N_INSNS (10),           /* fp_mult_df */
    COSTS_N_INSNS (34),           /* fp_div_sf */
    COSTS_N_INSNS (64),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (8),            /* fp_add */
    COSTS_N_INSNS (8),            /* fp_mult_sf */
    COSTS_N_INSNS (10),           /* fp_mult_df */
    COSTS_N_INSNS (34),           /* fp_div_sf */
    COSTS_N_INSNS (64),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (6),            /* fp_mult_sf */
    COSTS_N_INSNS (7),            /* fp_mult_df */
    COSTS_N_INSNS (25),           /* fp_div_sf */
    COSTS_N_INSNS (48),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (72),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (2),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (12),           /* fp_div_sf */
    COSTS_N_INSNS (19),           /* fp_div_df */
    COSTS_N_INSNS (2),            /* int_mult_si */
    COSTS_N_INSNS (2),            /* int_mult_di */
    COSTS_N_INSNS (35),           /* int_div_si */
    COSTS_N_INSNS (35),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (3),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (15),           /* fp_div_sf */
    COSTS_N_INSNS (16),           /* fp_div_df */
    COSTS_N_INSNS (17),           /* int_mult_si */
    COSTS_N_INSNS (17),           /* int_mult_di */
    COSTS_N_INSNS (38),           /* int_div_si */
    COSTS_N_INSNS (38),           /* int_div_di */
    6                             /* memory_latency */

    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (10),           /* int_mult_si */
    COSTS_N_INSNS (10),           /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    6                             /* memory_latency */

    /* The only costs that appear to be updated here are
       integer multiplication.  */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (6),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (36),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (30),           /* fp_div_sf */
    COSTS_N_INSNS (59),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (74),           /* int_div_di */
    4                             /* memory_latency */

    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (30),           /* fp_div_sf */
    COSTS_N_INSNS (59),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (9),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (74),           /* int_div_di */
    4                             /* memory_latency */

    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (9),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    4                             /* memory_latency */

    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (8),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */

    /* These costs are the same as the SB-1A below.  */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (4),            /* fp_mult_df */
    COSTS_N_INSNS (24),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */

    /* These costs are the same as the SB-1 above.  */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (4),            /* fp_mult_df */
    COSTS_N_INSNS (24),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */

    /* Need to replace first five with the costs of calling the appropriate
       libgcc routine.  */
    COSTS_N_INSNS (256),          /* fp_add */
    COSTS_N_INSNS (256),          /* fp_mult_sf */
    COSTS_N_INSNS (256),          /* fp_mult_df */
    COSTS_N_INSNS (256),          /* fp_div_sf */
    COSTS_N_INSNS (256),          /* fp_div_df */
    COSTS_N_INSNS (8),            /* int_mult_si */
    COSTS_N_INSNS (8),            /* int_mult_di */
    COSTS_N_INSNS (72),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
   for -mflip_mips16.  It maps decl names onto a boolean mode setting.  */
struct mflip_mips16_entry GTY (()) {
  const char *name;
  bool mips16_p;
};
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;

/* Hash table callbacks for mflip_mips16_htab.  */

static hashval_t
mflip_mips16_htab_hash (const void *entry)
{
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
}

static int
mflip_mips16_htab_eq (const void *entry, const void *name)
{
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
		 (const char *) name) == 0;
}

/* True if -mflip-mips16 should next add an attribute for the default MIPS16
   mode, false if it should next add an attribute for the opposite mode.  */
static GTY(()) bool mips16_flipper;
/* DECL is a function that needs a default "mips16" or "nomips16" attribute
   for -mflip-mips16.  Return true if it should use "mips16" and false if
   it should use "nomips16".  */

static bool
mflip_mips16_use_mips16_p (tree decl)
{
  struct mflip_mips16_entry *entry;
  const char *name;
  hashval_t hash;
  void **slot;

  /* Use the opposite of the command-line setting for anonymous decls.  */
  if (!DECL_NAME (decl))
    return !mips_base_mips16;

  if (!mflip_mips16_htab)
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
					 mflip_mips16_htab_eq, NULL);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));
  hash = htab_hash_string (name);
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
  entry = (struct mflip_mips16_entry *) *slot;
  if (!entry)
    {
      mips16_flipper = !mips16_flipper;
      entry = GGC_NEW (struct mflip_mips16_entry);
      entry->name = name;
      entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
      *slot = entry;
    }
  return entry->mips16_p;
}
/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE.  */

static bool
mips_near_type_p (const_tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (const_tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
	  || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}

/* Similar predicates for "mips16"/"nomips16" function attributes.  */

static bool
mips_mips16_decl_p (const_tree decl)
{
  return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
}

static bool
mips_nomips16_decl_p (const_tree decl)
{
  return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
}

/* Return true if function DECL is a MIPS16 function.  Return the ambient
   setting if DECL is null.  */

static bool
mips_use_mips16_mode_p (tree decl)
{
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
	 parent and must therefore use the same ISA mode.  */
      tree parent = decl_function_context (decl);
      if (parent)
	decl = parent;
      if (mips_mips16_decl_p (decl))
	return true;
      if (mips_nomips16_decl_p (decl))
	return false;
    }
  return mips_base_mips16;
}
/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */

static int
mips_comp_type_attributes (const_tree type1, const_tree type2)
{
  /* Disallow mixed near/far attributes.  */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;
  return 1;
}
/* Implement TARGET_INSERT_ATTRIBUTES.  */

static void
mips_insert_attributes (tree decl, tree *attributes)
{
  const char *name;
  bool mips16_p, nomips16_p;

  /* Check for "mips16" and "nomips16" attributes.  */
  mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
  nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      if (mips16_p)
	error ("%qs attribute only applies to functions", "mips16");
      if (nomips16_p)
	error ("%qs attribute only applies to functions", "nomips16");
    }
  else
    {
      mips16_p |= mips_mips16_decl_p (decl);
      nomips16_p |= mips_nomips16_decl_p (decl);
      if (mips16_p || nomips16_p)
	{
	  /* DECL cannot be simultaneously "mips16" and "nomips16".  */
	  if (mips16_p && nomips16_p)
	    error ("%qs cannot have both %<mips16%> and "
		   "%<nomips16%> attributes",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}
      else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
	{
	  /* Implement -mflip-mips16.  If DECL has neither a "nomips16" nor a
	     "mips16" attribute, arbitrarily pick one.  We must pick the same
	     setting for duplicate declarations of a function.  */
	  name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
	}
    }
}
/* Implement TARGET_MERGE_DECL_ATTRIBUTES.  */

static tree
mips_merge_decl_attributes (tree olddecl, tree newdecl)
{
  /* The decls' "mips16" and "nomips16" attributes must match exactly.  */
  if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
	   IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
  if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
	   IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");

  return merge_attributes (DECL_ATTRIBUTES (olddecl),
			   DECL_ATTRIBUTES (newdecl));
}
/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */

static void
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      *base_ptr = x;
      *offset_ptr = 0;
    }
}
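
/* Worked example (illustrative): given (plus (reg $4) (const_int 16)),
   mips_split_plus stores the register in *BASE_PTR and 16 in *OFFSET_PTR;
   given a bare (reg $4), it stores the register and an offset of 0.  */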
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);

/* A subroutine of mips_build_integer, with the same interface.
   Assume that the final action in the sequence should be a left shift.  */

static unsigned int
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
{
  unsigned int i, shift;

  /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
     since signed numbers are easier to load than unsigned ones.  */
  shift = 0;
  while ((value & 1) == 0)
    value /= 2, shift++;

  i = mips_build_integer (codes, value);
  codes[i].code = ASHIFT;
  codes[i].value = shift;
  return i + 1;
}
/* As for mips_build_shift, but assume that the final action will be
   an IOR or PLUS operation.  */

static unsigned int
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
{
  unsigned HOST_WIDE_INT high;
  unsigned int i;

  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
    {
      /* The constant is too complex to load with a simple LUI/ORI pair,
	 so we want to give the recursive call as many trailing zeros as
	 possible.  In this case, we know bit 16 is set and that the
	 low 16 bits form a negative number.  If we subtract that number
	 from VALUE, we will clear at least the lowest 17 bits, maybe more.  */
      i = mips_build_integer (codes, CONST_HIGH_PART (value));
      codes[i].code = PLUS;
      codes[i].value = CONST_LOW_PART (value);
    }
  else
    {
      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
	 bits gives a value with at least 17 trailing zeros.  */
      i = mips_build_integer (codes, high);
      codes[i].code = IOR;
      codes[i].value = value & 0xffff;
    }
  return i + 1;
}
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static unsigned int
mips_build_integer (struct mips_integer_op *codes,
		    unsigned HOST_WIDE_INT value)
{
  if (SMALL_OPERAND (value)
      || SMALL_OPERAND_UNSIGNED (value)
      || LUI_OPERAND (value))
    {
      /* The value can be loaded with a single instruction.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
    {
      /* Either the constant is a simple LUI/ORI combination or its
	 lowest bit is set.  We don't want to shift in this case.  */
      return mips_build_lower (codes, value);
    }
  else if ((value & 0xffff) == 0)
    {
      /* The constant will need at least three actions.  The lowest
	 16 bits are clear, so the final action will be a shift.  */
      return mips_build_shift (codes, value);
    }
  else
    {
      /* The final action could be a shift, add or inclusive OR.
	 Rather than use a complex condition to select the best
	 approach, try both mips_build_shift and mips_build_lower
	 and pick the one that gives the shortest sequence.
	 Note that this case is only used once per constant.  */
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
      unsigned int cost, alt_cost;

      cost = mips_build_shift (codes, value);
      alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
	{
	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
	  cost = alt_cost;
	}
      return cost;
    }
}
/* Return true if symbols of type TYPE require a GOT access.  */

static bool
mips_got_symbol_type_p (enum mips_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_GOT_PAGE_OFST:
    case SYMBOL_GOT_DISP:
      return true;

    default:
      return false;
    }
}
/* Return true if X is a thread-local symbol.  */

static bool
mips_tls_symbol_p (rtx x)
{
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}
/* Return true if SYMBOL_REF X is associated with a global symbol
   (in the STB_GLOBAL sense).  */

static bool
mips_global_symbol_p (const_rtx x)
{
  const_tree decl = SYMBOL_REF_DECL (x);

  if (!decl)
    return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);

  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
     or weak symbols.  Relocations in the object file will be against
     the target symbol, so it's that symbol's binding that matters here.  */
  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
}
/* Return true if function X is a libgcc MIPS16 stub function.  */

static bool
mips16_stub_function_p (const_rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
}
/* Return true if function X is a locally-defined and locally-binding
   MIPS16 function.  */

static bool
mips16_local_function_p (const_rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_LOCAL_P (x)
	  && !SYMBOL_REF_EXTERNAL_P (x)
	  && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
}
/* Return true if SYMBOL_REF X binds locally.  */

static bool
mips_symbol_binds_local_p (const_rtx x)
{
  return (SYMBOL_REF_DECL (x)
	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	  : SYMBOL_REF_LOCAL_P (x));
}
/* Return true if rtx constants of mode MODE should be put
   into a small data section.  */

static bool
mips_rtx_constant_in_small_data_p (enum machine_mode mode)
{
  return (!TARGET_EMBEDDED_DATA
	  && TARGET_LOCAL_SDATA
	  && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
}
/* Return true if X should not be moved directly into register $25.
   We need this because many versions of GAS will treat "la $25,foo" as
   part of a call sequence and so allow a global "foo" to be lazily bound.  */

bool
mips_dangerous_for_la25_p (rtx x)
{
  return (!TARGET_EXPLICIT_RELOCS
	  && TARGET_USE_GOT
	  && GET_CODE (x) == SYMBOL_REF
	  && mips_global_symbol_p (x));
}
/* Return true if calls to X might need $25 to be valid on entry.  */

bool
mips_use_pic_fn_addr_reg_p (const_rtx x)
{
  if (!TARGET_USE_PIC_FN_ADDR_REG)
    return false;

  /* MIPS16 stub functions are guaranteed not to use $25.  */
  if (mips16_stub_function_p (x))
    return false;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      /* If PLTs and copy relocations are available, the static linker
	 will make sure that $25 is valid on entry to the target function.  */
      if (TARGET_ABICALLS_PIC0)
	return false;

      /* Locally-defined functions use absolute accesses to set up
	 the global pointer.  */
      if (TARGET_ABSOLUTE_ABICALLS
	  && mips_symbol_binds_local_p (x)
	  && !SYMBOL_REF_EXTERNAL_P (x))
	return false;
    }

  return true;
}
/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X in context CONTEXT.  */

static enum mips_symbol_type
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
{
  if (TARGET_RTP_PIC)
    return SYMBOL_GOT_DISP;

  if (GET_CODE (x) == LABEL_REF)
    {
      /* LABEL_REFs are used for jump tables as well as text labels.
	 Only return SYMBOL_PC_RELATIVE if we know the label is in
	 the text section.  */
      if (TARGET_MIPS16_SHORT_JUMP_TABLES)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_PAGE_OFST;

      return SYMBOL_ABSOLUTE;
    }

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (SYMBOL_REF_TLS_MODEL (x))
    return SYMBOL_TLS;

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (TARGET_MIPS16_TEXT_LOADS)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
	return SYMBOL_PC_RELATIVE;

      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
	return SYMBOL_GP_RELATIVE;
    }

  /* Do not use small-data accesses for weak symbols; they may end up
     being zero.  */
  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
    return SYMBOL_GP_RELATIVE;

  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
     is in effect.  */
  if (TARGET_ABICALLS_PIC2
      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
    {
      /* There are three cases to consider:

	    - o32 PIC (either with or without explicit relocs)
	    - n32/n64 PIC without explicit relocs
	    - n32/n64 PIC with explicit relocs

	 In the first case, both local and global accesses will use an
	 R_MIPS_GOT16 relocation.  We must correctly predict which of
	 the two semantics (local or global) the assembler and linker
	 will apply.  The choice depends on the symbol's binding rather
	 than its visibility.

	 In the second case, the assembler will not use R_MIPS_GOT16
	 relocations, but it chooses between local and global accesses
	 in the same way as for o32 PIC.

	 In the third case we have more freedom since both forms of
	 access will work for any kind of symbol.  However, there seems
	 little point in doing things differently.  */
      if (mips_global_symbol_p (x))
	return SYMBOL_GOT_DISP;

      return SYMBOL_GOT_PAGE_OFST;
    }

  if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
    return SYMBOL_FORCE_TO_MEM;

  return SYMBOL_ABSOLUTE;
}
/* Classify the base of symbolic expression X, given that X appears in
   context CONTEXT.  */

static enum mips_symbol_type
mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return mips_classify_symbol (x, context);
}
/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
   is the alignment in bytes of SYMBOL_REF X.  */

static bool
mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
{
  HOST_WIDE_INT align;

  align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
  return IN_RANGE (offset, 0, align - 1);
}
/* Return true if X is a symbolic constant that can be used in context
   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */

bool
mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
			  enum mips_symbol_type *symbol_type)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    {
      *symbol_type = mips_classify_symbol (x, context);
      if (*symbol_type == SYMBOL_TLS)
	return false;
    }
  else
    return false;

  if (offset == const0_rtx)
    return true;

  /* Check whether a nonzero offset is valid for the underlying
     relocations.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_FORCE_TO_MEM:
    case SYMBOL_32_HIGH:
    case SYMBOL_64_HIGH:
      /* If the target has 64-bit pointers and the object file only
	 supports 32-bit symbols, the values of those symbols will be
	 sign-extended.  In this case we can't allow an arbitrary offset
	 in case the 32-bit value X + OFFSET has a different sign from X.  */
      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
	return offset_within_block_p (x, INTVAL (offset));

      /* In other cases the relocations can handle any offset.  */
      return true;

    case SYMBOL_PC_RELATIVE:
      /* Allow constant pool references to be converted to LABEL+CONSTANT.
	 In this case, we no longer have access to the underlying constant,
	 but the original symbol-based access was known to be valid.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;

      /* Fall through.  */

    case SYMBOL_GP_RELATIVE:
      /* Make sure that the offset refers to something within the
	 same object block.  This should guarantee that the final
	 PC- or GP-relative offset is within the 16-bit limit.  */
      return offset_within_block_p (x, INTVAL (offset));

    case SYMBOL_GOT_PAGE_OFST:
    case SYMBOL_GOTOFF_PAGE:
      /* If the symbol is global, the GOT entry will contain the symbol's
	 address, and we will apply a 16-bit offset after loading it.
	 If the symbol is local, the linker should provide enough local
	 GOT entries for a 16-bit offset, but larger offsets may lead
	 to GOT overflow.  */
      return SMALL_INT (offset);

    case SYMBOL_DTPREL:
    case SYMBOL_TPREL:
      /* There is no carry between the HI and LO REL relocations, so the
	 offset is only valid if we know it won't lead to such a carry.  */
      return mips_offset_within_alignment_p (x, INTVAL (offset));

    case SYMBOL_GOT_DISP:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TLS:
    case SYMBOL_HALF:
      return false;
    }
  gcc_unreachable ();
}
/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
   single instruction.  We rely on the fact that, in the worst case,
   all instructions involved in a MIPS16 address calculation are usually
   extended ones.  */

static int
mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
{
  switch (type)
    {
    case SYMBOL_ABSOLUTE:
      /* When using 64-bit symbols, we need 5 preparatory instructions,
	 such as:

	     lui     $at,%highest(symbol)
	     daddiu  $at,$at,%higher(symbol)
	     dsll    $at,$at,16
	     daddiu  $at,$at,%hi(symbol)
	     dsll    $at,$at,16

	 The final address is then $at + %lo(symbol).  With 32-bit
	 symbols we just need a preparatory LUI for normal mode and
	 a preparatory LI and SLL for MIPS16.  */
      return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_GP_RELATIVE:
      /* Treat GP-relative accesses as taking a single instruction on
	 MIPS16 too; the copy of $gp can often be shared.  */
      return 1;

    case SYMBOL_PC_RELATIVE:
      /* PC-relative constants can be only be used with ADDIUPC,
	 DADDIUPC, LWPC and LDPC.  */
      if (mode == MAX_MACHINE_MODE
	  || GET_MODE_SIZE (mode) == 4
	  || GET_MODE_SIZE (mode) == 8)
	return 1;

      /* The constant must be loaded using ADDIUPC or DADDIUPC first.  */
      return 0;

    case SYMBOL_FORCE_TO_MEM:
      /* LEAs will be converted into constant-pool references by
	 mips_reorg.  */
      if (mode == MAX_MACHINE_MODE)
	return 1;

      /* The constant must be loaded and then dereferenced.  */
      return 0;

    case SYMBOL_GOT_DISP:
      /* The constant will have to be loaded from the GOT before it
	 is used in an address.  */
      if (mode != MAX_MACHINE_MODE)
	return 0;

      /* Fall through.  */

    case SYMBOL_GOT_PAGE_OFST:
      /* Unless -funit-at-a-time is in effect, we can't be sure whether the
	 local/global classification is accurate.  The worst cases are:

	 (1) For local symbols when generating o32 or o64 code.  The assembler
	     will use:

		 lw	     $at,%got(symbol)
		 nop

	     ...and the final address will be $at + %lo(symbol).

	 (2) For global symbols when -mxgot.  The assembler will use:

		 lui     $at,%got_hi(symbol)
		 (d)addu $at,$at,$gp

	     ...and the final address will be $at + %got_lo(symbol).  */
      return 3;

    case SYMBOL_GOTOFF_PAGE:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_32_HIGH:
    case SYMBOL_64_HIGH:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_DTPREL:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TPREL:
    case SYMBOL_HALF:
      /* A 16-bit constant formed by a single relocation, or a 32-bit
	 constant formed from a high 16-bit relocation and a low 16-bit
	 relocation.  Use mips_split_p to determine which.  32-bit
	 constants need an "lui; addiu" sequence for normal mode and
	 an "li; sll; addiu" sequence for MIPS16 mode.  */
      return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_TLS:
      /* We don't treat a bare TLS symbol as a constant.  */
      return 0;
    }
  gcc_unreachable ();
}
/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
   to load symbols of type TYPE into a register.  Return 0 if the given
   type of symbol cannot be used as an immediate operand.

   Otherwise, return the number of instructions needed to load or store
   values of mode MODE to or from addresses of type TYPE.  Return 0 if
   the given type of symbol is not valid in addresses.

   In both cases, treat extended MIPS16 instructions as two instructions.  */

static int
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
{
  return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}
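
/* Illustrative figures (derived from the cases above, not stated in the
   original comments): loading a SYMBOL_ABSOLUTE address as an immediate
   (MODE == MAX_MACHINE_MODE) costs 2 instructions for 32-bit symbols in
   normal mode, 6 for MIPS16 (3 unextended-equivalents, doubled here), and
   6 when 64-bit symbols are in use.  */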
/* A for_each_rtx callback.  Stop the search if *X references a
   thread-local symbol.  */

static int
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return mips_tls_symbol_p (*x);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
mips_cannot_force_const_mem (rtx x)
{
  enum mips_symbol_type type;
  rtx base, offset;

  /* There is no assembler syntax for expressing an address-sized
     high part.  */
  if (GET_CODE (x) == HIGH)
    return true;

  /* As an optimization, reject constants that mips_legitimize_move
     can expand inline.

     Suppose we have a multi-instruction sequence that loads constant C
     into register R.  If R does not get allocated a hard register, and
     R is used in an operand that allows both registers and memory
     references, reload will consider forcing C into memory and using
     one of the instruction's memory alternatives.  Returning false
     here will force it to use an input reload instead.  */
  if (GET_CODE (x) == CONST_INT && LEGITIMATE_CONSTANT_P (x))
    return true;

  split_const (x, &base, &offset);
  if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
      && type != SYMBOL_FORCE_TO_MEM)
    {
      /* The same optimization as for CONST_INT.  */
      if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
	return true;

      /* If MIPS16 constant pools live in the text section, they should
	 not refer to anything that might need run-time relocation.  */
      if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
	return true;
    }

  /* TLS symbols must be computed by mips_legitimize_move.  */
  if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
    return true;

  return false;
}
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
   constants when we're using a per-function constant pool.  */

static bool
mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
				const_rtx x ATTRIBUTE_UNUSED)
{
  return !TARGET_MIPS16_PCREL_LOADS;
}
/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

int
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
			       bool strict_p)
{
  if (!HARD_REGISTER_NUM_P (regno))
    {
      if (!strict_p)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  /* In MIPS16 mode, the stack pointer can only address word and doubleword
     values, nothing smaller.  There are two problems here:

       (a) Instantiating virtual registers can introduce new uses of the
	   stack pointer.  If these virtual registers are valid addresses,
	   the stack pointer should be too.

       (b) Most uses of the stack pointer are not made explicit until
	   FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
	   We don't know until that stage whether we'll be eliminating to the
	   stack pointer (which needs the restriction) or the hard frame
	   pointer (which doesn't).

     All in all, it seems more consistent to only enforce this restriction
     during and after reload.  */
  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
    return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;

  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
}
1915 /* Return true if X is a valid base register for mode MODE.
1916 STRICT_P is true if REG_OK_STRICT is in effect. */
1919 mips_valid_base_register_p (rtx x
, enum machine_mode mode
, bool strict_p
)
1921 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
1925 && mips_regno_mode_ok_for_base_p (REGNO (x
), mode
, strict_p
));
1928 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1929 can address a value of mode MODE. */
1932 mips_valid_offset_p (rtx x
, enum machine_mode mode
)
1934 /* Check that X is a signed 16-bit number. */
1935 if (!const_arith_operand (x
, Pmode
))
1938 /* We may need to split multiword moves, so make sure that every word
1940 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1941 && !SMALL_OPERAND (INTVAL (x
) + GET_MODE_SIZE (mode
) - UNITS_PER_WORD
))
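/* Worked example (assuming a 32-bit target, where UNITS_PER_WORD == 4):
   a DImode offset of 0x7ff8 is accepted, because the second word sits at
   0x7ff8 + 8 - 4 == 0x7ffc, still a SMALL_OPERAND; an offset of 0x7ffc is
   rejected, because its second word would need offset 0x8000.  */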
/* Return true if a LO_SUM can address a value of mode MODE when the
   LO_SUM symbol has type SYMBOL_TYPE.  */

static bool
mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
{
  /* Check that symbols of type SYMBOL_TYPE can be used to access values
     of mode MODE.  */
  if (mips_symbol_insns (symbol_type, mode) == 0)
    return false;

  /* Check that there is a known low-part relocation.  */
  if (mips_lo_relocs[symbol_type] == NULL)
    return false;

  /* We may need to split multiword moves, so make sure that each word
     can be accessed without inducing a carry.  This is mainly needed
     for o64, which has historically only guaranteed 64-bit alignment
     for 128-bit types.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
    return false;

  return true;
}

/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
mips_classify_address (struct mips_address_info *info, rtx x,
		       enum machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return mips_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
	      && mips_valid_offset_p (info->offset, mode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The MIPS
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of _gp.  */
      info->symbol_type
	= mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
	      && mips_valid_lo_sum_p (info->symbol_type, mode));

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if $0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return !TARGET_MIPS16 && SMALL_INT (x);

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      info->type = ADDRESS_SYMBOLIC;
      return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
					&info->symbol_type)
	      && mips_symbol_insns (info->symbol_type, mode) > 0
	      && !mips_split_p[info->symbol_type]);

    default:
      return false;
    }
}
/* Return true if X is a legitimate address for a memory operand of mode
   MODE.  STRICT_P is true if REG_OK_STRICT is in effect.  */

bool
mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
{
  struct mips_address_info addr;

  return mips_classify_address (&addr, x, mode, strict_p);
}

/* Return true if X is a legitimate $sp-based address for mode MODE.  */

bool
mips_stack_address_p (rtx x, enum machine_mode mode)
{
  struct mips_address_info addr;

  return (mips_classify_address (&addr, x, mode, false)
	  && addr.type == ADDRESS_REG
	  && addr.reg == stack_pointer_rtx);
}

/* Return true if ADDR matches the pattern for the LWXS load scaled indexed
   address instruction.  Note that such addresses are not considered
   legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
   is so restricted.  */

static bool
mips_lwxs_address_p (rtx addr)
{
  if (ISA_HAS_LWXS
      && GET_CODE (addr) == PLUS
      && REG_P (XEXP (addr, 1)))
    {
      rtx offset = XEXP (addr, 0);
      if (GET_CODE (offset) == MULT
	  && REG_P (XEXP (offset, 0))
	  && GET_CODE (XEXP (offset, 1)) == CONST_INT
	  && INTVAL (XEXP (offset, 1)) == 4)
	return true;
    }
  return false;
}

/* Return true if a value at OFFSET bytes from base register BASE can be
   accessed using an unextended MIPS16 instruction.  MODE is the mode of
   the value.

   Usually the offset in an unextended instruction is a 5-bit field.
   The offset is unsigned and shifted left once for LH and SH, twice
   for LW and SW, and so on.  An exception is LWSP and SWSP, which have
   an 8-bit immediate field that's shifted left twice.  */

static bool
mips16_unextended_reference_p (enum machine_mode mode, rtx base,
			       unsigned HOST_WIDE_INT offset)
{
  if (offset % GET_MODE_SIZE (mode) == 0)
    {
      if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
	return offset < 256U * GET_MODE_SIZE (mode);
      return offset < 32U * GET_MODE_SIZE (mode);
    }
  return false;
}
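/* For example: with a non-$sp base, unextended SImode references cover
   offsets 0, 4, ..., 124 (32 * 4 bytes); with $sp as the base, LWSP/SWSP
   extend that to 0 ... 1020 (256 * 4 bytes).  HImode references cover
   0, 2, ..., 62 regardless of the base register.  */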
/* Return the number of instructions needed to load or store a value
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.

   For MIPS16 code, count extended instructions as two instructions.  */

int
mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
{
  struct mips_address_info addr;
  int factor;

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  (GET_MODE_SIZE (BLKmode) is pretty
     meaningless, so we have to single it out as a special case one way
     or the other.)  */
  if (mode != BLKmode && might_split_p)
    factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  else
    factor = 1;

  if (mips_classify_address (&addr, x, mode, false))
    switch (addr.type)
      {
      case ADDRESS_REG:
	if (TARGET_MIPS16
	    && !mips16_unextended_reference_p (mode, addr.reg,
					       UINTVAL (addr.offset)))
	  return factor * 2;
	return factor;

      case ADDRESS_LO_SUM:
	return TARGET_MIPS16 ? factor * 2 : factor;

      case ADDRESS_CONST_INT:
	return factor;

      case ADDRESS_SYMBOLIC:
	return factor * mips_symbol_insns (addr.symbol_type, mode);
      }
  return 0;
}
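/* For example (assuming a 32-bit target): a DImode access that might be
   split has FACTOR == 2, so a reachable REG+OFFSET address costs 2
   instructions in normal mode, or 4 in MIPS16 mode when the offset needs
   extended loads and stores.  */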
/* Return the number of instructions needed to load constant X.
   Return 0 if X isn't a valid constant.  */

int
mips_const_insns (rtx x)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum mips_symbol_type symbol_type;
  rtx offset;

  switch (GET_CODE (x))
    {
    case HIGH:
      if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
				     &symbol_type)
	  || !mips_split_p[symbol_type])
	return 0;

      /* This is simply an LUI for normal mode.  It is an extended
	 LI followed by an extended SLL for MIPS16.  */
      return TARGET_MIPS16 ? 4 : 1;

    case CONST_INT:
      if (TARGET_MIPS16)
	/* Unsigned 8-bit constants can be loaded using an unextended
	   LI instruction.  Unsigned 16-bit constants can be loaded
	   using an extended LI.  Negative constants must be loaded
	   using LI and then negated.  */
	return (IN_RANGE (INTVAL (x), 0, 255) ? 1
		: SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
		: IN_RANGE (-INTVAL (x), 0, 255) ? 2
		: SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
		: 0);

      return mips_build_integer (codes, INTVAL (x));

    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* Allow zeros for normal mode, where we can use $0.  */
      return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

    case CONST:
      if (CONST_GP_P (x))
	return 1;

      /* See if we can refer to X directly.  */
      if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
	return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);

      /* Otherwise try splitting the constant into a base and offset.
	 If the offset is a 16-bit value, we can load the base address
	 into a register and then use (D)ADDIU to add in the offset.
	 If the offset is larger, we can load the base and offset
	 into separate registers and add them together with (D)ADDU.
	 However, the latter is only possible before reload; during
	 and after reload, we must have the option of forcing the
	 constant into the pool instead.  */
      split_const (x, &x, &offset);
      if (offset != 0)
	{
	  int n = mips_const_insns (x);
	  if (n != 0)
	    {
	      if (SMALL_INT (offset))
		return n + 1;
	      else if (!targetm.cannot_force_const_mem (x))
		return n + 1 + mips_build_integer (codes, INTVAL (offset));
	    }
	}
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
				MAX_MACHINE_MODE);

    default:
      return 0;
    }
}
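/* MIPS16 examples of the CONST_INT case above: 100 costs 1 (unextended LI),
   40000 costs 2 (extended LI), -100 costs 2 (LI then negate), -40000 costs 3,
   and anything wider returns 0, meaning the constant must come from the
   constant pool instead.  */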
/* X is a doubleword constant that can be handled by splitting it into
   two words and loading each word separately.  Return the number of
   instructions required to do this.  */

int
mips_split_const_insns (rtx x)
{
  unsigned int low, high;

  low = mips_const_insns (mips_subword (x, false));
  high = mips_const_insns (mips_subword (x, true));
  gcc_assert (low > 0 && high > 0);
  return low + high;
}
/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.  Count extended
   MIPS16 instructions as two instructions.  */

int
mips_load_store_insns (rtx mem, rtx insn)
{
  enum machine_mode mode;
  bool might_split_p;
  rtx set;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = true;
  if (GET_MODE_BITSIZE (mode) == 64)
    {
      set = single_set (insn);
      if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
	might_split_p = false;
    }

  return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
}

/* Return the number of instructions needed for an integer division.  */

int
mips_idiv_insns (void)
{
  int count;

  count = 1;
  if (TARGET_CHECK_ZERO_DIV)
    {
      if (GENERATE_DIVIDE_TRAPS)
	count++;
      else
	count += 2;
    }

  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
    count++;
  return count;
}
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */

rtx
mips_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
	  ? emit_move_insn (dest, src)
	  : emit_move_insn_1 (dest, src));
}

/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */

static void
mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
{
  emit_insn (gen_rtx_SET (VOIDmode, target,
			  gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
}

/* Compute (CODE OP0 OP1) and store the result in a new register
   of mode MODE.  Return that new register.  */

static rtx
mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
{
  rtx reg;

  reg = gen_reg_rtx (mode);
  mips_emit_binary (code, reg, op0, op1);
  return reg;
}
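/* Typical use (illustrative): mips_force_binary (SImode, XOR, a, b) emits
   (set (reg:SI N) (xor:SI a b)) into the instruction stream and returns the
   new pseudo, so callers can chain further operations off the result.  */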
/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.  */

static rtx
mips_force_temporary (rtx dest, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (Pmode, value);
  else
    {
      mips_emit_move (dest, value);
      return dest;
    }
}
/* Emit a call sequence with call pattern PATTERN and return the call
   instruction itself (which is not necessarily the last instruction
   emitted).  ORIG_ADDR is the original, unlegitimized address,
   ADDR is the legitimized form, and LAZY_P is true if the call
   address is lazily-bound.  */

static rtx
mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
{
  rtx insn, reg;

  insn = emit_call_insn (pattern);

  if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
    {
      /* MIPS16 JALRs only take MIPS16 registers.  If the target
	 function requires $25 to be valid on entry, we must copy it
	 there separately.  The move instruction can be put in the
	 call's delay slot.  */
      reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn_before (gen_move_insn (reg, addr), insn);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
    }

  if (lazy_p)
    /* Lazy-binding stubs require $gp to be valid on entry.  */
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);

  if (TARGET_USE_GOT)
    {
      /* See the comment above load_call<mode> for details.  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
	       gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
      emit_insn (gen_update_got_version ());
    }
  return insn;
}
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.  */

static rtx
mips_unspec_address_offset (rtx base, rtx offset,
			    enum mips_symbol_type symbol_type)
{
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
			 UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}
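/* Illustrative shape of the result ("foo" is a made-up symbol name):
   wrapping it with a 4-byte offset yields
     (const (plus (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + TYPE)
		  (const_int 4)))
   and the PLUS is omitted when OFFSET is const0_rtx.  */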
/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

rtx
mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
{
  rtx base, offset;

  split_const (address, &base, &offset);
  return mips_unspec_address_offset (base, offset, symbol_type);
}

/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for mips_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
			 enum mips_symbol_type symbol_type)
{
  if (mips_split_p[symbol_type])
    {
      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
      addr = mips_force_temporary (temp, addr);
      base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
    }
  return base;
}
/* Return an instruction that copies $gp into register REG.  We want
   GCC to treat the register's value as constant, so that its value
   can be rematerialized on demand.  */

static rtx
gen_load_const_gp (rtx reg)
{
  return (Pmode == SImode
	  ? gen_load_const_gp_si (reg)
	  : gen_load_const_gp_di (reg));
}
/* Return a pseudo register that contains the value of $gp throughout
   the current function.  Such registers are needed by MIPS16 functions,
   for which $gp itself is not a valid base register or addition operand.  */

static rtx
mips16_gp_pseudo_reg (void)
{
  if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
    cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);

  /* Don't emit an instruction to initialize the pseudo register if
     we are being called from the tree optimizers' cost-calculation
     routines.  */
  if (!cfun->machine->initialized_mips16_gp_pseudo_p
      && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
    {
      rtx insn, scan;

      push_topmost_sequence ();

      scan = get_insns ();
      while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
	scan = NEXT_INSN (scan);

      insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
      emit_insn_after (insn, scan);

      pop_topmost_sequence ();

      cfun->machine->initialized_mips16_gp_pseudo_p = true;
    }

  return cfun->machine->mips16_gp_pseudo_rtx;
}

/* Return a base register that holds pic_offset_table_rtx.
   TEMP, if nonnull, is a scratch Pmode base register.  */

static rtx
mips_pic_base_register (rtx temp)
{
  if (!TARGET_MIPS16)
    return pic_offset_table_rtx;

  if (can_create_pseudo_p ())
    return mips16_gp_pseudo_reg ();

  if (TARGET_USE_GOT)
    /* The first post-reload split exposes all references to $gp
       (both uses and definitions).  All references must remain
       explicit after that point.

       It is safe to introduce uses of $gp at any time, so for
       simplicity, we do that before the split too.  */
    mips_emit_move (temp, pic_offset_table_rtx);
  else
    emit_insn (gen_load_const_gp (temp));
  return temp;
}
/* Create and return a GOT reference of type TYPE for address ADDR.
   TEMP, if nonnull, is a scratch Pmode base register.  */

static rtx
mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
{
  rtx base, high, lo_sum_symbol;

  base = mips_pic_base_register (temp);

  /* If we used the temporary register to load $gp, we can't use
     it for the high part as well.  */
  if (temp != NULL && reg_overlap_mentioned_p (base, temp))
    temp = NULL;
  high = mips_unspec_offset_high (temp, base, addr, type);
  lo_sum_symbol = mips_unspec_address (addr, type);

  if (type == SYMBOL_GOTOFF_CALL)
    return (Pmode == SImode
	    ? gen_unspec_callsi (high, lo_sum_symbol)
	    : gen_unspec_calldi (high, lo_sum_symbol));
  else
    return (Pmode == SImode
	    ? gen_unspec_gotsi (high, lo_sum_symbol)
	    : gen_unspec_gotdi (high, lo_sum_symbol));
}
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for mips_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimize SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
{
  enum mips_symbol_context context;
  enum mips_symbol_type symbol_type;
  rtx high;

  context = (mode == MAX_MACHINE_MODE
	     ? SYMBOL_CONTEXT_LEA
	     : SYMBOL_CONTEXT_MEM);
  if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
    {
      addr = XEXP (addr, 0);
      if (mips_symbolic_constant_p (addr, context, &symbol_type)
	  && mips_symbol_insns (symbol_type, mode) > 0
	  && mips_split_hi_p[symbol_type])
	{
	  if (low_out)
	    switch (symbol_type)
	      {
	      case SYMBOL_GOT_PAGE_OFST:
		/* The high part of a page/ofst pair is loaded from the GOT.  */
		*low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
		break;

	      default:
		gcc_unreachable ();
	      }
	  return true;
	}
    }

  if (mips_symbolic_constant_p (addr, context, &symbol_type)
      && mips_symbol_insns (symbol_type, mode) > 0
      && mips_split_p[symbol_type])
    {
      if (low_out)
	switch (symbol_type)
	  {
	  case SYMBOL_GOT_DISP:
	    /* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
	    *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
	    break;

	  case SYMBOL_GP_RELATIVE:
	    high = mips_pic_base_register (temp);
	    *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
	    break;

	  default:
	    high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
	    high = mips_force_temporary (temp, high);
	    *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
	    break;
	  }
      return true;
    }

  return false;
}
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   mips_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      if (TARGET_MIPS16)
	{
	  /* Load the full offset into a register so that we can use
	     an unextended instruction for the address itself.  */
	  high = GEN_INT (offset);
	  offset = 0;
	}
      else
	{
	  /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.  */
	  high = GEN_INT (CONST_HIGH_PART (offset));
	  offset = CONST_LOW_PART (offset);
	}
      high = mips_force_temporary (temp, high);
      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (reg, offset);
}
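/* Worked example for the non-MIPS16 path: OFFSET == 0x12345 splits into
   CONST_HIGH_PART == 0x10000 and CONST_LOW_PART == 0x2345, so the excess
   is added to REG first and the remaining 16-bit part is folded into the
   final plus_constant address.  */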
/* The __tls_get_addr symbol.  */
static GTY(()) rtx mips_tls_symbol;

/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  V0 is an RTX for the
   return value location.  */

static rtx
mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
{
  rtx insn, loc, a0;

  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  if (!mips_tls_symbol)
    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");

  loc = mips_unspec_address (sym, type);

  start_sequence ();

  emit_insn (gen_rtx_SET (Pmode, a0,
			  gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
  insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
			   const0_rtx, NULL_RTX, false);
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}

/* Return a pseudo register that contains the current thread pointer.  */

static rtx
mips_get_tp (void)
{
  rtx tp;

  tp = gen_reg_rtx (Pmode);
  if (Pmode == DImode)
    emit_insn (gen_tls_get_tp_di (tp));
  else
    emit_insn (gen_tls_get_tp_si (tp));
  return tp;
}
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
mips_legitimize_tls_address (rtx loc)
{
  rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
  enum tls_model model;

  if (TARGET_MIPS16)
    {
      sorry ("MIPS16 TLS");
      return gen_reg_rtx (Pmode);
    }

  model = SYMBOL_REF_TLS_MODEL (loc);
  /* Only TARGET_ABICALLS code can have more than one module; other
     code must be static and should not use a GOT.  All TLS models
     reduce to local exec in this situation.  */
  if (!TARGET_ABICALLS)
    model = TLS_MODEL_LOCAL_EXEC;

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      v0 = gen_rtx_REG (Pmode, GP_RETURN);
      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, dest, v0, loc);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      v0 = gen_rtx_REG (Pmode, GP_RETURN);
      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
      tmp1 = gen_reg_rtx (Pmode);

      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	 share the LDM result with other LD model accesses.  */
      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			    UNSPEC_TLS_LDM);
      emit_libcall_block (insn, tmp1, v0, eqv);

      tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
      dest = gen_rtx_LO_SUM (Pmode, tmp2,
			     mips_unspec_address (loc, SYMBOL_DTPREL));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = mips_get_tp ();
      tmp1 = gen_reg_rtx (Pmode);
      tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
      if (Pmode == DImode)
	emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
      else
	emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp1, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = mips_get_tp ();
      tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
      dest = gen_rtx_LO_SUM (Pmode, tmp1,
			     mips_unspec_address (loc, SYMBOL_TPREL));
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
/* If X is not a valid address for mode MODE, force it into a register.  */

static rtx
mips_force_address (rtx x, enum machine_mode mode)
{
  if (!mips_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
  return x;
}

/* This function is used to implement LEGITIMIZE_ADDRESS.  If *XLOC can
   be legitimized in a way that the generic machinery might not expect,
   put the new address in *XLOC and return true.  MODE is the mode of
   the memory being accessed.  */

bool
mips_legitimize_address (rtx *xloc, enum machine_mode mode)
{
  rtx base, addr;
  HOST_WIDE_INT offset;

  if (mips_tls_symbol_p (*xloc))
    {
      *xloc = mips_legitimize_tls_address (*xloc);
      return true;
    }

  /* See if the address can split into a high part and a LO_SUM.  */
  if (mips_split_symbol (NULL, *xloc, mode, &addr))
    {
      *xloc = mips_force_address (addr, mode);
      return true;
    }

  /* Handle BASE + OFFSET using mips_add_offset.  */
  mips_split_plus (*xloc, &base, &offset);
  if (offset != 0)
    {
      if (!mips_valid_base_register_p (base, mode, false))
	base = copy_to_mode_reg (Pmode, base);
      addr = mips_add_offset (NULL, base, offset);
      *xloc = mips_force_address (addr, mode);
      return true;
    }
  return false;
}
/* Load VALUE into DEST.  TEMP is as for mips_force_temporary.  */

void
mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum machine_mode mode;
  unsigned int i, num_ops;
  rtx x;

  mode = GET_MODE (dest);
  num_ops = mips_build_integer (codes, value);

  /* Apply each binary operation to X.  Invariant: X is a legitimate
     source operand for a SET pattern.  */
  x = GEN_INT (codes[0].value);
  for (i = 1; i < num_ops; i++)
    {
      if (!can_create_pseudo_p ())
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp, x));
	  x = temp;
	}
      else
	x = force_reg (mode, x);
      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}
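/* For example, on a 32-bit target the value 0x12345678 is built as two
   operations: codes[0] loads 0x12340000 (an LUI) and codes[1] IORs in
   0x5678, matching the usual "lui; ori" sequence.  (Illustrative note.)  */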
/* Subroutine of mips_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  */
  if (splittable_const_int_operand (src, mode))
    {
      mips_move_integer (dest, dest, INTVAL (src));
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (mips_tls_symbol_p (src))
    {
      mips_emit_move (dest, mips_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.
     In non-MIPS16 mode, prefer to do this even if the constant _can_ be
     forced into memory, as it usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (src)
	  || (!TARGET_MIPS16 && can_create_pseudo_p ())))
    {
      base = mips_force_temporary (dest, base);
      mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
  mips_emit_move (dest, src);
}
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  */

bool
mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
{
  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
    {
      mips_emit_move (dest, force_reg (mode, src));
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      mips_legitimize_const_move (mode, dest, src);
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }
  return false;
}
/* Return true if value X in context CONTEXT is a small-data address
   that can be rewritten as a LO_SUM.  */

static bool
mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
{
  enum mips_symbol_type symbol_type;

  return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
	  && !mips_split_p[SYMBOL_GP_RELATIVE]
	  && mips_symbolic_constant_p (x, context, &symbol_type)
	  && symbol_type == SYMBOL_GP_RELATIVE);
}

/* A for_each_rtx callback for mips_small_data_pattern_p.  DATA is the
   containing MEM, or null if none.  */

static int
mips_small_data_pattern_1 (rtx *loc, void *data)
{
  enum mips_symbol_context context;

  if (GET_CODE (*loc) == LO_SUM)
    return -1;

  if (MEM_P (*loc))
    {
      if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
	return 1;
      return -1;
    }

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  return mips_rewrite_small_data_p (*loc, context);
}

/* Return true if OP refers to small data symbols directly, not through
   a LO_SUM.  */

bool
mips_small_data_pattern_p (rtx op)
{
  return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
}

/* A for_each_rtx callback, used by mips_rewrite_small_data.
   DATA is the containing MEM, or null if none.  */

static int
mips_rewrite_small_data_1 (rtx *loc, void *data)
{
  enum mips_symbol_context context;

  if (MEM_P (*loc))
    {
      for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
      return -1;
    }

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  if (mips_rewrite_small_data_p (*loc, context))
    *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);

  if (GET_CODE (*loc) == LO_SUM)
    return -1;

  return 0;
}

/* Rewrite instruction pattern PATTERN so that it refers to small data
   using explicit relocations.  */

rtx
mips_rewrite_small_data (rtx pattern)
{
  pattern = copy_insn (pattern);
  for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
  return pattern;
}
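/* For example, after this rewrite a direct reference to a small-data
   symbol X is represented as (lo_sum $gp (symbol_ref X)) and is emitted
   as a GP-relative access rather than as a full absolute address.  */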
/* We need a lot of little routines to check the range of MIPS16 immediate
   operands.  */

static bool
m16_check_op (rtx op, int low, int high, int mask)
{
  return (GET_CODE (op) == CONST_INT
	  && IN_RANGE (INTVAL (op), low, high)
	  && (INTVAL (op) & mask) == 0);
}

bool
m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x1, 0x8, 0);
}

bool
m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x8, 0x7, 0);
}

bool
m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7, 0x8, 0);
}

bool
m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x10, 0xf, 0);
}

bool
m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xf, 0x10, 0);
}

bool
m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
}

bool
m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
}

bool
m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x80, 0x7f, 0);
}

bool
m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7f, 0x80, 0);
}

bool
m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x0, 0xff, 0);
}

bool
m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xff, 0x0, 0);
}

bool
m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x1, 0xfe, 0);
}

bool
m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x0, 0xff << 2, 3);
}

bool
m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xff << 2, 0x0, 3);
}

bool
m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
}

bool
m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
}
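/* For example, m16_uimm8_4 accepts the multiples of 4 in [0, 1020] and
   m16_simm8_8 accepts the multiples of 8 in [-1024, 1016]; the ranges
   mirror the scaled immediate fields of the corresponding MIPS16
   instructions.  */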
/* The cost of loading values from the constant pool.  It should be
   larger than the cost of any constant we want to synthesize inline.  */
#define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)

/* Return the cost of X when used as an operand to the MIPS16 instruction
   that implements CODE.  Return -1 if there is no such instruction, or if
   X is not a valid immediate operand for it.  */

static int
mips16_constant_cost (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* Shifts by between 1 and 8 bits (inclusive) are unextended,
	 other shifts are extended.  The shift patterns truncate the shift
	 count to the right size, so there are no out-of-range values.  */
      if (IN_RANGE (x, 1, 8))
	return 0;
      return COSTS_N_INSNS (1);

    case PLUS:
      if (IN_RANGE (x, -128, 127))
	return 0;
      if (SMALL_OPERAND (x))
	return COSTS_N_INSNS (1);
      return -1;

    case LEU:
      /* Like LE, but reject the always-true case.  */
      if (x == -1)
	return -1;
    case LE:
      /* We add 1 to the immediate and use SLT.  */
      x += 1;
    case XOR:
      /* We can use CMPI for an xor with an unsigned 16-bit X.  */
    case LT:
    case LTU:
      if (IN_RANGE (x, 0, 255))
	return 0;
      if (SMALL_OPERAND_UNSIGNED (x))
	return COSTS_N_INSNS (1);
      return -1;

    case EQ:
    case NE:
      /* Equality comparisons with 0 are cheap.  */
      if (x == 0)
	return 0;
      return -1;

    default:
      return -1;
    }
}

/* Return true if there is a non-MIPS16 instruction that implements CODE
   and if that instruction accepts X as an immediate operand.  */

static int
mips_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case ROTATE:
    case ROTATERT:
      /* Likewise rotates, if the target supports rotates at all.  */
      return ISA_HAS_ROR;

    case AND:
    case IOR:
    case XOR:
      /* These instructions take 16-bit unsigned immediates.  */
      return SMALL_OPERAND_UNSIGNED (x);

    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 16-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case EQ:
    case NE:
    case GT:
    case GTU:
      /* The "immediate" forms of these instructions are really
	 implemented as comparisons with register 0.  */
      return x == 0;

    case GE:
    case GEU:
      /* Likewise, meaning that the only valid immediate operand is 1.  */
      return x == 1;

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* The bit position and size are immediate operands.  */
      return ISA_HAS_EXT_INS;

    default:
      /* By default assume that $0 can be used for 0.  */
      return x == 0;
    }
}
/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation has cost SINGLE_COST
   and that the sequence of a double-word operation has cost DOUBLE_COST.  */

static int
mips_binary_cost (rtx x, int single_cost, int double_cost)
{
  int cost;

  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    cost = double_cost;
  else
    cost = single_cost;
  return (cost
	  + rtx_cost (XEXP (x, 0), 0, !optimize_size)
	  + rtx_cost (XEXP (x, 1), GET_CODE (x), !optimize_size));
}

/* Return the cost of floating-point multiplications of mode MODE.  */

static int
mips_fp_mult_cost (enum machine_mode mode)
{
  return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
}

/* Return the cost of floating-point divisions of mode MODE.  */

static int
mips_fp_div_cost (enum machine_mode mode)
{
  return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
}

/* Return the cost of sign-extending OP to mode MODE, not including the
   cost of OP itself.  */

static int
mips_sign_extend_cost (enum machine_mode mode, rtx op)
{
  if (MEM_P (op))
    /* Extended loads are as cheap as unextended ones.  */
    return 0;

  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
    return 0;

  if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
    /* We can use SEB or SEH.  */
    return COSTS_N_INSNS (1);

  /* We need to use a shift left and a shift right.  */
  return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
}

/* Return the cost of zero-extending OP to mode MODE, not including the
   cost of OP itself.  */

static int
mips_zero_extend_cost (enum machine_mode mode, rtx op)
{
  if (MEM_P (op))
    /* Extended loads are as cheap as unextended ones.  */
    return 0;

  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
    return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);

  if (GENERATE_MIPS16E)
    /* We can use ZEB or ZEH.  */
    return COSTS_N_INSNS (1);

  if (TARGET_MIPS16)
    /* We need to load 0xff or 0xffff into a register and use AND.  */
    return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);

  /* We can use ANDI.  */
  return COSTS_N_INSNS (1);
}
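/* For example, zero-extending a QImode register costs one ANDI in normal
   mode, but in MIPS16 mode (without MIPS16e ZEB) it costs two instructions,
   because 0xff must first be loaded into a register before the AND.  */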
3296 /* Implement TARGET_RTX_COSTS. */
3299 mips_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
3302 enum machine_mode mode
= GET_MODE (x
);
3303 bool float_mode_p
= FLOAT_MODE_P (mode
);
3307 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3308 appear in the instruction stream, and the cost of a comparison is
3309 really the cost of the branch or scc condition. At the time of
3310 writing, GCC only uses an explicit outer COMPARE code when optabs
3311 is testing whether a constant is expensive enough to force into a
3312 register. We want optabs to pass such constants through the MIPS
3313 expanders instead, so make all constants very cheap here. */
3314 if (outer_code
== COMPARE
)
3316 gcc_assert (CONSTANT_P (x
));
3324 /* Treat *clear_upper32-style ANDs as having zero cost in the
3325 second operand. The cost is entirely in the first operand.
3327 ??? This is needed because we would otherwise try to CSE
3328 the constant operand. Although that's the right thing for
3329 instructions that continue to be a register operation throughout
3330 compilation, it is disastrous for instructions that could
3331 later be converted into a memory operation. */
3333 && outer_code
== AND
3334 && UINTVAL (x
) == 0xffffffff)
3342 cost
= mips16_constant_cost (outer_code
, INTVAL (x
));
3351 /* When not optimizing for size, we care more about the cost
3352 of hot code, and hot code is often in a loop. If a constant
3353 operand needs to be forced into a register, we will often be
3354 able to hoist the constant load out of the loop, so the load
3355 should not contribute to the cost. */
3357 || mips_immediate_operand_p (outer_code
, INTVAL (x
)))
3369 if (force_to_mem_operand (x
, VOIDmode
))
3371 *total
= COSTS_N_INSNS (1);
3374 cost
= mips_const_insns (x
);
3377 /* If the constant is likely to be stored in a GPR, SETs of
3378 single-insn constants are as cheap as register sets; we
3379 never want to CSE them.
3381 Don't reduce the cost of storing a floating-point zero in
3382 FPRs. If we have a zero in an FPR for other reasons, we
3383 can get better cfg-cleanup and delayed-branch results by
3384 using it consistently, rather than using $0 sometimes and
3385 an FPR at other times. Also, moves between floating-point
3386 registers are sometimes cheaper than (D)MTC1 $0. */
3388 && outer_code
== SET
3389 && !(float_mode_p
&& TARGET_HARD_FLOAT
))
3391 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3392 want to CSE the constant itself. It is usually better to
3393 have N copies of the last operation in the sequence and one
3394 shared copy of the other operations. (Note that this is
3395 not true for MIPS16 code, where the final operation in the
3396 sequence is often an extended instruction.)
3398 Also, if we have a CONST_INT, we don't know whether it is
3399 for a word or doubleword operation, so we cannot rely on
3400 the result of mips_build_integer. */
3401 else if (!TARGET_MIPS16
3402 && (outer_code
== SET
|| mode
== VOIDmode
))
3404 *total
= COSTS_N_INSNS (cost
);
3407 /* The value will need to be fetched from the constant pool. */
3408 *total
= CONSTANT_POOL_COST
;
3412 /* If the address is legitimate, return the number of
3413 instructions it needs. */
3415 cost
= mips_address_insns (addr
, mode
, true);
3418 *total
= COSTS_N_INSNS (cost
+ 1);
3421 /* Check for a scaled indexed address. */
3422 if (mips_lwxs_address_p (addr
))
3424 *total
= COSTS_N_INSNS (2);
3427 /* Otherwise use the default handling. */
3431 *total
= COSTS_N_INSNS (6);
3435 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1);
3439 /* Check for a *clear_upper32 pattern and treat it like a zero
3440 extension. See the pattern's comment for details. */
3443 && CONST_INT_P (XEXP (x
, 1))
3444 && UINTVAL (XEXP (x
, 1)) == 0xffffffff)
3446 *total
= (mips_zero_extend_cost (mode
, XEXP (x
, 0))
3447 + rtx_cost (XEXP (x
, 0), 0, speed
));
3454 /* Double-word operations use two single-word operations. */
3455 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3463 if (CONSTANT_P (XEXP (x
, 1)))
3464 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3466 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3471 *total
= mips_cost
->fp_add
;
3473 *total
= COSTS_N_INSNS (4);
3477 /* Low-part immediates need an extended MIPS16 instruction. */
3478 *total
= (COSTS_N_INSNS (TARGET_MIPS16
? 2 : 1)
3479 + rtx_cost (XEXP (x
, 0), 0, speed
));
3494 /* Branch comparisons have VOIDmode, so use the first operand's
3496 mode
= GET_MODE (XEXP (x
, 0));
3497 if (FLOAT_MODE_P (mode
))
3499 *total
= mips_cost
->fp_add
;
3502 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3507 && (ISA_HAS_NMADD4_NMSUB4 (mode
) || ISA_HAS_NMADD3_NMSUB3 (mode
))
3508 && TARGET_FUSED_MADD
3509 && !HONOR_NANS (mode
)
3510 && !HONOR_SIGNED_ZEROS (mode
))
3512 /* See if we can use NMADD or NMSUB. See mips.md for the
3513 associated patterns. */
3514 rtx op0
= XEXP (x
, 0);
3515 rtx op1
= XEXP (x
, 1);
3516 if (GET_CODE (op0
) == MULT
&& GET_CODE (XEXP (op0
, 0)) == NEG
)
3518 *total
= (mips_fp_mult_cost (mode
)
3519 + rtx_cost (XEXP (XEXP (op0
, 0), 0), 0, speed
)
3520 + rtx_cost (XEXP (op0
, 1), 0, speed
)
3521 + rtx_cost (op1
, 0, speed
));
3524 if (GET_CODE (op1
) == MULT
)
3526 *total
= (mips_fp_mult_cost (mode
)
3527 + rtx_cost (op0
, 0, speed
)
3528 + rtx_cost (XEXP (op1
, 0), 0, speed
)
3529 + rtx_cost (XEXP (op1
, 1), 0, speed
));
3538 /* If this is part of a MADD or MSUB, treat the PLUS as
3541 && TARGET_FUSED_MADD
3542 && GET_CODE (XEXP (x
, 0)) == MULT
)
3545 *total
= mips_cost
->fp_add
;
3549 /* Double-word operations require three single-word operations and
3550 an SLTU. The MIPS16 version then needs to move the result of
3551 the SLTU from $24 to a MIPS16 register. */
3552 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1),
3553 COSTS_N_INSNS (TARGET_MIPS16
? 5 : 4));
3558 && (ISA_HAS_NMADD4_NMSUB4 (mode
) || ISA_HAS_NMADD3_NMSUB3 (mode
))
3559 && TARGET_FUSED_MADD
3560 && !HONOR_NANS (mode
)
3561 && HONOR_SIGNED_ZEROS (mode
))
3563 /* See if we can use NMADD or NMSUB. See mips.md for the
3564 associated patterns. */
3565 rtx op
= XEXP (x
, 0);
3566 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
3567 && GET_CODE (XEXP (op
, 0)) == MULT
)
3569 *total
= (mips_fp_mult_cost (mode
)
3570 + rtx_cost (XEXP (XEXP (op
, 0), 0), 0, speed
)
3571 + rtx_cost (XEXP (XEXP (op
, 0), 1), 0, speed
)
3572 + rtx_cost (XEXP (op
, 1), 0, speed
));
3578 *total
= mips_cost
->fp_add
;
3580 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 4 : 1);
3585 *total
= mips_fp_mult_cost (mode
);
3586 else if (mode
== DImode
&& !TARGET_64BIT
)
3587 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3588 where the mulsidi3 always includes an MFHI and an MFLO. */
3589 *total
= (optimize_size
3590 ? COSTS_N_INSNS (ISA_HAS_MUL3
? 7 : 9)
3591 : mips_cost
->int_mult_si
* 3 + 6);
3592 else if (optimize_size
)
3593 *total
= (ISA_HAS_MUL3
? 1 : 2);
3594 else if (mode
== DImode
)
3595 *total
= mips_cost
->int_mult_di
;
3597 *total
= mips_cost
->int_mult_si
;
3601 /* Check for a reciprocal. */
3604 && flag_unsafe_math_optimizations
3605 && XEXP (x
, 0) == CONST1_RTX (mode
))
3607 if (outer_code
== SQRT
|| GET_CODE (XEXP (x
, 1)) == SQRT
)
3608 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3609 division as being free. */
3610 *total
= rtx_cost (XEXP (x
, 1), 0, speed
);
3612 *total
= mips_fp_div_cost (mode
) + rtx_cost (XEXP (x
, 1), 0, speed
);
3621 *total
= mips_fp_div_cost (mode
);
3630 /* It is our responsibility to make division by a power of 2
3631 as cheap as 2 register additions if we want the division
3632 expanders to be used for such operations; see the setting
3633 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3634 should always produce shorter code than using
3635 expand_sdiv2_pow2. */
3637 && CONST_INT_P (XEXP (x
, 1))
3638 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
3640 *total
= COSTS_N_INSNS (2) + rtx_cost (XEXP (x
, 0), 0, speed
);
3643 *total
= COSTS_N_INSNS (mips_idiv_insns ());
3645 else if (mode
== DImode
)
3646 *total
= mips_cost
->int_div_di
;
3648 *total
= mips_cost
->int_div_si
;
3652 *total
= mips_sign_extend_cost (mode
, XEXP (x
, 0));
3656 *total
= mips_zero_extend_cost (mode
, XEXP (x
, 0));
3660 case UNSIGNED_FLOAT
:
3663 case FLOAT_TRUNCATE
:
3664 *total
= mips_cost
->fp_add
;
/* Implement TARGET_ADDRESS_COST.  */

static int
mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
{
  return mips_address_insns (addr, SImode, false);
}

/* Return one word of double-word value OP, taking into account the fixed
   endianness of certain registers.  HIGH_P is true to select the high part,
   false to select the low part.  */

rtx
mips_subword (rtx op, bool high_p)
{
  unsigned int byte, offset;
  enum machine_mode mode;

  mode = GET_MODE (op);
  if (mode == VOIDmode)
    mode = TARGET_64BIT ? TImode : DImode;

  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
    byte = UNITS_PER_WORD;
  else
    byte = 0;

  if (FP_REG_RTX_P (op))
    {
      /* Paired FPRs are always ordered little-endian.  */
      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
      return gen_rtx_REG (word_mode, REGNO (op) + offset);
    }

  if (MEM_P (op))
    return mips_rewrite_small_data (adjust_address (op, word_mode, byte));

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
/* Return true if a 64-bit move from SRC to DEST should be split into two.  */

bool
mips_split_64bit_move_p (rtx dest, rtx src)
{
  if (TARGET_64BIT)
    return false;

  /* FPR-to-FPR moves can be done in a single instruction, if they're
     allowed at all.  */
  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
    return false;

  /* Check for floating-point loads and stores.  */
  if (ISA_HAS_LDC1_SDC1)
    {
      if (FP_REG_RTX_P (dest) && MEM_P (src))
	return false;
      if (FP_REG_RTX_P (src) && MEM_P (dest))
	return false;
    }
  return true;
}
/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
   this function handles 64-bit moves for which mips_split_64bit_move_p
   holds.  For 64-bit targets, this function handles 128-bit moves.  */

void
mips_split_doubleword_move (rtx dest, rtx src)
{
  rtx low_dest;

  if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
    {
      if (!TARGET_64BIT && GET_MODE (dest) == DImode)
	emit_insn (gen_move_doubleword_fprdi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
	emit_insn (gen_move_doubleword_fprdf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
	emit_insn (gen_move_doubleword_fprv2sf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
	emit_insn (gen_move_doubleword_fprv2si (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
	emit_insn (gen_move_doubleword_fprv4hi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
	emit_insn (gen_move_doubleword_fprv8qi (dest, src));
      else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
	emit_insn (gen_move_doubleword_fprtf (dest, src));
      else
	gcc_unreachable ();
    }
  else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
    {
      low_dest = mips_subword (dest, false);
      mips_emit_move (low_dest, mips_subword (src, false));
      if (TARGET_64BIT)
	emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
      else
	emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
    }
  else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
    {
      mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
      if (TARGET_64BIT)
	emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
      else
	emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
    }
  else
    {
      /* The operation can be split into two normal moves.  Decide in
	 which order to do them.  */
      low_dest = mips_subword (dest, false);
      if (REG_P (low_dest)
	  && reg_overlap_mentioned_p (low_dest, src))
	{
	  mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
	  mips_emit_move (low_dest, mips_subword (src, false));
	}
      else
	{
	  mips_emit_move (low_dest, mips_subword (src, false));
	  mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
	}
    }
}
/* Return the appropriate instructions to move SRC into DEST.  Assume
   that SRC is operand 1 and DEST is operand 0.  */

const char *
mips_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  enum machine_mode mode;
  enum mips_symbol_type symbol_type;
  bool dbl_p;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  if (dbl_p && mips_split_64bit_move_p (dest, src))
    return "#";

  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
	{
	  if (GP_REG_P (REGNO (dest)))
	    return "move\t%0,%z1";

	  /* Moves to HI are handled by special .md insns.  */
	  if (REGNO (dest) == LO_REGNUM)
	    return "mtlo\t%z1";

	  if (DSP_ACC_REG_P (REGNO (dest)))
	    {
	      static char retval[] = "mt__\t%z1,%q0";

	      retval[2] = reg_names[REGNO (dest)][4];
	      retval[3] = reg_names[REGNO (dest)][5];
	      return retval;
	    }

	  if (FP_REG_P (REGNO (dest)))
	    return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";

	  if (ALL_COP_REG_P (REGNO (dest)))
	    {
	      static char retval[] = "dmtc_\t%z1,%0";

	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
	      return dbl_p ? retval : retval + 1;
	    }
	}
      if (dest_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "sb\t%z1,%0";
	  case 2: return "sh\t%z1,%0";
	  case 4: return "sw\t%z1,%0";
	  case 8: return "sd\t%z1,%0";
	  }
    }
  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG)
	{
	  /* Moves from HI are handled by special .md insns.  */
	  if (REGNO (src) == LO_REGNUM)
	    {
	      /* When generating VR4120 or VR4130 code, we use MACC and
		 DMACC instead of MFLO.  This avoids both the normal
		 MIPS III HI/LO hazards and the errata related to
		 -mfix-vr4130.  */
	      if (ISA_HAS_MACCHI)
		return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
	      return "mflo\t%0";
	    }

	  if (DSP_ACC_REG_P (REGNO (src)))
	    {
	      static char retval[] = "mf__\t%0,%q1";

	      retval[2] = reg_names[REGNO (src)][4];
	      retval[3] = reg_names[REGNO (src)][5];
	      return retval;
	    }

	  if (FP_REG_P (REGNO (src)))
	    return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";

	  if (ALL_COP_REG_P (REGNO (src)))
	    {
	      static char retval[] = "dmfc_\t%0,%1";

	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
	      return dbl_p ? retval : retval + 1;
	    }

	  if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
	    return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
	}

      if (src_code == MEM)
	switch (GET_MODE_SIZE (mode))
	  {
	  case 1: return "lbu\t%0,%1";
	  case 2: return "lhu\t%0,%1";
	  case 4: return "lw\t%0,%1";
	  case 8: return "ld\t%0,%1";
	  }

      if (src_code == CONST_INT)
	{
	  /* Don't use the X format for the operand itself, because that
	     will give out-of-range numbers for 64-bit hosts and 32-bit
	     targets.  */
	  if (!TARGET_MIPS16)
	    return "li\t%0,%1\t\t\t# %X1";

	  if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
	    return "li\t%0,%1";

	  if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
	    return "#";
	}

      if (src_code == HIGH)
	return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";

      if (CONST_GP_P (src))
	return "move\t%0,%1";

      if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
	  && mips_lo_relocs[symbol_type] != 0)
	{
	  /* A signed 16-bit constant formed by applying a relocation
	     operator to a symbolic address.  */
	  gcc_assert (!mips_split_p[symbol_type]);
	  return "li\t%0,%R1";
	}

      if (symbolic_operand (src, VOIDmode))
	{
	  gcc_assert (TARGET_MIPS16
		      ? TARGET_MIPS16_TEXT_LOADS
		      : !TARGET_EXPLICIT_RELOCS);
	  return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
	}
    }
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
	{
	  if (GET_MODE (dest) == V2SFmode)
	    return "mov.ps\t%0,%1";
	  else
	    return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
	}

      if (dest_code == MEM)
	return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
    }
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
	return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
    }
  if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
    {
      static char retval[] = "l_c_\t%0,%1";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
      return retval;
    }
  if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
    {
      static char retval[] = "s_c_\t%1,%0";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
      return retval;
    }
  gcc_unreachable ();
}
/* Return true if CMP1 is a suitable second operand for integer ordering
   test CODE.  See also the *sCC patterns in mips.md.  */

static bool
mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
{
  switch (code)
    {
    case GT:
    case GTU:
      return reg_or_0_operand (cmp1, VOIDmode);

    case GE:
    case GEU:
      return !TARGET_MIPS16 && cmp1 == const1_rtx;

    case LT:
    case LTU:
      return arith_operand (cmp1, VOIDmode);

    case LE:
      return sle_operand (cmp1, VOIDmode);

    case LEU:
      return sleu_operand (cmp1, VOIDmode);

    default:
      gcc_unreachable ();
    }
}
/* Return true if *CMP1 (of mode MODE) is a valid second operand for
   integer ordering test *CODE, or if an equivalent combination can
   be formed by adjusting *CODE and *CMP1.  When returning true, update
   *CODE and *CMP1 with the chosen code and operand, otherwise leave
   them alone.  */

static bool
mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
				  enum machine_mode mode)
{
  HOST_WIDE_INT plus_one;

  if (mips_int_order_operand_ok_p (*code, *cmp1))
    return true;

  if (GET_CODE (*cmp1) == CONST_INT)
    switch (*code)
      {
      case LE:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (INTVAL (*cmp1) < plus_one)
	  {
	    *code = LT;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      case LEU:
	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
	if (plus_one != 0)
	  {
	    *code = LTU;
	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
	    return true;
	  }
	break;

      default:
	break;
      }
  return false;
}
/* Compare CMP0 and CMP1 using ordering test CODE and store the result
   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
   is nonnull, it's OK to set TARGET to the inverse of the result and
   flip *INVERT_PTR instead.  */

static void
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
			  rtx target, rtx cmp0, rtx cmp1)
{
  enum machine_mode mode;

  /* First see if there is a MIPS instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
    mips_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
	{
	  cmp1 = force_reg (mode, cmp1);
	  mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
	}
      else if (invert_ptr == 0)
	{
	  rtx inv_target;

	  inv_target = mips_force_binary (GET_MODE (target),
					  inv_code, cmp0, cmp1);
	  mips_emit_binary (XOR, target, inv_target, const1_rtx);
	}
      else
	{
	  *invert_ptr = !*invert_ptr;
	  mips_emit_binary (inv_code, target, cmp0, cmp1);
	}
    }
}
/* Return a register that is zero iff CMP0 and CMP1 are equal.
   The register will have the same mode as CMP0.  */

static rtx
mips_zero_if_equal (rtx cmp0, rtx cmp1)
{
  if (cmp1 == const0_rtx)
    return cmp0;

  if (uns_arith_operand (cmp1, VOIDmode))
    return expand_binop (GET_MODE (cmp0), xor_optab,
			 cmp0, cmp1, 0, 0, OPTAB_DIRECT);

  return expand_binop (GET_MODE (cmp0), sub_optab,
		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
}

/* Convert *CODE into a code that can be used in a floating-point
   scc instruction (C.cond.fmt).  Return true if the values of
   the condition code registers will be inverted, with 0 indicating
   that the condition holds.  */

static bool
mips_reversed_fp_cond (enum rtx_code *code)
{
  switch (*code)
    {
    case NE:
    case LTGT:
    case ORDERED:
      *code = reverse_condition_maybe_unordered (*code);
      return true;

    default:
      return false;
    }
}
/* Convert a comparison into something that can be used in a branch or
   conditional move.  cmp_operands[0] and cmp_operands[1] are the values
   being compared and *CODE is the code used to compare them.

   Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
   If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
   otherwise any standard branch condition can be used.  The standard branch
   conditions are:

   - EQ or NE between two registers.
   - any comparison between a register and zero.  */

static void
mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
  if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
    {
      if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
	{
	  *op0 = cmp_operands[0];
	  *op1 = cmp_operands[1];
	}
      else if (*code == EQ || *code == NE)
	{
	  if (need_eq_ne_p)
	    {
	      *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
	      *op1 = const0_rtx;
	    }
	  else
	    {
	      *op0 = cmp_operands[0];
	      *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
	    }
	}
      else
	{
	  /* The comparison needs a separate scc instruction.  Store the
	     result of the scc in *OP0 and compare it against zero.  */
	  bool invert = false;
	  *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
	  mips_emit_int_order_test (*code, &invert, *op0,
				    cmp_operands[0], cmp_operands[1]);
	  *code = (invert ? EQ : NE);
	  *op1 = const0_rtx;
	}
    }
  else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
    {
      *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
      mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
      *code = NE;
      *op1 = const0_rtx;
    }
  else
    {
      enum rtx_code cmp_code;

      /* Floating-point tests use a separate C.cond.fmt comparison to
	 set a condition code register.  The branch or conditional move
	 will then compare that register against zero.

	 Set CMP_CODE to the code of the comparison instruction and
	 *CODE to the code that the branch or move should use.  */
      cmp_code = *code;
      *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
      *op0 = (ISA_HAS_8CC
	      ? gen_reg_rtx (CCmode)
	      : gen_rtx_REG (CCmode, FPSW_REGNUM));
      *op1 = const0_rtx;
      mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
    }
}
/* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
   Store the result in TARGET and return true if successful.

   On 64-bit targets, TARGET may be narrower than cmp_operands[0].  */

bool
mips_expand_scc (enum rtx_code code, rtx target)
{
  if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
    return false;

  if (code == EQ || code == NE)
    {
      rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
      mips_emit_binary (code, target, zie, const0_rtx);
    }
  else
    mips_emit_int_order_test (code, 0, target,
                              cmp_operands[0], cmp_operands[1]);
  return true;
}
/* Compare cmp_operands[0] with cmp_operands[1] using comparison code
   CODE and jump to OPERANDS[0] if the condition holds.  */

void
mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
{
  rtx op0, op1, condition;

  mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  emit_jump_insn (gen_condjump (condition, operands[0]));
}
/* Implement:

   (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
   (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS))  */

void
mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
                       enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
{
  rtx cmp_result;
  bool reversed_p;

  reversed_p = mips_reversed_fp_cond (&cond);
  cmp_result = gen_reg_rtx (CCV2mode);
  emit_insn (gen_scc_ps (cmp_result,
                         gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
  if (reversed_p)
    emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
                                         cmp_result));
  else
    emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
                                         cmp_result));
}
/* Compare cmp_operands[0] with cmp_operands[1] using the code of
   OPERANDS[1].  Move OPERANDS[2] into OPERANDS[0] if the condition
   holds, otherwise move OPERANDS[3] into OPERANDS[0].  */

void
mips_expand_conditional_move (rtx *operands)
{
  enum rtx_code code;
  rtx cond, op0, op1;

  code = GET_CODE (operands[1]);
  mips_emit_compare (&code, &op0, &op1, true);
  cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
                                                operands[2], operands[3])));
}
/* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
   then trap if the condition holds.  */

void
mips_expand_conditional_trap (enum rtx_code code)
{
  rtx op0, op1;
  enum machine_mode mode;

  /* MIPS conditional trap instructions don't have GT or LE flavors,
     so we must swap the operands and convert to LT and GE respectively.  */
  switch (code)
    {
    case GT:
    case LE:
    case GTU:
    case LEU:
      code = swap_condition (code);
      op0 = cmp_operands[1];
      op1 = cmp_operands[0];
      break;

    default:
      op0 = cmp_operands[0];
      op1 = cmp_operands[1];
      break;
    }

  mode = GET_MODE (cmp_operands[0]);
  op0 = force_reg (mode, op0);
  if (!arith_operand (op1, mode))
    op1 = force_reg (mode, op1);

  emit_insn (gen_rtx_TRAP_IF (VOIDmode,
                              gen_rtx_fmt_ee (code, mode, op0, op1),
                              const0_rtx));
}
/* Initialize *CUM for a call to a function of type FNTYPE.  */

void
mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
{
  memset (cum, 0, sizeof (*cum));
  cum->prototype = (fntype && prototype_p (fntype));
  cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
}
/* Fill INFO with information about a single argument.  CUM is the
   cumulative state for earlier arguments.  MODE is the mode of this
   argument and TYPE is its type (if known).  NAMED is true if this
   is a named (fixed) argument rather than a variable one.  */

static void
mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
                   enum machine_mode mode, tree type, int named)
{
  bool doubleword_aligned_p;
  unsigned int num_bytes, num_words, max_regs;

  /* Work out the size of the argument.  */
  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Decide whether it should go in a floating-point register, assuming
     one is free.  Later code checks for availability.

     The checks against UNITS_PER_FPVALUE handle the soft-float and
     single-float cases.  */
  switch (mips_abi)
    {
    case ABI_EABI:
      /* The EABI conventions have traditionally been defined in terms
         of TYPE_MODE, regardless of the actual type.  */
      info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
                      || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_32:
    case ABI_O64:
      /* Only leading floating-point scalars are passed in
         floating-point registers.  We also handle vector floats the same
         way, which is OK because they are not covered by the standard ABI.  */
      info->fpr_p = (!cum->gp_reg_found
                     && cum->arg_number < 2
                     && (type == 0
                         || SCALAR_FLOAT_TYPE_P (type)
                         || VECTOR_FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_N32:
    case ABI_64:
      /* Scalar, complex and vector floating-point types are passed in
         floating-point registers, as long as this is a named rather
         than a variable argument.  */
      info->fpr_p = (named
                     && (type == 0 || FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);

      /* ??? According to the ABI documentation, the real and imaginary
         parts of complex floats should be passed in individual registers.
         The real and imaginary parts of stack arguments are supposed
         to be contiguous and there should be an extra word of padding
         at the end.

         This has two problems.  First, it makes it impossible to use a
         single "void *" va_list type, since register and stack arguments
         are passed differently.  (At the time of writing, MIPSpro cannot
         handle complex float varargs correctly.)  Second, it's unclear
         what should happen when there is only one register free.

         For now, we assume that named complex floats should go into FPRs
         if there are two FPRs free, otherwise they should be passed in the
         same way as a struct containing two floats.  */
      if (info->fpr_p
          && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
          && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
        {
          if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
            info->fpr_p = false;
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* See whether the argument has doubleword alignment.  */
  doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;

  /* Set REG_OFFSET to the register count we're interested in.
     The EABI allocates the floating-point registers separately,
     but the other ABIs allocate them like integer registers.  */
  info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
                      ? cum->num_fprs
                      : cum->num_gprs);

  /* Advance to an even register if the argument is doubleword-aligned.  */
  if (doubleword_aligned_p)
    info->reg_offset += info->reg_offset & 1;

  /* Work out the offset of a stack argument.  */
  info->stack_offset = cum->stack_words;
  if (doubleword_aligned_p)
    info->stack_offset += info->stack_offset & 1;

  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;

  /* Partition the argument between registers and stack.  */
  info->reg_words = MIN (num_words, max_regs);
  info->stack_words = num_words - info->reg_words;
}
/* INFO describes a register argument that has the normal format for the
   argument's mode.  Return the register it uses, assuming that FPRs are
   available if HARD_FLOAT_P.  */

static unsigned int
mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
{
  if (!info->fpr_p || !hard_float_p)
    return GP_ARG_FIRST + info->reg_offset;
  else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
    /* In o32, the second argument is always passed in $f14
       for TARGET_DOUBLE_FLOAT, regardless of whether the
       first argument was a word or doubleword.  */
    return FP_ARG_FIRST + 2;
  else
    return FP_ARG_FIRST + info->reg_offset;
}
/* Implement TARGET_STRICT_ARGUMENT_NAMING.  */

static bool
mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
  return !TARGET_OLDABI;
}
/* Implement FUNCTION_ARG.  */

rtx
mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                   tree type, int named)
{
  struct mips_arg_info info;

  /* We will be called with a mode of VOIDmode after the last argument
     has been seen.  Whatever we return will be passed to the call expander.
     If we need a MIPS16 fp_code, return a REG with the code stored as
     the mode.  */
  if (mode == VOIDmode)
    {
      if (TARGET_MIPS16 && cum->fp_code != 0)
        return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
      else
        return NULL;
    }

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Return straight away if the whole argument is passed on the stack.  */
  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
    return NULL;

  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
     contains a double in its entirety, then that 64-bit chunk is passed
     in a floating-point register.  */
  if (TARGET_NEWABI
      && TARGET_HARD_FLOAT
      && named
      && type != 0
      && TREE_CODE (type) == RECORD_TYPE
      && TYPE_SIZE_UNIT (type)
      && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      tree field;

      /* First check to see if there is any such field.  */
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL
            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
            && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
            && host_integerp (bit_position (field), 0)
            && int_bit_position (field) % BITS_PER_WORD == 0)
          break;

      if (field != 0)
        {
          /* Now handle the special case by returning a PARALLEL
             indicating where each 64-bit chunk goes.  INFO.REG_WORDS
             chunks are passed in registers.  */
          unsigned int i;
          HOST_WIDE_INT bitpos;
          rtx ret;

          /* assign_parms checks the mode of ENTRY_PARM, so we must
             use the actual mode here.  */
          ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));

          bitpos = 0;
          field = TYPE_FIELDS (type);
          for (i = 0; i < info.reg_words; i++)
            {
              rtx reg;

              for (; field; field = TREE_CHAIN (field))
                if (TREE_CODE (field) == FIELD_DECL
                    && int_bit_position (field) >= bitpos)
                  break;

              if (field
                  && int_bit_position (field) == bitpos
                  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
                  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
                reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
              else
                reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);

              XVECEXP (ret, 0, i)
                = gen_rtx_EXPR_LIST (VOIDmode, reg,
                                     GEN_INT (bitpos / BITS_PER_UNIT));

              bitpos += BITS_PER_WORD;
            }
          return ret;
        }
    }

  /* Handle the n32/n64 conventions for passing complex floating-point
     arguments in FPR pairs.  The real part goes in the lower register
     and the imaginary part goes in the upper register.  */
  if (TARGET_NEWABI
      && info.fpr_p
      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      rtx real, imag;
      enum machine_mode inner;
      unsigned int regno;

      inner = GET_MODE_INNER (mode);
      regno = FP_ARG_FIRST + info.reg_offset;
      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
        {
          /* Real part in registers, imaginary part on stack.  */
          gcc_assert (info.stack_words == info.reg_words);
          return gen_rtx_REG (inner, regno);
        }
      else
        {
          gcc_assert (info.stack_words == 0);
          real = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner, regno),
                                    const0_rtx);
          imag = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner,
                                                 regno + info.reg_words / 2),
                                    GEN_INT (GET_MODE_SIZE (inner)));
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
        }
    }

  return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
}
/* Implement FUNCTION_ARG_ADVANCE.  */

void
mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                           tree type, int named)
{
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);

  if (!info.fpr_p)
    cum->gp_reg_found = true;

  /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
     an explanation of what this code does.  It assumes that we're using
     either the o32 or the o64 ABI, both of which pass at most 2 arguments
     in floating-point registers.  */
  if (cum->arg_number < 2 && info.fpr_p)
    cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);

  /* Advance the register count.  This has the effect of setting
     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
     argument required us to skip the final GPR and pass the whole
     argument on the stack.  */
  if (mips_abi != ABI_EABI || !info.fpr_p)
    cum->num_gprs = info.reg_offset + info.reg_words;
  else if (info.reg_words > 0)
    cum->num_fprs += MAX_FPRS_PER_FMT;

  /* Advance the stack word count.  */
  if (info.stack_words > 0)
    cum->stack_words = info.stack_offset + info.stack_words;

  cum->arg_number++;
}
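
/* A worked example of the FP_CODE encoding above (illustration only):
   for arguments (float, double), argument 0 contributes 1 << 0 and
   argument 1 contributes 2 << 2, so CUM->FP_CODE ends up as 0x9
   (binary 1001).  The MIPS16 stub code later decodes this two bits at
   a time: 01 means float, 10 means double.  */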
/* Implement TARGET_ARG_PARTIAL_BYTES.  */

static int
mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
                        enum machine_mode mode, tree type, bool named)
{
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);
  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
}
/* Implement FUNCTION_ARG_BOUNDARY.  Every parameter gets at least
   PARM_BOUNDARY bits of alignment, but will be given anything up
   to STACK_BOUNDARY bits if the type requires it.  */

int
mips_function_arg_boundary (enum machine_mode mode, tree type)
{
  unsigned int alignment;

  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
  if (alignment < PARM_BOUNDARY)
    alignment = PARM_BOUNDARY;
  if (alignment > STACK_BOUNDARY)
    alignment = STACK_BOUNDARY;
  return alignment;
}
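
/* A worked example of the clamping above (illustration only), assuming
   the typical o32 values PARM_BOUNDARY == 32 and STACK_BOUNDARY == 64:
   a "char" argument (alignment 8) is raised to 32, a "double"
   (alignment 64) is kept at 64, and a hypothetical 128-bit-aligned type
   would be capped at 64.  */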
/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
   upward rather than downward.  In other words, return true if the
   first byte of the stack slot has useful data, false if the last
   byte does.  */

bool
mips_pad_arg_upward (enum machine_mode mode, const_tree type)
{
  /* On little-endian targets, the first byte of every stack argument
     is passed in the first byte of the stack slot.  */
  if (!BYTES_BIG_ENDIAN)
    return true;

  /* Otherwise, integral types are padded downward: the last byte of a
     stack argument is passed in the last byte of the stack slot.  */
  if (type != 0
      ? (INTEGRAL_TYPE_P (type)
         || POINTER_TYPE_P (type)
         || FIXED_POINT_TYPE_P (type))
      : (SCALAR_INT_MODE_P (mode)
         || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
    return false;

  /* Big-endian o64 pads floating-point arguments downward.  */
  if (mips_abi == ABI_O64)
    if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
      return false;

  /* Other types are padded upward for o32, o64, n32 and n64.  */
  if (mips_abi != ABI_EABI)
    return true;

  /* Arguments smaller than a stack slot are padded downward.  */
  if (mode != BLKmode)
    return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
  else
    return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
}
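
/* A worked example of the rules above (illustration only): on a
   big-endian o32 target, a "short" stack argument is integral and is
   therefore padded downward, so its two bytes occupy the end of the
   4-byte slot; a 3-byte BLKmode structure is smaller than
   PARM_BOUNDARY / BITS_PER_UNIT == 4 and is likewise padded downward
   under EABI, but padded upward under the other ABIs.  */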
/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN
   if the least significant byte of the register has useful data.  Return
   the opposite if the most significant byte does.  */

bool
mips_pad_reg_upward (enum machine_mode mode, tree type)
{
  /* No shifting is required for floating-point arguments.  */
  if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
    return !BYTES_BIG_ENDIAN;

  /* Otherwise, apply the same padding to register arguments as we do
     to stack arguments.  */
  return mips_pad_arg_upward (mode, type);
}
/* Return nonzero when an argument must be passed by reference.  */

static bool
mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                        enum machine_mode mode, const_tree type,
                        bool named ATTRIBUTE_UNUSED)
{
  if (mips_abi == ABI_EABI)
    {
      int size;

      /* ??? How should SCmode be handled?  */
      if (mode == DImode || mode == DFmode
          || mode == DQmode || mode == UDQmode
          || mode == DAmode || mode == UDAmode)
        return 0;

      size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
      return size == -1 || size > UNITS_PER_WORD;
    }
  else
    {
      /* If we have a variable-sized parameter, we have no choice.  */
      return targetm.calls.must_pass_in_stack (mode, type);
    }
}
/* Implement TARGET_CALLEE_COPIES.  */

static bool
mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                    enum machine_mode mode ATTRIBUTE_UNUSED,
                    const_tree type ATTRIBUTE_UNUSED, bool named)
{
  return mips_abi == ABI_EABI && named;
}
/* See whether VALTYPE is a record whose fields should be returned in
   floating-point registers.  If so, return the number of fields and
   list them in FIELDS (which should have two elements).  Return 0
   otherwise.

   For n32 & n64, a structure with one or two fields is returned in
   floating-point registers as long as every field has a floating-point
   type.  */

static int
mips_fpr_return_fields (const_tree valtype, tree *fields)
{
  tree field;
  int i;

  if (!TARGET_NEWABI)
    return 0;

  if (TREE_CODE (valtype) != RECORD_TYPE)
    return 0;

  i = 0;
  for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
        return 0;

      if (i == 2)
        return 0;

      fields[i++] = field;
    }
  return i;
}
/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
   a value in the most significant part of $2/$3 if:

      - the target is big-endian;

      - the value has a structure or union type (we generalize this to
        cover aggregates from other languages too); and

      - the structure is not returned in floating-point registers.  */

static bool
mips_return_in_msb (const_tree valtype)
{
  tree fields[2];

  return (TARGET_NEWABI
          && TARGET_BIG_ENDIAN
          && AGGREGATE_TYPE_P (valtype)
          && mips_fpr_return_fields (valtype, fields) == 0);
}
/* Return true if the function return value MODE will get returned in a
   floating-point register.  */

static bool
mips_return_mode_in_fpr_p (enum machine_mode mode)
{
  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
           || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
          && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
}
/* Return the representation of an FPR return register when the
   value being returned in FP_RETURN has mode VALUE_MODE and the
   return type itself has mode TYPE_MODE.  On NewABI targets,
   the two modes may be different for structures like:

       struct __attribute__((packed)) foo { float f; }

   where we return the SFmode value of "f" in FP_RETURN, but where
   the structure itself has mode BLKmode.  */

static rtx
mips_return_fpr_single (enum machine_mode type_mode,
                        enum machine_mode value_mode)
{
  rtx x;

  x = gen_rtx_REG (value_mode, FP_RETURN);
  if (type_mode != value_mode)
    {
      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
    }
  return x;
}
/* Return a composite value in a pair of floating-point registers.
   MODE1 and OFFSET1 are the mode and byte offset for the first value,
   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
   complete value.

   For n32 & n64, $f0 always holds the first value and $f2 the second.
   Otherwise the values are packed together as closely as possible.  */

static rtx
mips_return_fpr_pair (enum machine_mode mode,
                      enum machine_mode mode1, HOST_WIDE_INT offset1,
                      enum machine_mode mode2, HOST_WIDE_INT offset2)
{
  int inc;

  inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
  return gen_rtx_PARALLEL
    (mode,
     gen_rtvec (2,
                gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode1, FP_RETURN),
                                   GEN_INT (offset1)),
                gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode2, FP_RETURN + inc),
                                   GEN_INT (offset2))));
}
/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
   VALTYPE is null and MODE is the mode of the return value.  */

rtx
mips_function_value (const_tree valtype, enum machine_mode mode)
{
  if (valtype)
    {
      tree fields[2];
      int unsigned_p;

      mode = TYPE_MODE (valtype);
      unsigned_p = TYPE_UNSIGNED (valtype);

      /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
         we must promote the mode just as PROMOTE_MODE does.  */
      mode = promote_mode (valtype, mode, &unsigned_p, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
        {
        case 1:
          return mips_return_fpr_single (mode,
                                         TYPE_MODE (TREE_TYPE (fields[0])));

        case 2:
          return mips_return_fpr_pair (mode,
                                       TYPE_MODE (TREE_TYPE (fields[0])),
                                       int_byte_position (fields[0]),
                                       TYPE_MODE (TREE_TYPE (fields[1])),
                                       int_byte_position (fields[1]));
        }

      /* If a value is passed in the most significant part of a register, see
         whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
        {
          HOST_WIDE_INT size = int_size_in_bytes (valtype);
          if (size % UNITS_PER_WORD != 0)
            {
              size += UNITS_PER_WORD - size % UNITS_PER_WORD;
              mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
            }
        }

      /* For EABI, the class of return register depends entirely on MODE.
         For example, "struct { some_type x; }" and "union { some_type x; }"
         are returned in the same way as a bare "some_type" would be.
         Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
        return gen_rtx_REG (mode, GP_RETURN);
    }

  /* Handle long doubles for n32 & n64.  */
  if (mode == TFmode)
    return mips_return_fpr_pair (mode,
                                 DImode, 0,
                                 DImode, GET_MODE_SIZE (mode) / 2);

  if (mips_return_mode_in_fpr_p (mode))
    {
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
        return mips_return_fpr_pair (mode,
                                     GET_MODE_INNER (mode), 0,
                                     GET_MODE_INNER (mode),
                                     GET_MODE_SIZE (mode) / 2);
      else
        return gen_rtx_REG (mode, FP_RETURN);
    }

  return gen_rtx_REG (mode, GP_RETURN);
}
/* Implement TARGET_RETURN_IN_MEMORY.  Under the o32 and o64 ABIs,
   all BLKmode objects are returned in memory.  Under the n32, n64
   and embedded ABIs, small structures are returned in a register.
   Objects with varying size must still be returned in memory, of
   course.  */

static bool
mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  return (TARGET_OLDABI
          ? TYPE_MODE (type) == BLKmode
          : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
}
/* Implement TARGET_SETUP_INCOMING_VARARGS.  */

static void
mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                             tree type, int *pretend_size ATTRIBUTE_UNUSED,
                             int no_rtl)
{
  CUMULATIVE_ARGS local_cum;
  int gp_saved, fp_saved;

  /* The caller has advanced CUM up to, but not beyond, the last named
     argument.  Advance a local copy of CUM past the last "real" named
     argument, to find out how many registers are left over.  */
  local_cum = *cum;
  FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);

  /* Find out how many registers we need to save.  */
  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
  fp_saved = (EABI_FLOAT_VARARGS_P
              ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
              : 0);

  if (!no_rtl)
    {
      if (gp_saved > 0)
        {
          rtx ptr, mem;

          ptr = plus_constant (virtual_incoming_args_rtx,
                               REG_PARM_STACK_SPACE (cfun->decl)
                               - gp_saved * UNITS_PER_WORD);
          mem = gen_frame_mem (BLKmode, ptr);
          set_mem_alias_set (mem, get_varargs_alias_set ());

          move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
                               mem, gp_saved);
        }
      if (fp_saved > 0)
        {
          /* We can't use move_block_from_reg, because it will use
             the wrong mode.  */
          enum machine_mode mode;
          int off, i;

          /* Set OFF to the offset from virtual_incoming_args_rtx of
             the first float register.  The FP save area lies below
             the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
          off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
          off -= fp_saved * UNITS_PER_FPREG;

          mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;

          for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
               i += MAX_FPRS_PER_FMT)
            {
              rtx ptr, mem;

              ptr = plus_constant (virtual_incoming_args_rtx, off);
              mem = gen_frame_mem (mode, ptr);
              set_mem_alias_set (mem, get_varargs_alias_set ());
              mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
              off += UNITS_PER_HWFPVALUE;
            }
        }
    }
  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
                                   + fp_saved * UNITS_PER_FPREG);
}
/* Implement TARGET_BUILTIN_VA_LIST.  */

static tree
mips_build_builtin_va_list (void)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      /* We keep 3 pointers, and two offsets.

         Two pointers are to the overflow area, which starts at the CFA.
         One of these is constant, for addressing into the GPR save area
         below it.  The other is advanced up the stack through the
         overflow region.

         The third pointer is to the bottom of the GPR save area.
         Since the FPR save area is just below it, we can address
         FPR slots off this pointer.

         We also keep two one-byte offsets, which are to be subtracted
         from the constant pointers to yield addresses in the GPR and
         FPR save areas.  These are downcounted as float or non-float
         arguments are used, and when they get to zero, the argument
         must be obtained from the overflow region.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
      tree array, index;

      record = lang_hooks.types.make_type (RECORD_TYPE);

      f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
                           ptr_type_node);
      f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
                           ptr_type_node);
      f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
                           ptr_type_node);
      f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
                           unsigned_char_type_node);
      f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
                           unsigned_char_type_node);
      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
         warn on every user file.  */
      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
      array = build_array_type (unsigned_char_type_node,
                                build_index_type (index));
      f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);

      DECL_FIELD_CONTEXT (f_ovfl) = record;
      DECL_FIELD_CONTEXT (f_gtop) = record;
      DECL_FIELD_CONTEXT (f_ftop) = record;
      DECL_FIELD_CONTEXT (f_goff) = record;
      DECL_FIELD_CONTEXT (f_foff) = record;
      DECL_FIELD_CONTEXT (f_res) = record;

      TYPE_FIELDS (record) = f_ovfl;
      TREE_CHAIN (f_ovfl) = f_gtop;
      TREE_CHAIN (f_gtop) = f_ftop;
      TREE_CHAIN (f_ftop) = f_goff;
      TREE_CHAIN (f_goff) = f_foff;
      TREE_CHAIN (f_foff) = f_res;

      layout_type (record);
      return record;
    }
  else if (TARGET_IRIX && TARGET_IRIX6)
    /* On IRIX 6, this type is 'char *'.  */
    return build_pointer_type (char_type_node);
  else
    /* Otherwise, we use 'void *'.  */
    return ptr_type_node;
}
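
/* For illustration only: the record built above corresponds roughly to
   the following C declaration (the field names are the real ones, the
   struct tag is made up here):

     struct mips_eabi_va_list {
       void *__overflow_argptr;              // next stack (overflow) argument
       void *__gpr_top;                      // top of the GPR save area
       void *__fpr_top;                      // top of the FPR save area
       unsigned char __gpr_offset;           // bytes left in the GPR save area
       unsigned char __fpr_offset;           // bytes left in the FPR save area
       unsigned char __reserved[sizeof (void *) - 2];  // explicit padding
     };

   mips_va_start and mips_gimplify_va_arg_expr below read and update
   these fields.  */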
5115 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5118 mips_va_start (tree valist
, rtx nextarg
)
5120 if (EABI_FLOAT_VARARGS_P
)
5122 const CUMULATIVE_ARGS
*cum
;
5123 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
5124 tree ovfl
, gtop
, ftop
, goff
, foff
;
5126 int gpr_save_area_size
;
5127 int fpr_save_area_size
;
5130 cum
= &crtl
->args
.info
;
5132 = (MAX_ARGS_IN_REGISTERS
- cum
->num_gprs
) * UNITS_PER_WORD
;
5134 = (MAX_ARGS_IN_REGISTERS
- cum
->num_fprs
) * UNITS_PER_FPREG
;
5136 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
5137 f_gtop
= TREE_CHAIN (f_ovfl
);
5138 f_ftop
= TREE_CHAIN (f_gtop
);
5139 f_goff
= TREE_CHAIN (f_ftop
);
5140 f_foff
= TREE_CHAIN (f_goff
);
5142 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
5144 gtop
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
), valist
, f_gtop
,
5146 ftop
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
), valist
, f_ftop
,
5148 goff
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
), valist
, f_goff
,
5150 foff
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
), valist
, f_foff
,
5153 /* Emit code to initialize OVFL, which points to the next varargs
5154 stack argument. CUM->STACK_WORDS gives the number of stack
5155 words used by named arguments. */
5156 t
= make_tree (TREE_TYPE (ovfl
), virtual_incoming_args_rtx
);
5157 if (cum
->stack_words
> 0)
5158 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ovfl
), t
,
5159 size_int (cum
->stack_words
* UNITS_PER_WORD
));
5160 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ovfl
), ovfl
, t
);
5161 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5163 /* Emit code to initialize GTOP, the top of the GPR save area. */
5164 t
= make_tree (TREE_TYPE (gtop
), virtual_incoming_args_rtx
);
5165 t
= build2 (MODIFY_EXPR
, TREE_TYPE (gtop
), gtop
, t
);
5166 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5168 /* Emit code to initialize FTOP, the top of the FPR save area.
5169 This address is gpr_save_area_bytes below GTOP, rounded
5170 down to the next fp-aligned boundary. */
5171 t
= make_tree (TREE_TYPE (ftop
), virtual_incoming_args_rtx
);
5172 fpr_offset
= gpr_save_area_size
+ UNITS_PER_FPVALUE
- 1;
5173 fpr_offset
&= -UNITS_PER_FPVALUE
;
5175 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ftop
), t
,
5176 size_int (-fpr_offset
));
5177 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ftop
), ftop
, t
);
5178 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5180 /* Emit code to initialize GOFF, the offset from GTOP of the
5181 next GPR argument. */
5182 t
= build2 (MODIFY_EXPR
, TREE_TYPE (goff
), goff
,
5183 build_int_cst (TREE_TYPE (goff
), gpr_save_area_size
));
5184 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5186 /* Likewise emit code to initialize FOFF, the offset from FTOP
5187 of the next FPR argument. */
5188 t
= build2 (MODIFY_EXPR
, TREE_TYPE (foff
), foff
,
5189 build_int_cst (TREE_TYPE (foff
), fpr_save_area_size
));
5190 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5194 nextarg
= plus_constant (nextarg
, -cfun
->machine
->varargs_size
);
5195 std_expand_builtin_va_start (valist
, nextarg
);
5199 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5202 mips_gimplify_va_arg_expr (tree valist
, tree type
, gimple_seq
*pre_p
,
5208 indirect_p
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, 0);
5210 type
= build_pointer_type (type
);
5212 if (!EABI_FLOAT_VARARGS_P
)
5213 addr
= std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
5216 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
5217 tree ovfl
, top
, off
, align
;
5218 HOST_WIDE_INT size
, rsize
, osize
;
5221 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
5222 f_gtop
= TREE_CHAIN (f_ovfl
);
5223 f_ftop
= TREE_CHAIN (f_gtop
);
5224 f_goff
= TREE_CHAIN (f_ftop
);
5225 f_foff
= TREE_CHAIN (f_goff
);
5229 TOP be the top of the GPR or FPR save area;
5230 OFF be the offset from TOP of the next register;
5231 ADDR_RTX be the address of the argument;
5232 SIZE be the number of bytes in the argument type;
5233 RSIZE be the number of bytes used to store the argument
5234 when it's in the register save area; and
5235 OSIZE be the number of bytes used to store it when it's
5236 in the stack overflow area.
5238 The code we want is:
5240 1: off &= -rsize; // round down
5243 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5248 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5249 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5253 [1] and [9] can sometimes be optimized away. */
5255 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
5257 size
= int_size_in_bytes (type
);
5259 if (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
5260 && GET_MODE_SIZE (TYPE_MODE (type
)) <= UNITS_PER_FPVALUE
)
5262 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
), valist
, f_ftop
,
5264 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
), valist
, f_foff
,
5267 /* When va_start saves FPR arguments to the stack, each slot
5268 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5269 argument's precision. */
5270 rsize
= UNITS_PER_HWFPVALUE
;
5272 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5273 (= PARM_BOUNDARY bits). This can be different from RSIZE
5276 (1) On 32-bit targets when TYPE is a structure such as:
5278 struct s { float f; };
5280 Such structures are passed in paired FPRs, so RSIZE
5281 will be 8 bytes. However, the structure only takes
5282 up 4 bytes of memory, so OSIZE will only be 4.
5284 (2) In combinations such as -mgp64 -msingle-float
5285 -fshort-double. Doubles passed in registers will then take
5286 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5287 stack take up UNITS_PER_WORD bytes. */
5288 osize
= MAX (GET_MODE_SIZE (TYPE_MODE (type
)), UNITS_PER_WORD
);
5292 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
), valist
, f_gtop
,
5294 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
), valist
, f_goff
,
5296 rsize
= (size
+ UNITS_PER_WORD
- 1) & -UNITS_PER_WORD
;
5297 if (rsize
> UNITS_PER_WORD
)
5299 /* [1] Emit code for: off &= -rsize. */
5300 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (off
), off
,
5301 build_int_cst (NULL_TREE
, -rsize
));
5302 gimplify_assign (off
, t
, pre_p
);
5307 /* [2] Emit code to branch if off == 0. */
5308 t
= build2 (NE_EXPR
, boolean_type_node
, off
,
5309 build_int_cst (TREE_TYPE (off
), 0));
5310 addr
= build3 (COND_EXPR
, ptr_type_node
, t
, NULL_TREE
, NULL_TREE
);
5312 /* [5] Emit code for: off -= rsize. We do this as a form of
5313 post-decrement not available to C. */
5314 t
= fold_convert (TREE_TYPE (off
), build_int_cst (NULL_TREE
, rsize
));
5315 t
= build2 (POSTDECREMENT_EXPR
, TREE_TYPE (off
), off
, t
);
5317 /* [4] Emit code for:
5318 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5319 t
= fold_convert (sizetype
, t
);
5320 t
= fold_build1 (NEGATE_EXPR
, sizetype
, t
);
5321 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (top
), top
, t
);
5322 if (BYTES_BIG_ENDIAN
&& rsize
> size
)
5324 u
= size_int (rsize
- size
);
5325 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (t
), t
, u
);
5327 COND_EXPR_THEN (addr
) = t
;
5329 if (osize
> UNITS_PER_WORD
)
5331 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5332 u
= size_int (osize
- 1);
5333 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ovfl
), ovfl
, u
);
5334 t
= fold_convert (sizetype
, t
);
5335 u
= size_int (-osize
);
5336 t
= build2 (BIT_AND_EXPR
, sizetype
, t
, u
);
5337 t
= fold_convert (TREE_TYPE (ovfl
), t
);
5338 align
= build2 (MODIFY_EXPR
, TREE_TYPE (ovfl
), ovfl
, t
);
5343 /* [10, 11] Emit code for:
5344 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5346 u
= fold_convert (TREE_TYPE (ovfl
), build_int_cst (NULL_TREE
, osize
));
5347 t
= build2 (POSTINCREMENT_EXPR
, TREE_TYPE (ovfl
), ovfl
, u
);
5348 if (BYTES_BIG_ENDIAN
&& osize
> size
)
5350 u
= size_int (osize
- size
);
5351 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (t
), t
, u
);
5354 /* String [9] and [10, 11] together. */
5356 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), align
, t
);
5357 COND_EXPR_ELSE (addr
) = t
;
5359 addr
= fold_convert (build_pointer_type (type
), addr
);
5360 addr
= build_va_arg_indirect_ref (addr
);
5364 addr
= build_va_arg_indirect_ref (addr
);
/* Start a definition of function NAME.  MIPS16_P indicates whether the
   function contains MIPS16 code.  */

static void
mips_start_function_definition (const char *name, bool mips16_p)
{
  if (mips16_p)
    fprintf (asm_out_file, "\t.set\tmips16\n");
  else
    fprintf (asm_out_file, "\t.set\tnomips16\n");

  if (!flag_inhibit_size_directive)
    {
      fputs ("\t.ent\t", asm_out_file);
      assemble_name (asm_out_file, name);
      fputs ("\n", asm_out_file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");

  /* Start the definition proper.  */
  assemble_name (asm_out_file, name);
  fputs (":\n", asm_out_file);
}
/* End a function definition started by mips_start_function_definition.  */

static void
mips_end_function_definition (const char *name)
{
  if (!flag_inhibit_size_directive)
    {
      fputs ("\t.end\t", asm_out_file);
      assemble_name (asm_out_file, name);
      fputs ("\n", asm_out_file);
    }
}

/* Return true if calls to X can use R_MIPS_CALL* relocations.  */

static bool
mips_ok_for_lazy_binding_p (rtx x)
{
  return (TARGET_USE_GOT
          && GET_CODE (x) == SYMBOL_REF
          && !SYMBOL_REF_BIND_NOW_P (x)
          && !mips_symbol_binds_local_p (x));
}
5418 /* Load function address ADDR into register DEST. TYPE is as for
5419 mips_expand_call. Return true if we used an explicit lazy-binding
5423 mips_load_call_address (enum mips_call_type type
, rtx dest
, rtx addr
)
5425 /* If we're generating PIC, and this call is to a global function,
5426 try to allow its address to be resolved lazily. This isn't
5427 possible for sibcalls when $gp is call-saved because the value
5428 of $gp on entry to the stub would be our caller's gp, not ours. */
5429 if (TARGET_EXPLICIT_RELOCS
5430 && !(type
== MIPS_CALL_SIBCALL
&& TARGET_CALL_SAVED_GP
)
5431 && mips_ok_for_lazy_binding_p (addr
))
5433 addr
= mips_got_load (dest
, addr
, SYMBOL_GOTOFF_CALL
);
5434 emit_insn (gen_rtx_SET (VOIDmode
, dest
, addr
));
5439 mips_emit_move (dest
, addr
);
5444 /* Each locally-defined hard-float MIPS16 function has a local symbol
5445 associated with it. This hash table maps the function symbol (FUNC)
5446 to the local symbol (LOCAL). */
5447 struct mips16_local_alias
GTY(()) {
5451 static GTY ((param_is (struct mips16_local_alias
))) htab_t mips16_local_aliases
;
5453 /* Hash table callbacks for mips16_local_aliases. */
5456 mips16_local_aliases_hash (const void *entry
)
5458 const struct mips16_local_alias
*alias
;
5460 alias
= (const struct mips16_local_alias
*) entry
;
5461 return htab_hash_string (XSTR (alias
->func
, 0));
5465 mips16_local_aliases_eq (const void *entry1
, const void *entry2
)
5467 const struct mips16_local_alias
*alias1
, *alias2
;
5469 alias1
= (const struct mips16_local_alias
*) entry1
;
5470 alias2
= (const struct mips16_local_alias
*) entry2
;
5471 return rtx_equal_p (alias1
->func
, alias2
->func
);
5474 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5475 Return a local alias for it, creating a new one if necessary. */
5478 mips16_local_alias (rtx func
)
5480 struct mips16_local_alias
*alias
, tmp_alias
;
5483 /* Create the hash table if this is the first call. */
5484 if (mips16_local_aliases
== NULL
)
5485 mips16_local_aliases
= htab_create_ggc (37, mips16_local_aliases_hash
,
5486 mips16_local_aliases_eq
, NULL
);
5488 /* Look up the function symbol, creating a new entry if need be. */
5489 tmp_alias
.func
= func
;
5490 slot
= htab_find_slot (mips16_local_aliases
, &tmp_alias
, INSERT
);
5491 gcc_assert (slot
!= NULL
);
5493 alias
= (struct mips16_local_alias
*) *slot
;
5496 const char *func_name
, *local_name
;
5499 /* Create a new SYMBOL_REF for the local symbol. The choice of
5500 __fn_local_* is based on the __fn_stub_* names that we've
5501 traditionally used for the non-MIPS16 stub. */
5502 func_name
= targetm
.strip_name_encoding (XSTR (func
, 0));
5503 local_name
= ACONCAT (("__fn_local_", func_name
, NULL
));
5504 local
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (local_name
));
5505 SYMBOL_REF_FLAGS (local
) = SYMBOL_REF_FLAGS (func
) | SYMBOL_FLAG_LOCAL
;
5507 /* Create a new structure to represent the mapping. */
5508 alias
= GGC_NEW (struct mips16_local_alias
);
5510 alias
->local
= local
;
5513 return alias
->local
;
5516 /* A chained list of functions for which mips16_build_call_stub has already
5517 generated a stub. NAME is the name of the function and FP_RET_P is true
5518 if the function returns a value in floating-point registers. */
5519 struct mips16_stub
{
5520 struct mips16_stub
*next
;
5524 static struct mips16_stub
*mips16_stubs
;
5526 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
5529 mips16_stub_function (const char *name
)
5533 x
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
5534 SYMBOL_REF_FLAGS (x
) |= (SYMBOL_FLAG_EXTERNAL
| SYMBOL_FLAG_FUNCTION
);
5538 /* Return the two-character string that identifies floating-point
5539 return mode MODE in the name of a MIPS16 function stub. */
5542 mips16_call_stub_mode_suffix (enum machine_mode mode
)
5546 else if (mode
== DFmode
)
5548 else if (mode
== SCmode
)
5550 else if (mode
== DCmode
)
5552 else if (mode
== V2SFmode
)
5558 /* Write instructions to move a 32-bit value between general register
5559 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5560 from GPREG to FPREG and 'f' to move in the opposite direction. */
5563 mips_output_32bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
5565 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
5566 reg_names
[gpreg
], reg_names
[fpreg
]);
5569 /* Likewise for 64-bit values. */
5572 mips_output_64bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
5575 fprintf (asm_out_file
, "\tdm%cc1\t%s,%s\n", direction
,
5576 reg_names
[gpreg
], reg_names
[fpreg
]);
5577 else if (TARGET_FLOAT64
)
5579 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
5580 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
5581 fprintf (asm_out_file
, "\tm%chc1\t%s,%s\n", direction
,
5582 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
]);
5586 /* Move the least-significant word. */
5587 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
5588 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
5589 /* ...then the most significant word. */
5590 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
5591 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
+ 1]);
5595 /* Write out code to move floating-point arguments into or out of
5596 general registers. FP_CODE is the code describing which arguments
5597 are present (see the comment above the definition of CUMULATIVE_ARGS
5598 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5601 mips_output_args_xfer (int fp_code
, char direction
)
5603 unsigned int gparg
, fparg
, f
;
5604 CUMULATIVE_ARGS cum
;
5606 /* This code only works for o32 and o64. */
5607 gcc_assert (TARGET_OLDABI
);
5609 mips_init_cumulative_args (&cum
, NULL
);
5611 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
5613 enum machine_mode mode
;
5614 struct mips_arg_info info
;
5618 else if ((f
& 3) == 2)
5623 mips_get_arg_info (&info
, &cum
, mode
, NULL
, true);
5624 gparg
= mips_arg_regno (&info
, false);
5625 fparg
= mips_arg_regno (&info
, true);
5628 mips_output_32bit_xfer (direction
, gparg
, fparg
);
5630 mips_output_64bit_xfer (direction
, gparg
, fparg
);
5632 mips_function_arg_advance (&cum
, mode
, NULL
, true);
5636 /* Write a MIPS16 stub for the current function. This stub is used
5637 for functions which take arguments in the floating-point registers.
5638 It is normal-mode code that moves the floating-point arguments
5639 into the general registers and then jumps to the MIPS16 code. */
5642 mips16_build_function_stub (void)
5644 const char *fnname
, *alias_name
, *separator
;
5645 char *secname
, *stubname
;
5650 /* Create the name of the stub, and its unique section. */
5651 symbol
= XEXP (DECL_RTL (current_function_decl
), 0);
5652 alias
= mips16_local_alias (symbol
);
5654 fnname
= targetm
.strip_name_encoding (XSTR (symbol
, 0));
5655 alias_name
= targetm
.strip_name_encoding (XSTR (alias
, 0));
5656 secname
= ACONCAT ((".mips16.fn.", fnname
, NULL
));
5657 stubname
= ACONCAT (("__fn_stub_", fnname
, NULL
));
5659 /* Build a decl for the stub. */
5660 stubdecl
= build_decl (FUNCTION_DECL
, get_identifier (stubname
),
5661 build_function_type (void_type_node
, NULL_TREE
));
5662 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
5663 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
, void_type_node
);
5665 /* Output a comment. */
5666 fprintf (asm_out_file
, "\t# Stub function for %s (",
5667 current_function_name ());
5669 for (f
= (unsigned int) crtl
->args
.info
.fp_code
; f
!= 0; f
>>= 2)
5671 fprintf (asm_out_file
, "%s%s", separator
,
5672 (f
& 3) == 1 ? "float" : "double");
5675 fprintf (asm_out_file
, ")\n");
5677 /* Start the function definition. */
5678 assemble_start_function (stubdecl
, stubname
);
5679 mips_start_function_definition (stubname
, false);
5681 /* If generating pic2 code, either set up the global pointer or
5683 if (TARGET_ABICALLS_PIC2
)
5685 if (TARGET_ABSOLUTE_ABICALLS
)
5686 fprintf (asm_out_file
, "\t.option\tpic0\n");
5689 output_asm_insn ("%(.cpload\t%^%)", NULL
);
5690 /* Emit an R_MIPS_NONE relocation to tell the linker what the
5691 target function is. Use a local GOT access when loading the
5692 symbol, to cut down on the number of unnecessary GOT entries
5693 for stubs that aren't needed. */
5694 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol
);
5699 /* Load the address of the MIPS16 function into $25. Do this first so
5700 that targets with coprocessor interlocks can use an MFC1 to fill the
5702 output_asm_insn ("la\t%^,%0", &symbol
);
5704 /* Move the arguments from floating-point registers to general registers. */
5705 mips_output_args_xfer (crtl
->args
.info
.fp_code
, 'f');
5707 /* Jump to the MIPS16 function. */
5708 output_asm_insn ("jr\t%^", NULL
);
5710 if (TARGET_ABICALLS_PIC2
&& TARGET_ABSOLUTE_ABICALLS
)
5711 fprintf (asm_out_file
, "\t.option\tpic2\n");
5713 mips_end_function_definition (stubname
);
5715 /* If the linker needs to create a dynamic symbol for the target
5716 function, it will associate the symbol with the stub (which,
5717 unlike the target function, follows the proper calling conventions).
5718 It is therefore useful to have a local alias for the target function,
5719 so that it can still be identified as MIPS16 code. As an optimization,
5720 this symbol can also be used for indirect MIPS16 references from
5721 within this file. */
5722 ASM_OUTPUT_DEF (asm_out_file
, alias_name
, fnname
);
5724 switch_to_section (function_section (current_function_decl
));
5727 /* The current function is a MIPS16 function that returns a value in an FPR.
5728 Copy the return value from its soft-float to its hard-float location.
5729 libgcc2 has special non-MIPS16 helper functions for each case. */
5732 mips16_copy_fpr_return_value (void)
5734 rtx fn
, insn
, retval
;
5736 enum machine_mode return_mode
;
5739 return_type
= DECL_RESULT (current_function_decl
);
5740 return_mode
= DECL_MODE (return_type
);
5742 name
= ACONCAT (("__mips16_ret_",
5743 mips16_call_stub_mode_suffix (return_mode
),
5745 fn
= mips16_stub_function (name
);
5747 /* The function takes arguments in $2 (and possibly $3), so calls
5748 to it cannot be lazily bound. */
5749 SYMBOL_REF_FLAGS (fn
) |= SYMBOL_FLAG_BIND_NOW
;
5751 /* Model the call as something that takes the GPR return value as
5752 argument and returns an "updated" value. */
5753 retval
= gen_rtx_REG (return_mode
, GP_RETURN
);
5754 insn
= mips_expand_call (MIPS_CALL_EPILOGUE
, retval
, fn
,
5755 const0_rtx
, NULL_RTX
, false);
5756 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), retval
);
5759 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
5760 RETVAL is the location of the return value, or null if this is
5761 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5762 arguments and FP_CODE is the code built by mips_function_arg;
5763 see the comment above CUMULATIVE_ARGS for details.
5765 There are three alternatives:
5767 - If a stub was needed, emit the call and return the call insn itself.
5769 - If we can avoid using a stub by redirecting the call, set *FN_PTR
5770 to the new target and return null.
5772 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
5775 A stub is needed for calls to functions that, in normal mode,
5776 receive arguments in FPRs or return values in FPRs. The stub
5777 copies the arguments from their soft-float positions to their
5778 hard-float positions, calls the real function, then copies the
5779 return value from its hard-float position to its soft-float
5782 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
5783 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
5784 automatically redirects the JAL to the stub, otherwise the JAL
5785 continues to call FN directly. */
5788 mips16_build_call_stub (rtx retval
, rtx
*fn_ptr
, rtx args_size
, int fp_code
)
5792 struct mips16_stub
*l
;
5795 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5796 we were invoked with the -msoft-float option. */
5797 if (!TARGET_MIPS16
|| TARGET_SOFT_FLOAT_ABI
)
5800 /* Figure out whether the value might come back in a floating-point
5802 fp_ret_p
= retval
&& mips_return_mode_in_fpr_p (GET_MODE (retval
));
5804 /* We don't need to do anything if there were no floating-point
5805 arguments and the value will not be returned in a floating-point
5807 if (fp_code
== 0 && !fp_ret_p
)
5810 /* We don't need to do anything if this is a call to a special
5811 MIPS16 support function. */
5813 if (mips16_stub_function_p (fn
))
5816 /* This code will only work for o32 and o64 abis. The other ABI's
5817 require more sophisticated support. */
5818 gcc_assert (TARGET_OLDABI
);
5820 /* If we're calling via a function pointer, use one of the magic
5821 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5822 Each stub expects the function address to arrive in register $2. */
5823 if (GET_CODE (fn
) != SYMBOL_REF
5824 || !call_insn_operand (fn
, VOIDmode
))
5827 rtx stub_fn
, insn
, addr
;
5830 /* If this is a locally-defined and locally-binding function,
5831 avoid the stub by calling the local alias directly. */
5832 if (mips16_local_function_p (fn
))
5834 *fn_ptr
= mips16_local_alias (fn
);
5838 /* Create a SYMBOL_REF for the libgcc.a function. */
5840 sprintf (buf
, "__mips16_call_stub_%s_%d",
5841 mips16_call_stub_mode_suffix (GET_MODE (retval
)),
5844 sprintf (buf
, "__mips16_call_stub_%d", fp_code
);
5845 stub_fn
= mips16_stub_function (buf
);
5847 /* The function uses $2 as an argument, so calls to it
5848 cannot be lazily bound. */
5849 SYMBOL_REF_FLAGS (stub_fn
) |= SYMBOL_FLAG_BIND_NOW
;
5851 /* Load the target function into $2. */
5852 addr
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 2);
5853 lazy_p
= mips_load_call_address (MIPS_CALL_NORMAL
, addr
, fn
);
5855 /* Emit the call. */
5856 insn
= mips_expand_call (MIPS_CALL_NORMAL
, retval
, stub_fn
,
5857 args_size
, NULL_RTX
, lazy_p
);
5859 /* Tell GCC that this call does indeed use the value of $2. */
5860 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), addr
);
5862 /* If we are handling a floating-point return value, we need to
5863 save $18 in the function prologue. Putting a note on the
5864 call will mean that df_regs_ever_live_p ($18) will be true if the
5865 call is not eliminated, and we can check that in the prologue
5868 CALL_INSN_FUNCTION_USAGE (insn
) =
5869 gen_rtx_EXPR_LIST (VOIDmode
,
5870 gen_rtx_CLOBBER (VOIDmode
,
5871 gen_rtx_REG (word_mode
, 18)),
5872 CALL_INSN_FUNCTION_USAGE (insn
));
5877 /* We know the function we are going to call. If we have already
5878 built a stub, we don't need to do anything further. */
5879 fnname
= targetm
.strip_name_encoding (XSTR (fn
, 0));
5880 for (l
= mips16_stubs
; l
!= NULL
; l
= l
->next
)
5881 if (strcmp (l
->name
, fnname
) == 0)
5886 const char *separator
;
5887 char *secname
, *stubname
;
5888 tree stubid
, stubdecl
;
5891 /* If the function does not return in FPRs, the special stub
5895 If the function does return in FPRs, the stub section is named
5896 .mips16.call.fp.FNNAME
5898 Build a decl for the stub. */
5899 secname
= ACONCAT ((".mips16.call.", fp_ret_p
? "fp." : "",
5901 stubname
= ACONCAT (("__call_stub_", fp_ret_p
? "fp_" : "",
5903 stubid
= get_identifier (stubname
);
5904 stubdecl
= build_decl (FUNCTION_DECL
, stubid
,
5905 build_function_type (void_type_node
, NULL_TREE
));
5906 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
5907 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
,
5910 /* Output a comment. */
5911 fprintf (asm_out_file
, "\t# Stub function to call %s%s (",
5913 ? (GET_MODE (retval
) == SFmode
? "float " : "double ")
5917 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
5919 fprintf (asm_out_file
, "%s%s", separator
,
5920 (f
& 3) == 1 ? "float" : "double");
5923 fprintf (asm_out_file
, ")\n");
5925 /* Start the function definition. */
5926 assemble_start_function (stubdecl
, stubname
);
5927 mips_start_function_definition (stubname
, false);
5931 /* Load the address of the MIPS16 function into $25. Do this
5932 first so that targets with coprocessor interlocks can use
5933 an MFC1 to fill the delay slot. */
5934 if (TARGET_EXPLICIT_RELOCS
)
5936 output_asm_insn ("lui\t%^,%%hi(%0)", &fn
);
5937 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn
);
5940 output_asm_insn ("la\t%^,%0", &fn
);
5943 /* Move the arguments from general registers to floating-point
5945 mips_output_args_xfer (fp_code
, 't');
5949 /* Jump to the previously-loaded address. */
5950 output_asm_insn ("jr\t%^", NULL
);
5954 /* Save the return address in $18 and call the non-MIPS16 function.
5955 The stub's caller knows that $18 might be clobbered, even though
5956 $18 is usually a call-saved register. */
5957 fprintf (asm_out_file
, "\tmove\t%s,%s\n",
5958 reg_names
[GP_REG_FIRST
+ 18], reg_names
[GP_REG_FIRST
+ 31]);
5959 output_asm_insn (MIPS_CALL ("jal", &fn
, 0), &fn
);
5961 /* Move the result from floating-point registers to
5962 general registers. */
5963 switch (GET_MODE (retval
))
5966 mips_output_32bit_xfer ('f', GP_RETURN
+ 1,
5967 FP_REG_FIRST
+ MAX_FPRS_PER_FMT
);
5970 mips_output_32bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
5971 if (GET_MODE (retval
) == SCmode
&& TARGET_64BIT
)
5973 /* On 64-bit targets, complex floats are returned in
5974 a single GPR, such that "sd" on a suitably-aligned
5975 target would store the value correctly. */
5976 fprintf (asm_out_file
, "\tdsll\t%s,%s,32\n",
5977 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
],
5978 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
]);
5979 fprintf (asm_out_file
, "\tor\t%s,%s,%s\n",
5980 reg_names
[GP_RETURN
],
5981 reg_names
[GP_RETURN
],
5982 reg_names
[GP_RETURN
+ 1]);
5987 mips_output_64bit_xfer ('f', GP_RETURN
+ (8 / UNITS_PER_WORD
),
5988 FP_REG_FIRST
+ MAX_FPRS_PER_FMT
);
5992 mips_output_64bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
5998 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 18]);
6001 #ifdef ASM_DECLARE_FUNCTION_SIZE
6002 ASM_DECLARE_FUNCTION_SIZE (asm_out_file
, stubname
, stubdecl
);
6005 mips_end_function_definition (stubname
);
6007 /* Record this stub. */
6008 l
= XNEW (struct mips16_stub
);
6009 l
->name
= xstrdup (fnname
);
6010 l
->fp_ret_p
= fp_ret_p
;
6011 l
->next
= mips16_stubs
;
6015 /* If we expect a floating-point return value, but we've built a
6016 stub which does not expect one, then we're in trouble. We can't
6017 use the existing stub, because it won't handle the floating-point
6018 value. We can't build a new stub, because the linker won't know
6019 which stub to use for the various calls in this object file.
6020 Fortunately, this case is illegal, since it means that a function
6021 was declared in two different ways in a single compilation. */
6022 if (fp_ret_p
&& !l
->fp_ret_p
)
6023 error ("cannot handle inconsistent calls to %qs", fnname
);
6025 if (retval
== NULL_RTX
)
6026 insn
= gen_call_internal_direct (fn
, args_size
);
6028 insn
= gen_call_value_internal_direct (retval
, fn
, args_size
);
6029 insn
= mips_emit_call_insn (insn
, fn
, fn
, false);
6031 /* If we are calling a stub which handles a floating-point return
6032 value, we need to arrange to save $18 in the prologue. We do this
6033 by marking the function call as using the register. The prologue
6034 will later see that it is used, and emit code to save it. */
6036 CALL_INSN_FUNCTION_USAGE (insn
) =
6037 gen_rtx_EXPR_LIST (VOIDmode
,
6038 gen_rtx_CLOBBER (VOIDmode
,
6039 gen_rtx_REG (word_mode
, 18)),
6040 CALL_INSN_FUNCTION_USAGE (insn
));
6045 /* Expand a call of type TYPE. RESULT is where the result will go (null
6046 for "call"s and "sibcall"s), ADDR is the address of the function,
6047 ARGS_SIZE is the size of the arguments and AUX is the value passed
6048 to us by mips_function_arg. LAZY_P is true if this call already
6049 involves a lazily-bound function address (such as when calling
6050 functions through a MIPS16 hard-float stub).
6052 Return the call itself. */
6055 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6056 rtx args_size, rtx aux, bool lazy_p)
6058 rtx orig_addr, pattern, insn;
6061 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6062 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6065 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6070 if (!call_insn_operand (addr, VOIDmode))
6072 if (type == MIPS_CALL_EPILOGUE)
6073 addr = MIPS_EPILOGUE_TEMP (Pmode);
6075 addr = gen_reg_rtx (Pmode);
6076 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6081 rtx (*fn) (rtx, rtx);
6083 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6084 fn = gen_call_split;
6085 else if (type == MIPS_CALL_SIBCALL)
6086 fn = gen_sibcall_internal;
6088 fn = gen_call_internal;
6090 pattern = fn (addr, args_size);
6092 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6094 /* Handle return values created by mips_return_fpr_pair. */
6095 rtx (*fn) (rtx, rtx, rtx, rtx);
6098 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6099 fn = gen_call_value_multiple_split;
6100 else if (type == MIPS_CALL_SIBCALL)
6101 fn = gen_sibcall_value_multiple_internal;
6103 fn = gen_call_value_multiple_internal;
6105 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6106 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6107 pattern = fn (reg1, addr, args_size, reg2);
6111 rtx (*fn) (rtx, rtx, rtx);
6113 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6114 fn = gen_call_value_split;
6115 else if (type == MIPS_CALL_SIBCALL)
6116 fn = gen_sibcall_value_internal;
6118 fn = gen_call_value_internal;
6120 /* Handle return values created by mips_return_fpr_single. */
6121 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6122 result = XEXP (XVECEXP (result, 0, 0), 0);
6123 pattern = fn (result, addr, args_size);
6126 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6129 /* Split call instruction INSN into a $gp-clobbering call and
6130 (where necessary) an instruction to restore $gp from its save slot.
6131 CALL_PATTERN is the pattern of the new call. */
6134 mips_split_call (rtx insn, rtx call_pattern)
6138 new_insn = emit_call_insn (call_pattern);
6139 CALL_INSN_FUNCTION_USAGE (new_insn)
6140 = copy_rtx (CALL_INSN_FUNCTION_USAGE (insn));
6141 if (!find_reg_note (insn, REG_NORETURN, 0))
6142 /* Pick a temporary register that is suitable for both MIPS16 and
6143 non-MIPS16 code. $4 and $5 are used for returning complex double
6144 values in soft-float code, so $6 is the first suitable candidate. */
6145 mips_restore_gp (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6148 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
6151 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6153 if (!TARGET_SIBCALLS)
6156 /* We can't do a sibcall if the called function is a MIPS16 function
6157 because there is no direct "jx" instruction equivalent to "jalx" to
6158 switch the ISA mode. We only care about cases where the sibling
6159 and normal calls would both be direct. */
6160 if (mips_use_mips16_mode_p (decl)
6161 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6164 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6165 functions could be MIPS16 ones unless an attribute explicitly tells
6167 if (TARGET_INTERLINK_MIPS16
6169 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6170 && !mips_nomips16_decl_p (decl)
6171 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6178 /* Emit code to move general operand SRC into condition-code
6179 register DEST given that SCRATCH is a scratch TFmode FPR.
6186 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
6189 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
6193 /* Change the source to SFmode. */
6195 src = adjust_address (src, SFmode, 0);
6196 else if (REG_P (src) || GET_CODE (src) == SUBREG)
6197 src = gen_rtx_REG (SFmode, true_regnum (src));
6199 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
6200 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
6202 mips_emit_move (copy_rtx (fp1), src);
6203 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
6204 emit_insn (gen_slt_sf (dest, fp2, fp1));
6207 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6208 Assume that the areas do not overlap. */
6211 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
6213 HOST_WIDE_INT offset, delta;
6214 unsigned HOST_WIDE_INT bits;
6216 enum machine_mode mode;
6219 /* Work out how many bits to move at a time. If both operands have
6220 half-word alignment, it is usually better to move in half words.
6221 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
6222 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
6223 Otherwise move word-sized chunks. */
6224 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
6225 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
6226 bits = BITS_PER_WORD / 2;
6228 bits = BITS_PER_WORD;
6230 mode = mode_for_size (bits, MODE_INT, 0);
6231 delta = bits / BITS_PER_UNIT;
6233 /* Allocate a buffer for the temporary registers. */
6234 regs = XALLOCAVEC (rtx, length / delta);
6236 /* Load as many BITS-sized chunks as possible. Use a normal load if
6237 the source has enough alignment, otherwise use left/right pairs. */
6238 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6240 regs[i] = gen_reg_rtx (mode);
6241 if (MEM_ALIGN (src) >= bits)
6242 mips_emit_move (regs[i], adjust_address (src, mode, offset));
6245 rtx part = adjust_address (src, BLKmode, offset);
6246 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
6251 /* Copy the chunks to the destination. */
6252 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6253 if (MEM_ALIGN (dest) >= bits)
6254 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
6257 rtx part = adjust_address (dest, BLKmode, offset);
6258 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
6262 /* Mop up any left-over bytes. */
6263 if (offset < length)
6265 src = adjust_address (src, BLKmode, offset);
6266 dest = adjust_address (dest, BLKmode, offset);
6267 move_by_pieces (dest, src, length - offset,
6268 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
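/* Worked example (illustrative, not from the original source): copying
   LENGTH == 11 bytes between word-aligned operands on a 32-bit target
   gives bits == 32 and delta == 4, so the loops above emit lw/sw pairs
   for offsets 0 and 4, stop with offset == 8, and leave the remaining
   3 bytes to move_by_pieces.  */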
6272 /* Helper function for doing a loop-based block operation on memory
6273 reference MEM. Each iteration of the loop will operate on LENGTH
6276 Create a new base register for use within the loop and point it to
6277 the start of MEM. Create a new memory reference that uses this
6278 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
6281 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
6282 rtx *loop_reg, rtx *loop_mem)
6284 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
6286 /* Although the new mem does not refer to a known location,
6287 it does keep up to LENGTH bytes of alignment. */
6288 *loop_mem = change_address (mem, BLKmode, *loop_reg);
6289 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
6292 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6293 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
6294 the memory regions do not overlap. */
6297 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
6298 HOST_WIDE_INT bytes_per_iter)
6300 rtx label, src_reg, dest_reg, final_src;
6301 HOST_WIDE_INT leftover;
6303 leftover = length % bytes_per_iter;
6306 /* Create registers and memory references for use within the loop. */
6307 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
6308 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
6310 /* Calculate the value that SRC_REG should have after the last iteration
6312 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
6315 /* Emit the start of the loop. */
6316 label = gen_label_rtx ();
6319 /* Emit the loop body. */
6320 mips_block_move_straight (dest, src, bytes_per_iter);
6322 /* Move on to the next block. */
6323 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
6324 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
6326 /* Emit the loop condition. */
6327 if (Pmode == DImode)
6328 emit_insn (gen_cmpdi (src_reg, final_src));
6330 emit_insn (gen_cmpsi (src_reg, final_src));
6331 emit_jump_insn (gen_bne (label));
6333 /* Mop up any left-over bytes. */
6335 mips_block_move_straight (dest, src, leftover);
6338 /* Expand a movmemsi instruction, which copies LENGTH bytes from
6339 memory reference SRC to memory reference DEST. */
6342 mips_expand_block_move (rtx dest, rtx src, rtx length)
6344 if (GET_CODE (length) == CONST_INT)
6346 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6348 mips_block_move_straight (dest, src, INTVAL (length));
6353 mips_block_move_loop (dest, src, INTVAL (length),
6354 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
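/* Summary of the strategy above (added comment, not part of the original
   source): a constant LENGTH up to MIPS_MAX_MOVE_BYTES_STRAIGHT is fully
   unrolled by mips_block_move_straight; a larger constant LENGTH is copied
   MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER bytes per loop iteration; a
   non-constant LENGTH makes the expander decline, leaving the copy to the
   generic fallback (typically a call to memcpy).  */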
6361 /* Expand a loop of synci insns for the address range [BEGIN, END). */
6364 mips_expand_synci_loop (rtx begin, rtx end)
6366 rtx inc, label, cmp, cmp_result;
6368 /* Load INC with the cache line size (rdhwr INC,$1). */
6369 inc = gen_reg_rtx (SImode);
6370 emit_insn (gen_rdhwr (inc, const1_rtx));
6372 /* Loop back to here. */
6373 label = gen_label_rtx ();
6376 emit_insn (gen_synci (begin));
6378 cmp = mips_force_binary (Pmode, GTU, begin, end);
6380 mips_emit_binary (PLUS, begin, begin, inc);
6382 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6383 emit_jump_insn (gen_condjump (cmp_result, label));
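/* The generated loop is roughly equivalent to the following assembly
   (a sketch for illustration only; register names are arbitrary):
        rdhwr   $t0,$1          # $t0 = SYNCI step (cache line size)
   1:   synci   0($a0)          # write back/invalidate the line at BEGIN
        sltu    $t1,$a1,$a0     # $t1 = (BEGIN > END)
        addu    $a0,$a0,$t0     # BEGIN += line size
        beqz    $t1,1b          # loop while BEGIN was still <= END  */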
6386 /* Expand a QI or HI mode atomic memory operation.
6388 GENERATOR contains a pointer to the gen_* function that generates
6389 the SI mode underlying atomic operation using masks that we
6392 RESULT is the return register for the operation. Its value is NULL
6395 MEM is the location of the atomic access.
6397 OLDVAL is the first operand for the operation.
6399 NEWVAL is the optional second operand for the operation. Its value
6400 is NULL if unused. */
6403 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6404 rtx result, rtx mem, rtx oldval, rtx newval)
6406 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6407 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6409 enum machine_mode mode;
6411 mode = GET_MODE (mem);
6413 /* Compute the address of the containing SImode value. */
6414 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6415 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6416 force_reg (Pmode, GEN_INT (-4)));
6418 /* Create a memory reference for it. */
6419 memsi = gen_rtx_MEM (SImode, memsi_addr);
6420 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6421 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6423 /* Work out the byte offset of the QImode or HImode value,
6424 counting from the least significant byte. */
6425 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6426 if (TARGET_BIG_ENDIAN)
6427 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6429 /* Multiply by eight to convert the shift value from bytes to bits. */
6430 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6432 /* Make the final shift an SImode value, so that it can be used in
6433 SImode operations. */
6434 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6436 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6437 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6438 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6439 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6441 /* Compute the equivalent exclusive mask. */
6442 inverted_mask = gen_reg_rtx (SImode);
6443 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6444 gen_rtx_NOT (SImode, mask)));
6446 /* Shift the old value into place. */
6447 if (oldval != const0_rtx)
6449 oldval = convert_modes (SImode, mode, oldval, true);
6450 oldval = force_reg (SImode, oldval);
6451 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6454 /* Do the same for the new value. */
6455 if (newval && newval != const0_rtx)
6457 newval = convert_modes (SImode, mode, newval, true);
6458 newval = force_reg (SImode, newval);
6459 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6462 /* Do the SImode atomic access. */
6464 res = gen_reg_rtx (SImode);
6466 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6468 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6470 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6476 /* Shift and convert the result. */
6477 mips_emit_binary (AND, res, res, mask);
6478 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6479 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
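/* Worked example (illustrative, not from the original source): for a
   little-endian HImode access at an address ending in ...2, the word base
   is orig_addr & -4, the bit shift is (orig_addr & 3) * 8 == 16, mask is
   0xffff << 16 and inverted_mask is 0x0000ffff.  OLDVAL/NEWVAL are shifted
   left by 16 so that the SImode operation only touches the upper halfword,
   and the result is masked, shifted right by 16 and truncated back to
   HImode.  */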
6483 /* Return true if it is possible to use left/right accesses for a
6484 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6485 returning true, update *OP, *LEFT and *RIGHT as follows:
6487 *OP is a BLKmode reference to the whole field.
6489 *LEFT is a QImode reference to the first byte if big endian or
6490 the last byte if little endian. This address can be used in the
6491 left-side instructions (LWL, SWL, LDL, SDL).
6493 *RIGHT is a QImode reference to the opposite end of the field and
6494 can be used in the patterning right-side instruction. */
6497 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6498 rtx *left, rtx *right)
6502 /* Check that the operand really is a MEM. Not all the extv and
6503 extzv predicates are checked. */
6507 /* Check that the size is valid. */
6508 if (width != 32 && (!TARGET_64BIT || width != 64))
6511 /* We can only access byte-aligned values. Since we are always passed
6512 a reference to the first byte of the field, it is not necessary to
6513 do anything with BITPOS after this check. */
6514 if (bitpos % BITS_PER_UNIT != 0)
6517 /* Reject aligned bitfields: we want to use a normal load or store
6518 instead of a left/right pair. */
6519 if (MEM_ALIGN (*op) >= width)
6522 /* Adjust *OP to refer to the whole field. This also has the effect
6523 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6524 *op = adjust_address (*op, BLKmode, 0);
6525 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6527 /* Get references to both ends of the field. We deliberately don't
6528 use the original QImode *OP for FIRST since the new BLKmode one
6529 might have a simpler address. */
6530 first = adjust_address (*op, QImode, 0);
6531 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6533 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6534 correspond to the MSB and RIGHT to the LSB. */
6535 if (TARGET_BIG_ENDIAN)
6536 *left = first, *right = last;
6538 *left = last, *right = first;
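/* Example (illustrative, not from the original source): for a 32-bit field
   at a byte-aligned but not word-aligned address A, a big-endian target
   gets *LEFT == the byte at A (for LWL/SWL) and *RIGHT == the byte at
   A + 3 (for LWR/SWR); on a little-endian target the two references are
   swapped.  */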
6543 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6544 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6545 the operation is the equivalent of:
6547 (set DEST (*_extract SRC WIDTH BITPOS))
6549 Return true on success. */
6552 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6553 HOST_WIDE_INT bitpos)
6555 rtx left, right, temp;
6557 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
6558 be a paradoxical word_mode subreg. This is the only case in which
6559 we allow the destination to be larger than the source. */
6560 if (GET_CODE (dest) == SUBREG
6561 && GET_MODE (dest) == DImode
6562 && GET_MODE (SUBREG_REG (dest)) == SImode)
6563 dest = SUBREG_REG (dest);
6565 /* After the above adjustment, the destination must be the same
6566 width as the source. */
6567 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6570 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6573 temp = gen_reg_rtx (GET_MODE (dest));
6574 if (GET_MODE (dest) == DImode)
6576 emit_insn (gen_mov_ldl (temp, src, left));
6577 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6581 emit_insn (gen_mov_lwl (temp, src, left));
6582 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
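/* For the 32-bit case the expansion above corresponds roughly to
        lwl     $dest,LEFT
        lwr     $dest,RIGHT
   (a sketch for illustration; the operands come from the mov_lwl/mov_lwr
   patterns, and TEMP is threaded through the second insn so that the two
   halves of the load stay paired).  */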
6587 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6588 BITPOS and SRC are the operands passed to the expander; the operation
6589 is the equivalent of:
6591 (set (zero_extract DEST WIDTH BITPOS) SRC)
6593 Return true on success. */
6596 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6597 HOST_WIDE_INT bitpos)
6600 enum machine_mode mode;
6602 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6605 mode = mode_for_size (width, MODE_INT, 0);
6606 src = gen_lowpart (mode, src);
6609 emit_insn (gen_mov_sdl (dest, src, left));
6610 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6614 emit_insn (gen_mov_swl (dest, src, left));
6615 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6620 /* Return true if X is a MEM with the same size as MODE. */
6623 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6630 size = MEM_SIZE (x);
6631 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6634 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6635 source of an "ext" instruction or the destination of an "ins"
6636 instruction. OP must be a register operand and the following
6637 conditions must hold:
6639 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6640 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6641 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6643 Also reject lengths equal to a word as they are better handled
6644 by the move patterns. */
6647 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6649 if (!ISA_HAS_EXT_INS
6650 || !register_operand (op, VOIDmode)
6651 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6654 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6657 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
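/* Example (illustrative, not from the original source): with
   ISA_HAS_EXT_INS and an SImode OP, a pair such as WIDTH == 8,
   BITPOS == 16 is accepted (an 8-bit field starting at bit 16), whereas
   WIDTH == 32 is rejected by the IN_RANGE check above so that full-word
   accesses go through the ordinary move patterns.  */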
6663 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
6664 operation if MAXLEN is the maximum length of consecutive bits that
6665 can make up MASK. MODE is the mode of the operation. See
6666 mask_low_and_shift_len for the actual definition. */
6669 mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
6671 return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
6674 /* The canonical form of a mask-low-and-shift-left operation is
6675 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
6676 cleared. Thus we need to shift MASK to the right before checking if it
6677 is a valid mask value. MODE is the mode of the operation. If MASK is
6678 valid, return the length of the mask, otherwise return -1. */
6681 mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
6683 HOST_WIDE_INT shval;
6685 shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
6686 return exact_log2 ((UINTVAL (mask) >> shval) + 1);
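/* Worked example (illustrative, not from the original source): in SImode
   with SHIFT == 8 and MASK == 0x0000ff00, shval == 8,
   (UINTVAL (mask) >> shval) + 1 == 0x100 and exact_log2 returns 8, the
   field length.  If the shifted mask plus one is not a power of two
   (a non-contiguous mask, or one whose low SHIFT bits are not clear),
   exact_log2 returns -1 and the combination is rejected.  */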
6689 /* Return true if -msplit-addresses is selected and should be honored.
6691 -msplit-addresses is a half-way house between explicit relocations
6692 and the traditional assembler macros. It can split absolute 32-bit
6693 symbolic constants into a high/lo_sum pair but uses macros for other
6696 Like explicit relocation support for REL targets, it relies
6697 on GNU extensions in the assembler and the linker.
6699 Although this code should work for -O0, it has traditionally
6700 been treated as an optimization. */
6703 mips_split_addresses_p (void)
6705 return (TARGET_SPLIT_ADDRESSES
6709 && !ABI_HAS_64BIT_SYMBOLS);
6712 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6715 mips_init_relocs (void)
6717 memset (mips_split_p, '\0', sizeof (mips_split_p));
6718 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
6719 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6720 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6722 if (ABI_HAS_64BIT_SYMBOLS)
6724 if (TARGET_EXPLICIT_RELOCS)
6726 mips_split_p[SYMBOL_64_HIGH] = true;
6727 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6728 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6730 mips_split_p[SYMBOL_64_MID] = true;
6731 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6732 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6734 mips_split_p[SYMBOL_64_LOW] = true;
6735 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6736 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6738 mips_split_p[SYMBOL_ABSOLUTE] = true;
6739 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6744 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6746 mips_split_p[SYMBOL_ABSOLUTE] = true;
6747 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6748 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6750 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6756 /* The high part is provided by a pseudo copy of $gp. */
6757 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6758 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6760 else if (TARGET_EXPLICIT_RELOCS)
6761 /* Small data constants are kept whole until after reload,
6762 then lowered by mips_rewrite_small_data. */
6763 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6765 if (TARGET_EXPLICIT_RELOCS)
6767 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6770 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6771 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6775 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6776 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6779 /* Expose the use of $28 as soon as possible. */
6780 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
6784 /* The HIGH and LO_SUM are matched by special .md patterns. */
6785 mips_split_p[SYMBOL_GOT_DISP] = true;
6787 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6788 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6789 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6791 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6792 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6793 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6798 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6800 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6801 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6803 /* Expose the use of $28 as soon as possible. */
6804 mips_split_p[SYMBOL_GOT_DISP] = true;
6810 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6811 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6812 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6815 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6816 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6818 mips_split_p[SYMBOL_DTPREL] = true;
6819 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6820 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6822 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6824 mips_split_p[SYMBOL_TPREL] = true;
6825 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6826 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6828 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6831 /* If OP is an UNSPEC address, return the address to which it refers,
6832 otherwise return OP itself. */
6835 mips_strip_unspec_address (rtx op)
6839 split_const (op, &base, &offset);
6840 if (UNSPEC_ADDRESS_P (base))
6841 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6845 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6846 in context CONTEXT. RELOCS is the array of relocations to use. */
6849 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6850 const char **relocs)
6852 enum mips_symbol_type symbol_type;
6855 symbol_type = mips_classify_symbolic_expression (op, context);
6856 gcc_assert (relocs[symbol_type]);
6858 fputs (relocs[symbol_type], file);
6859 output_addr_const (file, mips_strip_unspec_address (op));
6860 for (p = relocs[symbol_type]; *p != 0; p++)
6865 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6866 The punctuation characters are:
6868 '(' Start a nested ".set noreorder" block.
6869 ')' End a nested ".set noreorder" block.
6870 '[' Start a nested ".set noat" block.
6871 ']' End a nested ".set noat" block.
6872 '<' Start a nested ".set nomacro" block.
6873 '>' End a nested ".set nomacro" block.
6874 '*' Behave like %(%< if generating a delayed-branch sequence.
6875 '#' Print a nop if in a ".set noreorder" block.
6876 '/' Like '#', but do nothing within a delayed-branch sequence.
6877 '?' Print "l" if mips_branch_likely is true.
6878 '.' Print the name of the register with a hard-wired zero (zero or $0).
6879 '@' Print the name of the assembler temporary register (at or $1).
6880 '^' Print the name of the pic call-through register (t9 or $25).
6881 '+' Print the name of the gp register (usually gp or $28).
6882 '$' Print the name of the stack pointer register (sp or $29).
6883 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6884 '-' Print ".set pop" under the same conditions as '|'.
6886 See also mips_init_print_operand_punct. */
6889 mips_print_operand_punctuation (FILE *file, int ch)
6894 if (set_noreorder++ == 0)
6895 fputs (".set\tnoreorder\n\t", file);
6899 gcc_assert (set_noreorder > 0);
6900 if (--set_noreorder == 0)
6901 fputs ("\n\t.set\treorder", file);
6905 if (set_noat++ == 0)
6906 fputs (".set\tnoat\n\t", file);
6910 gcc_assert (set_noat > 0);
6911 if (--set_noat == 0)
6912 fputs ("\n\t.set\tat", file);
6916 if (set_nomacro++ == 0)
6917 fputs (".set\tnomacro\n\t", file);
6921 gcc_assert (set_nomacro > 0);
6922 if (--set_nomacro == 0)
6923 fputs ("\n\t.set\tmacro", file);
6927 if (final_sequence != 0)
6929 mips_print_operand_punctuation (file, '(');
6930 mips_print_operand_punctuation (file, '<');
6935 if (set_noreorder != 0)
6936 fputs ("\n\tnop", file);
6940 /* Print an extra newline so that the delayed insn is separated
6941 from the following ones. This looks neater and is consistent
6942 with non-nop delayed sequences. */
6943 if (set_noreorder != 0 && final_sequence == 0)
6944 fputs ("\n\tnop\n", file);
6948 if (mips_branch_likely)
6953 fputs (reg_names[GP_REG_FIRST + 0], file);
6957 fputs (reg_names[GP_REG_FIRST + 1], file);
6961 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6965 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6969 fputs (reg_names[STACK_POINTER_REGNUM], file);
6974 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6979 fputs ("\n\t.set\tpop", file);
6988 /* Initialize mips_print_operand_punct. */
6991 mips_init_print_operand_punct (void)
6995 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6996 mips_print_operand_punct[(unsigned char) *p] = true;
6999 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
7000 associated with condition CODE. Print the condition part of the
7004 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
7018 /* Conveniently, the MIPS names for these conditions are the same
7019 as their RTL equivalents. */
7020 fputs (GET_RTX_NAME (code), file);
7024 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7029 /* Likewise floating-point branches. */
7032 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
7037 fputs ("c1f", file);
7041 fputs ("c1t", file);
7045 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7050 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
7052 'X' Print CONST_INT OP in hexadecimal format.
7053 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
7054 'd' Print CONST_INT OP in decimal.
7055 'm' Print one less than CONST_INT OP in decimal.
7056 'h' Print the high-part relocation associated with OP, after stripping
7058 'R' Print the low-part relocation associated with OP.
7059 'C' Print the integer branch condition for comparison OP.
7060 'N' Print the inverse of the integer branch condition for comparison OP.
7061 'F' Print the FPU branch condition for comparison OP.
7062 'W' Print the inverse of the FPU branch condition for comparison OP.
7063 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7064 'z' for (eq:?I ...), 'n' for (ne:?I ...).
7065 't' Like 'T', but with the EQ/NE cases reversed
7066 'Y' Print mips_fp_conditions[INTVAL (OP)]
7067 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7068 'q' Print a DSP accumulator register.
7069 'D' Print the second part of a double-word register or memory operand.
7070 'L' Print the low-order register in a double-word register operand.
7071 'M' Print high-order register in a double-word register operand.
7072 'z' Print $0 if OP is zero, otherwise print OP normally. */
7075 mips_print_operand (FILE *file, rtx op, int letter)
7079 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
7081 mips_print_operand_punctuation (file, letter);
7086 code = GET_CODE (op);
7091 if (GET_CODE (op) == CONST_INT)
7092 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
7094 output_operand_lossage ("invalid use of '%%%c'", letter);
7098 if (GET_CODE (op) == CONST_INT)
7099 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
7101 output_operand_lossage ("invalid use of '%%%c'", letter);
7105 if (GET_CODE (op) == CONST_INT)
7106 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
7108 output_operand_lossage ("invalid use of '%%%c'", letter);
7112 if (GET_CODE (op) == CONST_INT)
7113 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
7115 output_operand_lossage ("invalid use of '%%%c'", letter);
7121 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
7125 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
7129 mips_print_int_branch_condition (file, code, letter);
7133 mips_print_int_branch_condition (file, reverse_condition (code), letter);
7137 mips_print_float_branch_condition (file, code, letter);
7141 mips_print_float_branch_condition (file, reverse_condition (code),
7148 int truth = (code == NE) == (letter == 'T');
7149 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
7154 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
7155 fputs (mips_fp_conditions[UINTVAL (op)], file);
7157 output_operand_lossage ("'%%%c' is not a valid operand prefix",
7164 mips_print_operand (file, op, 0);
7170 if (code == REG && MD_REG_P (REGNO (op)))
7171 fprintf (file, "$ac0");
7172 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
7173 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
7175 output_operand_lossage ("invalid use of '%%%c'", letter);
7183 unsigned int regno = REGNO (op);
7184 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
7185 || (letter == 'L' && TARGET_BIG_ENDIAN)
7188 fprintf (file, "%s", reg_names[regno]);
7194 output_address (plus_constant (XEXP (op, 0), 4));
7196 output_address (XEXP (op, 0));
7200 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
7201 fputs (reg_names[GP_REG_FIRST], file);
7202 else if (CONST_GP_P (op))
7203 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
7205 output_addr_const (file, mips_strip_unspec_address (op));
7211 /* Output address operand X to FILE. */
7214 mips_print_operand_address (FILE *file, rtx x)
7216 struct mips_address_info addr;
7218 if (mips_classify_address (&addr, x, word_mode, true))
7222 mips_print_operand (file, addr.offset, 0);
7223 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7226 case ADDRESS_LO_SUM:
7227 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
7229 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7232 case ADDRESS_CONST_INT:
7233 output_addr_const (file, x);
7234 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
7237 case ADDRESS_SYMBOLIC:
7238 output_addr_const (file, mips_strip_unspec_address (x));
7244 /* Implement TARGET_ENCODE_SECTION_INFO. */
7247 mips_encode_section_info (tree decl, rtx rtl, int first)
7249 default_encode_section_info (decl, rtl, first);
7251 if (TREE_CODE (decl) == FUNCTION_DECL)
7253 rtx symbol = XEXP (rtl, 0);
7254 tree type = TREE_TYPE (decl);
7256 /* Encode whether the symbol is short or long. */
7257 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
7258 || mips_far_type_p (type))
7259 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
7263 /* Implement TARGET_SELECT_RTX_SECTION. */
7266 mips_select_rtx_section (enum machine_mode mode, rtx x,
7267 unsigned HOST_WIDE_INT align)
7269 /* ??? Consider using mergeable small data sections. */
7270 if (mips_rtx_constant_in_small_data_p (mode))
7271 return get_named_section (NULL, ".sdata", 0);
7273 return default_elf_select_rtx_section (mode, x, align);
7276 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7278 The complication here is that, with the combination TARGET_ABICALLS
7279 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7280 absolute addresses, and should therefore not be included in the
7281 read-only part of a DSO. Handle such cases by selecting a normal
7282 data section instead of a read-only one. The logic apes that in
7283 default_function_rodata_section. */
7286 mips_function_rodata_section (tree decl)
7288 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
7289 return default_function_rodata_section (decl);
7291 if (decl && DECL_SECTION_NAME (decl))
7293 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7294 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7296 char *rname = ASTRDUP (name);
7298 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7300 else if (flag_function_sections
7301 && flag_data_sections
7302 && strncmp (name, ".text.", 6) == 0)
7304 char *rname = ASTRDUP (name);
7305 memcpy (rname + 1, "data", 4);
7306 return get_section (rname, SECTION_WRITE, decl);
7309 return data_section;
7312 /* Implement TARGET_IN_SMALL_DATA_P. */
7315 mips_in_small_data_p (const_tree decl)
7317 unsigned HOST_WIDE_INT size;
7319 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7322 /* We don't yet generate small-data references for -mabicalls
7323 or VxWorks RTP code. See the related -G handling in
7324 mips_override_options. */
7325 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
7328 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7332 /* Reject anything that isn't in a known small-data section. */
7333 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7334 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7337 /* If a symbol is defined externally, the assembler will use the
7338 usual -G rules when deciding how to implement macros. */
7339 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
7342 else if (TARGET_EMBEDDED_DATA)
7344 /* Don't put constants into the small data section: we want them
7345 to be in ROM rather than RAM. */
7346 if (TREE_CODE (decl) != VAR_DECL)
7349 if (TREE_READONLY (decl)
7350 && !TREE_SIDE_EFFECTS (decl)
7351 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7355 /* Enforce -mlocal-sdata. */
7356 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
7359 /* Enforce -mextern-sdata. */
7360 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
7362 if (DECL_EXTERNAL (decl))
7364 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
7368 /* We have traditionally not treated zero-sized objects as small data,
7369 so this is now effectively part of the ABI. */
7370 size = int_size_in_bytes (TREE_TYPE (decl));
7371 return size > 0 && size <= mips_small_data_threshold;
7374 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7375 anchors for small data: the GP register acts as an anchor in that
7376 case. We also don't want to use them for PC-relative accesses,
7377 where the PC acts as an anchor. */
7380 mips_use_anchors_for_symbol_p (const_rtx symbol)
7382 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
7384 case SYMBOL_PC_RELATIVE:
7385 case SYMBOL_GP_RELATIVE:
7389 return default_use_anchors_for_symbol_p (symbol);
7393 /* The MIPS debug format wants all automatic variables and arguments
7394 to be in terms of the virtual frame pointer (stack pointer before
7395 any adjustment in the function), while the MIPS 3.0 linker wants
7396 the frame pointer to be the stack pointer after the initial
7397 adjustment. So, we do the adjustment here. The arg pointer (which
7398 is eliminated) points to the virtual frame pointer, while the frame
7399 pointer (which may be eliminated) points to the stack pointer after
7400 the initial adjustments. */
7403 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
7405 rtx offset2 = const0_rtx;
7406 rtx reg = eliminate_constant_term (addr, &offset2);
7409 offset = INTVAL (offset2);
7411 if (reg == stack_pointer_rtx
7412 || reg == frame_pointer_rtx
7413 || reg == hard_frame_pointer_rtx)
7415 offset -= cfun->machine->frame.total_size;
7416 if (reg == hard_frame_pointer_rtx)
7417 offset += cfun->machine->frame.hard_frame_pointer_offset;
7420 /* sdbout_parms does not want this to crash for unrecognized cases. */
7422 else if (reg != arg_pointer_rtx)
7423 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7430 /* Implement ASM_OUTPUT_EXTERNAL. */
7433 mips_output_external (FILE *file, tree decl, const char *name)
7435 default_elf_asm_output_external (file, decl, name);
7437 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7438 set in order to avoid putting out names that are never really
7440 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7442 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7444 /* When using assembler macros, emit .extern directives for
7445 all small-data externs so that the assembler knows how
7448 In most cases it would be safe (though pointless) to emit
7449 .externs for other symbols too. One exception is when an
7450 object is within the -G limit but declared by the user to
7451 be in a section other than .sbss or .sdata. */
7452 fputs ("\t.extern\t", file);
7453 assemble_name (file, name);
7454 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7455 int_size_in_bytes (TREE_TYPE (decl)));
7457 else if (TARGET_IRIX
7458 && mips_abi == ABI_32
7459 && TREE_CODE (decl) == FUNCTION_DECL)
7461 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7462 `.global name .text' directive for every used but
7463 undefined function. If we don't, the linker may perform
7464 an optimization (skipping over the insns that set $gp)
7465 when it is unsafe. */
7466 fputs ("\t.globl ", file);
7467 assemble_name (file, name);
7468 fputs (" .text\n", file);
7473 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7476 mips_output_filename (FILE *stream, const char *name)
7478 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7480 if (write_symbols == DWARF2_DEBUG)
7482 else if (mips_output_filename_first_time)
7484 mips_output_filename_first_time = 0;
7485 num_source_filenames += 1;
7486 current_function_file = name;
7487 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7488 output_quoted_string (stream, name);
7489 putc ('\n', stream);
7491 /* If we are emitting stabs, let dbxout.c handle this (except for
7492 the mips_output_filename_first_time case). */
7493 else if (write_symbols == DBX_DEBUG)
7495 else if (name != current_function_file
7496 && strcmp (name, current_function_file) != 0)
7498 num_source_filenames += 1;
7499 current_function_file = name;
7500 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7501 output_quoted_string (stream, name);
7502 putc ('\n', stream);
7506 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7508 static void ATTRIBUTE_UNUSED
7509 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7514 fputs ("\t.dtprelword\t", file);
7518 fputs ("\t.dtpreldword\t", file);
7524 output_addr_const (file, x);
7525 fputs ("+0x8000", file);
7528 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7531 mips_dwarf_register_span (rtx reg)
7534 enum machine_mode mode;
7536 /* By default, GCC maps increasing register numbers to increasing
7537 memory locations, but paired FPRs are always little-endian,
7538 regardless of the prevailing endianness. */
7539 mode = GET_MODE (reg);
7540 if (FP_REG_P (REGNO (reg))
7541 && TARGET_BIG_ENDIAN
7542 && MAX_FPRS_PER_FMT > 1
7543 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7545 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7546 high = mips_subword (reg, true);
7547 low = mips_subword (reg, false);
7548 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7554 /* Implement ASM_OUTPUT_ASCII. */
7557 mips_output_ascii (FILE *stream, const char *string, size_t len)
7563 fprintf (stream, "\t.ascii\t\"");
7564 for (i = 0; i < len; i++)
7568 c = (unsigned char) string[i];
7571 if (c == '\\' || c == '\"')
7573 putc ('\\', stream);
7581 fprintf (stream, "\\%03o", c);
7585 if (cur_pos > 72 && i+1 < len)
7588 fprintf (stream, "\"\n\t.ascii\t\"");
7591 fprintf (stream, "\"\n");
7594 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7595 macros, mark the symbol as written so that mips_asm_output_external
7596 won't emit an .extern for it. STREAM is the output file, NAME is the
7597 name of the symbol, INIT_STRING is the string that should be written
7598 before the symbol and FINAL_STRING is the string that should be
7599 written after it. FINAL_STRING is a printf format that consumes the
7600 remaining arguments. */
7603 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7604 const char *final_string, ...)
7608 fputs (init_string, stream);
7609 assemble_name (stream, name);
7610 va_start (ap, final_string);
7611 vfprintf (stream, final_string, ap);
7614 if (!TARGET_EXPLICIT_RELOCS)
7616 tree name_tree = get_identifier (name);
7617 TREE_ASM_WRITTEN (name_tree) = 1;
7621 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7622 NAME is the name of the object and ALIGN is the required alignment
7623 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7624 alignment argument. */
7627 mips_declare_common_object (FILE *stream, const char *name,
7628 const char *init_string,
7629 unsigned HOST_WIDE_INT size,
7630 unsigned int align, bool takes_alignment_p)
7632 if (!takes_alignment_p)
7634 size += (align / BITS_PER_UNIT) - 1;
7635 size -= size % (align / BITS_PER_UNIT);
7636 mips_declare_object (stream, name, init_string,
7637 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7640 mips_declare_object (stream, name, init_string,
7641 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7642 size, align / BITS_PER_UNIT);
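/* Worked example (illustrative, not from the original source): for a
   directive with no alignment operand, SIZE == 10 and ALIGN == 64 give
   align / BITS_PER_UNIT == 8, so SIZE is rounded up to 16 before being
   printed; when TAKES_ALIGNMENT_P is true the original size and the byte
   alignment are printed as two separate operands instead.  */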
7645 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7646 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7649 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7650 unsigned HOST_WIDE_INT size,
7653 /* If the target wants uninitialized const declarations in
7654 .rdata then don't put them in .comm. */
7655 if (TARGET_EMBEDDED_DATA
7656 && TARGET_UNINIT_CONST_IN_RODATA
7657 && TREE_CODE (decl) == VAR_DECL
7658 && TREE_READONLY (decl)
7659 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7661 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7662 targetm.asm_out.globalize_label (stream, name);
7664 switch_to_section (readonly_data_section);
7665 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7666 mips_declare_object (stream, name, "",
7667 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7671 mips_declare_common_object (stream, name, "\n\t.comm\t",
7675 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7676 extern int size_directive_output;
7678 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7679 definitions except that it uses mips_declare_object to emit the label. */
7682 mips_declare_object_name (FILE *stream, const char *name,
7683 tree decl ATTRIBUTE_UNUSED)
7685 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7686 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7689 size_directive_output = 0;
7690 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7694 size_directive_output = 1;
7695 size = int_size_in_bytes (TREE_TYPE (decl));
7696 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7699 mips_declare_object (stream, name, "", ":\n");
7702 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7705 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7709 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7710 if (!flag_inhibit_size_directive
7711 && DECL_SIZE (decl) != 0
7714 && DECL_INITIAL (decl) == error_mark_node
7715 && !size_directive_output)
7719 size_directive_output = 1;
7720 size = int_size_in_bytes (TREE_TYPE (decl));
7721 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7726 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7727 with the current ABI. */
7730 mips_mdebug_abi_name (void)
7743 return TARGET_64BIT ? "eabi64" : "eabi32";
7749 /* Implement TARGET_ASM_FILE_START. */
7752 mips_file_start (void)
7754 default_file_start ();
7756 /* Generate a special section to describe the ABI switches used to
7757 produce the resultant binary. This is unnecessary on IRIX and
7758 causes unwanted warnings from the native linker. */
7761 /* Record the ABI itself. Modern versions of binutils encode
7762 this information in the ELF header flags, but GDB needs the
7763 information in order to correctly debug binaries produced by
7764 older binutils. See the function mips_gdbarch_init in
7766 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7767 mips_mdebug_abi_name ());
7769 /* There is no ELF header flag to distinguish long32 forms of the
7770 EABI from long64 forms. Emit a special section to help tools
7771 such as GDB. Do the same for o64, which is sometimes used with
7773 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7774 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7775 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7777 #ifdef HAVE_AS_GNU_ATTRIBUTE
7778 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7779 (TARGET_HARD_FLOAT_ABI
7780 ? (TARGET_DOUBLE_FLOAT
7781 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7785 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7786 if (TARGET_ABICALLS)
7788 fprintf (asm_out_file, "\t.abicalls\n");
7789 if (TARGET_ABICALLS_PIC0)
7790 fprintf (asm_out_file, "\t.option\tpic0\n");
7793 if (flag_verbose_asm)
7794 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7796 mips_small_data_threshold, mips_arch_info->name, mips_isa);
7799 /* Make the last instruction frame-related and note that it performs
7800 the operation described by FRAME_PATTERN. */
7803 mips_set_frame_expr (rtx frame_pattern)
7807 insn = get_last_insn ();
7808 RTX_FRAME_RELATED_P (insn) = 1;
7809 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7814 /* Return a frame-related rtx that stores REG at MEM.
7815 REG must be a single register. */
7818 mips_frame_set (rtx mem, rtx reg)
7822 /* If we're saving the return address register and the DWARF return
7823 address column differs from the hard register number, adjust the
7824 note reg to refer to the former. */
7825 if (REGNO (reg) == GP_REG_FIRST + 31
7826 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7827 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7829 set = gen_rtx_SET (VOIDmode, mem, reg);
7830 RTX_FRAME_RELATED_P (set) = 1;
7835 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7836 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7837 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7838 static const unsigned char mips16e_s2_s8_regs[] = {
7839 30, 23, 22, 21, 20, 19, 18
7841 static const unsigned char mips16e_a0_a3_regs[] = {
7845 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7846 ordered from the uppermost in memory to the lowest in memory. */
7847 static const unsigned char mips16e_save_restore_regs[] = {
7848 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7851 /* Return the index of the lowest X in the range [0, SIZE) for which
7852 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7855 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7860 for (i = 0; i < size; i++)
7861 if (BITSET_P (mask, regs[i]))
7867 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7868 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7869 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7870 is true for all indexes (X, SIZE). */
7873 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7874 unsigned int size, unsigned int *num_regs_ptr)
7878 i = mips16e_find_first_register (*mask_ptr, regs, size);
7879 for (i++; i < size; i++)
7880 if (!BITSET_P (*mask_ptr, regs[i]))
7883 *mask_ptr |= 1 << regs[i];
7887 /* Return a simplified form of X using the register values in REG_VALUES.
7888 REG_VALUES[R] is the last value assigned to hard register R, or null
7889 if R has not been modified.
7891 This function is rather limited, but is good enough for our purposes. */
7894 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7896 x = avoid_constant_pool_reference (x);
7900 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7901 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7902 x0, GET_MODE (XEXP (x, 0)));
7905 if (ARITHMETIC_P (x))
7907 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7908 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7909 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7913 && reg_values[REGNO (x)]
7914 && !rtx_unstable_p (reg_values[REGNO (x)]))
7915 return reg_values[REGNO (x)];
7920 /* Return true if (set DEST SRC) stores an argument register into its
7921 caller-allocated save slot, storing the number of that argument
7922 register in *REGNO_PTR if so. REG_VALUES is as for
7923 mips16e_collect_propagate_value. */
7926 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7927 unsigned int *regno_ptr)
7929 unsigned int argno, regno;
7930 HOST_WIDE_INT offset, required_offset;
7933 /* Check that this is a word-mode store. */
7934 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7937 /* Check that the register being saved is an unmodified argument
7939 regno = REGNO (src);
7940 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7942 argno = regno - GP_ARG_FIRST;
7944 /* Check whether the address is an appropriate stack-pointer or
7945 frame-pointer access. */
7946 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7947 mips_split_plus (addr, &base, &offset);
7948 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7949 if (base == hard_frame_pointer_rtx)
7950 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7951 else if (base != stack_pointer_rtx)
7953 if (offset != required_offset)
7960 /* A subroutine of mips_expand_prologue, called only when generating
7961 MIPS16e SAVE instructions. Search the start of the function for any
7962 instructions that save argument registers into their caller-allocated
7963 save slots. Delete such instructions and return a value N such that
7964 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7965 instructions redundant. */
7968 mips16e_collect_argument_saves (void)
7970 rtx reg_values[FIRST_PSEUDO_REGISTER];
7971 rtx insn, next, set, dest, src;
7972 unsigned int nargs, regno;
7974 push_topmost_sequence ();
7976 memset (reg_values, 0, sizeof (reg_values));
7977 for (insn = get_insns (); insn; insn = next)
7979 next = NEXT_INSN (insn);
7986 set = PATTERN (insn);
7987 if (GET_CODE (set) != SET)
7990 dest = SET_DEST (set);
7991 src = SET_SRC (set);
7992 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7994 if (!BITSET_P (cfun->machine->frame.mask, regno))
7997 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8000 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8001 reg_values[REGNO (dest)]
8002 = mips16e_collect_propagate_value (src, reg_values);
8006 pop_topmost_sequence ();
8011 /* Return a move between register REGNO and memory location SP + OFFSET.
8012 Make the move a load if RESTORE_P, otherwise make it a frame-related
8016 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8021 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8022 reg = gen_rtx_REG (SImode, regno);
8024 ? gen_rtx_SET (VOIDmode, reg, mem)
8025 : mips_frame_set (mem, reg));
8028 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8029 The instruction must:
8031 - Allocate or deallocate SIZE bytes in total; SIZE is known
8034 - Save or restore as many registers in *MASK_PTR as possible.
8035 The instruction saves the first registers at the top of the
8036 allocated area, with the other registers below it.
8038 - Save NARGS argument registers above the allocated area.
8040 (NARGS is always zero if RESTORE_P.)
8042 The SAVE and RESTORE instructions cannot save and restore all general
8043 registers, so there may be some registers left over for the caller to
8044 handle. Destructively modify *MASK_PTR so that it contains the registers
8045 that still need to be saved or restored. The caller can save these
8046 registers in the memory immediately below *OFFSET_PTR, which is a
8047 byte offset from the bottom of the allocated stack area. */
8050 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8051 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8055 HOST_WIDE_INT offset, top_offset;
8056 unsigned int i, regno;
8059 gcc_assert (cfun->machine->frame.num_fp == 0);
8061 /* Calculate the number of elements in the PARALLEL. We need one element
8062 for the stack adjustment, one for each argument register save, and one
8063 for each additional register move. */
8065 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8066 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8069 /* Create the final PARALLEL. */
8070 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8073 /* Add the stack pointer adjustment. */
8074 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8075 plus_constant (stack_pointer_rtx,
8076 restore_p ? size : -size));
8077 RTX_FRAME_RELATED_P (set) = 1;
8078 XVECEXP (pattern, 0, n++) = set;
8080 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8081 top_offset = restore_p ? size : 0;
8083 /* Save the arguments. */
8084 for (i = 0; i < nargs; i++)
8086 offset = top_offset + i * UNITS_PER_WORD;
8087 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8088 XVECEXP (pattern, 0, n++) = set;
8091 /* Then fill in the other register moves. */
8092 offset = top_offset;
8093 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8095 regno = mips16e_save_restore_regs[i];
8096 if (BITSET_P (*mask_ptr, regno))
8098 offset -= UNITS_PER_WORD;
8099 set = mips16e_save_restore_reg (restore_p, offset, regno);
8100 XVECEXP (pattern, 0, n++) = set;
8101 *mask_ptr &= ~(1 << regno);
8105 /* Tell the caller what offset it should use for the remaining registers. */
8106 *offset_ptr = size + (offset - top_offset);
8108 gcc_assert (n == XVECLEN (pattern, 0));
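/* Example (illustrative, not from the original source): a prologue that
   allocates 32 bytes and saves $31, $17 and $16 yields a PARALLEL whose
   first element subtracts 32 from $sp and whose remaining elements store
   the three registers at decreasing offsets from the top of the new
   frame, in mips16e_save_restore_regs order; the corresponding assembly
   text is produced later by mips16e_output_save_restore (roughly
   "save 32,$16,$17,$31", subject to the assembler's SAVE syntax).  */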
/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
   pointer.  Return true if PATTERN matches the kind of instruction
   generated by mips16e_build_save_restore.  If INFO is nonnull,
   initialize it when returning true.  */

static bool
mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
                                struct mips16e_save_restore_info *info)
{
  unsigned int i, nargs, mask, extra;
  HOST_WIDE_INT top_offset, save_offset, offset;
  rtx set, reg, mem, base;
  int n;

  if (!GENERATE_MIPS16E_SAVE_RESTORE)
    return false;

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = adjust > 0 ? adjust : 0;

  /* Interpret all other members of the PARALLEL.  */
  save_offset = top_offset - UNITS_PER_WORD;
  mask = 0;
  nargs = 0;
  i = 0;
  for (n = 1; n < XVECLEN (pattern, 0); n++)
    {
      /* Check that we have a SET.  */
      set = XVECEXP (pattern, 0, n);
      if (GET_CODE (set) != SET)
        return false;

      /* Check that the SET is a load (if restoring) or a store
         (if saving).  */
      mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
      if (!MEM_P (mem))
        return false;

      /* Check that the address is the sum of the stack pointer and a
         possibly-zero constant offset.  */
      mips_split_plus (XEXP (mem, 0), &base, &offset);
      if (base != stack_pointer_rtx)
        return false;

      /* Check that SET's other operand is a register.  */
      reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
      if (!REG_P (reg))
        return false;

      /* Check for argument saves.  */
      if (offset == top_offset + nargs * UNITS_PER_WORD
          && REGNO (reg) == GP_ARG_FIRST + nargs)
        nargs++;
      else if (offset == save_offset)
        {
          while (mips16e_save_restore_regs[i++] != REGNO (reg))
            if (i == ARRAY_SIZE (mips16e_save_restore_regs))
              return false;

          mask |= 1 << REGNO (reg);
          save_offset -= UNITS_PER_WORD;
        }
      else
        return false;
    }

  /* Check that the restrictions on register ranges are met.  */
  extra = 0;
  mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
                          ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
  mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
                          ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
  if (extra != 0)
    return false;

  /* Make sure that the topmost argument register is not saved twice.
     The checks above ensure that the same is then true for the other
     argument registers.  */
  if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
    return false;

  /* Pass back information, if requested.  */
  if (info)
    {
      info->nargs = nargs;
      info->mask = mask;
      info->size = (adjust > 0 ? adjust : -adjust);
    }

  return true;
}

/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
   for the register range [MIN_REG, MAX_REG].  Return a pointer to
   the null terminator.  */

static char *
mips16e_add_register_range (char *s, unsigned int min_reg,
                            unsigned int max_reg)
{
  if (min_reg != max_reg)
    s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
  else
    s += sprintf (s, ",%s", reg_names[min_reg]);
  return s;
}

/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
   PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */

const char *
mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
{
  static char buffer[300];

  struct mips16e_save_restore_info info;
  unsigned int i, end;
  char *s;

  /* Parse the pattern.  */
  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
    gcc_unreachable ();

  /* Add the mnemonic.  */
  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
  s += strlen (s);

  /* Save the arguments.  */
  if (info.nargs > 1)
    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
                  reg_names[GP_ARG_FIRST + info.nargs - 1]);
  else if (info.nargs == 1)
    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);

  /* Emit the amount of stack space to allocate or deallocate.  */
  s += sprintf (s, "%d", (int) info.size);

  /* Save or restore $16.  */
  if (BITSET_P (info.mask, 16))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);

  /* Save or restore $17.  */
  if (BITSET_P (info.mask, 17))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);

  /* Save or restore registers in the range $s2...$s8, which
     mips16e_s2_s8_regs lists in decreasing order.  Note that this
     is a software register range; the hardware registers are not
     numbered consecutively.  */
  end = ARRAY_SIZE (mips16e_s2_s8_regs);
  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
                                    mips16e_s2_s8_regs[i]);

  /* Save or restore registers in the range $a0...$a3.  */
  end = ARRAY_SIZE (mips16e_a0_a3_regs);
  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
                                    mips16e_a0_a3_regs[end - 1]);

  /* Save or restore $31.  */
  if (BITSET_P (info.mask, 31))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);

  return buffer;
}

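/* For instance, a frame that merges two argument stores, allocates 128
   bytes and keeps $16, $17 and $31 would be printed by the code above
   roughly as

        save    $4-$5,128,$16,$17,$31

   (an illustrative encoding, not captured compiler output); the matching
   epilogue instruction uses "restore" with the same operands.  */
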
/* Return true if the current function has an insn that implicitly
   uses the global pointer.  */

static bool
mips_function_has_gp_insn (void)
{
  /* Don't bother rechecking if we found one last time.  */
  if (!cfun->machine->has_gp_insn_p)
    {
      rtx insn;

      push_topmost_sequence ();
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (USEFUL_INSN_P (insn)
            && (get_attr_got (insn) != GOT_UNSET
                || mips_small_data_pattern_p (PATTERN (insn))))
          {
            cfun->machine->has_gp_insn_p = true;
            break;
          }
      pop_topmost_sequence ();
    }
  return cfun->machine->has_gp_insn_p;
}

/* Return true if the current function returns its value in a floating-point
   register in MIPS16 mode.  */

static bool
mips16_cfun_returns_in_fpr_p (void)
{
  tree return_type = DECL_RESULT (current_function_decl);
  return (TARGET_MIPS16
          && TARGET_HARD_FLOAT_ABI
          && !aggregate_value_p (return_type, current_function_decl)
          && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
}

/* Return the register that should be used as the global pointer
   within this function.  Return 0 if the function doesn't need
   a global pointer.  */

static unsigned int
mips_global_pointer (void)
{
  unsigned int regno;

  /* $gp is always available unless we're using a GOT.  */
  if (!TARGET_USE_GOT)
    return GLOBAL_POINTER_REGNUM;

  /* We must always provide $gp when it is used implicitly.  */
  if (!TARGET_EXPLICIT_RELOCS)
    return GLOBAL_POINTER_REGNUM;

  /* FUNCTION_PROFILER includes a jal macro, so we need to give it
     a valid gp.  */
  if (crtl->profile)
    return GLOBAL_POINTER_REGNUM;

  /* If the function has a nonlocal goto, $gp must hold the correct
     global pointer for the target function.  */
  if (crtl->has_nonlocal_goto)
    return GLOBAL_POINTER_REGNUM;

  /* There's no need to initialize $gp if it isn't referenced now,
     and if we can be sure that no new references will be added during
     or after reload.  */
  if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
      && !mips_function_has_gp_insn ())
    {
      /* The function doesn't use $gp at the moment.  If we're generating
         -call_nonpic code, no new uses will be introduced during or after
         reload.  */
      if (TARGET_ABICALLS_PIC0)
        return 0;

      /* We need to handle the following implicit gp references:

         - Reload can sometimes introduce constant pool references
           into a function that otherwise didn't need them.  For example,
           suppose we have an instruction like:

               (set (reg:DF R1) (float:DF (reg:SI R2)))

           If R2 turns out to be a constant such as 1, the instruction may
           have a REG_EQUAL note saying that R1 == 1.0.  Reload then has
           the option of using this constant if R2 doesn't get allocated
           to a register.

           In cases like these, reload will have added the constant to the
           pool but no instruction will yet refer to it.

         - MIPS16 functions that return in FPRs need to call an
           external libgcc routine.  */
      if (!crtl->uses_const_pool
          && !mips16_cfun_returns_in_fpr_p ())
        return 0;
    }

  /* We need a global pointer, but perhaps we can use a call-clobbered
     register instead of $gp.  */
  if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
      if (!df_regs_ever_live_p (regno)
          && call_really_used_regs[regno]
          && !fixed_regs[regno]
          && regno != PIC_FUNCTION_ADDR_REGNUM)
        return regno;

  return GLOBAL_POINTER_REGNUM;
}

/* Return true if the current function must save register REGNO.  */

static bool
mips_save_reg_p (unsigned int regno)
{
  /* We need to save $gp if TARGET_CALL_SAVED_GP and if we have not
     chosen a call-clobbered substitute.  */
  if (TARGET_CALL_SAVED_GP
      && regno == GLOBAL_POINTER_REGNUM
      && cfun->machine->global_pointer == regno)
    return true;

  /* Check call-saved registers.  */
  if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
      && !call_really_used_regs[regno])
    return true;

  /* Save both registers in an FPR pair if either one is used.  This is
     needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
     register to be used without the even register.  */
  if (FP_REG_P (regno)
      && MAX_FPRS_PER_FMT == 2
      && df_regs_ever_live_p (regno + 1)
      && !call_really_used_regs[regno + 1])
    return true;

  /* We need to save the old frame pointer before setting up a new one.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* Check for registers that must be saved for FUNCTION_PROFILER.  */
  if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
    return true;

  /* We need to save the incoming return address if it is ever clobbered
     within the function, if __builtin_eh_return is being used to set a
     different return address, or if a stub is being used to return a
     value in FPRs.  */
  if (regno == GP_REG_FIRST + 31
      && (df_regs_ever_live_p (regno)
          || crtl->calls_eh_return
          || mips16_cfun_returns_in_fpr_p ()))
    return true;

  return false;
}

/* Populate the current function's mips_frame_info structure.

   MIPS stack frames look like:

        +-------------------------------+
        |  incoming stack arguments     |
        +-------------------------------+
        |  caller-allocated save area   |
      A |  for register arguments       |
        +-------------------------------+ <-- incoming stack pointer
        |  callee-allocated save area   |
      B |  for arguments that are       |
        |  split between registers and  |
        |  the stack                    |
        +-------------------------------+ <-- arg_pointer_rtx
      C |  callee-allocated save area   |
        |  for register varargs         |
        +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
        |                               |       + UNITS_PER_HWFPVALUE
        |  FPR save area                |
        +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
        |                               |       + UNITS_PER_WORD
        |  GPR save area                |
        +-------------------------------+
        |  local variables              |  | var_size
        +-------------------------------+
        |  $gp save area                |  | cprestore_size
      P +-------------------------------+ <-- hard_frame_pointer_rtx for
        |                               |       MIPS16 code
        |  outgoing stack arguments     |
        +-------------------------------+
        |  caller-allocated save area   |
        |  for register arguments       |
        +-------------------------------+ <-- stack_pointer_rtx
                                              frame_pointer_rtx
                                              hard_frame_pointer_rtx for
                                                non-MIPS16 code

   At least two of A, B and C will be empty.

   Dynamic stack allocations such as alloca insert data at point P.
   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
   hard_frame_pointer_rtx unchanged.  */

static void
mips_compute_frame_info (void)
{
  struct mips_frame_info *frame;
  HOST_WIDE_INT offset, size;
  unsigned int regno, i;

  frame = &cfun->machine->frame;
  memset (frame, 0, sizeof (*frame));
  size = get_frame_size ();

  cfun->machine->global_pointer = mips_global_pointer ();

  /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
     area and the $gp save slot.  This area isn't needed in leaf functions,
     but if the target-independent frame size is nonzero, we're committed
     to allocating it anyway.  */
  if (size == 0 && current_function_is_leaf)
    {
      /* The MIPS 3.0 linker does not like functions that dynamically
         allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
         looks like we are trying to create a second frame pointer to the
         function, so allocate some stack space to make it happy.  */
      if (cfun->calls_alloca)
        frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
      else
        frame->args_size = 0;
      frame->cprestore_size = 0;
    }
  else
    {
      frame->args_size = crtl->outgoing_args_size;
      frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
    }
  offset = frame->args_size + frame->cprestore_size;

  /* Move above the local variables.  */
  frame->var_size = MIPS_STACK_ALIGN (size);
  offset += frame->var_size;

  /* Find out which GPRs we need to save.  */
  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
    if (mips_save_reg_p (regno))
      {
        frame->num_gp++;
        frame->mask |= 1 << (regno - GP_REG_FIRST);
      }

  /* If this function calls eh_return, we must also save and restore the
     EH data registers.  */
  if (crtl->calls_eh_return)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
      {
        frame->num_gp++;
        frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
      }

  /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
     $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
     save all later registers too.  */
  if (GENERATE_MIPS16E_SAVE_RESTORE)
    {
      mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
                              ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
      mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
                              ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
    }

  /* Move above the GPR save area.  */
  if (frame->num_gp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
      frame->gp_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Find out which FPRs we need to save.  This loop must iterate over
     the same space as its companion in mips_for_each_saved_reg.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
      if (mips_save_reg_p (regno))
        {
          frame->num_fp += MAX_FPRS_PER_FMT;
          frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
        }

  /* Move above the FPR save area.  */
  if (frame->num_fp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
    }

  /* Move above the callee-allocated varargs save area.  */
  offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
  frame->arg_pointer_offset = offset;

  /* Move above the callee-allocated area for pretend stack arguments.  */
  offset += crtl->args.pretend_args_size;
  frame->total_size = offset;

  /* Work out the offsets of the save areas from the top of the frame.  */
  if (frame->gp_sp_offset > 0)
    frame->gp_save_offset = frame->gp_sp_offset - offset;
  if (frame->fp_sp_offset > 0)
    frame->fp_save_offset = frame->fp_sp_offset - offset;

  /* MIPS16 code offsets the frame pointer by the size of the outgoing
     arguments.  This tends to increase the chances of using unextended
     instructions for local variables and incoming arguments.  */
  if (TARGET_MIPS16)
    frame->hard_frame_pointer_offset = frame->args_size;
}

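/* A worked example with made-up numbers (o32, UNITS_PER_WORD == 4, 8-byte
   stack alignment): args_size == 16 and cprestore_size == 24 give an
   initial offset of 40; 40 bytes of locals raise it to 80; two saved GPRs
   add MIPS_STACK_ALIGN (8) == 8, so gp_sp_offset == 84.  With no FPR
   saves, varargs or pretend arguments, total_size == 88 and
   gp_save_offset == 84 - 88 == -4.  */
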
/* Return the style of GP load sequence that is being used for the
   current function.  */

enum mips_loadgp_style
mips_current_loadgp_style (void)
{
  if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
    return LOADGP_NONE;

  if (TARGET_RTP_PIC)
    return LOADGP_RTP;

  if (TARGET_ABSOLUTE_ABICALLS)
    return LOADGP_ABSOLUTE;

  return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
}

/* Implement FRAME_POINTER_REQUIRED.  */

bool
mips_frame_pointer_required (void)
{
  /* If the function contains dynamic stack allocations, we need to
     use the frame pointer to access the static parts of the frame.  */
  if (cfun->calls_alloca)
    return true;

  /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
     reload may be unable to compute the address of a local variable,
     since there is no way to add a large constant to the stack pointer
     without using a second temporary register.  */
  if (TARGET_MIPS16)
    {
      mips_compute_frame_info ();
      if (!SMALL_OPERAND (cfun->machine->frame.total_size))
        return true;
    }

  return false;
}

/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
   or argument pointer.  TO is either the stack pointer or hard frame
   pointer.  */

HOST_WIDE_INT
mips_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  mips_compute_frame_info ();

  /* Set OFFSET to the offset from the soft frame pointer, which is also
     the offset from the end-of-prologue stack pointer.  */
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      offset = 0;
      break;

    case ARG_POINTER_REGNUM:
      offset = cfun->machine->frame.arg_pointer_offset;
      break;

    default:
      gcc_unreachable ();
    }

  if (to == HARD_FRAME_POINTER_REGNUM)
    offset -= cfun->machine->frame.hard_frame_pointer_offset;

  return offset;
}

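/* For example (hypothetical frame, continuing the made-up numbers used
   earlier): with arg_pointer_offset == 88 and hard_frame_pointer_offset
   == 0, eliminating ARG_POINTER_REGNUM to STACK_POINTER_REGNUM returns
   88, to HARD_FRAME_POINTER_REGNUM also 88, and eliminating
   FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM returns 0.  */
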
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */

static void
mips_extra_live_on_entry (bitmap regs)
{
  if (TARGET_USE_GOT)
    {
      /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
         the global pointer.  */
      if (!TARGET_ABSOLUTE_ABICALLS)
        bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);

      /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
         the global pointer.  */
      if (TARGET_MIPS16)
        bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);

      /* See the comment above load_call<mode> for details.  */
      bitmap_set_bit (regs, GOT_VERSION_REGNUM);
    }
}

/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
   previous frame.  */

rtx
mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
}

/* Emit code to change the current function's return address to
   ADDRESS.  SCRATCH is available as a scratch register, if needed.
   ADDRESS and SCRATCH are both word-mode GPRs.  */

void
mips_set_return_address (rtx address, rtx scratch)
{
  rtx slot_address;

  gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
  slot_address = mips_add_offset (scratch, stack_pointer_rtx,
                                  cfun->machine->frame.gp_sp_offset);
  mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
}

/* Return a MEM rtx for the cprestore slot, using TEMP as a temporary base
   register if need be.  */

static rtx
mips_cprestore_slot (rtx temp)
{
  const struct mips_frame_info *frame;
  rtx base;
  HOST_WIDE_INT offset;

  frame = &cfun->machine->frame;
  if (frame_pointer_needed)
    {
      base = hard_frame_pointer_rtx;
      offset = frame->args_size - frame->hard_frame_pointer_offset;
    }
  else
    {
      base = stack_pointer_rtx;
      offset = frame->args_size;
    }
  return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
}

/* Restore $gp from its save slot, using TEMP as a temporary base register
   if need be.  This function is for o32 and o64 abicalls only.  */

void
mips_restore_gp (rtx temp)
{
  gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);

  if (cfun->machine->global_pointer == 0)
    return;

  if (TARGET_MIPS16)
    {
      mips_emit_move (temp, mips_cprestore_slot (temp));
      mips_emit_move (pic_offset_table_rtx, temp);
    }
  else
    mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp));
  if (!TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());
}

/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);

/* Use FN to save or restore register REGNO.  MODE is the register's
   mode and OFFSET is the offset of its save slot from the current
   stack pointer.  */

static void
mips_save_restore_reg (enum machine_mode mode, int regno,
                       HOST_WIDE_INT offset, mips_save_restore_fn fn)
{
  rtx mem;

  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
  fn (gen_rtx_REG (mode, regno), mem);
}

/* Call FN for each register that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
{
  enum machine_mode fpr_mode;
  HOST_WIDE_INT offset;
  int regno;

  /* Save registers starting from high to low.  The debuggers prefer that
     at least the return register be stored at func+4, and also it allows
     us not to need a nop in the epilogue if at least one register is
     reloaded in addition to the return address.  */
  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
      {
        mips_save_restore_reg (word_mode, regno, offset, fn);
        offset -= UNITS_PER_WORD;
      }

  /* This loop must iterate over the same space as its companion in
     mips_compute_frame_info.  */
  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
  for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
      {
        mips_save_restore_reg (fpr_mode, regno, offset, fn);
        offset -= GET_MODE_SIZE (fpr_mode);
      }
}

/* If we're generating n32 or n64 abicalls, and the current function
   does not use $28 as its global pointer, emit a cplocal directive.
   Use pic_offset_table_rtx as the argument to the directive.  */

static void
mips_output_cplocal (void)
{
  if (!TARGET_EXPLICIT_RELOCS
      && cfun->machine->global_pointer > 0
      && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
    output_asm_insn (".cplocal %+", 0);
}

/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE.  */

static void
mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  const char *fnname;

#ifdef SDB_DEBUGGING_INFO
  if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
    SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
#endif

  /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
     floating-point arguments.  */
  if (TARGET_MIPS16
      && TARGET_HARD_FLOAT_ABI
      && crtl->args.info.fp_code != 0)
    mips16_build_function_stub ();

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_start_function_definition (fnname, TARGET_MIPS16);

  /* Stop mips_file_end from treating this function as external.  */
  if (TARGET_IRIX && mips_abi == ABI_32)
    TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;

  /* Output MIPS-specific frame information.  */
  if (!flag_inhibit_size_directive)
    {
      const struct mips_frame_info *frame;

      frame = &cfun->machine->frame;

      /* .frame FRAMEREG, FRAMESIZE, RETREG.  */
      fprintf (file,
               "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
               "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
               ", args= " HOST_WIDE_INT_PRINT_DEC
               ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
               reg_names[frame_pointer_needed
                         ? HARD_FRAME_POINTER_REGNUM
                         : STACK_POINTER_REGNUM],
               (frame_pointer_needed
                ? frame->total_size - frame->hard_frame_pointer_offset
                : frame->total_size),
               reg_names[GP_REG_FIRST + 31],
               frame->var_size,
               frame->num_gp, frame->num_fp,
               frame->args_size,
               frame->cprestore_size);

      /* .mask MASK, OFFSET.  */
      fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               frame->mask, frame->gp_save_offset);

      /* .fmask MASK, OFFSET.  */
      fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               frame->fmask, frame->fp_save_offset);
    }

  /* Handle the initialization of $gp for SVR4 PIC, if applicable.
     Also emit the ".set noreorder; .set nomacro" sequence for functions
     that need it.  */
  if (mips_current_loadgp_style () == LOADGP_OLDABI)
    {
      if (TARGET_MIPS16)
        {
          /* This is a fixed-form sequence.  The position of the
             first two instructions is important because of the
             way _gp_disp is defined.  */
          output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
          output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
          output_asm_insn ("sll\t$2,16", 0);
          output_asm_insn ("addu\t$2,$3", 0);
        }
      /* .cpload must be in a .set noreorder but not a .set nomacro block.  */
      else if (!cfun->machine->all_noreorder_p)
        output_asm_insn ("%(.cpload\t%^%)", 0);
      else
        output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
    }
  else if (cfun->machine->all_noreorder_p)
    output_asm_insn ("%(%<", 0);

  /* Tell the assembler which register we're using as the global
     pointer.  This is needed for thunks, since they can use either
     explicit relocs or assembler macros.  */
  mips_output_cplocal ();
}

/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE.  */

static void
mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  const char *fnname;

  /* Reinstate the normal $gp.  */
  SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
  mips_output_cplocal ();

  if (cfun->machine->all_noreorder_p)
    {
      /* Avoid using %>%) since it adds excess whitespace.  */
      output_asm_insn (".set\tmacro", 0);
      output_asm_insn (".set\treorder", 0);
      set_noreorder = set_nomacro = 0;
    }

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_end_function_definition (fnname);
}

/* Save register REG to MEM.  Make the instruction frame-related.  */

static void
mips_save_reg (rtx reg, rtx mem)
{
  if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
    {
      rtx x1, x2;

      if (mips_split_64bit_move_p (mem, reg))
        mips_split_doubleword_move (mem, reg);
      else
        mips_emit_move (mem, reg);

      x1 = mips_frame_set (mips_subword (mem, false),
                           mips_subword (reg, false));
      x2 = mips_frame_set (mips_subword (mem, true),
                           mips_subword (reg, true));
      mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
    }
  else
    {
      if (TARGET_MIPS16
          && REGNO (reg) != GP_REG_FIRST + 31
          && !M16_REG_P (REGNO (reg)))
        {
          /* Save a non-MIPS16 register by moving it through a temporary.
             We don't need to do this for $31 since there's a special
             instruction for it.  */
          mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
          mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
        }
      else
        mips_emit_move (mem, reg);

      mips_set_frame_expr (mips_frame_set (mem, reg));
    }
}

/* The __gnu_local_gp symbol.  */

static GTY(()) rtx mips_gnu_local_gp;

/* If we're generating n32 or n64 abicalls, emit instructions
   to set up the global pointer.  */

static void
mips_emit_loadgp (void)
{
  rtx addr, offset, incoming_address, base, index, pic_reg;

  pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
  switch (mips_current_loadgp_style ())
    {
    case LOADGP_ABSOLUTE:
      if (mips_gnu_local_gp == NULL)
        {
          mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
          SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
        }
      emit_insn (Pmode == SImode
                 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
                 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
      break;

    case LOADGP_OLDABI:
      /* Added by mips_output_function_prologue.  */
      break;

    case LOADGP_NEWABI:
      addr = XEXP (DECL_RTL (current_function_decl), 0);
      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn (Pmode == SImode
                 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
                 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
      break;

    case LOADGP_RTP:
      base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
      index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
      emit_insn (Pmode == SImode
                 ? gen_loadgp_rtp_si (pic_reg, base, index)
                 : gen_loadgp_rtp_di (pic_reg, base, index));
      break;

    default:
      return;
    }

  if (TARGET_MIPS16)
    emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));

  /* Emit a blockage if there are implicit uses of the GP register.
     This includes profiled functions, because FUNCTION_PROFILER uses
     a jal macro.  */
  if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
    emit_insn (gen_loadgp_blockage ());
}

/* Expand the "prologue" pattern.  */

void
mips_expand_prologue (void)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT size;
  unsigned int nargs;
  rtx insn;

  if (cfun->machine->global_pointer > 0)
    SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

  frame = &cfun->machine->frame;
  size = frame->total_size;

  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
     bytes beforehand; this is enough to cover the register save area
     without going out of range.  */
  if ((frame->mask | frame->fmask) != 0)
    {
      HOST_WIDE_INT step1;

      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
      if (GENERATE_MIPS16E_SAVE_RESTORE)
        {
          HOST_WIDE_INT offset;
          unsigned int mask, regno;

          /* Try to merge argument stores into the save instruction.  */
          nargs = mips16e_collect_argument_saves ();

          /* Build the save instruction.  */
          mask = frame->mask;
          insn = mips16e_build_save_restore (false, &mask, &offset,
                                             nargs, step1);
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
          size -= step1;

          /* Check if we need to save other registers.  */
          for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
            if (BITSET_P (mask, regno - GP_REG_FIRST))
              {
                offset -= UNITS_PER_WORD;
                mips_save_restore_reg (word_mode, regno,
                                       offset, mips_save_reg);
              }
        }
      else
        {
          insn = gen_add3_insn (stack_pointer_rtx,
                                stack_pointer_rtx,
                                GEN_INT (-step1));
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
          size -= step1;
          mips_for_each_saved_reg (size, mips_save_reg);
        }
    }

  /* Allocate the rest of the frame.  */
  if (size > 0)
    {
      if (SMALL_OPERAND (-size))
        RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-size)))) = 1;
      else
        {
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
          if (TARGET_MIPS16)
            {
              /* There are no instructions to add or subtract registers
                 from the stack pointer, so use the frame pointer as a
                 temporary.  We should always be using a frame pointer
                 in this case anyway.  */
              gcc_assert (frame_pointer_needed);
              mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
              emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        MIPS_PROLOGUE_TEMP (Pmode)));
              mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
            }
          else
            emit_insn (gen_sub3_insn (stack_pointer_rtx,
                                      stack_pointer_rtx,
                                      MIPS_PROLOGUE_TEMP (Pmode)));

          /* Describe the combined effect of the previous instructions.  */
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                          plus_constant (stack_pointer_rtx, -size)));
        }
    }

  /* Set up the frame pointer, if we're using one.  */
  if (frame_pointer_needed)
    {
      HOST_WIDE_INT offset;

      offset = frame->hard_frame_pointer_offset;
      if (offset == 0)
        {
          insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (SMALL_OPERAND (offset))
        {
          insn = gen_add3_insn (hard_frame_pointer_rtx,
                                stack_pointer_rtx, GEN_INT (offset));
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
        }
      else
        {
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
          mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
                                    hard_frame_pointer_rtx,
                                    MIPS_PROLOGUE_TEMP (Pmode)));
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                          plus_constant (stack_pointer_rtx, offset)));
        }
    }

  mips_emit_loadgp ();

  /* Initialize the $gp save slot.  */
  if (frame->cprestore_size > 0
      && cfun->machine->global_pointer != 0)
    {
      if (TARGET_MIPS16)
        mips_emit_move (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
                        MIPS16_PIC_TEMP);
      else if (TARGET_ABICALLS_PIC2)
        emit_insn (gen_cprestore (GEN_INT (frame->args_size)));
      else
        emit_move_insn (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
                        pic_offset_table_rtx);
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (crtl->profile)
    emit_insn (gen_blockage ());
}

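/* Note on the allocation strategy above: the prologue always allocates
   MIN (size, MIPS_MAX_FIRST_STACK_STEP) bytes first, so that every
   register save slot is within a 16-bit offset of the new $sp; the
   remainder of the frame is then allocated with a single addiu when it
   is a SMALL_OPERAND and through a temporary register otherwise.  */
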
/* Emit instructions to restore register REG from slot MEM.  */

static void
mips_restore_reg (rtx reg, rtx mem)
{
  /* There's no MIPS16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
    reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);

  if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
    {
      /* Can't restore directly; move through a temporary.  */
      mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
      mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
    }
  else
    mips_emit_move (reg, mem);
}

/* Emit any instructions needed before a return.  */

void
mips_expand_before_return (void)
{
  /* When using a call-clobbered gp, we start out with unified call
     insns that include instructions to restore the gp.  We then split
     these unified calls after reload.  These split calls explicitly
     clobber gp, so there is no need to define
     PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.

     For consistency, we should also insert an explicit clobber of $28
     before return insns, so that the post-reload optimizers know that
     the register is not live on exit.  */
  if (TARGET_CALL_CLOBBERED_GP)
    emit_clobber (pic_offset_table_rtx);
}

/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
   says which.  */

void
mips_expand_epilogue (bool sibcall_p)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT step1, step2;
  rtx base, target;

  if (!sibcall_p && mips_can_use_return_insn ())
    {
      emit_jump_insn (gen_return ());
      return;
    }

  /* In MIPS16 mode, if the return value should go into a floating-point
     register, we need to call a helper routine to copy it over.  */
  if (mips16_cfun_returns_in_fpr_p ())
    mips16_copy_fpr_return_value ();

  /* Split the frame into two.  STEP1 is the amount of stack we should
     deallocate before restoring the registers.  STEP2 is the amount we
     should deallocate afterwards.

     Start off by assuming that no registers need to be restored.  */
  frame = &cfun->machine->frame;
  step1 = frame->total_size;
  step2 = 0;

  /* Work out which register holds the frame address.  */
  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
  else
    {
      base = hard_frame_pointer_rtx;
      step1 -= frame->hard_frame_pointer_offset;
    }

  /* If we need to restore registers, deallocate as much stack as
     possible in the second step without going out of range.  */
  if ((frame->mask | frame->fmask) != 0)
    {
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
      step1 -= step2;
    }

  /* Set TARGET to BASE + STEP1.  */
  target = base;
  if (step1 > 0)
    {
      rtx adjust;

      /* Get an rtx for STEP1 that we can add to BASE.  */
      adjust = GEN_INT (step1);
      if (!SMALL_OPERAND (step1))
        {
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
          adjust = MIPS_EPILOGUE_TEMP (Pmode);
        }

      /* Normal mode code can copy the result straight into $sp.  */
      if (!TARGET_MIPS16)
        target = stack_pointer_rtx;

      emit_insn (gen_add3_insn (target, base, adjust));
    }

  /* Copy TARGET into the stack pointer.  */
  if (target != stack_pointer_rtx)
    mips_emit_move (stack_pointer_rtx, target);

  /* If we're using addressing macros, $gp is implicitly used by all
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
     from its save slot.  */
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());

  if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
    {
      unsigned int regno, mask;
      HOST_WIDE_INT offset;
      rtx restore;

      /* Generate the restore instruction.  */
      mask = frame->mask;
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);

      /* Restore any other registers manually.  */
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
        if (BITSET_P (mask, regno - GP_REG_FIRST))
          {
            offset -= UNITS_PER_WORD;
            mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
          }

      /* Restore the remaining registers and deallocate the final bit
         of the frame.  */
      emit_insn (restore);
    }
  else
    {
      /* Restore the registers.  */
      mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);

      /* Deallocate the final bit of the frame.  */
      if (step2 > 0)
        emit_insn (gen_add3_insn (stack_pointer_rtx,
                                  stack_pointer_rtx,
                                  GEN_INT (step2)));
    }

  /* Add in the __builtin_eh_return stack adjustment.  We need to
     use a temporary in MIPS16 code.  */
  if (crtl->calls_eh_return)
    {
      if (TARGET_MIPS16)
        {
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
          emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
                                    MIPS_EPILOGUE_TEMP (Pmode),
                                    EH_RETURN_STACKADJ_RTX));
          mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
        }
      else
        emit_insn (gen_add3_insn (stack_pointer_rtx,
                                  stack_pointer_rtx,
                                  EH_RETURN_STACKADJ_RTX));
    }

  if (!sibcall_p)
    {
      unsigned int regno;

      /* When generating MIPS16 code, the normal mips_for_each_saved_reg
         path will restore the return address into $7 rather than $31.  */
      if (TARGET_MIPS16
          && !GENERATE_MIPS16E_SAVE_RESTORE
          && BITSET_P (frame->mask, 31))
        regno = GP_REG_FIRST + 7;
      else
        regno = GP_REG_FIRST + 31;
      mips_expand_before_return ();
      emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
    }
}

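/* The epilogue mirrors the prologue's two-step approach: STEP1 (the bulk
   of the frame) is released while the saved registers are still within
   reach of $sp, and STEP2 -- at most one MIPS_MAX_FIRST_STACK_STEP-sized
   chunk covering the register save area -- is deallocated only after the
   restores, or folded into the MIPS16e RESTORE instruction.  */
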
/* Return nonzero if this function is known to have a null epilogue.
   This allows the optimizer to omit jumps to jumps if no stack
   was created.  */

bool
mips_can_use_return_insn (void)
{
  if (!reload_completed)
    return false;

  if (crtl->profile)
    return false;

  /* In MIPS16 mode, a function that returns a floating-point value
     needs to arrange to copy the return value into the floating-point
     registers.  */
  if (mips16_cfun_returns_in_fpr_p ())
    return false;

  return cfun->machine->frame.total_size == 0;
}

/* Return true if register REGNO can store a value of mode MODE.
   The result of this function is cached in mips_hard_regno_mode_ok.  */

static bool
mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
{
  unsigned int size;
  enum mode_class mclass;

  if (mode == CCV2mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 2 == 0);

  if (mode == CCV4mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 4 == 0);

  if (mode == CCmode)
    {
      if (!ISA_HAS_8CC)
        return regno == FPSW_REGNUM;

      return (ST_REG_P (regno)
              || GP_REG_P (regno)
              || FP_REG_P (regno));
    }

  size = GET_MODE_SIZE (mode);
  mclass = GET_MODE_CLASS (mode);

  if (GP_REG_P (regno))
    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;

  if (FP_REG_P (regno)
      && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
          || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
    {
      /* Allow TFmode for CCmode reloads.  */
      if (mode == TFmode && ISA_HAS_8CC)
        return true;

      /* Allow 64-bit vector modes for Loongson-2E/2F.  */
      if (TARGET_LOONGSON_VECTORS
          && (mode == V2SImode
              || mode == V4HImode
              || mode == V8QImode
              || mode == DImode))
        return true;

      if (mclass == MODE_FLOAT
          || mclass == MODE_COMPLEX_FLOAT
          || mclass == MODE_VECTOR_FLOAT)
        return size <= UNITS_PER_FPVALUE;

      /* Allow integer modes that fit into a single register.  We need
         to put integers into FPRs when using instructions like CVT
         and TRUNC.  There's no point allowing sizes smaller than a word,
         because the FPU has no appropriate load/store instructions.  */
      if (mclass == MODE_INT)
        return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
    }

  if (ACC_REG_P (regno)
      && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
    {
      if (MD_REG_P (regno))
        {
          /* After a multiplication or division, clobbering HI makes
             the value of LO unpredictable, and vice versa.  This means
             that, for all interesting cases, HI and LO are effectively
             a single register.

             We model this by requiring that any value that uses HI
             also uses LO.  */
          if (size <= UNITS_PER_WORD * 2)
            return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
        }
      else
        {
          /* DSP accumulators do not have the same restrictions as
             HI and LO, so we can treat them as normal doubleword
             registers.  */
          if (size <= UNITS_PER_WORD)
            return true;

          if (size <= UNITS_PER_WORD * 2
              && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
            return true;
        }
    }

  if (ALL_COP_REG_P (regno))
    return mclass == MODE_INT && size <= UNITS_PER_WORD;

  if (regno == GOT_VERSION_REGNUM)
    return mode == SImode;

  return false;
}

/* Implement HARD_REGNO_NREGS.  */

unsigned int
mips_hard_regno_nregs (int regno, enum machine_mode mode)
{
  if (ST_REG_P (regno))
    /* The size of FP status registers is always 4, because they only hold
       CCmode values, and CCmode is always considered to be 4 bytes wide.  */
    return (GET_MODE_SIZE (mode) + 3) / 4;

  if (FP_REG_P (regno))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;

  /* All other registers are word-sized.  */
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}

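/* For example, with 32-bit FPRs (UNITS_PER_FPREG == 4) a DFmode value
   needs (8 + 4 - 1) / 4 == 2 FPRs, whereas with 64-bit FPRs it needs
   only one; a CCmode value always fits in a single FP status register
   because its size is 4.  */
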
/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
   in mips_hard_regno_nregs.  */

int
mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  int size;
  HARD_REG_SET left;

  size = 0x8000;
  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
    {
      size = MIN (size, 4);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
    }
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
    {
      size = MIN (size, UNITS_PER_FPREG);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
    }
  if (!hard_reg_set_empty_p (left))
    size = MIN (size, UNITS_PER_WORD);
  return (GET_MODE_SIZE (mode) + size - 1) / size;
}

/* Implement CANNOT_CHANGE_MODE_CLASS.  */

bool
mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
                               enum machine_mode to ATTRIBUTE_UNUSED,
                               enum reg_class rclass)
{
  /* There are several problems with changing the modes of values
     in floating-point registers:

     - When a multi-word value is stored in paired floating-point
       registers, the first register always holds the low word.
       We therefore can't allow FPRs to change between single-word
       and multi-word modes on big-endian targets.

     - GCC assumes that each word of a multiword register can be accessed
       individually using SUBREGs.  This is not true for floating-point
       registers if they are bigger than a word.

     - Loading a 32-bit value into a 64-bit floating-point register
       will not sign-extend the value, despite what LOAD_EXTEND_OP says.
       We can't allow FPRs to change from SImode to a wider mode on
       64-bit targets.

     - If the FPU has already interpreted a value in one format, we must
       not ask it to treat the value as having a different format.

     We therefore disallow all mode changes involving FPRs.  */
  return reg_classes_intersect_p (FP_REGS, rclass);
}

/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction.  */

static bool
mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_HARD_FLOAT;

    case DFmode:
      return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;

    case V2SFmode:
      return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;

    default:
      return false;
    }
}

/* Implement MODES_TIEABLE_P.  */

bool
mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* FPRs allow no mode punning, so it's not worth tying modes if we'd
     prefer to put one of them in FPRs.  */
  return (mode1 == mode2
          || (!mips_mode_ok_for_mov_fmt_p (mode1)
              && !mips_mode_ok_for_mov_fmt_p (mode2)));
}

/* Implement PREFERRED_RELOAD_CLASS.  */

enum reg_class
mips_preferred_reload_class (rtx x, enum reg_class rclass)
{
  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
    return LEA_REGS;

  if (reg_class_subset_p (FP_REGS, rclass)
      && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
    return FP_REGS;

  if (reg_class_subset_p (GR_REGS, rclass))
    rclass = GR_REGS;

  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
    rclass = M16_REGS;

  return rclass;
}

/* Implement REGISTER_MOVE_COST.  */

int
mips_register_move_cost (enum machine_mode mode,
                         enum reg_class to, enum reg_class from)
{
  if (TARGET_MIPS16)
    {
      /* ??? We cannot move general registers into HI and LO because
         MIPS16 has no MTHI and MTLO instructions.  Make the cost of
         moves in the opposite direction just as high, which stops the
         register allocators from using HI and LO for pseudos.  */
      if (reg_class_subset_p (from, GENERAL_REGS)
          && reg_class_subset_p (to, GENERAL_REGS))
        {
          if (reg_class_subset_p (from, M16_REGS)
              || reg_class_subset_p (to, M16_REGS))
            return 2;
          /* Two MOVEs.  */
          return 4;
        }
    }
  else if (reg_class_subset_p (from, GENERAL_REGS))
    {
      if (reg_class_subset_p (to, GENERAL_REGS))
        return 2;
      if (reg_class_subset_p (to, FP_REGS))
        return 4;
      if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
        return 5;
      if (reg_class_subset_p (to, ACC_REGS))
        return 6;
    }
  else if (reg_class_subset_p (to, GENERAL_REGS))
    {
      if (reg_class_subset_p (from, FP_REGS))
        return 4;
      if (reg_class_subset_p (from, ST_REGS))
        /* LUI followed by MOVF.  */
        return 4;
      if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
        return 5;
      if (reg_class_subset_p (from, ACC_REGS))
        return 6;
    }
  else if (reg_class_subset_p (from, FP_REGS))
    {
      if (reg_class_subset_p (to, FP_REGS)
          && mips_mode_ok_for_mov_fmt_p (mode))
        return 4;
      if (reg_class_subset_p (to, ST_REGS))
        /* An expensive sequence.  */
        return 8;
    }

  return 12;
}

/* Return the register class required for a secondary register when
   copying between one of the registers in RCLASS and value X, which
   has mode MODE.  X is the source of the move if IN_P, otherwise it
   is the destination.  Return NO_REGS if no secondary register is
   needed.  */

enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
                             enum machine_mode mode, rtx x, bool in_p)
{
  int regno;

  /* If X is a constant that cannot be loaded into $25, it must be loaded
     into some other GPR.  No other register class allows a direct move.  */
  if (mips_dangerous_for_la25_p (x))
    return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;

  regno = true_regnum (x);
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, every move must involve a member of M16_REGS.  */
      if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
        return M16_REGS;

      /* We can't really copy to HI or LO at all in MIPS16 mode.  */
      if (in_p ? reg_classes_intersect_p (rclass, ACC_REGS) : ACC_REG_P (regno))
        return M16_REGS;

      return NO_REGS;
    }

  /* Copying from accumulator registers to anywhere other than a general
     register requires a temporary general register.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    return GP_REG_P (regno) ? NO_REGS : GR_REGS;
  if (ACC_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  /* We can only copy a value to a condition code register from a
     floating-point register, and even then we require a scratch
     floating-point register.  We can only copy a value out of a
     condition-code register into a general register.  */
  if (reg_class_subset_p (rclass, ST_REGS))
    {
      if (in_p)
        return FP_REGS;
      return GP_REG_P (regno) ? NO_REGS : GR_REGS;
    }
  if (ST_REG_P (regno))
    {
      if (!in_p)
        return FP_REGS;
      return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
    }

  if (reg_class_subset_p (rclass, FP_REGS))
    {
      if (MEM_P (x)
          && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
        /* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
           pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
        return NO_REGS;

      if (GP_REG_P (regno) || x == CONST0_RTX (mode))
        /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
        return NO_REGS;

      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
        /* We can force the constant to memory and use lwc1
           and ldc1.  As above, we will use pairs of lwc1s if
           ldc1 is not supported.  */
        return NO_REGS;

      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
        /* In this case we can use mov.fmt.  */
        return NO_REGS;

      /* Otherwise, we need to reload through an integer register.  */
      return GR_REGS;
    }
  if (FP_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  return NO_REGS;
}

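/* For example, copying an SImode value out of LO into a floating-point
   register cannot be done directly: the accumulator case above returns
   GR_REGS, so reload first moves the value into a GPR (mflo) and only
   then into the FPR (mtc1).  */
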
/* Implement TARGET_MODE_REP_EXTENDED.  */

static int
mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
{
  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
    return SIGN_EXTEND;

  return UNKNOWN;
}

/* Implement TARGET_VALID_POINTER_MODE.  */

static bool
mips_valid_pointer_mode (enum machine_mode mode)
{
  return mode == SImode || (TARGET_64BIT && mode == DImode);
}

/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */

static bool
mips_vector_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V2SFmode:
      return TARGET_PAIRED_SINGLE_FLOAT;

    case V2SImode:
    case V4HImode:
    case V8QImode:
      return TARGET_LOONGSON_VECTORS;

    default:
      return false;
    }
}

/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */

static bool
mips_scalar_mode_supported_p (enum machine_mode mode)
{
  if (ALL_FIXED_POINT_MODE_P (mode)
      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
    return true;

  return default_scalar_mode_supported_p (mode);
}

/* Implement TARGET_INIT_LIBFUNCS.  */

#include "config/gofast.h"

static void
mips_init_libfuncs (void)
{
  if (TARGET_FIX_VR4120)
    {
      /* Register the special divsi3 and modsi3 functions needed to work
         around VR4120 division errata.  */
      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
    }

  if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
    {
      /* Register the MIPS16 -mhard-float stubs.  */
      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");

      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
      set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");

      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
      set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");

      if (TARGET_DOUBLE_FLOAT)
        {
          set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
          set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
          set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
          set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");

          set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
          set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
          set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
          set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
          set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
          set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
          set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");

          set_conv_libfunc (sext_optab, DFmode, SFmode,
                            "__mips16_extendsfdf2");
          set_conv_libfunc (trunc_optab, SFmode, DFmode,
                            "__mips16_truncdfsf2");
          set_conv_libfunc (sfix_optab, SImode, DFmode,
                            "__mips16_fix_truncdfsi");
          set_conv_libfunc (sfloat_optab, DFmode, SImode,
                            "__mips16_floatsidf");
          set_conv_libfunc (ufloat_optab, DFmode, SImode,
                            "__mips16_floatunsidf");
        }
    }
  else
    /* Register the gofast functions if selected using --enable-gofast.  */
    gofast_maybe_init_libfuncs ();

  /* The MIPS16 ISA does not have an encoding for "sync", so we rely
     on an external non-MIPS16 routine to implement __sync_synchronize.  */
  if (TARGET_MIPS16)
    synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
}

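/* The practical effect of the MIPS16 registrations above: under
   -mips16 -mhard-float, an SFmode addition such as "a + b" is not
   expanded inline but compiled as a call to __mips16_addsf3, a
   non-MIPS16 libgcc helper that performs the operation using the FPU.  */
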
/* Return the length of INSN.  LENGTH is the initial length computed by
   attributes in the machine-description file.  */

int
mips_adjust_insn_length (rtx insn, int length)
{
  /* An unconditional jump has an unfilled delay slot if it is not part
     of a sequence.  A conditional jump normally has a delay slot, but
     does not on MIPS16.  */
  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
    length += 4;

  /* See how many nops might be needed to avoid hardware hazards.  */
  if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
        break;

      case HAZARD_DELAY:
        length += 4;
        break;

      case HAZARD_HILO:
        length += 8;
        break;
      }

  /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
     the .md file length attributes are 4-based for both modes.
     Adjust the MIPS16 ones here.  */
  if (TARGET_MIPS16)
    length /= 2;

  return length;
}

/* Return an asm sequence to start a noat block and load the address
   of a label into $1.  */

const char *
mips_output_load_label (void)
{
  if (TARGET_EXPLICIT_RELOCS)
    switch (mips_abi)
      {
      case ABI_N32:
        return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";

      case ABI_64:
        return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";

      default:
        if (ISA_HAS_LOAD_DELAY)
          return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
        return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
      }
  else
    {
      if (Pmode == DImode)
        return "%[dla\t%@,%0";
      else
        return "%[la\t%@,%0";
    }
}

/* Return the assembly code for INSN, which has the operands given by
   OPERANDS, and which branches to OPERANDS[1] if some condition is true.
   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
   version of BRANCH_IF_TRUE.  */

const char *
mips_output_conditional_branch (rtx insn, rtx *operands,
                                const char *branch_if_true,
                                const char *branch_if_false)
{
  unsigned int length;
  rtx taken, not_taken;

  length = get_attr_length (insn);
  if (length <= 8)
    {
      /* Just a simple conditional branch.  */
      mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
      return branch_if_true;
    }

  /* Generate a reversed branch around a direct jump.  This fallback does
     not use branch-likely instructions.  */
  mips_branch_likely = false;
  not_taken = gen_label_rtx ();
  taken = operands[1];

  /* Generate the reversed branch to NOT_TAKEN.  */
  operands[1] = not_taken;
  output_asm_insn (branch_if_false, operands);

  /* If INSN has a delay slot, we must provide delay slots for both the
     branch to NOT_TAKEN and the conditional jump.  We must also ensure
     that INSN's delay slot is executed in the appropriate cases.  */
  if (final_sequence)
    {
      /* This first delay slot will always be executed, so use INSN's
         delay slot if it is not annulled.  */
      if (!INSN_ANNULLED_BRANCH_P (insn))
        {
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
                           asm_out_file, optimize, 1, NULL);
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
        }
      else
        output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output the unconditional branch to TAKEN.  */
  if (length <= 16)
    output_asm_insn ("j\t%0%/", &taken);
  else
    {
      output_asm_insn (mips_output_load_label (), &taken);
      output_asm_insn ("jr\t%@%]%/", 0);
    }

  /* Now deal with its delay slot; see above.  */
  if (final_sequence)
    {
      /* This delay slot will only be executed if the branch is taken.
         Use INSN's delay slot if it is annulled.  */
      if (INSN_ANNULLED_BRANCH_P (insn))
        {
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
                           asm_out_file, optimize, 1, NULL);
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
        }
      else
        output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output NOT_TAKEN.  */
  targetm.asm_out.internal_label (asm_out_file, "L",
                                  CODE_LABEL_NUMBER (not_taken));
  return "";
}

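/* For example, an out-of-range "beq <op1>,<op2>,<target>" would be
   rewritten by the code above along the lines of

        bne     <op1>,<op2>,<Lskip>
         nop
        j       <target>
         nop
   <Lskip>:

   (a purely illustrative layout; the label and operand names are
   placeholders).  The two delay slots are filled from INSN's own delay
   slot when final_sequence allows it.  */
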
/* Return the assembly code for INSN, which branches to OPERANDS[1]
   if some ordering condition is true.  The condition is given by
   OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
   OPERANDS[0].  OPERANDS[2] is the comparison's first operand;
   its second is always zero.  */

const char *
mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
{
  const char *branch[2];

  /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
     Make BRANCH[0] branch on the inverse condition.  */
  switch (GET_CODE (operands[0]))
    {
      /* These cases are equivalent to comparisons against zero.  */
    case LEU:
      inverted_p = !inverted_p;
      /* Fall through.  */
    case GTU:
      branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
      branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
      break;

      /* These cases are always true or always false.  */
    case LTU:
      inverted_p = !inverted_p;
      /* Fall through.  */
    case GEU:
      branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
      branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
      break;

    default:
      branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
      branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
      break;
    }
  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
}

/* Return the assembly code for DIV or DDIV instruction DIVISION, which has
   the operands given by OPERANDS.  Add in a divide-by-zero check if needed.

   When working around R4000 and R4400 errata, we need to make sure that
   the division is not immediately followed by a shift[1][2].  We also
   need to stop the division from being put into a branch delay slot[3].
   The easiest way to avoid both problems is to add a nop after the
   division.  When a divide-by-zero check is needed, this nop can be
   used to fill the branch delay slot.

   [1] If a double-word or a variable shift executes immediately
       after starting an integer division, the shift may give an
       incorrect result.  See quotations of errata #16 and #28 from
       "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
       in mips.md for details.

   [2] A similar bug to [1] exists for all revisions of the
       R4000 and the R4400 when run in an MC configuration.
       From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":

       "19. In this following sequence:

                ddiv    (or ddivu or div or divu)
                dsll32  (or dsrl32, dsra32)

            if an MPT stall occurs, while the divide is slipping the cpu
            pipeline, then the following double shift would end up with an
            incorrect result.

            Workaround: The compiler needs to avoid generating any
            sequence with divide followed by extended double shift."

       This erratum is also present in "MIPS R4400MC Errata, Processor
       Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
       & 3.0" as errata #10 and #4, respectively.

   [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
       (also valid for MIPS R4000MC processors):

       "52. R4000SC: This bug does not apply for the R4000PC.

            There are two flavors of this bug:

            1) If the instruction just after divide takes an RF exception
               (tlb-refill, tlb-invalid) and gets an instruction cache
               miss (both primary and secondary) and the line which is
               currently in secondary cache at this index had the first
               data word, where the bits 5..2 are set, then R4000 would
               get a wrong result for the div.

               -------------------              # end-of page.  -tlb-refill
               -------------------              # end-of page.  -tlb-invalid

            2) If the divide is in the taken branch delay slot, where the
               target takes RF exception and gets an I-cache miss for the
               exception vector or where I-cache miss occurs for the
               target address, under the above mentioned scenarios, the
               div would get wrong results.

               j    r2              # to next page mapped or unmapped
               div  r8,r9           # this bug would be there as long
                                    # as there is an ICache miss and
               nop                  # the "data pattern" is present

               beq  r0, r0, NextPage        # to Next page

            This bug is present for div, divu, ddiv, and ddivu
            instructions.

            Workaround: For item 1), OS could make sure that the next page
            after the divide instruction is also mapped.  For item 2), the
            compiler could make sure that the divide instruction is not in
            the branch delay slot."

   These processors have PRId values of 0x00004220 and 0x00004300 for
   the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400.  */


const char *
mips_output_division (const char *division, rtx *operands)
{
  const char *s;

  s = division;
  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
    {
      output_asm_insn (s, operands);
      s = "nop";
    }
  if (TARGET_CHECK_ZERO_DIV)
    {
      if (TARGET_MIPS16)
        {
          output_asm_insn (s, operands);
          s = "bnez\t%2,1f\n\tbreak\t7\n1:";
        }
      else if (GENERATE_DIVIDE_TRAPS)
        {
          output_asm_insn (s, operands);
          s = "teq\t%2,%.,7";
        }
      else
        {
          output_asm_insn ("%(bne\t%2,%.,1f", operands);
          output_asm_insn (s, operands);
          s = "break\t7%)\n1:";
        }
    }
  return s;
}
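
/* Illustrative only (not from the original sources): with
   TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS, a division whose %1 and
   %2 operands happen to be $4 and $5 is emitted roughly as

        div     $0,$4,$5
        teq     $5,$0,7

   the division printed by output_asm_insn above, and the trap-on-zero check
   supplied by the returned string.  The register numbers are made up for
   exposition.  */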

/* Return true if IN_INSN is a multiply-add or multiply-subtract
   instruction and if OUT_INSN assigns to the accumulator operand.  */

bool
mips_linked_madd_p (rtx out_insn, rtx in_insn)
{
  rtx x;

  x = single_set (in_insn);
  if (x == 0)
    return false;

  x = SET_SRC (x);

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == MULT
      && reg_set_p (XEXP (x, 1), out_insn))
    return true;

  if (GET_CODE (x) == MINUS
      && GET_CODE (XEXP (x, 1)) == MULT
      && reg_set_p (XEXP (x, 0), out_insn))
    return true;

  return false;
}

/* True if the dependency between OUT_INSN and IN_INSN is on the store
   data rather than the address.  We need this because the cprestore
   pattern is type "store", but is defined using an UNSPEC_VOLATILE,
   which causes the default routine to abort.  We just return false
   for those cases.  */

bool
mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
{
  if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
    return false;

  return !store_data_bypass_p (out_insn, in_insn);
}
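
/* Illustrative example (not in the original sources): mips_linked_madd_p
   above returns true for a pair such as

        OUT_INSN:  (set (reg LO) (mult (reg $4) (reg $5)))
        IN_INSN:   (set (reg LO) (plus (mult (reg $6) (reg $7)) (reg LO)))

   because IN_INSN's source is a PLUS whose second operand is the
   accumulator register that OUT_INSN sets.  The register numbers are made
   up for exposition.  */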

/* Variables and flags used in scheduler hooks when tuning for
   Loongson 2E/2F.  */
static struct
{
  /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
     strategy.  */

  /* If true, then next ALU1/2 instruction will go to ALU1.  */
  bool alu1_turn_p;

  /* If true, then next FALU1/2 instruction will go to FALU1.  */
  bool falu1_turn_p;

  /* Codes to query if [f]alu{1,2}_core units are subscribed or not.  */
  int alu1_core_unit_code;
  int alu2_core_unit_code;
  int falu1_core_unit_code;
  int falu2_core_unit_code;

  /* True if current cycle has a multi instruction.
     This flag is used in mips_ls2_dfa_post_advance_cycle.  */
  bool cycle_has_multi_p;

  /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
     These are used in mips_ls2_dfa_post_advance_cycle to initialize
     DFA state.
     E.g., when alu1_turn_enabled_insn is issued it makes the next ALU1/2
     instruction go to ALU1.  */
  rtx alu1_turn_enabled_insn;
  rtx alu2_turn_enabled_insn;
  rtx falu1_turn_enabled_insn;
  rtx falu2_turn_enabled_insn;
} mips_ls2;

/* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
   dependencies have no cost, except on the 20Kc where output-dependence
   is treated like input-dependence.  */

static int
mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
                  rtx dep ATTRIBUTE_UNUSED, int cost)
{
  if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
      && TUNE_20KC)
    return cost;
  if (REG_NOTE_KIND (link) != 0)
    return 0;
  return cost;
}

/* Return the number of instructions that can be issued per cycle.  */

static int
mips_issue_rate (void)
{
  switch (mips_tune)
    {
    case PROCESSOR_74KC:
    case PROCESSOR_74KF2_1:
    case PROCESSOR_74KF1_1:
    case PROCESSOR_74KF3_2:
      /* The 74k is not strictly a quad-issue cpu, but can be seen as one
         by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
         but in reality only a maximum of 3 insns can be issued as
         floating-point loads and stores also require a slot in the
         AGEN pipe.  */
      return 4;

    case PROCESSOR_20KC:
    case PROCESSOR_R4130:
    case PROCESSOR_R5400:
    case PROCESSOR_R5500:
    case PROCESSOR_R7000:
    case PROCESSOR_R9000:
      return 2;

    case PROCESSOR_SB1:
    case PROCESSOR_SB1A:
      /* This is actually 4, but we get better performance if we claim 3.
         This is partly because of unwanted speculative code motion with the
         larger number, and partly because in most common cases we can't
         reach the theoretical max of 4.  */
      return 3;

    case PROCESSOR_LOONGSON_2E:
    case PROCESSOR_LOONGSON_2F:
      return 4;

    default:
      return 1;
    }
}
10382 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
10385 mips_ls2_init_dfa_post_cycle_insn (void)
10388 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
10389 mips_ls2
.alu1_turn_enabled_insn
= get_insns ();
10393 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
10394 mips_ls2
.alu2_turn_enabled_insn
= get_insns ();
10398 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
10399 mips_ls2
.falu1_turn_enabled_insn
= get_insns ();
10403 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
10404 mips_ls2
.falu2_turn_enabled_insn
= get_insns ();
10407 mips_ls2
.alu1_core_unit_code
= get_cpu_unit_code ("ls2_alu1_core");
10408 mips_ls2
.alu2_core_unit_code
= get_cpu_unit_code ("ls2_alu2_core");
10409 mips_ls2
.falu1_core_unit_code
= get_cpu_unit_code ("ls2_falu1_core");
10410 mips_ls2
.falu2_core_unit_code
= get_cpu_unit_code ("ls2_falu2_core");
10413 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
10414 Init data used in mips_dfa_post_advance_cycle. */
10417 mips_init_dfa_post_cycle_insn (void)
10419 if (TUNE_LOONGSON_2EF
)
10420 mips_ls2_init_dfa_post_cycle_insn ();
10423 /* Initialize STATE when scheduling for Loongson 2E/2F.
10424 Support round-robin dispatch scheme by enabling only one of
10425 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
10429 mips_ls2_dfa_post_advance_cycle (state_t state
)
10431 if (cpu_unit_reservation_p (state
, mips_ls2
.alu1_core_unit_code
))
10433 /* Though there are no non-pipelined ALU1 insns,
10434 we can get an instruction of type 'multi' before reload. */
10435 gcc_assert (mips_ls2
.cycle_has_multi_p
);
10436 mips_ls2
.alu1_turn_p
= false;
10439 mips_ls2
.cycle_has_multi_p
= false;
10441 if (cpu_unit_reservation_p (state
, mips_ls2
.alu2_core_unit_code
))
10442 /* We have a non-pipelined alu instruction in the core,
10443 adjust round-robin counter. */
10444 mips_ls2
.alu1_turn_p
= true;
10446 if (mips_ls2
.alu1_turn_p
)
10448 if (state_transition (state
, mips_ls2
.alu1_turn_enabled_insn
) >= 0)
10449 gcc_unreachable ();
10453 if (state_transition (state
, mips_ls2
.alu2_turn_enabled_insn
) >= 0)
10454 gcc_unreachable ();
10457 if (cpu_unit_reservation_p (state
, mips_ls2
.falu1_core_unit_code
))
10459 /* There are no non-pipelined FALU1 insns. */
10460 gcc_unreachable ();
10461 mips_ls2
.falu1_turn_p
= false;
10464 if (cpu_unit_reservation_p (state
, mips_ls2
.falu2_core_unit_code
))
10465 /* We have a non-pipelined falu instruction in the core,
10466 adjust round-robin counter. */
10467 mips_ls2
.falu1_turn_p
= true;
10469 if (mips_ls2
.falu1_turn_p
)
10471 if (state_transition (state
, mips_ls2
.falu1_turn_enabled_insn
) >= 0)
10472 gcc_unreachable ();
10476 if (state_transition (state
, mips_ls2
.falu2_turn_enabled_insn
) >= 0)
10477 gcc_unreachable ();
10481 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
10482 This hook is being called at the start of each cycle. */
10485 mips_dfa_post_advance_cycle (void)
10487 if (TUNE_LOONGSON_2EF
)
10488 mips_ls2_dfa_post_advance_cycle (curr_state
);
10491 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10492 be as wide as the scheduling freedom in the DFA. */
10495 mips_multipass_dfa_lookahead (void)
10497 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10501 if (TUNE_LOONGSON_2EF
)
10507 /* Remove the instruction at index LOWER from ready queue READY and
10508 reinsert it in front of the instruction at index HIGHER. LOWER must
10512 mips_promote_ready (rtx
*ready
, int lower
, int higher
)
10517 new_head
= ready
[lower
];
10518 for (i
= lower
; i
< higher
; i
++)
10519 ready
[i
] = ready
[i
+ 1];
10520 ready
[i
] = new_head
;
10523 /* If the priority of the instruction at POS2 in the ready queue READY
10524 is within LIMIT units of that of the instruction at POS1, swap the
10525 instructions if POS2 is not already less than POS1. */
10528 mips_maybe_swap_ready (rtx
*ready
, int pos1
, int pos2
, int limit
)
10531 && INSN_PRIORITY (ready
[pos1
]) + limit
>= INSN_PRIORITY (ready
[pos2
]))
10535 temp
= ready
[pos1
];
10536 ready
[pos1
] = ready
[pos2
];
10537 ready
[pos2
] = temp
;
10541 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10542 that may clobber hi or lo. */
10543 static rtx mips_macc_chains_last_hilo
;
10545 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10546 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10549 mips_macc_chains_record (rtx insn
)
10551 if (get_attr_may_clobber_hilo (insn
))
10552 mips_macc_chains_last_hilo
= insn
;
10555 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10556 has NREADY elements, looking for a multiply-add or multiply-subtract
10557 instruction that is cumulative with mips_macc_chains_last_hilo.
10558 If there is one, promote it ahead of anything else that might
10559 clobber hi or lo. */
10562 mips_macc_chains_reorder (rtx
*ready
, int nready
)
10566 if (mips_macc_chains_last_hilo
!= 0)
10567 for (i
= nready
- 1; i
>= 0; i
--)
10568 if (mips_linked_madd_p (mips_macc_chains_last_hilo
, ready
[i
]))
10570 for (j
= nready
- 1; j
> i
; j
--)
10571 if (recog_memoized (ready
[j
]) >= 0
10572 && get_attr_may_clobber_hilo (ready
[j
]))
10574 mips_promote_ready (ready
, i
, j
);
10581 /* The last instruction to be scheduled. */
10582 static rtx vr4130_last_insn
;
10584 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10585 points to an rtx that is initially an instruction. Nullify the rtx
10586 if the instruction uses the value of register X. */
10589 vr4130_true_reg_dependence_p_1 (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
,
10594 insn_ptr
= (rtx
*) data
;
10597 && reg_referenced_p (x
, PATTERN (*insn_ptr
)))
10601 /* Return true if there is true register dependence between vr4130_last_insn
10605 vr4130_true_reg_dependence_p (rtx insn
)
10607 note_stores (PATTERN (vr4130_last_insn
),
10608 vr4130_true_reg_dependence_p_1
, &insn
);
10612 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10613 the ready queue and that INSN2 is the instruction after it, return
10614 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10615 in which INSN1 and INSN2 can probably issue in parallel, but for
10616 which (INSN2, INSN1) should be less sensitive to instruction
10617 alignment than (INSN1, INSN2). See 4130.md for more details. */
10620 vr4130_swap_insns_p (rtx insn1
, rtx insn2
)
10622 sd_iterator_def sd_it
;
10625 /* Check for the following case:
10627 1) there is some other instruction X with an anti dependence on INSN1;
10628 2) X has a higher priority than INSN2; and
10629 3) X is an arithmetic instruction (and thus has no unit restrictions).
10631 If INSN1 is the last instruction blocking X, it would better to
10632 choose (INSN1, X) over (INSN2, INSN1). */
10633 FOR_EACH_DEP (insn1
, SD_LIST_FORW
, sd_it
, dep
)
10634 if (DEP_TYPE (dep
) == REG_DEP_ANTI
10635 && INSN_PRIORITY (DEP_CON (dep
)) > INSN_PRIORITY (insn2
)
10636 && recog_memoized (DEP_CON (dep
)) >= 0
10637 && get_attr_vr4130_class (DEP_CON (dep
)) == VR4130_CLASS_ALU
)
10640 if (vr4130_last_insn
!= 0
10641 && recog_memoized (insn1
) >= 0
10642 && recog_memoized (insn2
) >= 0)
10644 /* See whether INSN1 and INSN2 use different execution units,
10645 or if they are both ALU-type instructions. If so, they can
10646 probably execute in parallel. */
10647 enum attr_vr4130_class class1
= get_attr_vr4130_class (insn1
);
10648 enum attr_vr4130_class class2
= get_attr_vr4130_class (insn2
);
10649 if (class1
!= class2
|| class1
== VR4130_CLASS_ALU
)
10651 /* If only one of the instructions has a dependence on
10652 vr4130_last_insn, prefer to schedule the other one first. */
10653 bool dep1_p
= vr4130_true_reg_dependence_p (insn1
);
10654 bool dep2_p
= vr4130_true_reg_dependence_p (insn2
);
10655 if (dep1_p
!= dep2_p
)
10658 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10659 is not an ALU-type instruction and if INSN1 uses the same
10660 execution unit. (Note that if this condition holds, we already
10661 know that INSN2 uses a different execution unit.) */
10662 if (class1
!= VR4130_CLASS_ALU
10663 && recog_memoized (vr4130_last_insn
) >= 0
10664 && class1
== get_attr_vr4130_class (vr4130_last_insn
))
10671 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10672 queue with at least two instructions. Swap the first two if
10673 vr4130_swap_insns_p says that it could be worthwhile. */
10676 vr4130_reorder (rtx
*ready
, int nready
)
10678 if (vr4130_swap_insns_p (ready
[nready
- 1], ready
[nready
- 2]))
10679 mips_promote_ready (ready
, nready
- 2, nready
- 1);
10682 /* Record whether last 74k AGEN instruction was a load or store. */
10683 static enum attr_type mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
10685 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10686 resets to TYPE_UNKNOWN state. */
10689 mips_74k_agen_init (rtx insn
)
10691 if (!insn
|| !NONJUMP_INSN_P (insn
))
10692 mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
10695 enum attr_type type
= get_attr_type (insn
);
10696 if (type
== TYPE_LOAD
|| type
== TYPE_STORE
)
10697 mips_last_74k_agen_insn
= type
;
10701 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10702 loads to be grouped together, and multiple stores to be grouped
10703 together. Swap things around in the ready queue to make this happen. */
10706 mips_74k_agen_reorder (rtx
*ready
, int nready
)
10709 int store_pos
, load_pos
;
10714 for (i
= nready
- 1; i
>= 0; i
--)
10716 rtx insn
= ready
[i
];
10717 if (USEFUL_INSN_P (insn
))
10718 switch (get_attr_type (insn
))
10721 if (store_pos
== -1)
10726 if (load_pos
== -1)
10735 if (load_pos
== -1 || store_pos
== -1)
10738 switch (mips_last_74k_agen_insn
)
10741 /* Prefer to schedule loads since they have a higher latency. */
10743 /* Swap loads to the front of the queue. */
10744 mips_maybe_swap_ready (ready
, load_pos
, store_pos
, 4);
10747 /* Swap stores to the front of the queue. */
10748 mips_maybe_swap_ready (ready
, store_pos
, load_pos
, 4);
10755 /* Implement TARGET_SCHED_INIT. */
10758 mips_sched_init (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
10759 int max_ready ATTRIBUTE_UNUSED
)
10761 mips_macc_chains_last_hilo
= 0;
10762 vr4130_last_insn
= 0;
10763 mips_74k_agen_init (NULL_RTX
);
10765 /* When scheduling for Loongson2, branch instructions go to ALU1,
10766 therefore basic block is most likely to start with round-robin counter
10767 pointed to ALU2. */
10768 mips_ls2
.alu1_turn_p
= false;
10769 mips_ls2
.falu1_turn_p
= true;
10772 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10775 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
10776 rtx
*ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
10778 if (!reload_completed
10779 && TUNE_MACC_CHAINS
10781 mips_macc_chains_reorder (ready
, *nreadyp
);
10783 if (reload_completed
10785 && !TARGET_VR4130_ALIGN
10787 vr4130_reorder (ready
, *nreadyp
);
10790 mips_74k_agen_reorder (ready
, *nreadyp
);
10792 return mips_issue_rate ();
10795 /* Update round-robin counters for ALU1/2 and FALU1/2. */
10798 mips_ls2_variable_issue (rtx insn
)
10800 if (mips_ls2
.alu1_turn_p
)
10802 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu1_core_unit_code
))
10803 mips_ls2
.alu1_turn_p
= false;
10807 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu2_core_unit_code
))
10808 mips_ls2
.alu1_turn_p
= true;
10811 if (mips_ls2
.falu1_turn_p
)
10813 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu1_core_unit_code
))
10814 mips_ls2
.falu1_turn_p
= false;
10818 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu2_core_unit_code
))
10819 mips_ls2
.falu1_turn_p
= true;
10822 if (recog_memoized (insn
) >= 0)
10823 mips_ls2
.cycle_has_multi_p
|= (get_attr_type (insn
) == TYPE_MULTI
);
10826 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10829 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
10830 rtx insn
, int more
)
10832 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
10833 if (USEFUL_INSN_P (insn
))
10836 if (!reload_completed
&& TUNE_MACC_CHAINS
)
10837 mips_macc_chains_record (insn
);
10838 vr4130_last_insn
= insn
;
10840 mips_74k_agen_init (insn
);
10841 else if (TUNE_LOONGSON_2EF
)
10842 mips_ls2_variable_issue (insn
);
10845 /* Instructions of type 'multi' should all be split before
10846 the second scheduling pass. */
10847 gcc_assert (!reload_completed
10848 || recog_memoized (insn
) < 0
10849 || get_attr_type (insn
) != TYPE_MULTI
);

/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
   return the first operand of the associated PREF or PREFX insn.  */

rtx
mips_prefetch_cookie (rtx write, rtx locality)
{
  /* store_streamed / load_streamed.  */
  if (INTVAL (locality) <= 0)
    return GEN_INT (INTVAL (write) + 4);

  /* store / load.  */
  if (INTVAL (locality) <= 2)
    return write;

  /* store_retained / load_retained.  */
  return GEN_INT (INTVAL (write) + 6);
}
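
/* Worked example (illustrative, not in the original sources): the value
   returned above is the PREF hint.  With WRITE == 1 it is

        LOCALITY <= 0      ->  1 + 4 = 5   (store_streamed)
        LOCALITY == 1 or 2 ->  1           (store)
        LOCALITY >= 3      ->  1 + 6 = 7   (store_retained)

   and with WRITE == 0 the corresponding load hints are 4, 0 and 6.  */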

/* Flags that indicate when a built-in function is available.

   BUILTIN_AVAIL_NON_MIPS16
        The function is available on the current target, but only
        in non-MIPS16 mode.  */
#define BUILTIN_AVAIL_NON_MIPS16 1

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  */
#define AVAIL_NON_MIPS16(NAME, COND)				\
  static unsigned int						\
  mips_builtin_avail_##NAME (void)				\
  {								\
    return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0;		\
  }
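
/* For example (illustrative), AVAIL_NON_MIPS16 (dsp, TARGET_DSP) below
   expands to roughly:

        static unsigned int
        mips_builtin_avail_dsp (void)
        {
          return (TARGET_DSP) ? BUILTIN_AVAIL_NON_MIPS16 : 0;
        }
   */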

/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available.  */
  unsigned int (*avail) (void);
};

AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)

/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   COND is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE,		\
		     FUNCTION_TYPE, AVAIL)			\
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE,	\
    mips_builtin_avail_ ## AVAIL }
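
/* For example (illustrative), MIPS_BUILTIN (pll_ps, f, "pll_ps",
   MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single) expands to
   the initializer

        { CODE_FOR_mips_pll_ps, MIPS_FP_COND_f, "__builtin_mips_pll_ps",
          MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
          mips_builtin_avail_paired_single }

   which is how the entries of the mips_builtins table below are built.  */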
10941 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
10942 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
10943 are as for MIPS_BUILTIN. */
10944 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10945 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
10947 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
10948 are subject to mips_builtin_avail_<AVAIL>. */
10949 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
10950 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
10951 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
10952 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
10953 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
10955 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10956 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
10957 while the any and all forms are subject to mips_builtin_avail_mips3d. */
10958 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
10959 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
10960 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
10962 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
10963 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
10965 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
10966 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
10968 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
10969 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
10972 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10973 are subject to mips_builtin_avail_mips3d. */
10974 #define CMP_4S_BUILTINS(INSN, COND) \
10975 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
10976 MIPS_BUILTIN_CMP_ANY, \
10977 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
10978 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
10979 MIPS_BUILTIN_CMP_ALL, \
10980 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
10982 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10983 instruction requires mips_builtin_avail_<AVAIL>. */
10984 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
10985 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
10986 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10988 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
10989 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10992 /* Define all the built-in functions related to C.cond.fmt condition COND. */
10993 #define CMP_BUILTINS(COND) \
10994 MOVTF_BUILTINS (c, COND, paired_single), \
10995 MOVTF_BUILTINS (cabs, COND, mips3d), \
10996 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
10997 CMP_PS_BUILTINS (c, COND, paired_single), \
10998 CMP_PS_BUILTINS (cabs, COND, mips3d), \
10999 CMP_4S_BUILTINS (c, COND), \
11000 CMP_4S_BUILTINS (cabs, COND)
11002 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
11003 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
11004 and AVAIL are as for MIPS_BUILTIN. */
11005 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
11006 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
11007 FUNCTION_TYPE, AVAIL)
11009 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11010 branch instruction. AVAIL is as for MIPS_BUILTIN. */
11011 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
11012 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
11013 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
11015 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
11016 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11017 builtin_description field. */
11018 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
11019 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
11020 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
11022 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
11023 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11024 builtin_description field. */
11025 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
11026 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
11028 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
11029 We use functions of this form when the same insn can be usefully applied
11030 to more than one datatype. */
11031 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
11032 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
11034 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11035 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11036 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11037 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11038 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11039 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11041 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
11042 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
11043 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
11044 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
11045 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
11046 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
11047 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
11048 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
11049 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
11050 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
11051 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
11052 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
11053 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
11054 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
11055 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
11056 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
11057 #define CODE_FOR_loongson_biadd CODE_FOR_reduc_uplus_v8qi
11058 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
11059 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
11060 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
11061 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
11062 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
11063 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
11064 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
11065 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
11066 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
11067 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
11068 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
11069 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
11070 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
11072 static const struct mips_builtin_description mips_builtins
[] = {
11073 DIRECT_BUILTIN (pll_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
11074 DIRECT_BUILTIN (pul_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
11075 DIRECT_BUILTIN (plu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
11076 DIRECT_BUILTIN (puu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
11077 DIRECT_BUILTIN (cvt_ps_s
, MIPS_V2SF_FTYPE_SF_SF
, paired_single
),
11078 DIRECT_BUILTIN (cvt_s_pl
, MIPS_SF_FTYPE_V2SF
, paired_single
),
11079 DIRECT_BUILTIN (cvt_s_pu
, MIPS_SF_FTYPE_V2SF
, paired_single
),
11080 DIRECT_BUILTIN (abs_ps
, MIPS_V2SF_FTYPE_V2SF
, paired_single
),
11082 DIRECT_BUILTIN (alnv_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF_INT
, paired_single
),
11083 DIRECT_BUILTIN (addr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
11084 DIRECT_BUILTIN (mulr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
11085 DIRECT_BUILTIN (cvt_pw_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
11086 DIRECT_BUILTIN (cvt_ps_pw
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
11088 DIRECT_BUILTIN (recip1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
11089 DIRECT_BUILTIN (recip1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
11090 DIRECT_BUILTIN (recip1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
11091 DIRECT_BUILTIN (recip2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
11092 DIRECT_BUILTIN (recip2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
11093 DIRECT_BUILTIN (recip2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
11095 DIRECT_BUILTIN (rsqrt1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
11096 DIRECT_BUILTIN (rsqrt1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
11097 DIRECT_BUILTIN (rsqrt1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
11098 DIRECT_BUILTIN (rsqrt2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
11099 DIRECT_BUILTIN (rsqrt2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
11100 DIRECT_BUILTIN (rsqrt2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
11102 MIPS_FP_CONDITIONS (CMP_BUILTINS
),
11104 /* Built-in functions for the SB-1 processor. */
11105 DIRECT_BUILTIN (sqrt_ps
, MIPS_V2SF_FTYPE_V2SF
, sb1_paired_single
),
11107 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
11108 DIRECT_BUILTIN (addq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11109 DIRECT_BUILTIN (addq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11110 DIRECT_BUILTIN (addq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11111 DIRECT_BUILTIN (addu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
11112 DIRECT_BUILTIN (addu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
11113 DIRECT_BUILTIN (subq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11114 DIRECT_BUILTIN (subq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11115 DIRECT_BUILTIN (subq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11116 DIRECT_BUILTIN (subu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
11117 DIRECT_BUILTIN (subu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
11118 DIRECT_BUILTIN (addsc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11119 DIRECT_BUILTIN (addwc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11120 DIRECT_BUILTIN (modsub
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11121 DIRECT_BUILTIN (raddu_w_qb
, MIPS_SI_FTYPE_V4QI
, dsp
),
11122 DIRECT_BUILTIN (absq_s_ph
, MIPS_V2HI_FTYPE_V2HI
, dsp
),
11123 DIRECT_BUILTIN (absq_s_w
, MIPS_SI_FTYPE_SI
, dsp
),
11124 DIRECT_BUILTIN (precrq_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
11125 DIRECT_BUILTIN (precrq_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
11126 DIRECT_BUILTIN (precrq_rs_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
11127 DIRECT_BUILTIN (precrqu_s_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
11128 DIRECT_BUILTIN (preceq_w_phl
, MIPS_SI_FTYPE_V2HI
, dsp
),
11129 DIRECT_BUILTIN (preceq_w_phr
, MIPS_SI_FTYPE_V2HI
, dsp
),
11130 DIRECT_BUILTIN (precequ_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11131 DIRECT_BUILTIN (precequ_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11132 DIRECT_BUILTIN (precequ_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11133 DIRECT_BUILTIN (precequ_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11134 DIRECT_BUILTIN (preceu_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11135 DIRECT_BUILTIN (preceu_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11136 DIRECT_BUILTIN (preceu_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11137 DIRECT_BUILTIN (preceu_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
11138 DIRECT_BUILTIN (shll_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
11139 DIRECT_BUILTIN (shll_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
11140 DIRECT_BUILTIN (shll_s_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
11141 DIRECT_BUILTIN (shll_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11142 DIRECT_BUILTIN (shrl_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
11143 DIRECT_BUILTIN (shra_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
11144 DIRECT_BUILTIN (shra_r_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
11145 DIRECT_BUILTIN (shra_r_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11146 DIRECT_BUILTIN (muleu_s_ph_qbl
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
11147 DIRECT_BUILTIN (muleu_s_ph_qbr
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
11148 DIRECT_BUILTIN (mulq_rs_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11149 DIRECT_BUILTIN (muleq_s_w_phl
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
11150 DIRECT_BUILTIN (muleq_s_w_phr
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
11151 DIRECT_BUILTIN (bitrev
, MIPS_SI_FTYPE_SI
, dsp
),
11152 DIRECT_BUILTIN (insv
, MIPS_SI_FTYPE_SI_SI
, dsp
),
11153 DIRECT_BUILTIN (repl_qb
, MIPS_V4QI_FTYPE_SI
, dsp
),
11154 DIRECT_BUILTIN (repl_ph
, MIPS_V2HI_FTYPE_SI
, dsp
),
11155 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
11156 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
11157 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
11158 DIRECT_BUILTIN (cmpgu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
11159 DIRECT_BUILTIN (cmpgu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
11160 DIRECT_BUILTIN (cmpgu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
11161 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
11162 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
11163 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
11164 DIRECT_BUILTIN (pick_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
11165 DIRECT_BUILTIN (pick_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11166 DIRECT_BUILTIN (packrl_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
11167 DIRECT_NO_TARGET_BUILTIN (wrdsp
, MIPS_VOID_FTYPE_SI_SI
, dsp
),
11168 DIRECT_BUILTIN (rddsp
, MIPS_SI_FTYPE_SI
, dsp
),
11169 DIRECT_BUILTIN (lbux
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
11170 DIRECT_BUILTIN (lhx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
11171 DIRECT_BUILTIN (lwx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
11172 BPOSGE_BUILTIN (32, dsp
),
11174 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
11175 DIRECT_BUILTIN (absq_s_qb
, MIPS_V4QI_FTYPE_V4QI
, dspr2
),
11176 DIRECT_BUILTIN (addu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11177 DIRECT_BUILTIN (addu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11178 DIRECT_BUILTIN (adduh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
11179 DIRECT_BUILTIN (adduh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
11180 DIRECT_BUILTIN (append
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
11181 DIRECT_BUILTIN (balign
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
11182 DIRECT_BUILTIN (cmpgdu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
11183 DIRECT_BUILTIN (cmpgdu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
11184 DIRECT_BUILTIN (cmpgdu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
11185 DIRECT_BUILTIN (mul_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11186 DIRECT_BUILTIN (mul_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11187 DIRECT_BUILTIN (mulq_rs_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11188 DIRECT_BUILTIN (mulq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11189 DIRECT_BUILTIN (mulq_s_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11190 DIRECT_BUILTIN (precr_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dspr2
),
11191 DIRECT_BUILTIN (precr_sra_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
11192 DIRECT_BUILTIN (precr_sra_r_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
11193 DIRECT_BUILTIN (prepend
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
11194 DIRECT_BUILTIN (shra_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
11195 DIRECT_BUILTIN (shra_r_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
11196 DIRECT_BUILTIN (shrl_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dspr2
),
11197 DIRECT_BUILTIN (subu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11198 DIRECT_BUILTIN (subu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11199 DIRECT_BUILTIN (subuh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
11200 DIRECT_BUILTIN (subuh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
11201 DIRECT_BUILTIN (addqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11202 DIRECT_BUILTIN (addqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11203 DIRECT_BUILTIN (addqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11204 DIRECT_BUILTIN (addqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11205 DIRECT_BUILTIN (subqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11206 DIRECT_BUILTIN (subqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
11207 DIRECT_BUILTIN (subqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11208 DIRECT_BUILTIN (subqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
11210 /* Built-in functions for the DSP ASE (32-bit only). */
11211 DIRECT_BUILTIN (dpau_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
11212 DIRECT_BUILTIN (dpau_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
11213 DIRECT_BUILTIN (dpsu_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
11214 DIRECT_BUILTIN (dpsu_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
11215 DIRECT_BUILTIN (dpaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11216 DIRECT_BUILTIN (dpsq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11217 DIRECT_BUILTIN (mulsaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11218 DIRECT_BUILTIN (dpaq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
11219 DIRECT_BUILTIN (dpsq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
11220 DIRECT_BUILTIN (maq_s_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11221 DIRECT_BUILTIN (maq_s_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11222 DIRECT_BUILTIN (maq_sa_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11223 DIRECT_BUILTIN (maq_sa_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
11224 DIRECT_BUILTIN (extr_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11225 DIRECT_BUILTIN (extr_r_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11226 DIRECT_BUILTIN (extr_rs_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11227 DIRECT_BUILTIN (extr_s_h
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11228 DIRECT_BUILTIN (extp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11229 DIRECT_BUILTIN (extpdp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
11230 DIRECT_BUILTIN (shilo
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
11231 DIRECT_BUILTIN (mthlip
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
11233 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
11234 DIRECT_BUILTIN (dpa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11235 DIRECT_BUILTIN (dps_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11236 DIRECT_BUILTIN (madd
, MIPS_DI_FTYPE_DI_SI_SI
, dspr2_32
),
11237 DIRECT_BUILTIN (maddu
, MIPS_DI_FTYPE_DI_USI_USI
, dspr2_32
),
11238 DIRECT_BUILTIN (msub
, MIPS_DI_FTYPE_DI_SI_SI
, dspr2_32
),
11239 DIRECT_BUILTIN (msubu
, MIPS_DI_FTYPE_DI_USI_USI
, dspr2_32
),
11240 DIRECT_BUILTIN (mulsa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11241 DIRECT_BUILTIN (mult
, MIPS_DI_FTYPE_SI_SI
, dspr2_32
),
11242 DIRECT_BUILTIN (multu
, MIPS_DI_FTYPE_USI_USI
, dspr2_32
),
11243 DIRECT_BUILTIN (dpax_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11244 DIRECT_BUILTIN (dpsx_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11245 DIRECT_BUILTIN (dpaqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11246 DIRECT_BUILTIN (dpaqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11247 DIRECT_BUILTIN (dpsqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11248 DIRECT_BUILTIN (dpsqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
11250 /* Builtin functions for ST Microelectronics Loongson-2E/2F cores. */
11251 LOONGSON_BUILTIN (packsswh
, MIPS_V4HI_FTYPE_V2SI_V2SI
),
11252 LOONGSON_BUILTIN (packsshb
, MIPS_V8QI_FTYPE_V4HI_V4HI
),
11253 LOONGSON_BUILTIN (packushb
, MIPS_UV8QI_FTYPE_UV4HI_UV4HI
),
11254 LOONGSON_BUILTIN_SUFFIX (paddw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11255 LOONGSON_BUILTIN_SUFFIX (paddh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11256 LOONGSON_BUILTIN_SUFFIX (paddb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11257 LOONGSON_BUILTIN_SUFFIX (paddw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11258 LOONGSON_BUILTIN_SUFFIX (paddh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11259 LOONGSON_BUILTIN_SUFFIX (paddb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11260 LOONGSON_BUILTIN_SUFFIX (paddd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
11261 LOONGSON_BUILTIN_SUFFIX (paddd
, s
, MIPS_DI_FTYPE_DI_DI
),
11262 LOONGSON_BUILTIN (paddsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11263 LOONGSON_BUILTIN (paddsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11264 LOONGSON_BUILTIN (paddush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11265 LOONGSON_BUILTIN (paddusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11266 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_ud
, MIPS_UDI_FTYPE_UDI_UDI
),
11267 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_uw
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11268 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_uh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11269 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_ub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11270 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_sd
, MIPS_DI_FTYPE_DI_DI
),
11271 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_sw
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11272 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_sh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11273 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_sb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11274 LOONGSON_BUILTIN (pavgh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11275 LOONGSON_BUILTIN (pavgb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11276 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11277 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11278 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11279 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11280 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11281 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11282 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11283 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11284 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11285 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11286 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11287 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11288 LOONGSON_BUILTIN_SUFFIX (pextrh
, u
, MIPS_UV4HI_FTYPE_UV4HI_USI
),
11289 LOONGSON_BUILTIN_SUFFIX (pextrh
, s
, MIPS_V4HI_FTYPE_V4HI_USI
),
11290 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11291 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11292 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11293 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11294 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11295 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11296 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11297 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11298 LOONGSON_BUILTIN (pmaddhw
, MIPS_V2SI_FTYPE_V4HI_V4HI
),
11299 LOONGSON_BUILTIN (pmaxsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11300 LOONGSON_BUILTIN (pmaxub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11301 LOONGSON_BUILTIN (pminsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11302 LOONGSON_BUILTIN (pminub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11303 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, u
, MIPS_UV8QI_FTYPE_UV8QI
),
11304 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, s
, MIPS_V8QI_FTYPE_V8QI
),
11305 LOONGSON_BUILTIN (pmulhuh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11306 LOONGSON_BUILTIN (pmulhh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11307 LOONGSON_BUILTIN (pmullh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11308 LOONGSON_BUILTIN (pmuluw
, MIPS_UDI_FTYPE_UV2SI_UV2SI
),
11309 LOONGSON_BUILTIN (pasubub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11310 LOONGSON_BUILTIN (biadd
, MIPS_UV4HI_FTYPE_UV8QI
),
11311 LOONGSON_BUILTIN (psadbh
, MIPS_UV4HI_FTYPE_UV8QI_UV8QI
),
11312 LOONGSON_BUILTIN_SUFFIX (pshufh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI
),
11313 LOONGSON_BUILTIN_SUFFIX (pshufh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI
),
11314 LOONGSON_BUILTIN_SUFFIX (psllh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
11315 LOONGSON_BUILTIN_SUFFIX (psllh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
11316 LOONGSON_BUILTIN_SUFFIX (psllw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
11317 LOONGSON_BUILTIN_SUFFIX (psllw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
11318 LOONGSON_BUILTIN_SUFFIX (psrah
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
11319 LOONGSON_BUILTIN_SUFFIX (psrah
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
11320 LOONGSON_BUILTIN_SUFFIX (psraw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
11321 LOONGSON_BUILTIN_SUFFIX (psraw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
11322 LOONGSON_BUILTIN_SUFFIX (psrlh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
11323 LOONGSON_BUILTIN_SUFFIX (psrlh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
11324 LOONGSON_BUILTIN_SUFFIX (psrlw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
11325 LOONGSON_BUILTIN_SUFFIX (psrlw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
11326 LOONGSON_BUILTIN_SUFFIX (psubw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11327 LOONGSON_BUILTIN_SUFFIX (psubh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11328 LOONGSON_BUILTIN_SUFFIX (psubb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11329 LOONGSON_BUILTIN_SUFFIX (psubw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11330 LOONGSON_BUILTIN_SUFFIX (psubh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11331 LOONGSON_BUILTIN_SUFFIX (psubb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11332 LOONGSON_BUILTIN_SUFFIX (psubd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
11333 LOONGSON_BUILTIN_SUFFIX (psubd
, s
, MIPS_DI_FTYPE_DI_DI
),
11334 LOONGSON_BUILTIN (psubsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11335 LOONGSON_BUILTIN (psubsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11336 LOONGSON_BUILTIN (psubush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11337 LOONGSON_BUILTIN (psubusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11338 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11339 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11340 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11341 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11342 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11343 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
11344 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
11345 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
11346 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
11347 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
11348 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
11349 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
)

/* MODE is a vector mode whose elements have type TYPE.  Return the type
   of the vector itself.  */

static tree
mips_builtin_vector_type (tree type, enum machine_mode mode)
{
  static tree types[2 * (int) MAX_MACHINE_MODE];
  int mode_index;

  mode_index = (int) mode;

  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
    mode_index += MAX_MACHINE_MODE;

  if (types[mode_index] == NULL_TREE)
    types[mode_index] = build_vector_type_for_mode (type, mode);
  return types[mode_index];
}
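
/* Illustrative note (not part of the original sources): the cache above is
   indexed by machine mode, with unsigned element types offset by
   MAX_MACHINE_MODE, so mips_builtin_vector_type (intHI_type_node, V4HImode)
   and mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode) use
   different slots and each vector type is built only once.  */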
11371 /* Source-level argument types. */
11372 #define MIPS_ATYPE_VOID void_type_node
11373 #define MIPS_ATYPE_INT integer_type_node
11374 #define MIPS_ATYPE_POINTER ptr_type_node
11376 /* Standard mode-based argument types. */
11377 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
11378 #define MIPS_ATYPE_SI intSI_type_node
11379 #define MIPS_ATYPE_USI unsigned_intSI_type_node
11380 #define MIPS_ATYPE_DI intDI_type_node
11381 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
11382 #define MIPS_ATYPE_SF float_type_node
11383 #define MIPS_ATYPE_DF double_type_node
11385 /* Vector argument types. */
11386 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
11387 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
11388 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
11389 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
11390 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
11391 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
11392 #define MIPS_ATYPE_UV2SI \
11393 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
11394 #define MIPS_ATYPE_UV4HI \
11395 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
11396 #define MIPS_ATYPE_UV8QI \
11397 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
11399 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
11400 their associated MIPS_ATYPEs. */
11401 #define MIPS_FTYPE_ATYPES1(A, B) \
11402 MIPS_ATYPE_##A, MIPS_ATYPE_##B
11404 #define MIPS_FTYPE_ATYPES2(A, B, C) \
11405 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
11407 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
11408 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
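
/* For example (illustrative), MIPS_FTYPE_ATYPES2 (SI, SI, SI) expands to
   "intSI_type_node, intSI_type_node, intSI_type_node", the argument list
   that build_function_type_list receives for the MIPS_SI_FTYPE_SI_SI
   prototype in mips_build_function_type below.  */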

/* Return the function type associated with function prototype TYPE.  */

static tree
mips_build_function_type (enum mips_function_type type)
{
  static tree types[(int) MIPS_MAX_FTYPE_MAX];

  if (types[(int) type] == NULL_TREE)
    switch (type)
      {
#define DEF_MIPS_FTYPE(NUM, ARGS)					\
  case MIPS_FTYPE_NAME##NUM ARGS:					\
    types[(int) type]							\
      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS,		\
				  NULL_TREE);				\
    break;
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
      default:
	gcc_unreachable ();
      }

  return types[(int) type];
}
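
/* Illustrative expansion (not from the original sources): for the entry
   DEF_MIPS_FTYPE (2, (SI, SI, SI)) in mips-ftypes.def, the switch above
   effectively contains

        case MIPS_SI_FTYPE_SI_SI:
          types[(int) type]
            = build_function_type_list (intSI_type_node, intSI_type_node,
                                        intSI_type_node, NULL_TREE);
          break;

   with one such case for every prototype listed in the .def file.  */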
11439 /* Implement TARGET_INIT_BUILTINS. */
11442 mips_init_builtins (void)
11444 const struct mips_builtin_description
*d
;
11447 /* Iterate through all of the bdesc arrays, initializing all of the
11448 builtin functions. */
11449 for (i
= 0; i
< ARRAY_SIZE (mips_builtins
); i
++)
11451 d
= &mips_builtins
[i
];
11453 add_builtin_function (d
->name
,
11454 mips_build_function_type (d
->function_type
),
11455 i
, BUILT_IN_MD
, NULL
, NULL
);
11459 /* Take argument ARGNO from EXP's argument list and convert it into a
11460 form suitable for input operand OPNO of instruction ICODE. Return the
11464 mips_prepare_builtin_arg (enum insn_code icode
,
11465 unsigned int opno
, tree exp
, unsigned int argno
)
11468 enum machine_mode mode
;
11470 value
= expand_normal (CALL_EXPR_ARG (exp
, argno
));
11471 mode
= insn_data
[icode
].operand
[opno
].mode
;
11472 if (!insn_data
[icode
].operand
[opno
].predicate (value
, mode
))
11474 value
= copy_to_mode_reg (mode
, value
);
11475 /* Check the predicate again. */
11476 if (!insn_data
[icode
].operand
[opno
].predicate (value
, mode
))
11478 error ("invalid argument to built-in function");
11486 /* Return an rtx suitable for output operand OP of instruction ICODE.
11487 If TARGET is non-null, try to use it where possible. */
11490 mips_prepare_builtin_target (enum insn_code icode
, unsigned int op
, rtx target
)
11492 enum machine_mode mode
;
11494 mode
= insn_data
[icode
].operand
[op
].mode
;
11495 if (target
== 0 || !insn_data
[icode
].operand
[op
].predicate (target
, mode
))
11496 target
= gen_reg_rtx (mode
);
11501 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
11502 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
11503 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
11504 suggests a good place to put the result. */
11507 mips_expand_builtin_direct (enum insn_code icode
, rtx target
, tree exp
,
11510 rtx ops
[MAX_RECOG_OPERANDS
];
11513 /* Map any target to operand 0. */
11517 ops
[opno
] = mips_prepare_builtin_target (icode
, opno
, target
);
11521 /* Map the arguments to the other operands. The n_operands value
11522 for an expander includes match_dups and match_scratches as well as
11523 match_operands, so n_operands is only an upper bound on the number
11524 of arguments to the expander function. */
11525 gcc_assert (opno
+ call_expr_nargs (exp
) <= insn_data
[icode
].n_operands
);
11526 for (argno
= 0; argno
< call_expr_nargs (exp
); argno
++, opno
++)
11527 ops
[opno
] = mips_prepare_builtin_arg (icode
, opno
, exp
, argno
);
11532 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1]));
11536 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1], ops
[2]));
11540 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1], ops
[2], ops
[3]));
11544 gcc_unreachable ();
11549 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
11550 function; TYPE says which. EXP is the CALL_EXPR that calls the
11551 function, ICODE is the instruction that should be used to compare
11552 the first two arguments, and COND is the condition it should test.
11553 TARGET, if nonnull, suggests a good place to put the result. */
11556 mips_expand_builtin_movtf (enum mips_builtin_type type
,
11557 enum insn_code icode
, enum mips_fp_condition cond
,
11558 rtx target
, tree exp
)
11560 rtx cmp_result
, op0
, op1
;
11562 cmp_result
= mips_prepare_builtin_target (icode
, 0, 0);
11563 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 0);
11564 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 1);
11565 emit_insn (GEN_FCN (icode
) (cmp_result
, op0
, op1
, GEN_INT (cond
)));
11567 icode
= CODE_FOR_mips_cond_move_tf_ps
;
11568 target
= mips_prepare_builtin_target (icode
, 0, target
);
11569 if (type
== MIPS_BUILTIN_MOVT
)
11571 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 2);
11572 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 3);
11576 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 2);
11577 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 3);
11579 emit_insn (gen_mips_cond_move_tf_ps (target
, op0
, op1
, cmp_result
));

/* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
   into TARGET otherwise.  Return TARGET.  */

static rtx
mips_builtin_branch_and_move (rtx condition, rtx target,
                              rtx value_if_true, rtx value_if_false)
{
  rtx true_label, done_label;

  true_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  /* First assume that CONDITION is false.  */
  mips_emit_move (target, value_if_false);

  /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise.  */
  emit_jump_insn (gen_condjump (condition, true_label));
  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();

  /* Fix TARGET if CONDITION is true.  */
  emit_label (true_label);
  mips_emit_move (target, value_if_true);

  emit_label (done_label);
  return target;
}
/* Expand a comparison built-in function of type BUILTIN_TYPE.  EXP is
   the CALL_EXPR that calls the function, ICODE is the code of the
   comparison instruction, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
                             enum insn_code icode, enum mips_fp_condition cond,
                             rtx target, tree exp)
{
  rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
  int argno;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  /* The instruction should have a target operand, an operand for each
     argument, and an operand for COND.  */
  gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);

  /* Prepare the operands to the comparison.  */
  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
  for (argno = 0; argno < call_expr_nargs (exp); argno++)
    args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);

  switch (insn_data[icode].n_operands)
    {
    case 4:
      emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
                                  GEN_INT (cond)));
      break;

    case 6:
      emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
                                  args[2], args[3], GEN_INT (cond)));
      break;

    default:
      gcc_unreachable ();
    }

  /* If the comparison sets more than one register, we define the result
     to be 0 if all registers are false and -1 if all registers are true.
     The value of the complete result is indeterminate otherwise.  */
  switch (builtin_type)
    {
    case MIPS_BUILTIN_CMP_ALL:
      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const0_rtx, const1_rtx);

    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
      condition = gen_single_cc (cmp_result, offset);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);

    default:
      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);
    }
}
/* Expand a bposge built-in function of type BUILTIN_TYPE.  TARGET,
   if nonnull, suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
{
  rtx condition, cmp_result;
  int cmp_value;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);

  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
    cmp_value = 32;
  else
    gcc_assert (0);

  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
  return mips_builtin_branch_and_move (condition, target,
                                       const1_rtx, const0_rtx);
}
/* Implement TARGET_EXPAND_BUILTIN.  */

static rtx
mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     enum machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl;
  unsigned int fcode, avail;
  const struct mips_builtin_description *d;

  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  fcode = DECL_FUNCTION_CODE (fndecl);
  gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
  d = &mips_builtins[fcode];
  avail = d->avail ();
  gcc_assert (avail != 0);
  if (TARGET_MIPS16)
    {
      error ("built-in function %qs not supported for MIPS16",
             IDENTIFIER_POINTER (DECL_NAME (fndecl)));
      return const0_rtx;
    }
  switch (d->builtin_type)
    {
    case MIPS_BUILTIN_DIRECT:
      return mips_expand_builtin_direct (d->icode, target, exp, true);

    case MIPS_BUILTIN_DIRECT_NO_TARGET:
      return mips_expand_builtin_direct (d->icode, target, exp, false);

    case MIPS_BUILTIN_MOVT:
    case MIPS_BUILTIN_MOVF:
      return mips_expand_builtin_movtf (d->builtin_type, d->icode,
                                        d->cond, target, exp);

    case MIPS_BUILTIN_CMP_ANY:
    case MIPS_BUILTIN_CMP_ALL:
    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
    case MIPS_BUILTIN_CMP_SINGLE:
      return mips_expand_builtin_compare (d->builtin_type, d->icode,
                                          d->cond, target, exp);

    case MIPS_BUILTIN_BPOSGE32:
      return mips_expand_builtin_bposge (d->builtin_type, target);
    }
  gcc_unreachable ();
}
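/* As a rough example of the dispatch above: a simple DSP builtin such as
   __builtin_mips_addq_ph expands through the MIPS_BUILTIN_DIRECT case,
   whereas the paired-single comparison builtins go through
   mips_expand_builtin_compare and return a boolean in an SImode register.  */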
/* An entry in the MIPS16 constant pool.  VALUE is the pool constant,
   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
struct mips16_constant {
  struct mips16_constant *next;
  rtx value;
  rtx label;
  enum machine_mode mode;
};

/* Information about an incomplete MIPS16 constant pool.  FIRST is the
   first constant, HIGHEST_ADDRESS is the highest address that the first
   byte of the pool can have, and INSN_ADDRESS is the current instruction
   address.  */
struct mips16_constant_pool {
  struct mips16_constant *first;
  int highest_address;
  int insn_address;
};
/* Add constant VALUE to POOL and return its label.  MODE is the
   value's mode (used for CONST_INTs, etc.).  */

static rtx
mips16_add_constant (struct mips16_constant_pool *pool,
                     rtx value, enum machine_mode mode)
{
  struct mips16_constant **p, *c;
  bool first_of_size_p;

  /* See whether the constant is already in the pool.  If so, return the
     existing label, otherwise leave P pointing to the place where the
     constant should be added.

     Keep the pool sorted in increasing order of mode size so that we can
     reduce the number of alignments needed.  */
  first_of_size_p = true;
  for (p = &pool->first; *p != 0; p = &(*p)->next)
    {
      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
        return (*p)->label;
      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
        break;
      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
        first_of_size_p = false;
    }

  /* In the worst case, the constant needed by the earliest instruction
     will end up at the end of the pool.  The entire pool must then be
     accessible from that instruction.

     When adding the first constant, set the pool's highest address to
     the address of the first out-of-range byte.  Adjust this address
     downwards each time a new constant is added.  */
  if (pool->first == 0)
    /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
       of the instruction with the lowest two bits clear.  The base PC
       value for LDPC has the lowest three bits clear.  Assume the worst
       case here; namely that the PC-relative instruction occupies the
       last 2 bytes in an aligned word.  */
    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
  pool->highest_address -= GET_MODE_SIZE (mode);
  if (first_of_size_p)
    /* Take into account the worst possible padding due to alignment.  */
    pool->highest_address -= GET_MODE_SIZE (mode) - 1;

  /* Create a new entry.  */
  c = XNEW (struct mips16_constant);
  c->value = value;
  c->mode = mode;
  c->label = gen_label_rtx ();
  c->next = *p;
  *p = c;

  return c->label;
}
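/* A rough worked example of the address bookkeeping above, assuming a
   32-bit target (UNITS_PER_WORD == 4): if the first constant is added
   when insn_address is 100, highest_address starts at 100 - 2 + 0x8000
   = 32866.  Adding an 8-byte DImode constant that is also the first of
   its size then subtracts 8 + 7 bytes of data and worst-case alignment
   padding, leaving 32851 as the highest address at which the pool may
   start while remaining reachable from that first instruction.  */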
/* Output constant VALUE after instruction INSN and return the last
   instruction emitted.  MODE is the mode of the constant.  */

static rtx
mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
{
  if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      rtx size = GEN_INT (GET_MODE_SIZE (mode));
      return emit_insn_after (gen_consttable_int (value, size), insn);
    }

  if (SCALAR_FLOAT_MODE_P (mode))
    return emit_insn_after (gen_consttable_float (value), insn);

  if (VECTOR_MODE_P (mode))
    {
      int i;

      for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
        insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
                                        CONST_VECTOR_ELT (value, i), insn);
      return insn;
    }

  gcc_unreachable ();
}
/* Dump out the constants in CONSTANTS after INSN.  */

static void
mips16_emit_constants (struct mips16_constant *constants, rtx insn)
{
  struct mips16_constant *c, *next;
  int align;

  align = 0;
  for (c = constants; c != NULL; c = next)
    {
      /* If necessary, increase the alignment of PC.  */
      if (align < GET_MODE_SIZE (c->mode))
        {
          int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
          insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
        }
      align = GET_MODE_SIZE (c->mode);

      insn = emit_label_after (c->label, insn);
      insn = mips16_emit_constants_1 (c->mode, c->value, insn);

      next = c->next;
      free (c);
    }

  emit_barrier_after (insn);
}
/* Return the length of instruction INSN.  */

static int
mips16_insn_length (rtx insn)
{
  if (JUMP_P (insn))
    {
      rtx body = PATTERN (insn);
      if (GET_CODE (body) == ADDR_VEC)
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
      if (GET_CODE (body) == ADDR_DIFF_VEC)
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
    }
  return get_attr_length (insn);
}
/* If *X is a symbolic constant that refers to the constant pool, add
   the constant to POOL and rewrite *X to use the constant's label.  */

static void
mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
{
  rtx base, offset, label;

  split_const (*x, &base, &offset);
  if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
    {
      label = mips16_add_constant (pool, get_pool_constant (base),
                                   get_pool_mode (base));
      base = gen_rtx_LABEL_REF (Pmode, label);
      *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
    }
}
/* This structure is used to communicate with mips16_rewrite_pool_refs.
   INSN is the instruction we're rewriting and POOL points to the current
   constant pool.  */
struct mips16_rewrite_pool_refs_info {
  rtx insn;
  struct mips16_constant_pool *pool;
};

/* Rewrite *X so that constant pool references refer to the constant's
   label instead.  DATA points to a mips16_rewrite_pool_refs_info
   structure.  */

static int
mips16_rewrite_pool_refs (rtx *x, void *data)
{
  struct mips16_rewrite_pool_refs_info *info =
    (struct mips16_rewrite_pool_refs_info *) data;

  if (force_to_mem_operand (*x, Pmode))
    {
      rtx mem = force_const_mem (GET_MODE (*x), *x);
      validate_change (info->insn, x, mem, false);
    }

  if (MEM_P (*x))
    {
      mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
      return -1;
    }

  if (TARGET_MIPS16_TEXT_LOADS)
    mips16_rewrite_pool_constant (info->pool, x);

  return GET_CODE (*x) == CONST ? -1 : 0;
}
/* Build MIPS16 constant pools.  */

static void
mips16_lay_out_constants (void)
{
  struct mips16_constant_pool pool;
  struct mips16_rewrite_pool_refs_info info;
  rtx insn, barrier;

  if (!TARGET_MIPS16_PCREL_LOADS)
    return;

  split_all_insns_noflow ();
  barrier = 0;
  memset (&pool, 0, sizeof (pool));
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Rewrite constant pool references in INSN.  */
      if (USEFUL_INSN_P (insn))
        {
          info.insn = insn;
          info.pool = &pool;
          for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
        }

      pool.insn_address += mips16_insn_length (insn);

      if (pool.first != NULL)
        {
          /* If there are no natural barriers between the first user of
             the pool and the highest acceptable address, we'll need to
             create a new instruction to jump around the constant pool.
             In the worst case, this instruction will be 4 bytes long.

             If it's too late to do this transformation after INSN,
             do it immediately before INSN.  */
          if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
            {
              rtx label, jump;

              label = gen_label_rtx ();

              jump = emit_jump_insn_before (gen_jump (label), insn);
              JUMP_LABEL (jump) = label;
              LABEL_NUSES (label) = 1;
              barrier = emit_barrier_after (jump);

              emit_label_after (label, barrier);
              pool.insn_address += 4;
            }

          /* See whether the constant pool is now out of range of the first
             user.  If so, output the constants after the previous barrier.
             Note that any instructions between BARRIER and INSN (inclusive)
             will use negative offsets to refer to the pool.  */
          if (pool.insn_address > pool.highest_address)
            {
              mips16_emit_constants (pool.first, barrier);
              pool.first = NULL;
              barrier = 0;
            }
        }
      else if (BARRIER_P (insn))
        barrier = insn;
    }
  if (pool.first != NULL)
    mips16_emit_constants (pool.first, get_last_insn ());
}
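/* Note that the "natural barriers" relied on above are the BARRIER insns
   that follow unconditional jumps and returns; the else branch at the
   bottom of the loop records the most recent one.  Dumping the pool at
   such a point is free, because execution can never fall through a
   barrier into the constant data.  */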
/* A temporary variable used by for_each_rtx callbacks, etc.  */
static rtx mips_sim_insn;

/* A structure representing the state of the processor pipeline.
   Used by the mips_sim_* family of functions.  */
struct mips_sim {
  /* The maximum number of instructions that can be issued in a cycle.
     (Caches mips_issue_rate.)  */
  unsigned int issue_rate;

  /* The current simulation time.  */
  unsigned int time;

  /* How many more instructions can be issued in the current cycle.  */
  unsigned int insns_left;

  /* LAST_SET[X].INSN is the last instruction to set register X.
     LAST_SET[X].TIME is the time at which that instruction was issued.
     INSN is null if no instruction has yet set register X.  */
  struct {
    rtx insn;
    unsigned int time;
  } last_set[FIRST_PSEUDO_REGISTER];

  /* The pipeline's current DFA state.  */
  state_t dfa_state;
};

/* Reset STATE to the initial simulation state.  */

static void
mips_sim_reset (struct mips_sim *state)
{
  state->time = 0;
  state->insns_left = state->issue_rate;
  memset (&state->last_set, 0, sizeof (state->last_set));
  state_reset (state->dfa_state);
}

/* Initialize STATE before its first use.  DFA_STATE points to an
   allocated but uninitialized DFA state.  */

static void
mips_sim_init (struct mips_sim *state, state_t dfa_state)
{
  state->issue_rate = mips_issue_rate ();
  state->dfa_state = dfa_state;
  mips_sim_reset (state);
}

/* Advance STATE by one clock cycle.  */

static void
mips_sim_next_cycle (struct mips_sim *state)
{
  state->time++;
  state->insns_left = state->issue_rate;
  state_transition (state->dfa_state, 0);
}
/* Advance simulation state STATE until instruction INSN can read
   register REG.  */

static void
mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
{
  unsigned int regno, end_regno;

  end_regno = END_REGNO (reg);
  for (regno = REGNO (reg); regno < end_regno; regno++)
    if (state->last_set[regno].insn != 0)
      {
        unsigned int t;

        t = (state->last_set[regno].time
             + insn_latency (state->last_set[regno].insn, insn));
        while (state->time < t)
          mips_sim_next_cycle (state);
      }
}

/* A for_each_rtx callback.  If *X is a register, advance simulation state
   DATA until mips_sim_insn can read the register's value.  */

static int
mips_sim_wait_regs_2 (rtx *x, void *data)
{
  if (REG_P (*x))
    mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
  return 0;
}

/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.  */

static void
mips_sim_wait_regs_1 (rtx *x, void *data)
{
  for_each_rtx (x, mips_sim_wait_regs_2, data);
}

/* Advance simulation state STATE until all of INSN's register
   dependencies are satisfied.  */

static void
mips_sim_wait_regs (struct mips_sim *state, rtx insn)
{
  mips_sim_insn = insn;
  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
}

/* Advance simulation state STATE until the units required by
   instruction INSN are available.  */

static void
mips_sim_wait_units (struct mips_sim *state, rtx insn)
{
  state_t tmp_state;

  tmp_state = alloca (state_size ());
  while (state->insns_left == 0
         || (memcpy (tmp_state, state->dfa_state, state_size ()),
             state_transition (tmp_state, insn) >= 0))
    mips_sim_next_cycle (state);
}

/* Advance simulation state STATE until INSN is ready to issue.  */

static void
mips_sim_wait_insn (struct mips_sim *state, rtx insn)
{
  mips_sim_wait_regs (state, insn);
  mips_sim_wait_units (state, insn);
}
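/* Taken together, the usual driving sequence for this simulator (as used
   by vr4130_align_insns below) is roughly:

     mips_sim_init (&state, alloca (state_size ()));
     for each instruction INSN:
       mips_sim_wait_insn (&state, INSN);
       ... inspect state.insns_left to see whether INSN would dual-issue ...
       mips_sim_issue_insn (&state, INSN);
       mips_sim_finish_insn (&state, INSN);  */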
/* mips_sim_insn has just set X.  Update the LAST_SET array
   in simulation state DATA.  */

static void
mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct mips_sim *state;

  state = (struct mips_sim *) data;
  if (REG_P (x))
    {
      unsigned int regno, end_regno;

      end_regno = END_REGNO (x);
      for (regno = REGNO (x); regno < end_regno; regno++)
        {
          state->last_set[regno].insn = mips_sim_insn;
          state->last_set[regno].time = state->time;
        }
    }
}

/* Issue instruction INSN in scheduler state STATE.  Assume that INSN
   can issue immediately (i.e., that mips_sim_wait_insn has already
   been called).  */

static void
mips_sim_issue_insn (struct mips_sim *state, rtx insn)
{
  state_transition (state->dfa_state, insn);
  state->insns_left--;

  mips_sim_insn = insn;
  note_stores (PATTERN (insn), mips_sim_record_set, state);
}

/* Simulate issuing a NOP in state STATE.  */

static void
mips_sim_issue_nop (struct mips_sim *state)
{
  if (state->insns_left == 0)
    mips_sim_next_cycle (state);
  state->insns_left--;
}
/* Update simulation state STATE so that it's ready to accept the instruction
   after INSN.  INSN should be part of the main rtl chain, not a member of a
   SEQUENCE.  */

static void
mips_sim_finish_insn (struct mips_sim *state, rtx insn)
{
  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
  if (JUMP_P (insn))
    mips_sim_issue_nop (state);

  switch (GET_CODE (SEQ_BEGIN (insn)))
    {
    case CODE_LABEL:
    case CALL_INSN:
      /* We can't predict the processor state after a call or label.  */
      mips_sim_reset (state);
      break;

    case JUMP_INSN:
      /* The delay slots of branch likely instructions are only executed
         when the branch is taken.  Therefore, if the caller has simulated
         the delay slot instruction, STATE does not really reflect the state
         of the pipeline for the instruction after the delay slot.  Also,
         branch likely instructions tend to incur a penalty when not taken,
         so there will probably be an extra delay between the branch and
         the instruction after the delay slot.  */
      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
        mips_sim_reset (state);
      break;

    default:
      break;
    }
}
/* The VR4130 pipeline issues aligned pairs of instructions together,
   but it stalls the second instruction if it depends on the first.
   In order to cut down the amount of logic required, this dependence
   check is not based on a full instruction decode.  Instead, any non-SPECIAL
   instruction is assumed to modify the register specified by bits 20-16
   (which is usually the "rt" field).

   In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
   input, so we can end up with a false dependence between the branch
   and its delay slot.  If this situation occurs in instruction INSN,
   try to avoid it by swapping rs and rt.  */

static void
vr4130_avoid_branch_rt_conflict (rtx insn)
{
  rtx first, second;

  first = SEQ_BEGIN (insn);
  second = SEQ_END (insn);
  if (JUMP_P (first)
      && NONJUMP_INSN_P (second)
      && GET_CODE (PATTERN (first)) == SET
      && GET_CODE (SET_DEST (PATTERN (first))) == PC
      && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
    {
      /* Check for the right kind of condition.  */
      rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
      if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
          && REG_P (XEXP (cond, 0))
          && REG_P (XEXP (cond, 1))
          && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
          && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
        {
          /* SECOND mentions the rt register but not the rs register.  */
          rtx tmp = XEXP (cond, 0);
          XEXP (cond, 0) = XEXP (cond, 1);
          XEXP (cond, 1) = tmp;
        }
    }
}
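/* For instance, assuming the first condition operand is output as rs and
   the second as rt: given a branch "beq $4,$5,L" whose delay slot uses $5
   but not $4, the VR4130 heuristic would treat the branch as a writer of
   rt ($5) and refuse to pair the two instructions.  Swapping the
   condition's operands so the branch becomes "beq $5,$4,L" moves the
   delay-slot register into the rs field and removes the false dependence;
   since the comparison is EQ or NE, the swap does not change its meaning.  */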
/* Implement -mvr4130-align.  Go through each basic block and simulate the
   processor pipeline.  If we find that a pair of instructions could execute
   in parallel, and the first of those instructions is not 8-byte aligned,
   insert a nop to make it aligned.  */

static void
vr4130_align_insns (void)
{
  struct mips_sim state;
  rtx insn, subinsn, last, last2, next;
  bool aligned_p;

  dfa_start ();

  /* LAST is the last instruction before INSN to have a nonzero length.
     LAST2 is the last such instruction before LAST.  */
  last = 0;
  last2 = 0;

  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
  aligned_p = true;

  mips_sim_init (&state, alloca (state_size ()));
  for (insn = get_insns (); insn != 0; insn = next)
    {
      unsigned int length;

      next = NEXT_INSN (insn);

      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
         This isn't really related to the alignment pass, but we do it on
         the fly to avoid a separate instruction walk.  */
      vr4130_avoid_branch_rt_conflict (insn);

      if (USEFUL_INSN_P (insn))
        FOR_EACH_SUBINSN (subinsn, insn)
          {
            mips_sim_wait_insn (&state, subinsn);

            /* If we want this instruction to issue in parallel with the
               previous one, make sure that the previous instruction is
               aligned.  There are several reasons why this isn't worthwhile
               when the second instruction is a call:

               - Calls are less likely to be performance critical,
               - There's a good chance that the delay slot can execute
                 in parallel with the call.
               - The return address would then be unaligned.

               In general, if we're going to insert a nop between instructions
               X and Y, it's better to insert it immediately after X.  That
               way, if the nop makes Y aligned, it will also align any labels
               between X and Y.  */
            if (state.insns_left != state.issue_rate
                && !CALL_P (subinsn))
              {
                if (subinsn == SEQ_BEGIN (insn) && aligned_p)
                  {
                    /* SUBINSN is the first instruction in INSN and INSN is
                       aligned.  We want to align the previous instruction
                       instead, so insert a nop between LAST2 and LAST.

                       Note that LAST could be either a single instruction
                       or a branch with a delay slot.  In the latter case,
                       LAST, like INSN, is already aligned, but the delay
                       slot must have some extra delay that stops it from
                       issuing at the same time as the branch.  We therefore
                       insert a nop before the branch in order to align its
                       delay slot.  */
                    emit_insn_after (gen_nop (), last2);
                    aligned_p = false;
                  }
                else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
                  {
                    /* SUBINSN is the delay slot of INSN, but INSN is
                       currently unaligned.  Insert a nop between
                       LAST and INSN to align it.  */
                    emit_insn_after (gen_nop (), last);
                    aligned_p = true;
                  }
              }
            mips_sim_issue_insn (&state, subinsn);
          }
      mips_sim_finish_insn (&state, insn);

      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
      length = get_attr_length (insn);
      if (length > 0)
        {
          /* If the instruction is an asm statement or multi-instruction
             mips.md patern, the length is only an estimate.  Insert an
             8 byte alignment after it so that the following instructions
             can be handled correctly.  */
          if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
              && (recog_memoized (insn) < 0 || length >= 8))
            {
              next = emit_insn_after (gen_align (GEN_INT (3)), insn);
              next = NEXT_INSN (next);
              mips_sim_next_cycle (&state);
              aligned_p = true;
            }
          else if (length & 4)
            aligned_p = !aligned_p;
          last2 = last;
          last = insn;
        }

      /* See whether INSN is an aligned label.  */
      if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
        aligned_p = true;
    }
  dfa_finish ();
}
12392 /* This structure records that the current function has a LO_SUM
12393 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
12394 the largest offset applied to BASE by all such LO_SUMs. */
12395 struct mips_lo_sum_offset
{
12397 HOST_WIDE_INT offset
;
12400 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
12403 mips_hash_base (rtx base
)
12405 int do_not_record_p
;
12407 return hash_rtx (base
, GET_MODE (base
), &do_not_record_p
, NULL
, false);
12410 /* Hash-table callbacks for mips_lo_sum_offsets. */
12413 mips_lo_sum_offset_hash (const void *entry
)
12415 return mips_hash_base (((const struct mips_lo_sum_offset
*) entry
)->base
);
12419 mips_lo_sum_offset_eq (const void *entry
, const void *value
)
12421 return rtx_equal_p (((const struct mips_lo_sum_offset
*) entry
)->base
,
12422 (const_rtx
) value
);
12425 /* Look up symbolic constant X in HTAB, which is a hash table of
12426 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
12427 paired with a recorded LO_SUM, otherwise record X in the table. */
12430 mips_lo_sum_offset_lookup (htab_t htab
, rtx x
, enum insert_option option
)
12434 struct mips_lo_sum_offset
*entry
;
12436 /* Split X into a base and offset. */
12437 split_const (x
, &base
, &offset
);
12438 if (UNSPEC_ADDRESS_P (base
))
12439 base
= UNSPEC_ADDRESS (base
);
12441 /* Look up the base in the hash table. */
12442 slot
= htab_find_slot_with_hash (htab
, base
, mips_hash_base (base
), option
);
12446 entry
= (struct mips_lo_sum_offset
*) *slot
;
12447 if (option
== INSERT
)
12451 entry
= XNEW (struct mips_lo_sum_offset
);
12452 entry
->base
= base
;
12453 entry
->offset
= INTVAL (offset
);
12458 if (INTVAL (offset
) > entry
->offset
)
12459 entry
->offset
= INTVAL (offset
);
12462 return INTVAL (offset
) <= entry
->offset
;
12465 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
12466 Record every LO_SUM in *LOC. */
12469 mips_record_lo_sum (rtx
*loc
, void *data
)
12471 if (GET_CODE (*loc
) == LO_SUM
)
12472 mips_lo_sum_offset_lookup ((htab_t
) data
, XEXP (*loc
, 1), INSERT
);
12476 /* Return true if INSN is a SET of an orphaned high-part relocation.
12477 HTAB is a hash table of mips_lo_sum_offsets that describes all the
12478 LO_SUMs in the current function. */
12481 mips_orphaned_high_part_p (htab_t htab
, rtx insn
)
12483 enum mips_symbol_type type
;
12486 set
= single_set (insn
);
12489 /* Check for %his. */
12491 if (GET_CODE (x
) == HIGH
12492 && absolute_symbolic_operand (XEXP (x
, 0), VOIDmode
))
12493 return !mips_lo_sum_offset_lookup (htab
, XEXP (x
, 0), NO_INSERT
);
12495 /* Check for local %gots (and %got_pages, which is redundant but OK). */
12496 if (GET_CODE (x
) == UNSPEC
12497 && XINT (x
, 1) == UNSPEC_LOAD_GOT
12498 && mips_symbolic_constant_p (XVECEXP (x
, 0, 1),
12499 SYMBOL_CONTEXT_LEA
, &type
)
12500 && type
== SYMBOL_GOTOFF_PAGE
)
12501 return !mips_lo_sum_offset_lookup (htab
, XVECEXP (x
, 0, 1), NO_INSERT
);
12506 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
12507 INSN and a previous instruction, avoid it by inserting nops after
12510 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
12511 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
12512 before using the value of that register. *HILO_DELAY counts the
12513 number of instructions since the last hilo hazard (that is,
12514 the number of instructions since the last MFLO or MFHI).
12516 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
12517 for the next instruction.
12519 LO_REG is an rtx for the LO register, used in dependence checking. */
12522 mips_avoid_hazard (rtx after
, rtx insn
, int *hilo_delay
,
12523 rtx
*delayed_reg
, rtx lo_reg
)
12528 pattern
= PATTERN (insn
);
12530 /* Do not put the whole function in .set noreorder if it contains
12531 an asm statement. We don't know whether there will be hazards
12532 between the asm statement and the gcc-generated code. */
12533 if (GET_CODE (pattern
) == ASM_INPUT
|| asm_noperands (pattern
) >= 0)
12534 cfun
->machine
->all_noreorder_p
= false;
12536 /* Ignore zero-length instructions (barriers and the like). */
12537 ninsns
= get_attr_length (insn
) / 4;
12541 /* Work out how many nops are needed. Note that we only care about
12542 registers that are explicitly mentioned in the instruction's pattern.
12543 It doesn't matter that calls use the argument registers or that they
12544 clobber hi and lo. */
12545 if (*hilo_delay
< 2 && reg_set_p (lo_reg
, pattern
))
12546 nops
= 2 - *hilo_delay
;
12547 else if (*delayed_reg
!= 0 && reg_referenced_p (*delayed_reg
, pattern
))
12552 /* Insert the nops between this instruction and the previous one.
12553 Each new nop takes us further from the last hilo hazard. */
12554 *hilo_delay
+= nops
;
12556 emit_insn_after (gen_hazard_nop (), after
);
12558 /* Set up the state for the next instruction. */
12559 *hilo_delay
+= ninsns
;
12561 if (INSN_CODE (insn
) >= 0)
12562 switch (get_attr_hazard (insn
))
12572 set
= single_set (insn
);
12574 *delayed_reg
= SET_DEST (set
);
12579 /* Go through the instruction stream and insert nops where necessary.
12580 Also delete any high-part relocations whose partnering low parts
12581 are now all dead. See if the whole function can then be put into
12582 .set noreorder and .set nomacro. */
12585 mips_reorg_process_insns (void)
12587 rtx insn
, last_insn
, subinsn
, next_insn
, lo_reg
, delayed_reg
;
12591 /* Force all instructions to be split into their final form. */
12592 split_all_insns_noflow ();
12594 /* Recalculate instruction lengths without taking nops into account. */
12595 cfun
->machine
->ignore_hazard_length_p
= true;
12596 shorten_branches (get_insns ());
12598 cfun
->machine
->all_noreorder_p
= true;
12600 /* We don't track MIPS16 PC-relative offsets closely enough to make
12601 a good job of "set .noreorder" code in MIPS16 mode. */
12603 cfun
->machine
->all_noreorder_p
= false;
12605 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
12606 if (!TARGET_EXPLICIT_RELOCS
)
12607 cfun
->machine
->all_noreorder_p
= false;
12609 /* Profiled functions can't be all noreorder because the profiler
12610 support uses assembler macros. */
12612 cfun
->machine
->all_noreorder_p
= false;
12614 /* Code compiled with -mfix-vr4120 can't be all noreorder because
12615 we rely on the assembler to work around some errata. */
12616 if (TARGET_FIX_VR4120
)
12617 cfun
->machine
->all_noreorder_p
= false;
12619 /* The same is true for -mfix-vr4130 if we might generate MFLO or
12620 MFHI instructions. Note that we avoid using MFLO and MFHI if
12621 the VR4130 MACC and DMACC instructions are available instead;
12622 see the *mfhilo_{si,di}_macc patterns. */
12623 if (TARGET_FIX_VR4130
&& !ISA_HAS_MACCHI
)
12624 cfun
->machine
->all_noreorder_p
= false;
12626 htab
= htab_create (37, mips_lo_sum_offset_hash
,
12627 mips_lo_sum_offset_eq
, free
);
12629 /* Make a first pass over the instructions, recording all the LO_SUMs. */
12630 for (insn
= get_insns (); insn
!= 0; insn
= NEXT_INSN (insn
))
12631 FOR_EACH_SUBINSN (subinsn
, insn
)
12632 if (INSN_P (subinsn
))
12633 for_each_rtx (&PATTERN (subinsn
), mips_record_lo_sum
, htab
);
12638 lo_reg
= gen_rtx_REG (SImode
, LO_REGNUM
);
12640 /* Make a second pass over the instructions. Delete orphaned
12641 high-part relocations or turn them into NOPs. Avoid hazards
12642 by inserting NOPs. */
12643 for (insn
= get_insns (); insn
!= 0; insn
= next_insn
)
12645 next_insn
= NEXT_INSN (insn
);
12648 if (GET_CODE (PATTERN (insn
)) == SEQUENCE
)
12650 /* If we find an orphaned high-part relocation in a delay
12651 slot, it's easier to turn that instruction into a NOP than
12652 to delete it. The delay slot will be a NOP either way. */
12653 FOR_EACH_SUBINSN (subinsn
, insn
)
12654 if (INSN_P (subinsn
))
12656 if (mips_orphaned_high_part_p (htab
, subinsn
))
12658 PATTERN (subinsn
) = gen_nop ();
12659 INSN_CODE (subinsn
) = CODE_FOR_nop
;
12661 mips_avoid_hazard (last_insn
, subinsn
, &hilo_delay
,
12662 &delayed_reg
, lo_reg
);
12668 /* INSN is a single instruction. Delete it if it's an
12669 orphaned high-part relocation. */
12670 if (mips_orphaned_high_part_p (htab
, insn
))
12671 delete_insn (insn
);
12674 mips_avoid_hazard (last_insn
, insn
, &hilo_delay
,
12675 &delayed_reg
, lo_reg
);
12682 htab_delete (htab
);
12685 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
12690 mips16_lay_out_constants ();
12691 if (mips_base_delayed_branch
)
12692 dbr_schedule (get_insns ());
12693 mips_reorg_process_insns ();
12695 && TARGET_EXPLICIT_RELOCS
12697 && TARGET_VR4130_ALIGN
)
12698 vr4130_align_insns ();
12701 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
12702 in order to avoid duplicating too much logic from elsewhere. */
12705 mips_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
12706 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
12709 rtx this_rtx
, temp1
, temp2
, insn
, fnaddr
;
12710 bool use_sibcall_p
;
12712 /* Pretend to be a post-reload pass while generating rtl. */
12713 reload_completed
= 1;
12715 /* Mark the end of the (empty) prologue. */
12716 emit_note (NOTE_INSN_PROLOGUE_END
);
12718 /* Determine if we can use a sibcall to call FUNCTION directly. */
12719 fnaddr
= XEXP (DECL_RTL (function
), 0);
12720 use_sibcall_p
= (mips_function_ok_for_sibcall (function
, NULL
)
12721 && const_call_insn_operand (fnaddr
, Pmode
));
12723 /* Determine if we need to load FNADDR from the GOT. */
12725 && (mips_got_symbol_type_p
12726 (mips_classify_symbol (fnaddr
, SYMBOL_CONTEXT_LEA
))))
12728 /* Pick a global pointer. Use a call-clobbered register if
12729 TARGET_CALL_SAVED_GP. */
12730 cfun
->machine
->global_pointer
12731 = TARGET_CALL_SAVED_GP
? 15 : GLOBAL_POINTER_REGNUM
;
12732 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
12734 /* Set up the global pointer for n32 or n64 abicalls. */
12735 mips_emit_loadgp ();
12738 /* We need two temporary registers in some cases. */
12739 temp1
= gen_rtx_REG (Pmode
, 2);
12740 temp2
= gen_rtx_REG (Pmode
, 3);
12742 /* Find out which register contains the "this" pointer. */
12743 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
12744 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
12746 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
12748 /* Add DELTA to THIS_RTX. */
12751 rtx offset
= GEN_INT (delta
);
12752 if (!SMALL_OPERAND (delta
))
12754 mips_emit_move (temp1
, offset
);
12757 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, offset
));
12760 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
12761 if (vcall_offset
!= 0)
12765 /* Set TEMP1 to *THIS_RTX. */
12766 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, this_rtx
));
12768 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
12769 addr
= mips_add_offset (temp2
, temp1
, vcall_offset
);
12771 /* Load the offset and add it to THIS_RTX. */
12772 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
12773 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, temp1
));
12776 /* Jump to the target function. Use a sibcall if direct jumps are
12777 allowed, otherwise load the address into a register first. */
12780 insn
= emit_call_insn (gen_sibcall_internal (fnaddr
, const0_rtx
));
12781 SIBLING_CALL_P (insn
) = 1;
12785 /* This is messy. GAS treats "la $25,foo" as part of a call
12786 sequence and may allow a global "foo" to be lazily bound.
12787 The general move patterns therefore reject this combination.
12789 In this context, lazy binding would actually be OK
12790 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
12791 TARGET_CALL_SAVED_GP; see mips_load_call_address.
12792 We must therefore load the address via a temporary
12793 register if mips_dangerous_for_la25_p.
12795 If we jump to the temporary register rather than $25,
12796 the assembler can use the move insn to fill the jump's
12799 We can use the same technique for MIPS16 code, where $25
12800 is not a valid JR register. */
12801 if (TARGET_USE_PIC_FN_ADDR_REG
12803 && !mips_dangerous_for_la25_p (fnaddr
))
12804 temp1
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
12805 mips_load_call_address (MIPS_CALL_SIBCALL
, temp1
, fnaddr
);
12807 if (TARGET_USE_PIC_FN_ADDR_REG
12808 && REGNO (temp1
) != PIC_FUNCTION_ADDR_REGNUM
)
12809 mips_emit_move (gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
), temp1
);
12810 emit_jump_insn (gen_indirect_jump (temp1
));
12813 /* Run just enough of rest_of_compilation. This sequence was
12814 "borrowed" from alpha.c. */
12815 insn
= get_insns ();
12816 insn_locators_alloc ();
12817 split_all_insns_noflow ();
12818 mips16_lay_out_constants ();
12819 shorten_branches (insn
);
12820 final_start_function (insn
, file
, 1);
12821 final (insn
, file
, 1);
12822 final_end_function ();
12823 free_after_compilation (cfun
);
12825 /* Clean up the vars set above. Note that final_end_function resets
12826 the global pointer for us. */
12827 reload_completed
= 0;
12830 /* The last argument passed to mips_set_mips16_mode, or negative if the
12831 function hasn't been called yet.
12833 There are two copies of this information. One is saved and restored
12834 by the PCH process while the other is specific to this compiler
12835 invocation. The information calculated by mips_set_mips16_mode
12836 is invalid unless the two variables are the same. */
12837 static int was_mips16_p
= -1;
12838 static GTY(()) int was_mips16_pch_p
= -1;
12840 /* Set up the target-dependent global state so that it matches the
12841 current function's ISA mode. */
12844 mips_set_mips16_mode (int mips16_p
)
12846 if (mips16_p
== was_mips16_p
12847 && mips16_p
== was_mips16_pch_p
)
12850 /* Restore base settings of various flags. */
12851 target_flags
= mips_base_target_flags
;
12852 flag_schedule_insns
= mips_base_schedule_insns
;
12853 flag_reorder_blocks_and_partition
= mips_base_reorder_blocks_and_partition
;
12854 flag_move_loop_invariants
= mips_base_move_loop_invariants
;
12855 align_loops
= mips_base_align_loops
;
12856 align_jumps
= mips_base_align_jumps
;
12857 align_functions
= mips_base_align_functions
;
12861 /* Switch to MIPS16 mode. */
12862 target_flags
|= MASK_MIPS16
;
12864 /* Don't run the scheduler before reload, since it tends to
12865 increase register pressure. */
12866 flag_schedule_insns
= 0;
12868 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
12869 the whole function to be in a single section. */
12870 flag_reorder_blocks_and_partition
= 0;
12872 /* Don't move loop invariants, because it tends to increase
12873 register pressure. It also introduces an extra move in cases
12874 where the constant is the first operand in a two-operand binary
12875 instruction, or when it forms a register argument to a functon
12877 flag_move_loop_invariants
= 0;
12879 target_flags
|= MASK_EXPLICIT_RELOCS
;
12881 /* Experiments suggest we get the best overall section-anchor
12882 results from using the range of an unextended LW or SW. Code
12883 that makes heavy use of byte or short accesses can do better
12884 with ranges of 0...31 and 0...63 respectively, but most code is
12885 sensitive to the range of LW and SW instead. */
12886 targetm
.min_anchor_offset
= 0;
12887 targetm
.max_anchor_offset
= 127;
12889 if (flag_pic
&& !TARGET_OLDABI
)
12890 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
12893 sorry ("MIPS16 -mxgot code");
12895 if (TARGET_HARD_FLOAT_ABI
&& !TARGET_OLDABI
)
12896 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
12900 /* Switch to normal (non-MIPS16) mode. */
12901 target_flags
&= ~MASK_MIPS16
;
12903 /* Provide default values for align_* for 64-bit targets. */
12906 if (align_loops
== 0)
12908 if (align_jumps
== 0)
12910 if (align_functions
== 0)
12911 align_functions
= 8;
12914 targetm
.min_anchor_offset
= -32768;
12915 targetm
.max_anchor_offset
= 32767;
12918 /* (Re)initialize MIPS target internals for new ISA. */
12919 mips_init_relocs ();
12921 if (was_mips16_p
>= 0 || was_mips16_pch_p
>= 0)
12922 /* Reinitialize target-dependent state. */
12925 was_mips16_p
= mips16_p
;
12926 was_mips16_pch_p
= mips16_p
;
12929 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
12930 function should use the MIPS16 ISA and switch modes accordingly. */
12933 mips_set_current_function (tree fndecl
)
12935 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl
));
12938 /* Allocate a chunk of memory for per-function machine-dependent data. */
12940 static struct machine_function
*
12941 mips_init_machine_status (void)
12943 return ((struct machine_function
*)
12944 ggc_alloc_cleared (sizeof (struct machine_function
)));
12947 /* Return the processor associated with the given ISA level, or null
12948 if the ISA isn't valid. */
12950 static const struct mips_cpu_info
*
12951 mips_cpu_info_from_isa (int isa
)
12955 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
12956 if (mips_cpu_info_table
[i
].isa
== isa
)
12957 return mips_cpu_info_table
+ i
;
12962 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
12963 with a final "000" replaced by "k". Ignore case.
12965 Note: this function is shared between GCC and GAS. */
12968 mips_strict_matching_cpu_name_p (const char *canonical
, const char *given
)
12970 while (*given
!= 0 && TOLOWER (*given
) == TOLOWER (*canonical
))
12971 given
++, canonical
++;
12973 return ((*given
== 0 && *canonical
== 0)
12974 || (strcmp (canonical
, "000") == 0 && strcasecmp (given
, "k") == 0));
12977 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
12978 CPU name. We've traditionally allowed a lot of variation here.
12980 Note: this function is shared between GCC and GAS. */
12983 mips_matching_cpu_name_p (const char *canonical
, const char *given
)
12985 /* First see if the name matches exactly, or with a final "000"
12986 turned into "k". */
12987 if (mips_strict_matching_cpu_name_p (canonical
, given
))
12990 /* If not, try comparing based on numerical designation alone.
12991 See if GIVEN is an unadorned number, or 'r' followed by a number. */
12992 if (TOLOWER (*given
) == 'r')
12994 if (!ISDIGIT (*given
))
12997 /* Skip over some well-known prefixes in the canonical name,
12998 hoping to find a number there too. */
12999 if (TOLOWER (canonical
[0]) == 'v' && TOLOWER (canonical
[1]) == 'r')
13001 else if (TOLOWER (canonical
[0]) == 'r' && TOLOWER (canonical
[1]) == 'm')
13003 else if (TOLOWER (canonical
[0]) == 'r')
13006 return mips_strict_matching_cpu_name_p (canonical
, given
);
13009 /* Return the mips_cpu_info entry for the processor or ISA given
13010 by CPU_STRING. Return null if the string isn't recognized.
13012 A similar function exists in GAS. */
13014 static const struct mips_cpu_info
*
13015 mips_parse_cpu (const char *cpu_string
)
13020 /* In the past, we allowed upper-case CPU names, but it doesn't
13021 work well with the multilib machinery. */
13022 for (s
= cpu_string
; *s
!= 0; s
++)
13025 warning (0, "CPU names must be lower case");
13029 /* 'from-abi' selects the most compatible architecture for the given
13030 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
13031 EABIs, we have to decide whether we're using the 32-bit or 64-bit
13033 if (strcasecmp (cpu_string
, "from-abi") == 0)
13034 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS
? 1
13035 : ABI_NEEDS_64BIT_REGS
? 3
13036 : (TARGET_64BIT
? 3 : 1));
13038 /* 'default' has traditionally been a no-op. Probably not very useful. */
13039 if (strcasecmp (cpu_string
, "default") == 0)
13042 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
13043 if (mips_matching_cpu_name_p (mips_cpu_info_table
[i
].name
, cpu_string
))
13044 return mips_cpu_info_table
+ i
;
13049 /* Set up globals to generate code for the ISA or processor
13050 described by INFO. */
13053 mips_set_architecture (const struct mips_cpu_info
*info
)
13057 mips_arch_info
= info
;
13058 mips_arch
= info
->cpu
;
13059 mips_isa
= info
->isa
;
13063 /* Likewise for tuning. */
13066 mips_set_tune (const struct mips_cpu_info
*info
)
13070 mips_tune_info
= info
;
13071 mips_tune
= info
->cpu
;
13075 /* Implement TARGET_HANDLE_OPTION. */
13078 mips_handle_option (size_t code
, const char *arg
, int value ATTRIBUTE_UNUSED
)
13083 if (strcmp (arg
, "32") == 0)
13085 else if (strcmp (arg
, "o64") == 0)
13086 mips_abi
= ABI_O64
;
13087 else if (strcmp (arg
, "n32") == 0)
13088 mips_abi
= ABI_N32
;
13089 else if (strcmp (arg
, "64") == 0)
13091 else if (strcmp (arg
, "eabi") == 0)
13092 mips_abi
= ABI_EABI
;
13099 return mips_parse_cpu (arg
) != 0;
13102 mips_isa_option_info
= mips_parse_cpu (ACONCAT (("mips", arg
, NULL
)));
13103 return mips_isa_option_info
!= 0;
13105 case OPT_mno_flush_func
:
13106 mips_cache_flush_func
= NULL
;
13109 case OPT_mcode_readable_
:
13110 if (strcmp (arg
, "yes") == 0)
13111 mips_code_readable
= CODE_READABLE_YES
;
13112 else if (strcmp (arg
, "pcrel") == 0)
13113 mips_code_readable
= CODE_READABLE_PCREL
;
13114 else if (strcmp (arg
, "no") == 0)
13115 mips_code_readable
= CODE_READABLE_NO
;
13125 /* Implement OVERRIDE_OPTIONS. */
13128 mips_override_options (void)
13130 int i
, start
, regno
, mode
;
13132 /* Process flags as though we were generating non-MIPS16 code. */
13133 mips_base_mips16
= TARGET_MIPS16
;
13134 target_flags
&= ~MASK_MIPS16
;
13136 #ifdef SUBTARGET_OVERRIDE_OPTIONS
13137 SUBTARGET_OVERRIDE_OPTIONS
;
13140 /* Set the small data limit. */
13141 mips_small_data_threshold
= (g_switch_set
13143 : MIPS_DEFAULT_GVALUE
);
13145 /* The following code determines the architecture and register size.
13146 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
13147 The GAS and GCC code should be kept in sync as much as possible. */
13149 if (mips_arch_string
!= 0)
13150 mips_set_architecture (mips_parse_cpu (mips_arch_string
));
13152 if (mips_isa_option_info
!= 0)
13154 if (mips_arch_info
== 0)
13155 mips_set_architecture (mips_isa_option_info
);
13156 else if (mips_arch_info
->isa
!= mips_isa_option_info
->isa
)
13157 error ("%<-%s%> conflicts with the other architecture options, "
13158 "which specify a %s processor",
13159 mips_isa_option_info
->name
,
13160 mips_cpu_info_from_isa (mips_arch_info
->isa
)->name
);
13163 if (mips_arch_info
== 0)
13165 #ifdef MIPS_CPU_STRING_DEFAULT
13166 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT
));
13168 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT
));
13172 if (ABI_NEEDS_64BIT_REGS
&& !ISA_HAS_64BIT_REGS
)
13173 error ("%<-march=%s%> is not compatible with the selected ABI",
13174 mips_arch_info
->name
);
13176 /* Optimize for mips_arch, unless -mtune selects a different processor. */
13177 if (mips_tune_string
!= 0)
13178 mips_set_tune (mips_parse_cpu (mips_tune_string
));
13180 if (mips_tune_info
== 0)
13181 mips_set_tune (mips_arch_info
);
13183 if ((target_flags_explicit
& MASK_64BIT
) != 0)
13185 /* The user specified the size of the integer registers. Make sure
13186 it agrees with the ABI and ISA. */
13187 if (TARGET_64BIT
&& !ISA_HAS_64BIT_REGS
)
13188 error ("%<-mgp64%> used with a 32-bit processor");
13189 else if (!TARGET_64BIT
&& ABI_NEEDS_64BIT_REGS
)
13190 error ("%<-mgp32%> used with a 64-bit ABI");
13191 else if (TARGET_64BIT
&& ABI_NEEDS_32BIT_REGS
)
13192 error ("%<-mgp64%> used with a 32-bit ABI");
13196 /* Infer the integer register size from the ABI and processor.
13197 Restrict ourselves to 32-bit registers if that's all the
13198 processor has, or if the ABI cannot handle 64-bit registers. */
13199 if (ABI_NEEDS_32BIT_REGS
|| !ISA_HAS_64BIT_REGS
)
13200 target_flags
&= ~MASK_64BIT
;
13202 target_flags
|= MASK_64BIT
;
13205 if ((target_flags_explicit
& MASK_FLOAT64
) != 0)
13207 if (TARGET_SINGLE_FLOAT
&& TARGET_FLOAT64
)
13208 error ("unsupported combination: %s", "-mfp64 -msingle-float");
13209 else if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
&& !TARGET_FLOAT64
)
13210 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
13211 else if (!TARGET_64BIT
&& TARGET_FLOAT64
)
13213 if (!ISA_HAS_MXHC1
)
13214 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
13215 " the target supports the mfhc1 and mthc1 instructions");
13216 else if (mips_abi
!= ABI_32
)
13217 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
13223 /* -msingle-float selects 32-bit float registers. Otherwise the
13224 float registers should be the same size as the integer ones. */
13225 if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
)
13226 target_flags
|= MASK_FLOAT64
;
13228 target_flags
&= ~MASK_FLOAT64
;
13231 /* End of code shared with GAS. */
13233 /* If no -mlong* option was given, infer it from the other options. */
13234 if ((target_flags_explicit
& MASK_LONG64
) == 0)
13236 if ((mips_abi
== ABI_EABI
&& TARGET_64BIT
) || mips_abi
== ABI_64
)
13237 target_flags
|= MASK_LONG64
;
13239 target_flags
&= ~MASK_LONG64
;
13242 if (!TARGET_OLDABI
)
13243 flag_pcc_struct_return
= 0;
13245 /* Decide which rtx_costs structure to use. */
13247 mips_cost
= &mips_rtx_cost_optimize_size
;
13249 mips_cost
= &mips_rtx_cost_data
[mips_tune
];
13251 /* If the user hasn't specified a branch cost, use the processor's
13253 if (mips_branch_cost
== 0)
13254 mips_branch_cost
= mips_cost
->branch_cost
;
13256 /* If neither -mbranch-likely nor -mno-branch-likely was given
13257 on the command line, set MASK_BRANCHLIKELY based on the target
13258 architecture and tuning flags. Annulled delay slots are a
13259 size win, so we only consider the processor-specific tuning
13260 for !optimize_size. */
13261 if ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0)
13263 if (ISA_HAS_BRANCHLIKELY
13265 || (mips_tune_info
->tune_flags
& PTF_AVOID_BRANCHLIKELY
) == 0))
13266 target_flags
|= MASK_BRANCHLIKELY
;
13268 target_flags
&= ~MASK_BRANCHLIKELY
;
13270 else if (TARGET_BRANCHLIKELY
&& !ISA_HAS_BRANCHLIKELY
)
13271 warning (0, "the %qs architecture does not support branch-likely"
13272 " instructions", mips_arch_info
->name
);
13274 /* The effect of -mabicalls isn't defined for the EABI. */
13275 if (mips_abi
== ABI_EABI
&& TARGET_ABICALLS
)
13277 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
13278 target_flags
&= ~MASK_ABICALLS
;
13281 if (TARGET_ABICALLS_PIC2
)
13282 /* We need to set flag_pic for executables as well as DSOs
13283 because we may reference symbols that are not defined in
13284 the final executable. (MIPS does not use things like
13285 copy relocs, for example.)
13287 There is a body of code that uses __PIC__ to distinguish
13288 between -mabicalls and -mno-abicalls code. The non-__PIC__
13289 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
13290 long as any indirect jumps use $25. */
13293 /* -mvr4130-align is a "speed over size" optimization: it usually produces
13294 faster code, but at the expense of more nops. Enable it at -O3 and
13296 if (optimize
> 2 && (target_flags_explicit
& MASK_VR4130_ALIGN
) == 0)
13297 target_flags
|= MASK_VR4130_ALIGN
;
13299 /* Prefer a call to memcpy over inline code when optimizing for size,
13300 though see MOVE_RATIO in mips.h. */
13301 if (optimize_size
&& (target_flags_explicit
& MASK_MEMCPY
) == 0)
13302 target_flags
|= MASK_MEMCPY
;
13304 /* If we have a nonzero small-data limit, check that the -mgpopt
13305 setting is consistent with the other target flags. */
13306 if (mips_small_data_threshold
> 0)
13310 if (!TARGET_EXPLICIT_RELOCS
)
13311 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
13313 TARGET_LOCAL_SDATA
= false;
13314 TARGET_EXTERN_SDATA
= false;
13318 if (TARGET_VXWORKS_RTP
)
13319 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
13321 if (TARGET_ABICALLS
)
13322 warning (0, "cannot use small-data accesses for %qs",
13327 #ifdef MIPS_TFMODE_FORMAT
13328 REAL_MODE_FORMAT (TFmode
) = &MIPS_TFMODE_FORMAT
;
13331 /* Make sure that the user didn't turn off paired single support when
13332 MIPS-3D support is requested. */
13334 && (target_flags_explicit
& MASK_PAIRED_SINGLE_FLOAT
)
13335 && !TARGET_PAIRED_SINGLE_FLOAT
)
13336 error ("%<-mips3d%> requires %<-mpaired-single%>");
13338 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
13340 target_flags
|= MASK_PAIRED_SINGLE_FLOAT
;
13342 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
13343 and TARGET_HARD_FLOAT_ABI are both true. */
13344 if (TARGET_PAIRED_SINGLE_FLOAT
&& !(TARGET_FLOAT64
&& TARGET_HARD_FLOAT_ABI
))
13345 error ("%qs must be used with %qs",
13346 TARGET_MIPS3D
? "-mips3d" : "-mpaired-single",
13347 TARGET_HARD_FLOAT_ABI
? "-mfp64" : "-mhard-float");
13349 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
13351 if (TARGET_PAIRED_SINGLE_FLOAT
&& !ISA_HAS_PAIRED_SINGLE
)
13352 warning (0, "the %qs architecture does not support paired-single"
13353 " instructions", mips_arch_info
->name
);
13355 /* If TARGET_DSPR2, enable MASK_DSP. */
13357 target_flags
|= MASK_DSP
;
13359 mips_init_print_operand_punct ();
13361 /* Set up array to map GCC register number to debug register number.
13362 Ignore the special purpose register numbers. */
13364 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
13366 mips_dbx_regno
[i
] = INVALID_REGNUM
;
13367 if (GP_REG_P (i
) || FP_REG_P (i
) || ALL_COP_REG_P (i
))
13368 mips_dwarf_regno
[i
] = i
;
13370 mips_dwarf_regno
[i
] = INVALID_REGNUM
;
13373 start
= GP_DBX_FIRST
- GP_REG_FIRST
;
13374 for (i
= GP_REG_FIRST
; i
<= GP_REG_LAST
; i
++)
13375 mips_dbx_regno
[i
] = i
+ start
;
13377 start
= FP_DBX_FIRST
- FP_REG_FIRST
;
13378 for (i
= FP_REG_FIRST
; i
<= FP_REG_LAST
; i
++)
13379 mips_dbx_regno
[i
] = i
+ start
;
13381 /* Accumulator debug registers use big-endian ordering. */
13382 mips_dbx_regno
[HI_REGNUM
] = MD_DBX_FIRST
+ 0;
13383 mips_dbx_regno
[LO_REGNUM
] = MD_DBX_FIRST
+ 1;
13384 mips_dwarf_regno
[HI_REGNUM
] = MD_REG_FIRST
+ 0;
13385 mips_dwarf_regno
[LO_REGNUM
] = MD_REG_FIRST
+ 1;
13386 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
13388 mips_dwarf_regno
[i
+ TARGET_LITTLE_ENDIAN
] = i
;
13389 mips_dwarf_regno
[i
+ TARGET_BIG_ENDIAN
] = i
+ 1;
13392 /* Set up mips_hard_regno_mode_ok. */
13393 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
13394 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
13395 mips_hard_regno_mode_ok
[(int)mode
][regno
]
13396 = mips_hard_regno_mode_ok_p (regno
, mode
);
13398 /* Function to allocate machine-dependent function status. */
13399 init_machine_status
= &mips_init_machine_status
;
13401 /* Default to working around R4000 errata only if the processor
13402 was selected explicitly. */
13403 if ((target_flags_explicit
& MASK_FIX_R4000
) == 0
13404 && mips_matching_cpu_name_p (mips_arch_info
->name
, "r4000"))
13405 target_flags
|= MASK_FIX_R4000
;
13407 /* Default to working around R4400 errata only if the processor
13408 was selected explicitly. */
13409 if ((target_flags_explicit
& MASK_FIX_R4400
) == 0
13410 && mips_matching_cpu_name_p (mips_arch_info
->name
, "r4400"))
13411 target_flags
|= MASK_FIX_R4400
;
13413 /* Save base state of options. */
13414 mips_base_target_flags
= target_flags
;
13415 mips_base_delayed_branch
= flag_delayed_branch
;
13416 mips_base_schedule_insns
= flag_schedule_insns
;
13417 mips_base_reorder_blocks_and_partition
= flag_reorder_blocks_and_partition
;
13418 mips_base_move_loop_invariants
= flag_move_loop_invariants
;
13419 mips_base_align_loops
= align_loops
;
13420 mips_base_align_jumps
= align_jumps
;
13421 mips_base_align_functions
= align_functions
;
13423 /* Now select the ISA mode.
13425 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
13426 MIPS16 mode afterwards if need be. */
13427 mips_set_mips16_mode (false);
13429 /* We call dbr_schedule from within mips_reorg. */
13430 flag_delayed_branch
= 0;
13433 /* Swap the register information for registers I and I + 1, which
13434 currently have the wrong endianness. Note that the registers'
13435 fixedness and call-clobberedness might have been set on the
13439 mips_swap_registers (unsigned int i
)
13444 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
13445 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
13447 SWAP_INT (fixed_regs
[i
], fixed_regs
[i
+ 1]);
13448 SWAP_INT (call_used_regs
[i
], call_used_regs
[i
+ 1]);
13449 SWAP_INT (call_really_used_regs
[i
], call_really_used_regs
[i
+ 1]);
13450 SWAP_STRING (reg_names
[i
], reg_names
[i
+ 1]);
13456 /* Implement CONDITIONAL_REGISTER_USAGE. */
13459 mips_conditional_register_usage (void)
13465 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
++)
13466 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
13468 if (!TARGET_HARD_FLOAT
)
13472 for (regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
13473 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
13474 for (regno
= ST_REG_FIRST
; regno
<= ST_REG_LAST
; regno
++)
13475 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
13477 else if (! ISA_HAS_8CC
)
13481 /* We only have a single condition-code register. We implement
13482 this by fixing all the condition-code registers and generating
13483 RTL that refers directly to ST_REG_FIRST. */
13484 for (regno
= ST_REG_FIRST
; regno
<= ST_REG_LAST
; regno
++)
13485 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
13487 /* In MIPS16 mode, we permit the $t temporary registers to be used
13488 for reload. We prohibit the unused $s registers, since they
13489 are call-saved, and saving them via a MIPS16 register would
13490 probably waste more time than just reloading the value. */
13493 fixed_regs
[18] = call_used_regs
[18] = 1;
13494 fixed_regs
[19] = call_used_regs
[19] = 1;
13495 fixed_regs
[20] = call_used_regs
[20] = 1;
13496 fixed_regs
[21] = call_used_regs
[21] = 1;
13497 fixed_regs
[22] = call_used_regs
[22] = 1;
13498 fixed_regs
[23] = call_used_regs
[23] = 1;
13499 fixed_regs
[26] = call_used_regs
[26] = 1;
13500 fixed_regs
[27] = call_used_regs
[27] = 1;
13501 fixed_regs
[30] = call_used_regs
[30] = 1;
13503 /* $f20-$f23 are call-clobbered for n64. */
13504 if (mips_abi
== ABI_64
)
13507 for (regno
= FP_REG_FIRST
+ 20; regno
< FP_REG_FIRST
+ 24; regno
++)
13508 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
13510 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
13512 if (mips_abi
== ABI_N32
)
13515 for (regno
= FP_REG_FIRST
+ 21; regno
<= FP_REG_FIRST
+ 31; regno
+=2)
13516 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
13518 /* Make sure that double-register accumulator values are correctly
13519 ordered for the current endianness. */
13520 if (TARGET_LITTLE_ENDIAN
)
13522 unsigned int regno
;
13524 mips_swap_registers (MD_REG_FIRST
);
13525 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
+= 2)
13526 mips_swap_registers (regno
);
13530 /* Initialize vector TARGET to VALS. */
13533 mips_expand_vector_init (rtx target
, rtx vals
)
13535 enum machine_mode mode
;
13536 enum machine_mode inner
;
13537 unsigned int i
, n_elts
;
13540 mode
= GET_MODE (target
);
13541 inner
= GET_MODE_INNER (mode
);
13542 n_elts
= GET_MODE_NUNITS (mode
);
13544 gcc_assert (VECTOR_MODE_P (mode
));
13546 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
), 0);
13547 for (i
= 0; i
< n_elts
; i
++)
13548 emit_move_insn (adjust_address_nv (mem
, inner
, i
* GET_MODE_SIZE (inner
)),
13549 XVECEXP (vals
, 0, i
));
13551 emit_move_insn (target
, mem
);
/* When generating MIPS16 code, we want to allocate $24 (T_REG) before
   other registers for instructions for which it is possible.  This
   encourages the compiler to use CMP in cases where an XOR would
   require some register shuffling.  */

void
mips_order_regs_for_local_alloc (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    reg_alloc_order[i] = i;

  if (TARGET_MIPS16)
    {
      /* It really doesn't matter where we put register 0, since it is
         a fixed register anyhow.  */
      reg_alloc_order[0] = 24;
      reg_alloc_order[24] = 0;
    }
}

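/* Illustration of the CMP/XOR point above (not from the original sources):
   the MIPS16 CMP instruction writes its XOR-style result implicitly to
   $24, so an equality test can be emitted as

        cmp     $2,$3           # $24 = $2 ^ $3
        bteqz   1f              # branch if $2 == $3

   Putting $24 first in the allocation order makes this form available
   without first shuffling a result into an ordinary MIPS16 register.  */
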
/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT mips_sched_init
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 mips_sched_reorder
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS		\
  (TARGET_DEFAULT				\
   | TARGET_CPU_DEFAULT				\
   | TARGET_ENDIAN_DEFAULT			\
   | TARGET_FP_EXCEPTIONS_DEFAULT		\
   | MASK_CHECK_ZERO_DIV			\
   | MASK_FUSED_MADD)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION mips_handle_option

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
/* All our function attributes are related to how out-of-line copies should
   be compiled or called.  They don't in themselves prevent inlining.  */
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span

struct gcc_target targetm = TARGET_INITIALIZER;
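/* The TARGET_* overrides above are gathered by TARGET_INITIALIZER into
   the targetm vector that target-independent code consults; for example,
   the scheduler obtains the value computed by mips_issue_rate through
   targetm.sched.issue_rate ().  */
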
#include "gt-mips.h"