gcc/config/mips/mips.c
/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60 #include "diagnostic.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
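
/* For illustration (not part of the original sources): a wrapped address
   produced for, say, a GP-relative symbol has the shape

       (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)

   so UNSPEC_ADDRESS_P checks the code and the XINT range, UNSPEC_ADDRESS
   extracts the inner (symbol_ref "foo"), and UNSPEC_ADDRESS_TYPE recovers
   SYMBOL_GP_RELATIVE by subtracting UNSPEC_ADDRESS_FIRST.  */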
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_MIPS16 ? 0x7ff0						\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8				\
   : TARGET_64BIT ? 0x100 : 0x400)
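
/* Illustrative sketch only (the helper below is hypothetical, not part of
   this file): when a frame is larger than MIPS_MAX_FIRST_STACK_STEP, the
   prologue is expected to drop $sp by at most this step first, save the
   registers relative to that partial frame, and then allocate the rest:

       static HOST_WIDE_INT
       first_stack_step (HOST_WIDE_INT total_size)
       {
         if (total_size <= MIPS_MAX_FIRST_STACK_STEP)
           return total_size;              // one adjustment is enough
         return MIPS_MAX_FIRST_STACK_STEP; // save registers, then allocate the rest
       }

   For example, a 0x9000-byte frame in normal mode would be split into a
   0x7ff0-byte step followed by a 0x1010-byte step.  */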
/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN)						\
  (INSN_P (INSN)							\
   && GET_CODE (PATTERN (INSN)) != USE					\
   && GET_CODE (PATTERN (INSN)) != CLOBBER				\
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC				\
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, 0)					\
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)	\
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
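
/* Usage sketch (illustrative, not from the original file): FOR_EACH_SUBINSN
   visits INSN itself for an ordinary instruction and each element of a
   delay-slot SEQUENCE in turn, so callers can treat both cases uniformly:

       static unsigned int
       count_subinsns (rtx insn)
       {
         rtx subinsn;
         unsigned int count = 0;

         FOR_EACH_SUBINSN (subinsn, insn)
           count++;   // 1 for a plain insn, 2 or more for a filled delay slot
         return count;
       }
*/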
/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
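
/* For example (illustrative): a stack slot address such as 16($sp) is
   ADDRESS_REG with base $sp and offset 16; a %lo(foo)($at) LO_SUM is
   ADDRESS_LO_SUM; a bare constant such as 0x7ff0 is ADDRESS_CONST_INT;
   and a constant symbol like foo or foo+4 is ADDRESS_SYMBOLIC.  */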
/* Macros to create an enumeration identifier for a function prototype.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E

/* Classifies the prototype of a built-in function.  */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};

/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};
/* Invoke MACRO (COND) for each C.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f),	\
  MACRO (un),	\
  MACRO (eq),	\
  MACRO (ueq),	\
  MACRO (olt),	\
  MACRO (ult),	\
  MACRO (ole),	\
  MACRO (ule),	\
  MACRO (sf),	\
  MACRO (ngle),	\
  MACRO (seq),	\
  MACRO (ngl),	\
  MACRO (lt),	\
  MACRO (nge),	\
  MACRO (le),	\
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
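
/* A minimal standalone analog of the "X macro" trick above (hypothetical
   names, for illustration only): one list expands once to declare the
   enumeration and once to build the parallel string table, so the two can
   never get out of sync:

       #define COLORS(MACRO) MACRO (red), MACRO (green), MACRO (blue)
       #define DECLARE_COLOR(X) COLOR_ ## X
       #define COLOR_NAME(X) #X

       enum color { COLORS (DECLARE_COLOR) };
       static const char *const color_names[] = { COLORS (COLOR_NAME) };

   Just as color_names[COLOR_green] is "green", the table above gives
   mips_fp_conditions[MIPS_FP_COND_eq] == "eq".  */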
/* Information about a function's frame layout.  */
struct mips_frame_info GTY(()) {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* The number of bytes allocated to variables.  */
  HOST_WIDE_INT var_size;

  /* The number of bytes allocated to outgoing function arguments.  */
  HOST_WIDE_INT args_size;

  /* The number of bytes allocated to the .cprestore slot, or 0 if there
     is no such slot.  */
  HOST_WIDE_INT cprestore_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* The number of GPRs and FPRs saved.  */
  unsigned int num_gp;
  unsigned int num_fp;

  /* The offset of the topmost GPR and FPR save slots from the top of
     the frame, or zero if no such slots are needed.  */
  HOST_WIDE_INT gp_save_offset;
  HOST_WIDE_INT fp_save_offset;

  /* Likewise, but giving offsets from the bottom of the frame.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* The offset of arg_pointer_rtx from frame_pointer_rtx.  */
  HOST_WIDE_INT arg_pointer_offset;

  /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx.  */
  HOST_WIDE_INT hard_frame_pointer_offset;
};

struct machine_function GTY(()) {
  /* The register returned by mips16_gp_pseudo_reg; see there for details.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info.  */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer.  */
  unsigned int global_pointer;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function is known to have an instruction that needs $gp.  */
  bool has_gp_insn_p;

  /* True if we have emitted an instruction to initialize
     mips16_gp_pseudo_rtx.  */
  bool initialized_mips16_gp_pseudo_p;
};

/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};

/* Information about an address described by mips_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct mips_address_info {
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};

/* One stage in a constant building sequence.  These sequences have
   the form:

	A = VALUE[0]
	A = A CODE[1] VALUE[1]
	A = A CODE[2] VALUE[2]
	...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
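
/* For example (illustrative), the worst accepted 64-bit case loads a
   constant such as 0x123456789abcdef1 with six operations:

       lui   $at, 0x1234          # 0x0000000012340000
       ori   $at, $at, 0x5678     # 0x0000000012345678
       dsll  $at, $at, 16         # 0x0000123456780000
       ori   $at, $at, 0x9abc     # 0x0000123456789abc
       dsll  $at, $at, 16         # 0x123456789abc0000
       ori   $at, $at, 0xdef1     # 0x123456789abcdef1

   which is the LUI,ORI,SLL,ORI,SLL,ORI pattern described above.  */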
/* Information about a MIPS16e SAVE or RESTORE instruction.  */
struct mips16e_save_restore_info {
  /* The number of argument registers saved by a SAVE instruction.
     0 for RESTORE instructions.  */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X.  */
  unsigned int mask;

  /* The total number of bytes to allocate.  */
  HOST_WIDE_INT size;
};

/* Global variables for machine-dependent things.  */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given.  */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename.  */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet.  */
const char *current_function_file = "";

/* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END.  */
int sdb_label_count;

/* Arrays that map GCC register numbers to debugger register numbers.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */
int set_noreorder;
int set_nomacro;
static int set_noat;

/* True if we're writing out a branch-likely instruction rather than a
   normal branch.  */
static bool mips_branch_likely;

/* The operands passed to the last cmpMM expander.  */
rtx cmp_operands[2];

/* The current instruction-set architecture.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The processor that we should tune the code for.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* The ISA level associated with mips_arch.  */
int mips_isa;

/* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
static const struct mips_cpu_info *mips_isa_option_info;

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Which cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* The ambient target flags, excluding MASK_MIPS16.  */
static int mips_base_target_flags;

/* True if MIPS16 is the default mode.  */
static bool mips_base_mips16;

/* The ambient values of other global variables.  */
static int mips_base_delayed_branch; /* flag_delayed_branch */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */

/* The -mcode-readable setting.  */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;

/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character.  */
bool mips_print_operand_punct[256];

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol.  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
/* Index R is the smallest register class that contains register R.  */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  LEA_REGS,	LEA_REGS,	M16_NA_REGS,	V1_REG,
  M16_REGS,	M16_REGS,	M16_REGS,	M16_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  M16_NA_REGS,	M16_NA_REGS,	LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  T_REG,	PIC_FN_ADDR_REG, LEA_REGS,	LEA_REGS,
  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
  MD0_REG,	MD1_REG,	NO_REGS,	ST_REGS,
  ST_REGS,	ST_REGS,	ST_REGS,	ST_REGS,
  ST_REGS,	ST_REGS,	ST_REGS,	NO_REGS,
  NO_REGS,	ALL_REGS,	ALL_REGS,	NO_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
  DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,
  DSP_ACC_REGS,	DSP_ACC_REGS,	ALL_REGS,	ALL_REGS,
  ALL_REGS,	ALL_REGS,	ALL_REGS,	ALL_REGS
};

/* The value of TARGET_ATTRIBUTE_TABLE.  */
const struct attribute_spec mips_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "long_call",   0, 0, false, true,  true,  NULL },
  { "far",     	   0, 0, false, true,  true,  NULL },
  { "near",        0, 0, false, true,  true,  NULL },
  /* We would really like to treat "mips16" and "nomips16" as type
     attributes, but GCC doesn't provide the hooks we need to support
     the right conversion rules.  As declaration attributes, they affect
     code generation but don't carry other semantics.  */
  { "mips16", 	   0, 0, true,  false, false, NULL },
  { "nomips16",    0, 0, true,  false, false, NULL },
  { NULL,	   0, 0, false, false, false, NULL }
};
/* A table describing all the processors GCC knows about.  Names are
   matched in the order listed.  The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order
   as GAS's mips_cpu_info_table.  Please also make sure that
   MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
   options correctly.  */
static const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs.  */
  { "mips1", PROCESSOR_R3000, 1, 0 },
  { "mips2", PROCESSOR_R6000, 2, 0 },
  { "mips3", PROCESSOR_R4000, 3, 0 },
  { "mips4", PROCESSOR_R8000, 4, 0 },
  /* Prefer not to use branch-likely instructions for generic MIPS32rX
     and MIPS64rX code.  The instructions were officially deprecated
     in revisions 2 and earlier, but revision 3 is likely to downgrade
     that to a recommendation to avoid the instructions in code that
     isn't tuned to a specific processor.  */
  { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
  { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
  { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },

  /* MIPS I processors.  */
  { "r3000", PROCESSOR_R3000, 1, 0 },
  { "r2000", PROCESSOR_R3000, 1, 0 },
  { "r3900", PROCESSOR_R3900, 1, 0 },

  /* MIPS II processors.  */
  { "r6000", PROCESSOR_R6000, 2, 0 },

  /* MIPS III processors.  */
  { "r4000", PROCESSOR_R4000, 3, 0 },
  { "vr4100", PROCESSOR_R4100, 3, 0 },
  { "vr4111", PROCESSOR_R4111, 3, 0 },
  { "vr4120", PROCESSOR_R4120, 3, 0 },
  { "vr4130", PROCESSOR_R4130, 3, 0 },
  { "vr4300", PROCESSOR_R4300, 3, 0 },
  { "r4400", PROCESSOR_R4000, 3, 0 },
  { "r4600", PROCESSOR_R4600, 3, 0 },
  { "orion", PROCESSOR_R4600, 3, 0 },
  { "r4650", PROCESSOR_R4650, 3, 0 },

  /* MIPS IV processors. */
  { "r8000", PROCESSOR_R8000, 4, 0 },
  { "vr5000", PROCESSOR_R5000, 4, 0 },
  { "vr5400", PROCESSOR_R5400, 4, 0 },
  { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
  { "rm7000", PROCESSOR_R7000, 4, 0 },
  { "rm9000", PROCESSOR_R9000, 4, 0 },

  /* MIPS32 processors.  */
  { "4kc", PROCESSOR_4KC, 32, 0 },
  { "4km", PROCESSOR_4KC, 32, 0 },
  { "4kp", PROCESSOR_4KP, 32, 0 },
  { "4ksc", PROCESSOR_4KC, 32, 0 },

  /* MIPS32 Release 2 processors.  */
  { "m4k", PROCESSOR_M4K, 33, 0 },
  { "4kec", PROCESSOR_4KC, 33, 0 },
  { "4kem", PROCESSOR_4KC, 33, 0 },
  { "4kep", PROCESSOR_4KP, 33, 0 },
  { "4ksd", PROCESSOR_4KC, 33, 0 },

  { "24kc", PROCESSOR_24KC, 33, 0 },
  { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kx", PROCESSOR_24KF1_1, 33, 0 },

  { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP.  */
  { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kex", PROCESSOR_24KF1_1, 33, 0 },

  { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP.  */
  { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "34kx", PROCESSOR_24KF1_1, 33, 0 },

  { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2.  */
  { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
  { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },

  /* MIPS64 processors.  */
  { "5kc", PROCESSOR_5KC, 64, 0 },
  { "5kf", PROCESSOR_5KF, 64, 0 },
  { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
  { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
};

/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */		\
                      COSTS_N_INSNS (7),  /* fp_mult_sf */	\
                      COSTS_N_INSNS (8),  /* fp_mult_df */	\
                      COSTS_N_INSNS (23), /* fp_div_sf */	\
                      COSTS_N_INSNS (36), /* fp_div_df */	\
                      COSTS_N_INSNS (10), /* int_mult_si */	\
                      COSTS_N_INSNS (10), /* int_mult_di */	\
                      COSTS_N_INSNS (69), /* int_div_si */	\
                      COSTS_N_INSNS (69), /* int_div_di */	\
                      2,                  /* branch_cost */	\
                      4                   /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */		\
                      COSTS_N_INSNS (256), /* fp_mult_sf */	\
                      COSTS_N_INSNS (256), /* fp_mult_df */	\
                      COSTS_N_INSNS (256), /* fp_div_sf */	\
                      COSTS_N_INSNS (256)  /* fp_div_df */

/* Costs to use when optimizing for size.  */
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
  COSTS_N_INSNS (1), /* fp_add */
  COSTS_N_INSNS (1), /* fp_mult_sf */
  COSTS_N_INSNS (1), /* fp_mult_df */
  COSTS_N_INSNS (1), /* fp_div_sf */
  COSTS_N_INSNS (1), /* fp_div_df */
  COSTS_N_INSNS (1), /* int_mult_si */
  COSTS_N_INSNS (1), /* int_mult_di */
  COSTS_N_INSNS (1), /* int_div_si */
  COSTS_N_INSNS (1), /* int_div_di */
  2,                 /* branch_cost */
  4                  /* memory_latency */
};
/* Costs to use when optimizing for speed, indexed by processor.  */
static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
  { /* R3000 */
    COSTS_N_INSNS (2),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (12), /* int_mult_si */
    COSTS_N_INSNS (12), /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 4KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (6),  /* int_mult_si */
    COSTS_N_INSNS (6),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 4KP */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (36), /* int_mult_si */
    COSTS_N_INSNS (36), /* int_mult_di */
    COSTS_N_INSNS (37), /* int_div_si */
    COSTS_N_INSNS (37), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 5KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 5KF */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 20KC */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (7),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KF2_1 */
    COSTS_N_INSNS (8),  /* fp_add */
    COSTS_N_INSNS (8),  /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KF1_1 */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF2_1 */
    COSTS_N_INSNS (8),  /* fp_add */
    COSTS_N_INSNS (8),  /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF1_1 */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF3_2 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (6),  /* fp_mult_sf */
    COSTS_N_INSNS (7),  /* fp_mult_df */
    COSTS_N_INSNS (25), /* fp_div_sf */
    COSTS_N_INSNS (48), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* M4k */
    DEFAULT_COSTS
  },
  { /* R3900 */
    COSTS_N_INSNS (2),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (2),  /* int_mult_si */
    COSTS_N_INSNS (2),  /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R6000 */
    COSTS_N_INSNS (3),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (15), /* fp_div_sf */
    COSTS_N_INSNS (16), /* fp_div_df */
    COSTS_N_INSNS (17), /* int_mult_si */
    COSTS_N_INSNS (17), /* int_mult_di */
    COSTS_N_INSNS (38), /* int_div_si */
    COSTS_N_INSNS (38), /* int_div_di */
    2,                  /* branch_cost */
    6                   /* memory_latency */
  },
  { /* R4000 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (10), /* int_mult_si */
    COSTS_N_INSNS (10), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    2,                  /* branch_cost */
    6                   /* memory_latency */
  },
  { /* R4100 */
    DEFAULT_COSTS
  },
  { /* R4111 */
    DEFAULT_COSTS
  },
  { /* R4120 */
    DEFAULT_COSTS
  },
  { /* R4130 */
    /* The only costs that appear to be updated here are
       integer multiplication.  */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (6),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R4300 */
    DEFAULT_COSTS
  },
  { /* R4600 */
    DEFAULT_COSTS
  },
  { /* R4650 */
    DEFAULT_COSTS
  },
  { /* R5000 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R5400 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R5500 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (9),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R7000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (9),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R8000 */
    DEFAULT_COSTS
  },
  { /* R9000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (8),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SB1 */
    /* These costs are the same as the SB-1A below.  */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (4),  /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SB1-A */
    /* These costs are the same as the SB-1 above.  */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (4),  /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SR71000 */
    DEFAULT_COSTS
  }
};
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
   for -mflip-mips16.  It maps decl names onto a boolean mode setting.  */
struct mflip_mips16_entry GTY (()) {
  const char *name;
  bool mips16_p;
};
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;

/* Hash table callbacks for mflip_mips16_htab.  */

static hashval_t
mflip_mips16_htab_hash (const void *entry)
{
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
}

static int
mflip_mips16_htab_eq (const void *entry, const void *name)
{
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
		 (const char *) name) == 0;
}

/* True if -mflip-mips16 should next add an attribute for the default MIPS16
   mode, false if it should next add an attribute for the opposite mode.  */
static GTY(()) bool mips16_flipper;

/* DECL is a function that needs a default "mips16" or "nomips16" attribute
   for -mflip-mips16.  Return true if it should use "mips16" and false if
   it should use "nomips16".  */

static bool
mflip_mips16_use_mips16_p (tree decl)
{
  struct mflip_mips16_entry *entry;
  const char *name;
  hashval_t hash;
  void **slot;

  /* Use the opposite of the command-line setting for anonymous decls.  */
  if (!DECL_NAME (decl))
    return !mips_base_mips16;

  if (!mflip_mips16_htab)
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
					 mflip_mips16_htab_eq, NULL);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));
  hash = htab_hash_string (name);
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
  entry = (struct mflip_mips16_entry *) *slot;
  if (!entry)
    {
      mips16_flipper = !mips16_flipper;
      entry = GGC_NEW (struct mflip_mips16_entry);
      entry->name = name;
      entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
      *slot = entry;
    }
  return entry->mips16_p;
}
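
/* A minimal standalone sketch of the same "flip" idea, using a flat table
   instead of the GC'd hash table (hypothetical names, illustration only;
   bounds checking omitted):

       #include <stdbool.h>
       #include <string.h>

       struct flip_entry { const char *name; bool mips16_p; };
       static struct flip_entry flip_table[1024];
       static unsigned int flip_count;
       static bool flipper, base_mips16;

       static bool
       flip_use_mips16_p (const char *name)
       {
         unsigned int i;

         for (i = 0; i < flip_count; i++)
           if (strcmp (flip_table[i].name, name) == 0)
             return flip_table[i].mips16_p;   // reuse the first decision

         flipper = !flipper;                  // alternate for each new name
         flip_table[flip_count].name = name;
         flip_table[flip_count].mips16_p = flipper ? !base_mips16 : base_mips16;
         return flip_table[flip_count++].mips16_p;
       }

   The real code above does the same thing with libiberty's htab_* routines
   so that lookups stay cheap and the entries survive garbage collection.  */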
/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE.  */

static bool
mips_near_type_p (const_tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (const_tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
	  || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}

/* Similar predicates for "mips16"/"nomips16" function attributes.  */

static bool
mips_mips16_decl_p (const_tree decl)
{
  return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
}

static bool
mips_nomips16_decl_p (const_tree decl)
{
  return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
}

/* Return true if function DECL is a MIPS16 function.  Return the ambient
   setting if DECL is null.  */

static bool
mips_use_mips16_mode_p (tree decl)
{
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
	 parent and must therefore use the same ISA mode.  */
      tree parent = decl_function_context (decl);
      if (parent)
	decl = parent;
      if (mips_mips16_decl_p (decl))
	return true;
      if (mips_nomips16_decl_p (decl))
	return false;
    }
  return mips_base_mips16;
}

/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */

static int
mips_comp_type_attributes (const_tree type1, const_tree type2)
{
  /* Disallow mixed near/far attributes.  */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;
  return 1;
}

/* Implement TARGET_INSERT_ATTRIBUTES.  */

static void
mips_insert_attributes (tree decl, tree *attributes)
{
  const char *name;
  bool mips16_p, nomips16_p;

  /* Check for "mips16" and "nomips16" attributes.  */
  mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
  nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      if (mips16_p)
	error ("%qs attribute only applies to functions", "mips16");
      if (nomips16_p)
	error ("%qs attribute only applies to functions", "nomips16");
    }
  else
    {
      mips16_p |= mips_mips16_decl_p (decl);
      nomips16_p |= mips_nomips16_decl_p (decl);
      if (mips16_p || nomips16_p)
	{
	  /* DECL cannot be simultaneously "mips16" and "nomips16".  */
	  if (mips16_p && nomips16_p)
	    error ("%qs cannot have both %<mips16%> and "
		   "%<nomips16%> attributes",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}
      else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
	{
	  /* Implement -mflip-mips16.  If DECL has neither a "nomips16" nor a
	     "mips16" attribute, arbitrarily pick one.  We must pick the same
	     setting for duplicate declarations of a function.  */
	  name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
	}
    }
}

/* Implement TARGET_MERGE_DECL_ATTRIBUTES.  */

static tree
mips_merge_decl_attributes (tree olddecl, tree newdecl)
{
  /* The decls' "mips16" and "nomips16" attributes must match exactly.  */
  if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
	   IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
  if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
	   IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");

  return merge_attributes (DECL_ATTRIBUTES (olddecl),
			   DECL_ATTRIBUTES (newdecl));
}

/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */

static void
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      *base_ptr = x;
      *offset_ptr = 0;
    }
}
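
/* For example (illustrative): passing (plus (reg X) (const_int 8)) to
   mips_split_plus stores (reg X) in *BASE_PTR and 8 in *OFFSET_PTR, while
   any other rtx, such as a bare register or symbol, comes back unchanged
   with a zero offset.  */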
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);

/* A subroutine of mips_build_integer, with the same interface.
   Assume that the final action in the sequence should be a left shift.  */

static unsigned int
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
{
  unsigned int i, shift;

  /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
     since signed numbers are easier to load than unsigned ones.  */
  shift = 0;
  while ((value & 1) == 0)
    value /= 2, shift++;

  i = mips_build_integer (codes, value);
  codes[i].code = ASHIFT;
  codes[i].value = shift;
  return i + 1;
}

/* As for mips_build_shift, but assume that the final action will be
   an IOR or PLUS operation.  */

static unsigned int
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
{
  unsigned HOST_WIDE_INT high;
  unsigned int i;

  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
    {
      /* The constant is too complex to load with a simple LUI/ORI pair,
	 so we want to give the recursive call as many trailing zeros as
	 possible.  In this case, we know bit 16 is set and that the
	 low 16 bits form a negative number.  If we subtract that number
	 from VALUE, we will clear at least the lowest 17 bits, maybe more.  */
      i = mips_build_integer (codes, CONST_HIGH_PART (value));
      codes[i].code = PLUS;
      codes[i].value = CONST_LOW_PART (value);
    }
  else
    {
      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
	 bits gives a value with at least 17 trailing zeros.  */
      i = mips_build_integer (codes, high);
      codes[i].code = IOR;
      codes[i].value = value & 0xffff;
    }
  return i + 1;
}
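
/* Worked example (illustrative): for VALUE == 0x223458001, bits 15 and 16
   are both set and the high part 0x223450000 is not a LUI operand, so the
   first branch applies: CONST_HIGH_PART gives 0x223460000, which has 17
   trailing zeros for the recursive call, and CONST_LOW_PART gives -0x7fff,
   which the final PLUS adds back in.  */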
/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static unsigned int
mips_build_integer (struct mips_integer_op *codes,
		    unsigned HOST_WIDE_INT value)
{
  if (SMALL_OPERAND (value)
      || SMALL_OPERAND_UNSIGNED (value)
      || LUI_OPERAND (value))
    {
      /* The value can be loaded with a single instruction.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
    {
      /* Either the constant is a simple LUI/ORI combination or its
	 lowest bit is set.  We don't want to shift in this case.  */
      return mips_build_lower (codes, value);
    }
  else if ((value & 0xffff) == 0)
    {
      /* The constant will need at least three actions.  The lowest
	 16 bits are clear, so the final action will be a shift.  */
      return mips_build_shift (codes, value);
    }
  else
    {
      /* The final action could be a shift, add or inclusive OR.
	 Rather than use a complex condition to select the best
	 approach, try both mips_build_shift and mips_build_lower
	 and pick the one that gives the shortest sequence.
	 Note that this case is only used once per constant.  */
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
      unsigned int cost, alt_cost;

      cost = mips_build_shift (codes, value);
      alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
	{
	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
	  cost = alt_cost;
	}
      return cost;
    }
}
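
/* A standalone, simplified sketch of the same decomposition for plain
   64-bit integers (hypothetical names; it omits the PLUS carry trick and
   the shift-versus-lower cost comparison performed above, so it is an
   illustration rather than a drop-in replacement):

       #include <stdint.h>

       enum step_op { OP_SET, OP_IOR, OP_SHIFT };
       struct step { enum step_op op; uint64_t value; };

       // Hypothetical stand-ins for SMALL_OPERAND, SMALL_OPERAND_UNSIGNED
       // and LUI_OPERAND.
       static int fits_simm16 (uint64_t v)
       { return (int64_t) v >= -0x8000 && (int64_t) v < 0x8000; }
       static int fits_uimm16 (uint64_t v)
       { return v < 0x10000; }
       static int fits_lui (uint64_t v)
       { return (v & 0xffff) == 0
		&& (int64_t) v >= -(int64_t) 0x80000000
		&& (int64_t) v < (int64_t) 0x80000000; }

       static unsigned int
       build_integer (struct step *steps, uint64_t value)
       {
	 unsigned int n, shift;

	 if (fits_simm16 (value) || fits_uimm16 (value) || fits_lui (value))
	   {
	     // One instruction: LI, ORI or LUI.
	     steps[0].op = OP_SET;
	     steps[0].value = value;
	     return 1;
	   }
	 if (value & 1)
	   {
	     // Load everything above the low 16 bits, then IOR them in.
	     n = build_integer (steps, value & ~(uint64_t) 0xffff);
	     steps[n].op = OP_IOR;
	     steps[n].value = value & 0xffff;
	     return n + 1;
	   }
	 // Low bit clear: strip the trailing zeros and finish with a shift.
	 shift = 0;
	 while ((value & 1) == 0)
	   value >>= 1, shift++;
	 n = build_integer (steps, value);
	 steps[n].op = OP_SHIFT;
	 steps[n].value = shift;
	 return n + 1;
       }

   A caller would size the STEPS array generously; the real code above
   bounds its accepted sequences by MIPS_MAX_INTEGER_OPS.  */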
/* Return true if X is a thread-local symbol.  */

static bool
mips_tls_symbol_p (rtx x)
{
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if SYMBOL_REF X is associated with a global symbol
   (in the STB_GLOBAL sense).  */

static bool
mips_global_symbol_p (const_rtx x)
{
  const_tree decl = SYMBOL_REF_DECL (x);

  if (!decl)
    return !SYMBOL_REF_LOCAL_P (x);

  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
     or weak symbols.  Relocations in the object file will be against
     the target symbol, so it's that symbol's binding that matters here.  */
  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
}

/* Return true if SYMBOL_REF X binds locally.  */

static bool
mips_symbol_binds_local_p (const_rtx x)
{
  return (SYMBOL_REF_DECL (x)
	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	  : SYMBOL_REF_LOCAL_P (x));
}

/* Return true if rtx constants of mode MODE should be put into a small
   data section.  */

static bool
mips_rtx_constant_in_small_data_p (enum machine_mode mode)
{
  return (!TARGET_EMBEDDED_DATA
	  && TARGET_LOCAL_SDATA
	  && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
}

/* Return true if X should not be moved directly into register $25.
   We need this because many versions of GAS will treat "la $25,foo" as
   part of a call sequence and so allow a global "foo" to be lazily bound.  */

bool
mips_dangerous_for_la25_p (rtx x)
{
  return (!TARGET_EXPLICIT_RELOCS
	  && TARGET_USE_GOT
	  && GET_CODE (x) == SYMBOL_REF
	  && mips_global_symbol_p (x));
}

/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X in context CONTEXT.  */

static enum mips_symbol_type
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
{
  if (TARGET_RTP_PIC)
    return SYMBOL_GOT_DISP;

  if (GET_CODE (x) == LABEL_REF)
    {
      /* LABEL_REFs are used for jump tables as well as text labels.
	 Only return SYMBOL_PC_RELATIVE if we know the label is in
	 the text section.  */
      if (TARGET_MIPS16_SHORT_JUMP_TABLES)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_PAGE_OFST;

      return SYMBOL_ABSOLUTE;
    }

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (SYMBOL_REF_TLS_MODEL (x))
    return SYMBOL_TLS;

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (TARGET_MIPS16_TEXT_LOADS)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
	return SYMBOL_PC_RELATIVE;

      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
	return SYMBOL_GP_RELATIVE;
    }

  /* Do not use small-data accesses for weak symbols; they may end up
     being zero.  */
  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
    return SYMBOL_GP_RELATIVE;

  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
     is in effect.  */
  if (TARGET_ABICALLS
      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
    {
      /* There are three cases to consider:

	 - o32 PIC (either with or without explicit relocs)
	 - n32/n64 PIC without explicit relocs
	 - n32/n64 PIC with explicit relocs

	 In the first case, both local and global accesses will use an
	 R_MIPS_GOT16 relocation.  We must correctly predict which of
	 the two semantics (local or global) the assembler and linker
	 will apply.  The choice depends on the symbol's binding rather
	 than its visibility.

	 In the second case, the assembler will not use R_MIPS_GOT16
	 relocations, but it chooses between local and global accesses
	 in the same way as for o32 PIC.

	 In the third case we have more freedom since both forms of
	 access will work for any kind of symbol.  However, there seems
	 little point in doing things differently.  */
      if (mips_global_symbol_p (x))
	return SYMBOL_GOT_DISP;

      return SYMBOL_GOT_PAGE_OFST;
    }

  if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
    return SYMBOL_FORCE_TO_MEM;

  return SYMBOL_ABSOLUTE;
}
/* Classify the base of symbolic expression X, given that X appears in
   context CONTEXT.  */

static enum mips_symbol_type
mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return mips_classify_symbol (x, context);
}

/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
   is the alignment in bytes of SYMBOL_REF X.  */

static bool
mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
{
  HOST_WIDE_INT align;

  align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
  return IN_RANGE (offset, 0, align - 1);
}

/* Return true if X is a symbolic constant that can be used in context
   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */

bool
mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
			  enum mips_symbol_type *symbol_type)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    {
      *symbol_type = mips_classify_symbol (x, context);
      if (*symbol_type == SYMBOL_TLS)
	return false;
    }
  else
    return false;

  if (offset == const0_rtx)
    return true;

  /* Check whether a nonzero offset is valid for the underlying
     relocations.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_FORCE_TO_MEM:
    case SYMBOL_32_HIGH:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
      /* If the target has 64-bit pointers and the object file only
	 supports 32-bit symbols, the values of those symbols will be
	 sign-extended.  In this case we can't allow an arbitrary offset
	 in case the 32-bit value X + OFFSET has a different sign from X.  */
      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
	return offset_within_block_p (x, INTVAL (offset));

      /* In other cases the relocations can handle any offset.  */
      return true;

    case SYMBOL_PC_RELATIVE:
      /* Allow constant pool references to be converted to LABEL+CONSTANT.
	 In this case, we no longer have access to the underlying constant,
	 but the original symbol-based access was known to be valid.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;

      /* Fall through.  */

    case SYMBOL_GP_RELATIVE:
      /* Make sure that the offset refers to something within the
	 same object block.  This should guarantee that the final
	 PC- or GP-relative offset is within the 16-bit limit.  */
      return offset_within_block_p (x, INTVAL (offset));

    case SYMBOL_GOT_PAGE_OFST:
    case SYMBOL_GOTOFF_PAGE:
      /* If the symbol is global, the GOT entry will contain the symbol's
	 address, and we will apply a 16-bit offset after loading it.
	 If the symbol is local, the linker should provide enough local
	 GOT entries for a 16-bit offset, but larger offsets may lead
	 to GOT overflow.  */
      return SMALL_INT (offset);

    case SYMBOL_TPREL:
    case SYMBOL_DTPREL:
      /* There is no carry between the HI and LO REL relocations, so the
	 offset is only valid if we know it won't lead to such a carry.  */
      return mips_offset_within_alignment_p (x, INTVAL (offset));

    case SYMBOL_GOT_DISP:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TLS:
    case SYMBOL_HALF:
      return false;
    }
  gcc_unreachable ();
}
/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
   single instruction.  We rely on the fact that, in the worst case,
   all instructions involved in a MIPS16 address calculation are usually
   extended ones.  */

static int
mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
{
  switch (type)
    {
    case SYMBOL_ABSOLUTE:
      /* When using 64-bit symbols, we need 5 preparatory instructions,
	 such as:

	     lui     $at,%highest(symbol)
	     daddiu  $at,$at,%higher(symbol)
	     dsll    $at,$at,16
	     daddiu  $at,$at,%hi(symbol)
	     dsll    $at,$at,16

	 The final address is then $at + %lo(symbol).  With 32-bit
	 symbols we just need a preparatory LUI for normal mode and
	 a preparatory LI and SLL for MIPS16.  */
      return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_GP_RELATIVE:
      /* Treat GP-relative accesses as taking a single instruction on
	 MIPS16 too; the copy of $gp can often be shared.  */
      return 1;

    case SYMBOL_PC_RELATIVE:
      /* PC-relative constants can only be used with ADDIUPC,
	 DADDIUPC, LWPC and LDPC.  */
      if (mode == MAX_MACHINE_MODE
	  || GET_MODE_SIZE (mode) == 4
	  || GET_MODE_SIZE (mode) == 8)
	return 1;

      /* The constant must be loaded using ADDIUPC or DADDIUPC first.  */
      return 0;

    case SYMBOL_FORCE_TO_MEM:
      /* LEAs will be converted into constant-pool references by
	 mips_reorg.  */
      if (mode == MAX_MACHINE_MODE)
	return 1;

      /* The constant must be loaded and then dereferenced.  */
      return 0;

    case SYMBOL_GOT_DISP:
      /* The constant will have to be loaded from the GOT before it
	 is used in an address.  */
      if (mode != MAX_MACHINE_MODE)
	return 0;

      /* Fall through.  */

    case SYMBOL_GOT_PAGE_OFST:
      /* Unless -funit-at-a-time is in effect, we can't be sure whether the
	 local/global classification is accurate.  The worst cases are:

	 (1) For local symbols when generating o32 or o64 code.  The assembler
	     will use:

		 lw	$at,%got(symbol)
		 nop

	     ...and the final address will be $at + %lo(symbol).

	 (2) For global symbols when -mxgot.  The assembler will use:

		 lui	 $at,%got_hi(symbol)
		 (d)addu $at,$at,$gp

	     ...and the final address will be $at + %got_lo(symbol).  */
      return 3;

    case SYMBOL_GOTOFF_PAGE:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_32_HIGH:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_DTPREL:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TPREL:
    case SYMBOL_HALF:
      /* A 16-bit constant formed by a single relocation, or a 32-bit
	 constant formed from a high 16-bit relocation and a low 16-bit
	 relocation.  Use mips_split_p to determine which.  32-bit
	 constants need an "lui; addiu" sequence for normal mode and
	 an "li; sll; addiu" sequence for MIPS16 mode.  */
      return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_TLS:
      /* We don't treat a bare TLS symbol as a constant.  */
      return 0;
    }
  gcc_unreachable ();
}

/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
   to load symbols of type TYPE into a register.  Return 0 if the given
   type of symbol cannot be used as an immediate operand.

   Otherwise, return the number of instructions needed to load or store
   values of mode MODE to or from addresses of type TYPE.  Return 0 if
   the given type of symbol is not valid in addresses.

   In both cases, treat extended MIPS16 instructions as two instructions.  */

static int
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
{
  return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}
/* A for_each_rtx callback.  Stop the search if *X references a
   thread-local symbol.  */

static int
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return mips_tls_symbol_p (*x);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
mips_cannot_force_const_mem (rtx x)
{
  rtx base, offset;

  if (!TARGET_MIPS16)
    {
      /* As an optimization, reject constants that mips_legitimize_move
	 can expand inline.

	 Suppose we have a multi-instruction sequence that loads constant C
	 into register R.  If R does not get allocated a hard register, and
	 R is used in an operand that allows both registers and memory
	 references, reload will consider forcing C into memory and using
	 one of the instruction's memory alternatives.  Returning false
	 here will force it to use an input reload instead.  */
      if (GET_CODE (x) == CONST_INT)
	return true;

      split_const (x, &base, &offset);
      if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
	return true;
    }

  /* TLS symbols must be computed by mips_legitimize_move.  */
  if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
    return true;

  return false;
}

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
   constants when we're using a per-function constant pool.  */

static bool
mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
				const_rtx x ATTRIBUTE_UNUSED)
{
  return !TARGET_MIPS16_PCREL_LOADS;
}

/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

int
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
			       bool strict_p)
{
  if (!HARD_REGISTER_NUM_P (regno))
    {
      if (!strict_p)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  /* In MIPS16 mode, the stack pointer can only address word and doubleword
     values, nothing smaller.  There are two problems here:

       (a) Instantiating virtual registers can introduce new uses of the
	   stack pointer.  If these virtual registers are valid addresses,
	   the stack pointer should be too.

       (b) Most uses of the stack pointer are not made explicit until
	   FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
	   We don't know until that stage whether we'll be eliminating to the
	   stack pointer (which needs the restriction) or the hard frame
	   pointer (which doesn't).

     All in all, it seems more consistent to only enforce this restriction
     during and after reload.  */
  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
    return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;

  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
}

/* Return true if X is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

static bool
mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
{
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
}

/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
mips_classify_address (struct mips_address_info *info, rtx x,
		       enum machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return mips_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
1821 info->type = ADDRESS_REG;
1822 info->reg = XEXP (x, 0);
1823 info->offset = XEXP (x, 1);
1824 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1825 && const_arith_operand (info->offset, VOIDmode));
1827 case LO_SUM:
1828 info->type = ADDRESS_LO_SUM;
1829 info->reg = XEXP (x, 0);
1830 info->offset = XEXP (x, 1);
1831 /* We have to trust the creator of the LO_SUM to do something vaguely
1832 sane. Target-independent code that creates a LO_SUM should also
1833 create and verify the matching HIGH. Target-independent code that
1834 adds an offset to a LO_SUM must prove that the offset will not
1835 induce a carry. Failure to do either of these things would be
1836 a bug, and we are not required to check for it here. The MIPS
1837 backend itself should only create LO_SUMs for valid symbolic
1838 constants, with the high part being either a HIGH or a copy
1839 of _gp. */
1840 info->symbol_type
1841 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1842 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1843 && mips_symbol_insns (info->symbol_type, mode) > 0
1844 && mips_lo_relocs[info->symbol_type] != 0);
1846 case CONST_INT:
1847 /* Small-integer addresses don't occur very often, but they
1848 are legitimate if $0 is a valid base register. */
1849 info->type = ADDRESS_CONST_INT;
1850 return !TARGET_MIPS16 && SMALL_INT (x);
1852 case CONST:
1853 case LABEL_REF:
1854 case SYMBOL_REF:
1855 info->type = ADDRESS_SYMBOLIC;
1856 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1857 &info->symbol_type)
1858 && mips_symbol_insns (info->symbol_type, mode) > 0
1859 && !mips_split_p[info->symbol_type]);
1861 default:
1862 return false;
1866 /* Return true if X is a legitimate address for a memory operand of mode
1867 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
1869 bool
1870 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1872 struct mips_address_info addr;
1874 return mips_classify_address (&addr, x, mode, strict_p);
1877 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1879 bool
1880 mips_stack_address_p (rtx x, enum machine_mode mode)
1882 struct mips_address_info addr;
1884 return (mips_classify_address (&addr, x, mode, false)
1885 && addr.type == ADDRESS_REG
1886 && addr.reg == stack_pointer_rtx);
1889 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
1890 address instruction. Note that such addresses are not considered
1891 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
1892 is so restricted. */
1894 static bool
1895 mips_lwxs_address_p (rtx addr)
1897 if (ISA_HAS_LWXS
1898 && GET_CODE (addr) == PLUS
1899 && REG_P (XEXP (addr, 1)))
1901 rtx offset = XEXP (addr, 0);
1902 if (GET_CODE (offset) == MULT
1903 && REG_P (XEXP (offset, 0))
1904 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1905 && INTVAL (XEXP (offset, 1)) == 4)
1906 return true;
1908 return false;
1911 /* Return true if a value at OFFSET bytes from base register BASE can be
1912 accessed using an unextended MIPS16 instruction. MODE is the mode of
1913 the value.
1915 Usually the offset in an unextended instruction is a 5-bit field.
1916 The offset is unsigned and shifted left once for LH and SH, twice
1917 for LW and SW, and so on. The exceptions are LWSP and SWSP, which have
1918 an 8-bit immediate field that's shifted left twice. */
1920 static bool
1921 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
1922 unsigned HOST_WIDE_INT offset)
1924 if (offset % GET_MODE_SIZE (mode) == 0)
1926 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1927 return offset < 256U * GET_MODE_SIZE (mode);
1928 return offset < 32U * GET_MODE_SIZE (mode);
1930 return false;
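/* Editorial worked example (not part of the original source): with a
   general base register, an aligned LW or SW can stay unextended for
   offsets 0, 4, ..., 124 (a 5-bit field scaled by 4), and an aligned LH
   or SH for offsets 0, 2, ..., 62.  With $sp as the base, the LWSP and
   SWSP forms accept word offsets up to 255 * 4 = 1020.  */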
1933 /* Return the number of instructions needed to load or store a value
1934 of mode MODE at address X. Return 0 if X isn't valid for MODE.
1935 Assume that multiword moves may need to be split into word moves
1936 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
1937 enough.
1939 For MIPS16 code, count extended instructions as two instructions. */
1942 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1944 struct mips_address_info addr;
1945 int factor;
1947 /* BLKmode is used for single unaligned loads and stores and should
1948 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
1949 meaningless, so we have to single it out as a special case one way
1950 or the other.) */
1951 if (mode != BLKmode && might_split_p)
1952 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1953 else
1954 factor = 1;
1956 if (mips_classify_address (&addr, x, mode, false))
1957 switch (addr.type)
1959 case ADDRESS_REG:
1960 if (TARGET_MIPS16
1961 && !mips16_unextended_reference_p (mode, addr.reg,
1962 UINTVAL (addr.offset)))
1963 return factor * 2;
1964 return factor;
1966 case ADDRESS_LO_SUM:
1967 return TARGET_MIPS16 ? factor * 2 : factor;
1969 case ADDRESS_CONST_INT:
1970 return factor;
1972 case ADDRESS_SYMBOLIC:
1973 return factor * mips_symbol_insns (addr.symbol_type, mode);
1975 return 0;
1978 /* Return the number of instructions needed to load constant X.
1979 Return 0 if X isn't a valid constant. */
1982 mips_const_insns (rtx x)
1984 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1985 enum mips_symbol_type symbol_type;
1986 rtx offset;
1988 switch (GET_CODE (x))
1990 case HIGH:
1991 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
1992 &symbol_type)
1993 || !mips_split_p[symbol_type])
1994 return 0;
1996 /* This is simply an LUI for normal mode. It is an extended
1997 LI followed by an extended SLL for MIPS16. */
1998 return TARGET_MIPS16 ? 4 : 1;
2000 case CONST_INT:
2001 if (TARGET_MIPS16)
2002 /* Unsigned 8-bit constants can be loaded using an unextended
2003 LI instruction. Unsigned 16-bit constants can be loaded
2004 using an extended LI. Negative constants must be loaded
2005 using LI and then negated. */
2006 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2007 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2008 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2009 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2010 : 0);
2012 return mips_build_integer (codes, INTVAL (x));
2014 case CONST_DOUBLE:
2015 case CONST_VECTOR:
2016 /* Allow zeros for normal mode, where we can use $0. */
2017 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2019 case CONST:
2020 if (CONST_GP_P (x))
2021 return 1;
2023 /* See if we can refer to X directly. */
2024 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2025 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2027 /* Otherwise try splitting the constant into a base and offset.
2028 16-bit offsets can be added using an extra ADDIU. Larger offsets
2029 must be calculated separately and then added to the base. */
2030 split_const (x, &x, &offset);
2031 if (offset != 0)
2033 int n = mips_const_insns (x);
2034 if (n != 0)
2036 if (SMALL_INT (offset))
2037 return n + 1;
2038 else
2039 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2042 return 0;
2044 case SYMBOL_REF:
2045 case LABEL_REF:
2046 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2047 MAX_MACHINE_MODE);
2049 default:
2050 return 0;
2054 /* Return the number of instructions needed to implement INSN,
2055 given that it loads from or stores to MEM. Count extended
2056 MIPS16 instructions as two instructions. */
2059 mips_load_store_insns (rtx mem, rtx insn)
2061 enum machine_mode mode;
2062 bool might_split_p;
2063 rtx set;
2065 gcc_assert (MEM_P (mem));
2066 mode = GET_MODE (mem);
2068 /* Try to prove that INSN does not need to be split. */
2069 might_split_p = true;
2070 if (GET_MODE_BITSIZE (mode) == 64)
2072 set = single_set (insn);
2073 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2074 might_split_p = false;
2077 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2080 /* Return the number of instructions needed for an integer division. */
2083 mips_idiv_insns (void)
2085 int count;
2087 count = 1;
2088 if (TARGET_CHECK_ZERO_DIV)
2090 if (GENERATE_DIVIDE_TRAPS)
2091 count++;
2092 else
2093 count += 2;
2096 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2097 count++;
2098 return count;
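/* Editorial example (not part of the original source): with
   -mcheck-zero-division, the division expands to a DIV plus a TEQ when
   trapping checks are used (cf. -mdivide-traps), giving a count of 2,
   or to a DIV plus a conditional branch and a BREAK otherwise
   (cf. -mdivide-breaks), giving 3.  The -mfix-r4000/-mfix-r4400
   errata workaround adds one further instruction.  */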
2101 /* Emit a move from SRC to DEST. Assume that the move expanders can
2102 handle all moves if !can_create_pseudo_p (). The distinction is
2103 important because, unlike emit_move_insn, the move expanders know
2104 how to force Pmode objects into the constant pool even when the
2105 constant pool address is not itself legitimate. */
2108 mips_emit_move (rtx dest, rtx src)
2110 return (can_create_pseudo_p ()
2111 ? emit_move_insn (dest, src)
2112 : emit_move_insn_1 (dest, src));
2115 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2117 static void
2118 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2120 emit_insn (gen_rtx_SET (VOIDmode, target,
2121 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2124 /* Copy VALUE to a register and return that register. If new pseudos
2125 are allowed, copy it into a new register, otherwise use DEST. */
2127 static rtx
2128 mips_force_temporary (rtx dest, rtx value)
2130 if (can_create_pseudo_p ())
2131 return force_reg (Pmode, value);
2132 else
2134 mips_emit_move (dest, value);
2135 return dest;
2139 /* Emit a call sequence with call pattern PATTERN and return the call
2140 instruction itself (which is not necessarily the last instruction
2141 emitted). LAZY_P is true if the call address is lazily-bound. */
2143 static rtx
2144 mips_emit_call_insn (rtx pattern, bool lazy_p)
2146 rtx insn;
2148 insn = emit_call_insn (pattern);
2150 /* Lazy-binding stubs require $gp to be valid on entry. */
2151 if (lazy_p)
2152 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2154 if (TARGET_USE_GOT)
2156 /* See the comment above load_call<mode> for details. */
2157 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2158 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2159 emit_insn (gen_update_got_version ());
2161 return insn;
2164 /* Return an instruction that copies $gp into register REG. We want
2165 GCC to treat the register's value as constant, so that its value
2166 can be rematerialized on demand. */
2168 static rtx
2169 gen_load_const_gp (rtx reg)
2171 return (Pmode == SImode
2172 ? gen_load_const_gp_si (reg)
2173 : gen_load_const_gp_di (reg));
2176 /* Return a pseudo register that contains the value of $gp throughout
2177 the current function. Such registers are needed by MIPS16 functions,
2178 for which $gp itself is not a valid base register or addition operand. */
2180 static rtx
2181 mips16_gp_pseudo_reg (void)
2183 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2184 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2186 /* Don't emit an instruction to initialize the pseudo register if
2187 we are being called from the tree optimizers' cost-calculation
2188 routines. */
2189 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2190 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2192 rtx insn, scan, after;
2194 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2196 push_topmost_sequence ();
2197 /* We need to emit the initialization after the FUNCTION_BEG
2198 note, so that it will be integrated. */
2199 after = get_insns ();
2200 for (scan = after; scan != NULL_RTX; scan = NEXT_INSN (scan))
2201 if (NOTE_P (scan) && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2203 after = scan;
2204 break;
2206 insn = emit_insn_after (insn, after);
2207 pop_topmost_sequence ();
2209 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2212 return cfun->machine->mips16_gp_pseudo_rtx;
2215 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2216 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2217 constant in that context and can be split into a high part and a LO_SUM.
2218 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2219 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2221 TEMP is as for mips_force_temporary and is used to load the high
2222 part into a register. */
2224 bool
2225 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2227 enum mips_symbol_context context;
2228 enum mips_symbol_type symbol_type;
2229 rtx high;
2231 context = (mode == MAX_MACHINE_MODE
2232 ? SYMBOL_CONTEXT_LEA
2233 : SYMBOL_CONTEXT_MEM);
2234 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2235 || mips_symbol_insns (symbol_type, mode) == 0
2236 || !mips_split_p[symbol_type])
2237 return false;
2239 if (lo_sum_out)
2241 if (symbol_type == SYMBOL_GP_RELATIVE)
2243 if (!can_create_pseudo_p ())
2245 emit_insn (gen_load_const_gp (temp));
2246 high = temp;
2248 else
2249 high = mips16_gp_pseudo_reg ();
2251 else
2253 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2254 high = mips_force_temporary (temp, high);
2256 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2258 return true;
2261 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2262 then add CONST_INT OFFSET to the result. */
2264 static rtx
2265 mips_unspec_address_offset (rtx base, rtx offset,
2266 enum mips_symbol_type symbol_type)
2268 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2269 UNSPEC_ADDRESS_FIRST + symbol_type);
2270 if (offset != const0_rtx)
2271 base = gen_rtx_PLUS (Pmode, base, offset);
2272 return gen_rtx_CONST (Pmode, base);
2275 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2276 type SYMBOL_TYPE. */
2279 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2281 rtx base, offset;
2283 split_const (address, &base, &offset);
2284 return mips_unspec_address_offset (base, offset, symbol_type);
2287 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2288 high part to BASE and return the result. Just return BASE otherwise.
2289 TEMP is as for mips_force_temporary.
2291 The returned expression can be used as the first operand to a LO_SUM. */
2293 static rtx
2294 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2295 enum mips_symbol_type symbol_type)
2297 if (mips_split_p[symbol_type])
2299 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2300 addr = mips_force_temporary (temp, addr);
2301 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2303 return base;
2306 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2307 mips_force_temporary; it is only needed when OFFSET is not a
2308 SMALL_OPERAND. */
2310 static rtx
2311 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2313 if (!SMALL_OPERAND (offset))
2315 rtx high;
2317 if (TARGET_MIPS16)
2319 /* Load the full offset into a register so that we can use
2320 an unextended instruction for the address itself. */
2321 high = GEN_INT (offset);
2322 offset = 0;
2324 else
2326 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2327 high = GEN_INT (CONST_HIGH_PART (offset));
2328 offset = CONST_LOW_PART (offset);
2330 high = mips_force_temporary (temp, high);
2331 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2333 return plus_constant (reg, offset);
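/* Editorial worked example (not part of the original source), for the
   non-MIPS16 path above: with OFFSET 0x1234abcd, CONST_LOW_PART is the
   sign-extended low half, -0x5433, and CONST_HIGH_PART is 0x12350000,
   so the two parts sum back to the original offset while the remaining
   low part still fits a signed 16-bit ADDIU or load/store field.  */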
2336 /* The __tls_get_addr symbol. */
2337 static GTY(()) rtx mips_tls_symbol;
2339 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2340 the TLS symbol we are referencing and TYPE is the symbol type to use
2341 (either global dynamic or local dynamic). V0 is an RTX for the
2342 return value location. */
2344 static rtx
2345 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2347 rtx insn, loc, a0;
2349 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2351 if (!mips_tls_symbol)
2352 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2354 loc = mips_unspec_address (sym, type);
2356 start_sequence ();
2358 emit_insn (gen_rtx_SET (Pmode, a0,
2359 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2360 insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false);
2361 CONST_OR_PURE_CALL_P (insn) = 1;
2362 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2363 insn = get_insns ();
2365 end_sequence ();
2367 return insn;
2370 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2371 its address. The return value will be both a valid address and a valid
2372 SET_SRC (either a REG or a LO_SUM). */
2374 static rtx
2375 mips_legitimize_tls_address (rtx loc)
2377 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2378 enum tls_model model;
2380 if (TARGET_MIPS16)
2382 sorry ("MIPS16 TLS");
2383 return gen_reg_rtx (Pmode);
2386 model = SYMBOL_REF_TLS_MODEL (loc);
2387 /* Only TARGET_ABICALLS code can have more than one module; other
2388 code must be static and should not use a GOT. All TLS models
2389 reduce to local exec in this situation. */
2390 if (!TARGET_ABICALLS)
2391 model = TLS_MODEL_LOCAL_EXEC;
2393 switch (model)
2395 case TLS_MODEL_GLOBAL_DYNAMIC:
2396 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2397 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2398 dest = gen_reg_rtx (Pmode);
2399 emit_libcall_block (insn, dest, v0, loc);
2400 break;
2402 case TLS_MODEL_LOCAL_DYNAMIC:
2403 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2404 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2405 tmp1 = gen_reg_rtx (Pmode);
2407 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2408 share the LDM result with other LD model accesses. */
2409 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2410 UNSPEC_TLS_LDM);
2411 emit_libcall_block (insn, tmp1, v0, eqv);
2413 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2414 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2415 mips_unspec_address (loc, SYMBOL_DTPREL));
2416 break;
2418 case TLS_MODEL_INITIAL_EXEC:
2419 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2420 tmp1 = gen_reg_rtx (Pmode);
2421 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2422 if (Pmode == DImode)
2424 emit_insn (gen_tls_get_tp_di (v1));
2425 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2427 else
2429 emit_insn (gen_tls_get_tp_si (v1));
2430 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2432 dest = gen_reg_rtx (Pmode);
2433 emit_insn (gen_add3_insn (dest, tmp1, v1));
2434 break;
2436 case TLS_MODEL_LOCAL_EXEC:
2437 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2438 if (Pmode == DImode)
2439 emit_insn (gen_tls_get_tp_di (v1));
2440 else
2441 emit_insn (gen_tls_get_tp_si (v1));
2443 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2444 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2445 mips_unspec_address (loc, SYMBOL_TPREL));
2446 break;
2448 default:
2449 gcc_unreachable ();
2451 return dest;
2454 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2455 be legitimized in a way that the generic machinery might not expect,
2456 put the new address in *XLOC and return true. MODE is the mode of
2457 the memory being accessed. */
2459 bool
2460 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2462 rtx base;
2463 HOST_WIDE_INT offset;
2465 if (mips_tls_symbol_p (*xloc))
2467 *xloc = mips_legitimize_tls_address (*xloc);
2468 return true;
2471 /* See if the address can be split into a high part and a LO_SUM. */
2472 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2473 return true;
2475 /* Handle BASE + OFFSET using mips_add_offset. */
2476 mips_split_plus (*xloc, &base, &offset);
2477 if (offset != 0)
2479 if (!mips_valid_base_register_p (base, mode, false))
2480 base = copy_to_mode_reg (Pmode, base);
2481 *xloc = mips_add_offset (NULL, base, offset);
2482 return true;
2484 return false;
2487 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2489 void
2490 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2492 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2493 enum machine_mode mode;
2494 unsigned int i, num_ops;
2495 rtx x;
2497 mode = GET_MODE (dest);
2498 num_ops = mips_build_integer (codes, value);
2500 /* Apply each binary operation to X. Invariant: X is a legitimate
2501 source operand for a SET pattern. */
2502 x = GEN_INT (codes[0].value);
2503 for (i = 1; i < num_ops; i++)
2505 if (!can_create_pseudo_p ())
2507 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2508 x = temp;
2510 else
2511 x = force_reg (mode, x);
2512 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2515 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2518 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2519 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2520 move_operand. */
2522 static void
2523 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2525 rtx base, offset;
2527 /* Split moves of big integers into smaller pieces. */
2528 if (splittable_const_int_operand (src, mode))
2530 mips_move_integer (dest, dest, INTVAL (src));
2531 return;
2534 /* Split moves of symbolic constants into high/low pairs. */
2535 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2537 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2538 return;
2541 /* Generate the appropriate access sequences for TLS symbols. */
2542 if (mips_tls_symbol_p (src))
2544 mips_emit_move (dest, mips_legitimize_tls_address (src));
2545 return;
2548 /* If we have (const (plus symbol offset)), and that expression cannot
2549 be forced into memory, load the symbol first and add in the offset.
2550 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2551 forced into memory, as it usually produces better code. */
2552 split_const (src, &base, &offset);
2553 if (offset != const0_rtx
2554 && (targetm.cannot_force_const_mem (src)
2555 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2557 base = mips_force_temporary (dest, base);
2558 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2559 return;
2562 src = force_const_mem (mode, src);
2564 /* When using explicit relocs, constant pool references are sometimes
2565 not legitimate addresses. */
2566 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2567 mips_emit_move (dest, src);
2570 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2571 sequence that is valid. */
2573 bool
2574 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2576 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2578 mips_emit_move (dest, force_reg (mode, src));
2579 return true;
2582 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2583 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2584 && REG_P (src) && MD_REG_P (REGNO (src))
2585 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2587 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2588 if (GET_MODE_SIZE (mode) <= 4)
2589 emit_insn (gen_mfhilo_si (gen_lowpart (SImode, dest),
2590 gen_lowpart (SImode, src),
2591 gen_rtx_REG (SImode, other_regno)));
2592 else
2593 emit_insn (gen_mfhilo_di (gen_lowpart (DImode, dest),
2594 gen_lowpart (DImode, src),
2595 gen_rtx_REG (DImode, other_regno)));
2596 return true;
2599 /* We need to deal with constants that would be legitimate
2600 immediate_operands but aren't legitimate move_operands. */
2601 if (CONSTANT_P (src) && !move_operand (src, mode))
2603 mips_legitimize_const_move (mode, dest, src);
2604 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2605 return true;
2607 return false;
2610 /* Return true if value X in context CONTEXT is a small-data address
2611 that can be rewritten as a LO_SUM. */
2613 static bool
2614 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2616 enum mips_symbol_type symbol_type;
2618 return (TARGET_EXPLICIT_RELOCS
2619 && mips_symbolic_constant_p (x, context, &symbol_type)
2620 && symbol_type == SYMBOL_GP_RELATIVE);
2623 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2624 containing MEM, or null if none. */
2626 static int
2627 mips_small_data_pattern_1 (rtx *loc, void *data)
2629 enum mips_symbol_context context;
2631 if (GET_CODE (*loc) == LO_SUM)
2632 return -1;
2634 if (MEM_P (*loc))
2636 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2637 return 1;
2638 return -1;
2641 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2642 return mips_rewrite_small_data_p (*loc, context);
2645 /* Return true if OP refers to small data symbols directly, not through
2646 a LO_SUM. */
2648 bool
2649 mips_small_data_pattern_p (rtx op)
2651 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2654 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2655 DATA is the containing MEM, or null if none. */
2657 static int
2658 mips_rewrite_small_data_1 (rtx *loc, void *data)
2660 enum mips_symbol_context context;
2662 if (MEM_P (*loc))
2664 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2665 return -1;
2668 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2669 if (mips_rewrite_small_data_p (*loc, context))
2670 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2672 if (GET_CODE (*loc) == LO_SUM)
2673 return -1;
2675 return 0;
2678 /* Rewrite instruction pattern PATTERN so that it refers to small data
2679 using explicit relocations. */
2682 mips_rewrite_small_data (rtx pattern)
2684 pattern = copy_insn (pattern);
2685 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
2686 return pattern;
2689 /* We need a lot of little routines to check the range of MIPS16 immediate
2690 operands. */
2692 static int
2693 m16_check_op (rtx op, int low, int high, int mask)
2695 return (GET_CODE (op) == CONST_INT
2696 && IN_RANGE (INTVAL (op), low, high)
2697 && (INTVAL (op) & mask) == 0);
2701 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2703 return m16_check_op (op, 0x1, 0x8, 0);
2707 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2709 return m16_check_op (op, -0x8, 0x7, 0);
2713 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2715 return m16_check_op (op, -0x7, 0x8, 0);
2719 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2721 return m16_check_op (op, -0x10, 0xf, 0);
2725 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2727 return m16_check_op (op, -0xf, 0x10, 0);
2731 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2733 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
2737 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2739 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
2743 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2745 return m16_check_op (op, -0x80, 0x7f, 0);
2749 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2751 return m16_check_op (op, -0x7f, 0x80, 0);
2755 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2757 return m16_check_op (op, 0x0, 0xff, 0);
2761 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2763 return m16_check_op (op, -0xff, 0x0, 0);
2767 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2769 return m16_check_op (op, -0x1, 0xfe, 0);
2773 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2775 return m16_check_op (op, 0x0, 0xff << 2, 3);
2779 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2781 return m16_check_op (op, -0xff << 2, 0x0, 3);
2785 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2787 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
2791 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2793 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
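/* Editorial note (illustrative, not part of the original source): in the
   predicate names above, "simm"/"uimm" give the signedness, the first
   number the field width in bits, the trailing number the required
   multiple, and an "n" prefix the negated range.  For example,
   m16_uimm8_4 accepts the multiples of 4 from 0 to 0xff << 2 = 1020,
   and m16_nsimm5_1 accepts constants whose negation fits the signed
   5-bit range.  */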
2796 /* The cost of loading values from the constant pool. It should be
2797 larger than the cost of any constant we want to synthesize inline. */
2798 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2800 /* Return the cost of X when used as an operand to the MIPS16 instruction
2801 that implements CODE. Return -1 if there is no such instruction, or if
2802 X is not a valid immediate operand for it. */
2804 static int
2805 mips16_constant_cost (int code, HOST_WIDE_INT x)
2807 switch (code)
2809 case ASHIFT:
2810 case ASHIFTRT:
2811 case LSHIFTRT:
2812 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2813 other shifts are extended. The shift patterns truncate the shift
2814 count to the right size, so there are no out-of-range values. */
2815 if (IN_RANGE (x, 1, 8))
2816 return 0;
2817 return COSTS_N_INSNS (1);
2819 case PLUS:
2820 if (IN_RANGE (x, -128, 127))
2821 return 0;
2822 if (SMALL_OPERAND (x))
2823 return COSTS_N_INSNS (1);
2824 return -1;
2826 case LEU:
2827 /* Like LE, but reject the always-true case. */
2828 if (x == -1)
2829 return -1;
2830 case LE:
2831 /* We add 1 to the immediate and use SLT. */
2832 x += 1;
2833 case XOR:
2834 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2835 case LT:
2836 case LTU:
2837 if (IN_RANGE (x, 0, 255))
2838 return 0;
2839 if (SMALL_OPERAND_UNSIGNED (x))
2840 return COSTS_N_INSNS (1);
2841 return -1;
2843 case EQ:
2844 case NE:
2845 /* Equality comparisons with 0 are cheap. */
2846 if (x == 0)
2847 return 0;
2848 return -1;
2850 default:
2851 return -1;
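/* Editorial example (not part of the original source): the fall-through
   chain above costs "x <= C" as "x < C + 1".  Thus "x <= 254" costs
   nothing extra (255 fits the unextended 8-bit immediate),
   "x <= 1000" needs one extended instruction (1001 fits 16 bits
   unsigned), and "x <= 0x12345" has no immediate form at all and is
   reported as -1.  */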
2855 /* Return true if there is a non-MIPS16 instruction that implements CODE
2856 and if that instruction accepts X as an immediate operand. */
2858 static int
2859 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2861 switch (code)
2863 case ASHIFT:
2864 case ASHIFTRT:
2865 case LSHIFTRT:
2866 /* All shift counts are truncated to a valid constant. */
2867 return true;
2869 case ROTATE:
2870 case ROTATERT:
2871 /* Likewise rotates, if the target supports rotates at all. */
2872 return ISA_HAS_ROR;
2874 case AND:
2875 case IOR:
2876 case XOR:
2877 /* These instructions take 16-bit unsigned immediates. */
2878 return SMALL_OPERAND_UNSIGNED (x);
2880 case PLUS:
2881 case LT:
2882 case LTU:
2883 /* These instructions take 16-bit signed immediates. */
2884 return SMALL_OPERAND (x);
2886 case EQ:
2887 case NE:
2888 case GT:
2889 case GTU:
2890 /* The "immediate" forms of these instructions are really
2891 implemented as comparisons with register 0. */
2892 return x == 0;
2894 case GE:
2895 case GEU:
2896 /* Likewise, meaning that the only valid immediate operand is 1. */
2897 return x == 1;
2899 case LE:
2900 /* We add 1 to the immediate and use SLT. */
2901 return SMALL_OPERAND (x + 1);
2903 case LEU:
2904 /* Likewise SLTU, but reject the always-true case. */
2905 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2907 case SIGN_EXTRACT:
2908 case ZERO_EXTRACT:
2909 /* The bit position and size are immediate operands. */
2910 return ISA_HAS_EXT_INS;
2912 default:
2913 /* By default assume that $0 can be used for 0. */
2914 return x == 0;
2918 /* Return the cost of binary operation X, given that the instruction
2919 sequence for a word-sized or smaller operation has cost SINGLE_COST
2920 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2922 static int
2923 mips_binary_cost (rtx x, int single_cost, int double_cost)
2925 int cost;
2927 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
2928 cost = double_cost;
2929 else
2930 cost = single_cost;
2931 return (cost
2932 + rtx_cost (XEXP (x, 0), 0)
2933 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
2936 /* Return the cost of floating-point multiplications of mode MODE. */
2938 static int
2939 mips_fp_mult_cost (enum machine_mode mode)
2941 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
2944 /* Return the cost of floating-point divisions of mode MODE. */
2946 static int
2947 mips_fp_div_cost (enum machine_mode mode)
2949 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
2952 /* Return the cost of sign-extending OP to mode MODE, not including the
2953 cost of OP itself. */
2955 static int
2956 mips_sign_extend_cost (enum machine_mode mode, rtx op)
2958 if (MEM_P (op))
2959 /* Extended loads are as cheap as unextended ones. */
2960 return 0;
2962 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2963 /* A sign extension from SImode to DImode in 64-bit mode is free. */
2964 return 0;
2966 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
2967 /* We can use SEB or SEH. */
2968 return COSTS_N_INSNS (1);
2970 /* We need to use a shift left and a shift right. */
2971 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2974 /* Return the cost of zero-extending OP to mode MODE, not including the
2975 cost of OP itself. */
2977 static int
2978 mips_zero_extend_cost (enum machine_mode mode, rtx op)
2980 if (MEM_P (op))
2981 /* Extended loads are as cheap as unextended ones. */
2982 return 0;
2984 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2985 /* We need a shift left by 32 bits and a shift right by 32 bits. */
2986 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2988 if (GENERATE_MIPS16E)
2989 /* We can use ZEB or ZEH. */
2990 return COSTS_N_INSNS (1);
2992 if (TARGET_MIPS16)
2993 /* We need to load 0xff or 0xffff into a register and use AND. */
2994 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
2996 /* We can use ANDI. */
2997 return COSTS_N_INSNS (1);
3000 /* Implement TARGET_RTX_COSTS. */
3002 static bool
3003 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3005 enum machine_mode mode = GET_MODE (x);
3006 bool float_mode_p = FLOAT_MODE_P (mode);
3007 int cost;
3008 rtx addr;
3010 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3011 appear in the instruction stream, and the cost of a comparison is
3012 really the cost of the branch or scc condition. At the time of
3013 writing, GCC only uses an explicit outer COMPARE code when optabs
3014 is testing whether a constant is expensive enough to force into a
3015 register. We want optabs to pass such constants through the MIPS
3016 expanders instead, so make all constants very cheap here. */
3017 if (outer_code == COMPARE)
3019 gcc_assert (CONSTANT_P (x));
3020 *total = 0;
3021 return true;
3024 switch (code)
3026 case CONST_INT:
3027 /* Treat *clear_upper32-style ANDs as having zero cost in the
3028 second operand. The cost is entirely in the first operand.
3030 ??? This is needed because we would otherwise try to CSE
3031 the constant operand. Although that's the right thing for
3032 instructions that continue to be a register operation throughout
3033 compilation, it is disastrous for instructions that could
3034 later be converted into a memory operation. */
3035 if (TARGET_64BIT
3036 && outer_code == AND
3037 && UINTVAL (x) == 0xffffffff)
3039 *total = 0;
3040 return true;
3043 if (TARGET_MIPS16)
3045 cost = mips16_constant_cost (outer_code, INTVAL (x));
3046 if (cost >= 0)
3048 *total = cost;
3049 return true;
3052 else
3054 /* When not optimizing for size, we care more about the cost
3055 of hot code, and hot code is often in a loop. If a constant
3056 operand needs to be forced into a register, we will often be
3057 able to hoist the constant load out of the loop, so the load
3058 should not contribute to the cost. */
3059 if (!optimize_size
3060 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3062 *total = 0;
3063 return true;
3066 /* Fall through. */
3068 case CONST:
3069 case SYMBOL_REF:
3070 case LABEL_REF:
3071 case CONST_DOUBLE:
3072 if (force_to_mem_operand (x, VOIDmode))
3074 *total = COSTS_N_INSNS (1);
3075 return true;
3077 cost = mips_const_insns (x);
3078 if (cost > 0)
3080 /* If the constant is likely to be stored in a GPR, SETs of
3081 single-insn constants are as cheap as register sets; we
3082 never want to CSE them.
3084 Don't reduce the cost of storing a floating-point zero in
3085 FPRs. If we have a zero in an FPR for other reasons, we
3086 can get better cfg-cleanup and delayed-branch results by
3087 using it consistently, rather than using $0 sometimes and
3088 an FPR at other times. Also, moves between floating-point
3089 registers are sometimes cheaper than (D)MTC1 $0. */
3090 if (cost == 1
3091 && outer_code == SET
3092 && !(float_mode_p && TARGET_HARD_FLOAT))
3093 cost = 0;
3094 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3095 want to CSE the constant itself. It is usually better to
3096 have N copies of the last operation in the sequence and one
3097 shared copy of the other operations. (Note that this is
3098 not true for MIPS16 code, where the final operation in the
3099 sequence is often an extended instruction.)
3101 Also, if we have a CONST_INT, we don't know whether it is
3102 for a word or doubleword operation, so we cannot rely on
3103 the result of mips_build_integer. */
3104 else if (!TARGET_MIPS16
3105 && (outer_code == SET || mode == VOIDmode))
3106 cost = 1;
3107 *total = COSTS_N_INSNS (cost);
3108 return true;
3110 /* The value will need to be fetched from the constant pool. */
3111 *total = CONSTANT_POOL_COST;
3112 return true;
3114 case MEM:
3115 /* If the address is legitimate, return the number of
3116 instructions it needs. */
3117 addr = XEXP (x, 0);
3118 cost = mips_address_insns (addr, mode, true);
3119 if (cost > 0)
3121 *total = COSTS_N_INSNS (cost + 1);
3122 return true;
3124 /* Check for a scaled indexed address. */
3125 if (mips_lwxs_address_p (addr))
3127 *total = COSTS_N_INSNS (2);
3128 return true;
3130 /* Otherwise use the default handling. */
3131 return false;
3133 case FFS:
3134 *total = COSTS_N_INSNS (6);
3135 return false;
3137 case NOT:
3138 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3139 return false;
3141 case AND:
3142 /* Check for a *clear_upper32 pattern and treat it like a zero
3143 extension. See the pattern's comment for details. */
3144 if (TARGET_64BIT
3145 && mode == DImode
3146 && CONST_INT_P (XEXP (x, 1))
3147 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3149 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3150 + rtx_cost (XEXP (x, 0), 0));
3151 return true;
3153 /* Fall through. */
3155 case IOR:
3156 case XOR:
3157 /* Double-word operations use two single-word operations. */
3158 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3159 return true;
3161 case ASHIFT:
3162 case ASHIFTRT:
3163 case LSHIFTRT:
3164 case ROTATE:
3165 case ROTATERT:
3166 if (CONSTANT_P (XEXP (x, 1)))
3167 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3168 else
3169 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3170 return true;
3172 case ABS:
3173 if (float_mode_p)
3174 *total = mips_cost->fp_add;
3175 else
3176 *total = COSTS_N_INSNS (4);
3177 return false;
3179 case LO_SUM:
3180 /* Low-part immediates need an extended MIPS16 instruction. */
3181 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3182 + rtx_cost (XEXP (x, 0), 0));
3183 return true;
3185 case LT:
3186 case LTU:
3187 case LE:
3188 case LEU:
3189 case GT:
3190 case GTU:
3191 case GE:
3192 case GEU:
3193 case EQ:
3194 case NE:
3195 case UNORDERED:
3196 case LTGT:
3197 /* Branch comparisons have VOIDmode, so use the first operand's
3198 mode instead. */
3199 mode = GET_MODE (XEXP (x, 0));
3200 if (FLOAT_MODE_P (mode))
3202 *total = mips_cost->fp_add;
3203 return false;
3205 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3206 return true;
3208 case MINUS:
3209 if (float_mode_p
3210 && ISA_HAS_NMADD_NMSUB (mode)
3211 && TARGET_FUSED_MADD
3212 && !HONOR_NANS (mode)
3213 && !HONOR_SIGNED_ZEROS (mode))
3215 /* See if we can use NMADD or NMSUB. See mips.md for the
3216 associated patterns. */
3217 rtx op0 = XEXP (x, 0);
3218 rtx op1 = XEXP (x, 1);
3219 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3221 *total = (mips_fp_mult_cost (mode)
3222 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3223 + rtx_cost (XEXP (op0, 1), 0)
3224 + rtx_cost (op1, 0));
3225 return true;
3227 if (GET_CODE (op1) == MULT)
3229 *total = (mips_fp_mult_cost (mode)
3230 + rtx_cost (op0, 0)
3231 + rtx_cost (XEXP (op1, 0), 0)
3232 + rtx_cost (XEXP (op1, 1), 0));
3233 return true;
3236 /* Fall through. */
3238 case PLUS:
3239 if (float_mode_p)
3241 /* If this is part of a MADD or MSUB, treat the PLUS as
3242 being free. */
3243 if (ISA_HAS_FP4
3244 && TARGET_FUSED_MADD
3245 && GET_CODE (XEXP (x, 0)) == MULT)
3246 *total = 0;
3247 else
3248 *total = mips_cost->fp_add;
3249 return false;
3252 /* Double-word operations require three single-word operations and
3253 an SLTU. The MIPS16 version then needs to move the result of
3254 the SLTU from $24 to a MIPS16 register. */
3255 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3256 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3257 return true;
3259 case NEG:
3260 if (float_mode_p
3261 && ISA_HAS_NMADD_NMSUB (mode)
3262 && TARGET_FUSED_MADD
3263 && !HONOR_NANS (mode)
3264 && HONOR_SIGNED_ZEROS (mode))
3266 /* See if we can use NMADD or NMSUB. See mips.md for the
3267 associated patterns. */
3268 rtx op = XEXP (x, 0);
3269 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3270 && GET_CODE (XEXP (op, 0)) == MULT)
3272 *total = (mips_fp_mult_cost (mode)
3273 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3274 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3275 + rtx_cost (XEXP (op, 1), 0));
3276 return true;
3280 if (float_mode_p)
3281 *total = mips_cost->fp_add;
3282 else
3283 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3284 return false;
3286 case MULT:
3287 if (float_mode_p)
3288 *total = mips_fp_mult_cost (mode);
3289 else if (mode == DImode && !TARGET_64BIT)
3290 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3291 where the mulsidi3 always includes an MFHI and an MFLO. */
3292 *total = (optimize_size
3293 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3294 : mips_cost->int_mult_si * 3 + 6);
3295 else if (optimize_size)
3296 *total = (ISA_HAS_MUL3 ? 1 : 2);
3297 else if (mode == DImode)
3298 *total = mips_cost->int_mult_di;
3299 else
3300 *total = mips_cost->int_mult_si;
3301 return false;
3303 case DIV:
3304 /* Check for a reciprocal. */
3305 if (float_mode_p
3306 && ISA_HAS_FP4
3307 && flag_unsafe_math_optimizations
3308 && XEXP (x, 0) == CONST1_RTX (mode))
3310 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3311 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3312 division as being free. */
3313 *total = rtx_cost (XEXP (x, 1), 0);
3314 else
3315 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3316 return true;
3318 /* Fall through. */
3320 case SQRT:
3321 case MOD:
3322 if (float_mode_p)
3324 *total = mips_fp_div_cost (mode);
3325 return false;
3327 /* Fall through. */
3329 case UDIV:
3330 case UMOD:
3331 if (optimize_size)
3333 /* It is our responsibility to make division by a power of 2
3334 as cheap as 2 register additions if we want the division
3335 expanders to be used for such operations; see the setting
3336 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3337 should always produce shorter code than using
3338 expand_sdiv2_pow2. */
3339 if (TARGET_MIPS16
3340 && CONST_INT_P (XEXP (x, 1))
3341 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3343 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3344 return true;
3346 *total = COSTS_N_INSNS (mips_idiv_insns ());
3348 else if (mode == DImode)
3349 *total = mips_cost->int_div_di;
3350 else
3351 *total = mips_cost->int_div_si;
3352 return false;
3354 case SIGN_EXTEND:
3355 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3356 return false;
3358 case ZERO_EXTEND:
3359 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3360 return false;
3362 case FLOAT:
3363 case UNSIGNED_FLOAT:
3364 case FIX:
3365 case FLOAT_EXTEND:
3366 case FLOAT_TRUNCATE:
3367 *total = mips_cost->fp_add;
3368 return false;
3370 default:
3371 return false;
3375 /* Implement TARGET_ADDRESS_COST. */
3377 static int
3378 mips_address_cost (rtx addr)
3380 return mips_address_insns (addr, SImode, false);
3383 /* Return one word of double-word value OP, taking into account the fixed
3384 endianness of certain registers. HIGH_P is true to select the high part,
3385 false to select the low part. */
3388 mips_subword (rtx op, bool high_p)
3390 unsigned int byte, offset;
3391 enum machine_mode mode;
3393 mode = GET_MODE (op);
3394 if (mode == VOIDmode)
3395 mode = DImode;
3397 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3398 byte = UNITS_PER_WORD;
3399 else
3400 byte = 0;
3402 if (FP_REG_RTX_P (op))
3404 /* Paired FPRs are always ordered little-endian. */
3405 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3406 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3409 if (MEM_P (op))
3410 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3412 return simplify_gen_subreg (word_mode, op, mode, byte);
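/* Editorial example (not part of the original source): for a DImode
   value held in GPRs on a 32-bit big-endian target, the high word is at
   byte offset 0 and the low word at byte offset 4, so HIGH_P selects
   byte 0 there; a little-endian target swaps the two.  Paired FPRs are
   handled separately because their low word always lives in the
   even-numbered register, regardless of byte order.  */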
3415 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3417 bool
3418 mips_split_64bit_move_p (rtx dest, rtx src)
3420 if (TARGET_64BIT)
3421 return false;
3423 /* FPR-to-FPR moves can be done in a single instruction, if they're
3424 allowed at all. */
3425 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3426 return false;
3428 /* Check for floating-point loads and stores. */
3429 if (ISA_HAS_LDC1_SDC1)
3431 if (FP_REG_RTX_P (dest) && MEM_P (src))
3432 return false;
3433 if (FP_REG_RTX_P (src) && MEM_P (dest))
3434 return false;
3436 return true;
3439 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3440 this function handles 64-bit moves for which mips_split_64bit_move_p
3441 holds. For 64-bit targets, this function handles 128-bit moves. */
3443 void
3444 mips_split_doubleword_move (rtx dest, rtx src)
3446 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3448 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3449 emit_insn (gen_move_doubleword_fprdi (dest, src));
3450 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3451 emit_insn (gen_move_doubleword_fprdf (dest, src));
3452 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3453 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3454 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3455 emit_insn (gen_move_doubleword_fprtf (dest, src));
3456 else
3457 gcc_unreachable ();
3459 else
3461 /* The operation can be split into two normal moves. Decide in
3462 which order to do them. */
3463 rtx low_dest;
3465 low_dest = mips_subword (dest, false);
3466 if (REG_P (low_dest)
3467 && reg_overlap_mentioned_p (low_dest, src))
3469 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3470 mips_emit_move (low_dest, mips_subword (src, false));
3472 else
3474 mips_emit_move (low_dest, mips_subword (src, false));
3475 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3480 /* Return the appropriate instructions to move SRC into DEST. Assume
3481 that SRC is operand 1 and DEST is operand 0. */
3483 const char *
3484 mips_output_move (rtx dest, rtx src)
3486 enum rtx_code dest_code, src_code;
3487 enum machine_mode mode;
3488 enum mips_symbol_type symbol_type;
3489 bool dbl_p;
3491 dest_code = GET_CODE (dest);
3492 src_code = GET_CODE (src);
3493 mode = GET_MODE (dest);
3494 dbl_p = (GET_MODE_SIZE (mode) == 8);
3496 if (dbl_p && mips_split_64bit_move_p (dest, src))
3497 return "#";
3499 if ((src_code == REG && GP_REG_P (REGNO (src)))
3500 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3502 if (dest_code == REG)
3504 if (GP_REG_P (REGNO (dest)))
3505 return "move\t%0,%z1";
3507 if (MD_REG_P (REGNO (dest)))
3508 return "mt%0\t%z1";
3510 if (DSP_ACC_REG_P (REGNO (dest)))
3512 static char retval[] = "mt__\t%z1,%q0";
3514 retval[2] = reg_names[REGNO (dest)][4];
3515 retval[3] = reg_names[REGNO (dest)][5];
3516 return retval;
3519 if (FP_REG_P (REGNO (dest)))
3520 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3522 if (ALL_COP_REG_P (REGNO (dest)))
3524 static char retval[] = "dmtc_\t%z1,%0";
3526 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3527 return dbl_p ? retval : retval + 1;
3530 if (dest_code == MEM)
3531 return dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0";
3533 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3535 if (src_code == REG)
3537 /* Handled by separate patterns. */
3538 gcc_assert (!MD_REG_P (REGNO (src)));
3540 if (DSP_ACC_REG_P (REGNO (src)))
3542 static char retval[] = "mf__\t%0,%q1";
3544 retval[2] = reg_names[REGNO (src)][4];
3545 retval[3] = reg_names[REGNO (src)][5];
3546 return retval;
3549 if (FP_REG_P (REGNO (src)))
3550 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3552 if (ALL_COP_REG_P (REGNO (src)))
3554 static char retval[] = "dmfc_\t%0,%1";
3556 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3557 return dbl_p ? retval : retval + 1;
3560 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3561 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3564 if (src_code == MEM)
3565 return dbl_p ? "ld\t%0,%1" : "lw\t%0,%1";
3567 if (src_code == CONST_INT)
3569 /* Don't use the X format for the operand itself, because that
3570 will give out-of-range numbers for 64-bit hosts and 32-bit
3571 targets. */
3572 if (!TARGET_MIPS16)
3573 return "li\t%0,%1\t\t\t# %X1";
3575 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3576 return "li\t%0,%1";
3578 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3579 return "#";
3582 if (src_code == HIGH)
3583 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3585 if (CONST_GP_P (src))
3586 return "move\t%0,%1";
3588 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3589 && mips_lo_relocs[symbol_type] != 0)
3591 /* A signed 16-bit constant formed by applying a relocation
3592 operator to a symbolic address. */
3593 gcc_assert (!mips_split_p[symbol_type]);
3594 return "li\t%0,%R1";
3597 if (symbolic_operand (src, VOIDmode))
3599 gcc_assert (TARGET_MIPS16
3600 ? TARGET_MIPS16_TEXT_LOADS
3601 : !TARGET_EXPLICIT_RELOCS);
3602 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3605 if (src_code == REG && FP_REG_P (REGNO (src)))
3607 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3609 if (GET_MODE (dest) == V2SFmode)
3610 return "mov.ps\t%0,%1";
3611 else
3612 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3615 if (dest_code == MEM)
3616 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3618 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3620 if (src_code == MEM)
3621 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3623 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3625 static char retval[] = "l_c_\t%0,%1";
3627 retval[1] = (dbl_p ? 'd' : 'w');
3628 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3629 return retval;
3631 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3633 static char retval[] = "s_c_\t%1,%0";
3635 retval[1] = (dbl_p ? 'd' : 'w');
3636 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3637 return retval;
3639 gcc_unreachable ();
3642 /* Return true if CMP1 is a suitable second operand for integer ordering
3643 test CODE. See also the *sCC patterns in mips.md. */
3645 static bool
3646 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
3648 switch (code)
3650 case GT:
3651 case GTU:
3652 return reg_or_0_operand (cmp1, VOIDmode);
3654 case GE:
3655 case GEU:
3656 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3658 case LT:
3659 case LTU:
3660 return arith_operand (cmp1, VOIDmode);
3662 case LE:
3663 return sle_operand (cmp1, VOIDmode);
3665 case LEU:
3666 return sleu_operand (cmp1, VOIDmode);
3668 default:
3669 gcc_unreachable ();
3673 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
3674 integer ordering test *CODE, or if an equivalent combination can
3675 be formed by adjusting *CODE and *CMP1. When returning true, update
3676 *CODE and *CMP1 with the chosen code and operand, otherwise leave
3677 them alone. */
3679 static bool
3680 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
3681 enum machine_mode mode)
3683 HOST_WIDE_INT plus_one;
3685 if (mips_int_order_operand_ok_p (*code, *cmp1))
3686 return true;
3688 if (GET_CODE (*cmp1) == CONST_INT)
3689 switch (*code)
3691 case LE:
3692 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3693 if (INTVAL (*cmp1) < plus_one)
3695 *code = LT;
3696 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3697 return true;
3699 break;
3701 case LEU:
3702 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3703 if (plus_one != 0)
3705 *code = LTU;
3706 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3707 return true;
3709 break;
3711 default:
3712 break;
3714 return false;
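/* Editorial example (not part of the original source): "x <= 0x7fff"
   has no direct SLTI encoding because 0x8000 does not fit a signed
   16-bit immediate, so the function above rewrites it as "x < 0x8000"
   with 0x8000 forced into a register.  For LEU, the rewrite is skipped
   when the constant is the all-ones value, since the comparison is then
   always true and PLUS_ONE would wrap to zero.  */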
3717 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
3718 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
3719 is nonnull, it's OK to set TARGET to the inverse of the result and
3720 flip *INVERT_PTR instead. */
3722 static void
3723 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
3724 rtx target, rtx cmp0, rtx cmp1)
3726 enum machine_mode mode;
3728 /* First see if there is a MIPS instruction that can do this operation.
3729 If not, try doing the same for the inverse operation. If that also
3730 fails, force CMP1 into a register and try again. */
3731 mode = GET_MODE (cmp0);
3732 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
3733 mips_emit_binary (code, target, cmp0, cmp1);
3734 else
3736 enum rtx_code inv_code = reverse_condition (code);
3737 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
3739 cmp1 = force_reg (mode, cmp1);
3740 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
3742 else if (invert_ptr == 0)
3744 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3745 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3746 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3748 else
3750 *invert_ptr = !*invert_ptr;
3751 mips_emit_binary (inv_code, target, cmp0, cmp1);
3756 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3757 The register will have the same mode as CMP0. */
3759 static rtx
3760 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3762 if (cmp1 == const0_rtx)
3763 return cmp0;
3765 if (uns_arith_operand (cmp1, VOIDmode))
3766 return expand_binop (GET_MODE (cmp0), xor_optab,
3767 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3769 return expand_binop (GET_MODE (cmp0), sub_optab,
3770 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
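/* Editor's note: an illustrative sketch, not part of the original source.
   MIPS has no scc instruction that compares two registers for equality,
   so equality tests are reduced to comparisons against zero:

       a == b   <=>   (a ^ b) == 0    (XORI, when b is a 16-bit
                                       unsigned constant)
       a == b   <=>   (a - b) == 0    (general case)

   The value returned here is therefore zero exactly when CMP0 == CMP1.  */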
3773 /* Convert *CODE into a code that can be used in a floating-point
3774 scc instruction (C.cond.fmt). Return true if the values of
3775 the condition code registers will be inverted, with 0 indicating
3776 that the condition holds. */
3778 static bool
3779 mips_reversed_fp_cond (enum rtx_code *code)
3781 switch (*code)
3783 case NE:
3784 case LTGT:
3785 case ORDERED:
3786 *code = reverse_condition_maybe_unordered (*code);
3787 return true;
3789 default:
3790 return false;
3794 /* Convert a comparison into something that can be used in a branch or
3795 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3796 being compared and *CODE is the code used to compare them.
3798 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3799 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
3800 otherwise any standard branch condition can be used. The standard branch
3801 conditions are:
3803 - EQ or NE between two registers.
3804 - any comparison between a register and zero. */
3806 static void
3807 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3809 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3811 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3813 *op0 = cmp_operands[0];
3814 *op1 = cmp_operands[1];
3816 else if (*code == EQ || *code == NE)
3818 if (need_eq_ne_p)
3820 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3821 *op1 = const0_rtx;
3823 else
3825 *op0 = cmp_operands[0];
3826 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3829 else
3831 /* The comparison needs a separate scc instruction. Store the
3832 result of the scc in *OP0 and compare it against zero. */
3833 bool invert = false;
3834 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3835 mips_emit_int_order_test (*code, &invert, *op0,
3836 cmp_operands[0], cmp_operands[1]);
3837 *code = (invert ? EQ : NE);
3838 *op1 = const0_rtx;
3841 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3843 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3844 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3845 *code = NE;
3846 *op1 = const0_rtx;
3848 else
3850 enum rtx_code cmp_code;
3852 /* Floating-point tests use a separate C.cond.fmt comparison to
3853 set a condition code register. The branch or conditional move
3854 will then compare that register against zero.
3856 Set CMP_CODE to the code of the comparison instruction and
3857 *CODE to the code that the branch or move should use. */
3858 cmp_code = *code;
3859 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
3860 *op0 = (ISA_HAS_8CC
3861 ? gen_reg_rtx (CCmode)
3862 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3863 *op1 = const0_rtx;
3864 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3868 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3869 Store the result in TARGET and return true if successful.
3871 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
3873 bool
3874 mips_expand_scc (enum rtx_code code, rtx target)
3876 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3877 return false;
3879 if (code == EQ || code == NE)
3881 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3882 mips_emit_binary (code, target, zie, const0_rtx);
3884 else
3885 mips_emit_int_order_test (code, 0, target,
3886 cmp_operands[0], cmp_operands[1]);
3887 return true;
3890 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
3891 CODE and jump to OPERANDS[0] if the condition holds. */
3893 void
3894 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
3896 rtx op0, op1, condition;
3898 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3899 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3900 emit_jump_insn (gen_condjump (condition, operands[0]));
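/* Editor's note: an illustrative sketch, not part of the original source.
   For a branch such as "if (a < b) goto L" there is no blt-style
   instruction taking two registers, so the comparison is split into an
   scc followed by a branch against zero, roughly:

       slt   $t, $a, $b        # $t = (a < b)
       bne   $t, $0, L

   EQ/NE between two registers, and any comparison against zero, map
   directly onto hardware branch instructions instead.  */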
3903 /* Implement:
3905 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3906 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3908 void
3909 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3910 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3912 rtx cmp_result;
3913 bool reversed_p;
3915 reversed_p = mips_reversed_fp_cond (&cond);
3916 cmp_result = gen_reg_rtx (CCV2mode);
3917 emit_insn (gen_scc_ps (cmp_result,
3918 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3919 if (reversed_p)
3920 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3921 cmp_result));
3922 else
3923 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3924 cmp_result));
3927 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
3928 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
3929 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
3931 void
3932 mips_expand_conditional_move (rtx *operands)
3934 enum rtx_code code;
3935 rtx cond, op0, op1;
3937 code = GET_CODE (operands[1]);
3938 mips_emit_compare (&code, &op0, &op1, true);
3939 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
3940 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3941 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
3942 operands[2], operands[3])));
3945 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
3946 then trap if the condition holds. */
3948 void
3949 mips_expand_conditional_trap (enum rtx_code code)
3951 rtx op0, op1;
3952 enum machine_mode mode;
3954 /* MIPS conditional trap instructions don't have GT or LE flavors,
3955 so we must swap the operands and convert to LT and GE respectively. */
3956 switch (code)
3958 case GT:
3959 case LE:
3960 case GTU:
3961 case LEU:
3962 code = swap_condition (code);
3963 op0 = cmp_operands[1];
3964 op1 = cmp_operands[0];
3965 break;
3967 default:
3968 op0 = cmp_operands[0];
3969 op1 = cmp_operands[1];
3970 break;
3973 mode = GET_MODE (cmp_operands[0]);
3974 op0 = force_reg (mode, op0);
3975 if (!arith_operand (op1, mode))
3976 op1 = force_reg (mode, op1);
3978 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3979 gen_rtx_fmt_ee (code, mode, op0, op1),
3980 const0_rtx));
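/* Editor's note: an illustrative sketch, not part of the original source.
   Because there is no TGT/TLE family, a request such as "trap if a > b"
   is emitted with swapped operands as "trap if b < a"; with a in $4 and
   b in $5 that is simply:

       tlt   $5, $4

   EQ, NE, LT, LTU, GE and GEU requests already match a hardware trap
   instruction and are emitted unchanged.  */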
3983 /* Initialize *CUM for a call to a function of type FNTYPE. */
3985 void
3986 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
3988 memset (cum, 0, sizeof (*cum));
3989 cum->prototype = (fntype && prototype_p (fntype));
3990 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
3993 /* Fill INFO with information about a single argument. CUM is the
3994 cumulative state for earlier arguments. MODE is the mode of this
3995 argument and TYPE is its type (if known). NAMED is true if this
3996 is a named (fixed) argument rather than a variable one. */
3998 static void
3999 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4000 enum machine_mode mode, tree type, int named)
4002 bool doubleword_aligned_p;
4003 unsigned int num_bytes, num_words, max_regs;
4005 /* Work out the size of the argument. */
4006 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4007 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4009 /* Decide whether it should go in a floating-point register, assuming
4010 one is free. Later code checks for availability.
4012 The checks against UNITS_PER_FPVALUE handle the soft-float and
4013 single-float cases. */
4014 switch (mips_abi)
4016 case ABI_EABI:
4017 /* The EABI conventions have traditionally been defined in terms
4018 of TYPE_MODE, regardless of the actual type. */
4019 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4020 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4021 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4022 break;
4024 case ABI_32:
4025 case ABI_O64:
4026 /* Only leading floating-point scalars are passed in
4027 floating-point registers. We also handle vector floats the same
4028 way, which is OK because they are not covered by the standard ABI. */
4029 info->fpr_p = (!cum->gp_reg_found
4030 && cum->arg_number < 2
4031 && (type == 0
4032 || SCALAR_FLOAT_TYPE_P (type)
4033 || VECTOR_FLOAT_TYPE_P (type))
4034 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4035 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4036 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4037 break;
4039 case ABI_N32:
4040 case ABI_64:
4041 /* Scalar, complex and vector floating-point types are passed in
4042 floating-point registers, as long as this is a named rather
4043 than a variable argument. */
4044 info->fpr_p = (named
4045 && (type == 0 || FLOAT_TYPE_P (type))
4046 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4047 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4048 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4049 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4051 /* ??? According to the ABI documentation, the real and imaginary
4052 parts of complex floats should be passed in individual registers.
4053 The real and imaginary parts of stack arguments are supposed
4054 to be contiguous and there should be an extra word of padding
4055 at the end.
4057 This has two problems. First, it makes it impossible to use a
4058 single "void *" va_list type, since register and stack arguments
4059 are passed differently. (At the time of writing, MIPSpro cannot
4060 handle complex float varargs correctly.) Second, it's unclear
4061 what should happen when there is only one register free.
4063 For now, we assume that named complex floats should go into FPRs
4064 if there are two FPRs free, otherwise they should be passed in the
4065 same way as a struct containing two floats. */
4066 if (info->fpr_p
4067 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4068 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4070 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4071 info->fpr_p = false;
4072 else
4073 num_words = 2;
4075 break;
4077 default:
4078 gcc_unreachable ();
4081 /* See whether the argument has doubleword alignment. */
4082 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4084 /* Set REG_OFFSET to the register count we're interested in.
4085 The EABI allocates the floating-point registers separately,
4086 but the other ABIs allocate them like integer registers. */
4087 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4088 ? cum->num_fprs
4089 : cum->num_gprs);
4091 /* Advance to an even register if the argument is doubleword-aligned. */
4092 if (doubleword_aligned_p)
4093 info->reg_offset += info->reg_offset & 1;
4095 /* Work out the offset of a stack argument. */
4096 info->stack_offset = cum->stack_words;
4097 if (doubleword_aligned_p)
4098 info->stack_offset += info->stack_offset & 1;
4100 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4102 /* Partition the argument between registers and stack. */
4103 info->reg_words = MIN (num_words, max_regs);
4104 info->stack_words = num_words - info->reg_words;
4107 /* INFO describes a register argument that has the normal format for the
4108 argument's mode. Return the register it uses, assuming that FPRs are
4109 available if HARD_FLOAT_P. */
4111 static unsigned int
4112 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4114 if (!info->fpr_p || !hard_float_p)
4115 return GP_ARG_FIRST + info->reg_offset;
4116 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4117 /* In o32, the second argument is always passed in $f14
4118 for TARGET_DOUBLE_FLOAT, regardless of whether the
4119 first argument was a word or doubleword. */
4120 return FP_ARG_FIRST + 2;
4121 else
4122 return FP_ARG_FIRST + info->reg_offset;
4125 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4127 static bool
4128 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4130 return !TARGET_OLDABI;
4133 /* Implement FUNCTION_ARG. */
4135 rtx
4136 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4137 tree type, int named)
4139 struct mips_arg_info info;
4141 /* We will be called with a mode of VOIDmode after the last argument
4142 has been seen. Whatever we return will be passed to the call expander.
4143 If we need a MIPS16 fp_code, return a REG with the code stored as
4144 the mode. */
4145 if (mode == VOIDmode)
4147 if (TARGET_MIPS16 && cum->fp_code != 0)
4148 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4149 else
4150 return NULL;
4153 mips_get_arg_info (&info, cum, mode, type, named);
4155 /* Return straight away if the whole argument is passed on the stack. */
4156 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4157 return NULL;
4159 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4160 contains a double in its entirety, then that 64-bit chunk is passed
4161 in a floating-point register. */
4162 if (TARGET_NEWABI
4163 && TARGET_HARD_FLOAT
4164 && named
4165 && type != 0
4166 && TREE_CODE (type) == RECORD_TYPE
4167 && TYPE_SIZE_UNIT (type)
4168 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4170 tree field;
4172 /* First check to see if there is any such field. */
4173 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4174 if (TREE_CODE (field) == FIELD_DECL
4175 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4176 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4177 && host_integerp (bit_position (field), 0)
4178 && int_bit_position (field) % BITS_PER_WORD == 0)
4179 break;
4181 if (field != 0)
4183 /* Now handle the special case by returning a PARALLEL
4184 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4185 chunks are passed in registers. */
4186 unsigned int i;
4187 HOST_WIDE_INT bitpos;
4188 rtx ret;
4190 /* assign_parms checks the mode of ENTRY_PARM, so we must
4191 use the actual mode here. */
4192 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4194 bitpos = 0;
4195 field = TYPE_FIELDS (type);
4196 for (i = 0; i < info.reg_words; i++)
4198 rtx reg;
4200 for (; field; field = TREE_CHAIN (field))
4201 if (TREE_CODE (field) == FIELD_DECL
4202 && int_bit_position (field) >= bitpos)
4203 break;
4205 if (field
4206 && int_bit_position (field) == bitpos
4207 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4208 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4209 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4210 else
4211 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4213 XVECEXP (ret, 0, i)
4214 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4215 GEN_INT (bitpos / BITS_PER_UNIT));
4217 bitpos += BITS_PER_WORD;
4219 return ret;
4223 /* Handle the n32/n64 conventions for passing complex floating-point
4224 arguments in FPR pairs. The real part goes in the lower register
4225 and the imaginary part goes in the upper register. */
4226 if (TARGET_NEWABI
4227 && info.fpr_p
4228 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4230 rtx real, imag;
4231 enum machine_mode inner;
4232 unsigned int regno;
4234 inner = GET_MODE_INNER (mode);
4235 regno = FP_ARG_FIRST + info.reg_offset;
4236 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4238 /* Real part in registers, imaginary part on stack. */
4239 gcc_assert (info.stack_words == info.reg_words);
4240 return gen_rtx_REG (inner, regno);
4242 else
4244 gcc_assert (info.stack_words == 0);
4245 real = gen_rtx_EXPR_LIST (VOIDmode,
4246 gen_rtx_REG (inner, regno),
4247 const0_rtx);
4248 imag = gen_rtx_EXPR_LIST (VOIDmode,
4249 gen_rtx_REG (inner,
4250 regno + info.reg_words / 2),
4251 GEN_INT (GET_MODE_SIZE (inner)));
4252 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4256 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
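/* Editor's note: an illustrative sketch, not part of the original source.
   Under n32/n64 with hard float, a named argument of type

       struct s { double d; long l; };

   is described by a PARALLEL whose first 64-bit chunk ("d") is passed in
   a floating-point argument register and whose second chunk ("l") in the
   corresponding general register, per the record-type rule above.  A
   named "double _Complex" argument instead occupies an FPR pair: real
   part in the lower-numbered register, imaginary part in the upper.  */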
4259 /* Implement FUNCTION_ARG_ADVANCE. */
4261 void
4262 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4263 tree type, int named)
4265 struct mips_arg_info info;
4267 mips_get_arg_info (&info, cum, mode, type, named);
4269 if (!info.fpr_p)
4270 cum->gp_reg_found = true;
4272 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4273 an explanation of what this code does. It assumes that we're using
4274 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4275 in FPRs. */
4276 if (cum->arg_number < 2 && info.fpr_p)
4277 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4279 /* Advance the register count. This has the effect of setting
4280 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4281 argument required us to skip the final GPR and pass the whole
4282 argument on the stack. */
4283 if (mips_abi != ABI_EABI || !info.fpr_p)
4284 cum->num_gprs = info.reg_offset + info.reg_words;
4285 else if (info.reg_words > 0)
4286 cum->num_fprs += MAX_FPRS_PER_FMT;
4288 /* Advance the stack word count. */
4289 if (info.stack_words > 0)
4290 cum->stack_words = info.stack_offset + info.stack_words;
4292 cum->arg_number++;
4295 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4297 static int
4298 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4299 enum machine_mode mode, tree type, bool named)
4301 struct mips_arg_info info;
4303 mips_get_arg_info (&info, cum, mode, type, named);
4304 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4307 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4308 PARM_BOUNDARY bits of alignment, but will be given anything up
4309 to STACK_BOUNDARY bits if the type requires it. */
4311 int
4312 mips_function_arg_boundary (enum machine_mode mode, tree type)
4314 unsigned int alignment;
4316 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4317 if (alignment < PARM_BOUNDARY)
4318 alignment = PARM_BOUNDARY;
4319 if (alignment > STACK_BOUNDARY)
4320 alignment = STACK_BOUNDARY;
4321 return alignment;
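/* Editor's note: an illustrative sketch, not part of the original source.
   Under o32 (PARM_BOUNDARY == 32, STACK_BOUNDARY == 64) a "char"
   argument is raised to 32-bit alignment, while a "double" keeps its
   64-bit alignment; anything requesting more than STACK_BOUNDARY bits
   is clamped.  The 64-bit case is what triggers the "advance to an even
   register" logic in mips_get_arg_info above.  */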
4324 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4325 upward rather than downward. In other words, return true if the
4326 first byte of the stack slot has useful data, false if the last
4327 byte does. */
4329 bool
4330 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4332 /* On little-endian targets, the first byte of every stack argument
4333 is passed in the first byte of the stack slot. */
4334 if (!BYTES_BIG_ENDIAN)
4335 return true;
4337 /* Otherwise, integral types are padded downward: the last byte of a
4338 stack argument is passed in the last byte of the stack slot. */
4339 if (type != 0
4340 ? (INTEGRAL_TYPE_P (type)
4341 || POINTER_TYPE_P (type)
4342 || FIXED_POINT_TYPE_P (type))
4343 : (SCALAR_INT_MODE_P (mode)
4344 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4345 return false;
4347 /* Big-endian o64 pads floating-point arguments downward. */
4348 if (mips_abi == ABI_O64)
4349 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4350 return false;
4352 /* Other types are padded upward for o32, o64, n32 and n64. */
4353 if (mips_abi != ABI_EABI)
4354 return true;
4356 /* Arguments smaller than a stack slot are padded downward. */
4357 if (mode != BLKmode)
4358 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4359 else
4360 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
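/* Editor's note: an illustrative sketch, not part of the original source.
   On a big-endian o32 target a "char" argument that ends up on the
   stack occupies the last byte of its 4-byte slot (downward padding),
   whereas a small structure starts at the first byte of its slot
   (upward padding).  On little-endian targets everything is padded
   upward, as the first test above shows.  */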
4363 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4364 if the least significant byte of the register has useful data. Return
4365 the opposite if the most significant byte does. */
4367 bool
4368 mips_pad_reg_upward (enum machine_mode mode, tree type)
4370 /* No shifting is required for floating-point arguments. */
4371 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4372 return !BYTES_BIG_ENDIAN;
4374 /* Otherwise, apply the same padding to register arguments as we do
4375 to stack arguments. */
4376 return mips_pad_arg_upward (mode, type);
4379 /* Return nonzero when an argument must be passed by reference. */
4381 static bool
4382 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4383 enum machine_mode mode, const_tree type,
4384 bool named ATTRIBUTE_UNUSED)
4386 if (mips_abi == ABI_EABI)
4388 int size;
4390 /* ??? How should SCmode be handled? */
4391 if (mode == DImode || mode == DFmode
4392 || mode == DQmode || mode == UDQmode
4393 || mode == DAmode || mode == UDAmode)
4394 return 0;
4396 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4397 return size == -1 || size > UNITS_PER_WORD;
4399 else
4401 /* If we have a variable-sized parameter, we have no choice. */
4402 return targetm.calls.must_pass_in_stack (mode, type);
4406 /* Implement TARGET_CALLEE_COPIES. */
4408 static bool
4409 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4410 enum machine_mode mode ATTRIBUTE_UNUSED,
4411 const_tree type ATTRIBUTE_UNUSED, bool named)
4413 return mips_abi == ABI_EABI && named;
4416 /* See whether VALTYPE is a record whose fields should be returned in
4417 floating-point registers. If so, return the number of fields and
4418 list them in FIELDS (which should have two elements). Return 0
4419 otherwise.
4421 For n32 & n64, a structure with one or two fields is returned in
4422 floating-point registers as long as every field has a floating-point
4423 type. */
4425 static int
4426 mips_fpr_return_fields (const_tree valtype, tree *fields)
4428 tree field;
4429 int i;
4431 if (!TARGET_NEWABI)
4432 return 0;
4434 if (TREE_CODE (valtype) != RECORD_TYPE)
4435 return 0;
4437 i = 0;
4438 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4440 if (TREE_CODE (field) != FIELD_DECL)
4441 continue;
4443 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4444 return 0;
4446 if (i == 2)
4447 return 0;
4449 fields[i++] = field;
4451 return i;
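/* Editor's note: an illustrative sketch, not part of the original source.
   Under n32/n64:

       struct s1 { float x; double y; };   // two FP fields
       struct s2 { float x; int y; };      // contains a non-FP field

   s1 passes the test above and is returned in a pair of floating-point
   registers ($f0/$f2); s2 fails it and is returned in the
   general-purpose return registers instead.  */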
4454 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4455 a value in the most significant part of $2/$3 if:
4457 - the target is big-endian;
4459 - the value has a structure or union type (we generalize this to
4460 cover aggregates from other languages too); and
4462 - the structure is not returned in floating-point registers. */
4464 static bool
4465 mips_return_in_msb (const_tree valtype)
4467 tree fields[2];
4469 return (TARGET_NEWABI
4470 && TARGET_BIG_ENDIAN
4471 && AGGREGATE_TYPE_P (valtype)
4472 && mips_fpr_return_fields (valtype, fields) == 0);
4475 /* Return true if the function return value MODE will get returned in a
4476 floating-point register. */
4478 static bool
4479 mips_return_mode_in_fpr_p (enum machine_mode mode)
4481 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4482 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4483 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4484 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4487 /* Return the representation of an FPR return register when the
4488 value being returned in FP_RETURN has mode VALUE_MODE and the
4489 return type itself has mode TYPE_MODE. On NewABI targets,
4490 the two modes may be different for structures like:
4492 struct __attribute__((packed)) foo { float f; }
4494 where we return the SFmode value of "f" in FP_RETURN, but where
4495 the structure itself has mode BLKmode. */
4497 static rtx
4498 mips_return_fpr_single (enum machine_mode type_mode,
4499 enum machine_mode value_mode)
4501 rtx x;
4503 x = gen_rtx_REG (value_mode, FP_RETURN);
4504 if (type_mode != value_mode)
4506 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4507 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4509 return x;
4512 /* Return a composite value in a pair of floating-point registers.
4513 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4514 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4515 complete value.
4517 For n32 & n64, $f0 always holds the first value and $f2 the second.
4518 Otherwise the values are packed together as closely as possible. */
4520 static rtx
4521 mips_return_fpr_pair (enum machine_mode mode,
4522 enum machine_mode mode1, HOST_WIDE_INT offset1,
4523 enum machine_mode mode2, HOST_WIDE_INT offset2)
4525 int inc;
4527 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4528 return gen_rtx_PARALLEL
4529 (mode,
4530 gen_rtvec (2,
4531 gen_rtx_EXPR_LIST (VOIDmode,
4532 gen_rtx_REG (mode1, FP_RETURN),
4533 GEN_INT (offset1)),
4534 gen_rtx_EXPR_LIST (VOIDmode,
4535 gen_rtx_REG (mode2, FP_RETURN + inc),
4536 GEN_INT (offset2))));
4540 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4541 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4542 VALTYPE is null and MODE is the mode of the return value. */
4544 rtx
4545 mips_function_value (const_tree valtype, enum machine_mode mode)
4547 if (valtype)
4549 tree fields[2];
4550 int unsigned_p;
4552 mode = TYPE_MODE (valtype);
4553 unsigned_p = TYPE_UNSIGNED (valtype);
4555 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4556 we must promote the mode just as PROMOTE_MODE does. */
4557 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4559 /* Handle structures whose fields are returned in $f0/$f2. */
4560 switch (mips_fpr_return_fields (valtype, fields))
4562 case 1:
4563 return mips_return_fpr_single (mode,
4564 TYPE_MODE (TREE_TYPE (fields[0])));
4566 case 2:
4567 return mips_return_fpr_pair (mode,
4568 TYPE_MODE (TREE_TYPE (fields[0])),
4569 int_byte_position (fields[0]),
4570 TYPE_MODE (TREE_TYPE (fields[1])),
4571 int_byte_position (fields[1]));
4574 /* If a value is passed in the most significant part of a register, see
4575 whether we have to round the mode up to a whole number of words. */
4576 if (mips_return_in_msb (valtype))
4578 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4579 if (size % UNITS_PER_WORD != 0)
4581 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4582 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4586 /* For EABI, the class of return register depends entirely on MODE.
4587 For example, "struct { some_type x; }" and "union { some_type x; }"
4588 are returned in the same way as a bare "some_type" would be.
4589 Other ABIs only use FPRs for scalar, complex or vector types. */
4590 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4591 return gen_rtx_REG (mode, GP_RETURN);
4594 if (!TARGET_MIPS16)
4596 /* Handle long doubles for n32 & n64. */
4597 if (mode == TFmode)
4598 return mips_return_fpr_pair (mode,
4599 DImode, 0,
4600 DImode, GET_MODE_SIZE (mode) / 2);
4602 if (mips_return_mode_in_fpr_p (mode))
4604 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4605 return mips_return_fpr_pair (mode,
4606 GET_MODE_INNER (mode), 0,
4607 GET_MODE_INNER (mode),
4608 GET_MODE_SIZE (mode) / 2);
4609 else
4610 return gen_rtx_REG (mode, FP_RETURN);
4614 return gen_rtx_REG (mode, GP_RETURN);
4617 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4618 all BLKmode objects are returned in memory. Under the n32, n64
4619 and embedded ABIs, small structures are returned in a register.
4620 Objects with varying size must still be returned in memory, of
4621 course. */
4623 static bool
4624 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4626 return (TARGET_OLDABI
4627 ? TYPE_MODE (type) == BLKmode
4628 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
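/* Editor's note: an illustrative sketch, not part of the original source.
   Under n32/n64, 2 * UNITS_PER_WORD is 16 bytes, so a 16-byte structure
   is still returned in registers, while a 17-byte structure, or one
   whose size is not a compile-time constant (int_size_in_bytes
   returning -1), is returned in memory.  */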
4631 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
4633 static void
4634 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4635 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4636 int no_rtl)
4638 CUMULATIVE_ARGS local_cum;
4639 int gp_saved, fp_saved;
4641 /* The caller has advanced CUM up to, but not beyond, the last named
4642 argument. Advance a local copy of CUM past the last "real" named
4643 argument, to find out how many registers are left over. */
4644 local_cum = *cum;
4645 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
4647 /* Find out how many registers we need to save. */
4648 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4649 fp_saved = (EABI_FLOAT_VARARGS_P
4650 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4651 : 0);
4653 if (!no_rtl)
4655 if (gp_saved > 0)
4657 rtx ptr, mem;
4659 ptr = plus_constant (virtual_incoming_args_rtx,
4660 REG_PARM_STACK_SPACE (cfun->decl)
4661 - gp_saved * UNITS_PER_WORD);
4662 mem = gen_frame_mem (BLKmode, ptr);
4663 set_mem_alias_set (mem, get_varargs_alias_set ());
4665 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4666 mem, gp_saved);
4668 if (fp_saved > 0)
4670 /* We can't use move_block_from_reg, because it will use
4671 the wrong mode. */
4672 enum machine_mode mode;
4673 int off, i;
4675 /* Set OFF to the offset from virtual_incoming_args_rtx of
4676 the first float register. The FP save area lies below
4677 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4678 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
4679 off -= fp_saved * UNITS_PER_FPREG;
4681 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4683 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4684 i += MAX_FPRS_PER_FMT)
4686 rtx ptr, mem;
4688 ptr = plus_constant (virtual_incoming_args_rtx, off);
4689 mem = gen_frame_mem (mode, ptr);
4690 set_mem_alias_set (mem, get_varargs_alias_set ());
4691 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4692 off += UNITS_PER_HWFPVALUE;
4696 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4697 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4698 + fp_saved * UNITS_PER_FPREG);
4701 /* Implement TARGET_BUILTIN_VA_LIST. */
4703 static tree
4704 mips_build_builtin_va_list (void)
4706 if (EABI_FLOAT_VARARGS_P)
4708 /* We keep 3 pointers, and two offsets.
4710 Two pointers are to the overflow area, which starts at the CFA.
4711 One of these is constant, for addressing into the GPR save area
4712 below it. The other is advanced up the stack through the
4713 overflow region.
4715 The third pointer is to the bottom of the GPR save area.
4716 Since the FPR save area is just below it, we can address
4717 FPR slots off this pointer.
4719 We also keep two one-byte offsets, which are to be subtracted
4720 from the constant pointers to yield addresses in the GPR and
4721 FPR save areas. These are downcounted as float or non-float
4722 arguments are used, and when they get to zero, the argument
4723 must be obtained from the overflow region. */
4724 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4725 tree array, index;
4727 record = lang_hooks.types.make_type (RECORD_TYPE);
4729 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4730 ptr_type_node);
4731 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4732 ptr_type_node);
4733 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4734 ptr_type_node);
4735 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4736 unsigned_char_type_node);
4737 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4738 unsigned_char_type_node);
4739 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4740 warn on every user file. */
4741 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4742 array = build_array_type (unsigned_char_type_node,
4743 build_index_type (index));
4744 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4746 DECL_FIELD_CONTEXT (f_ovfl) = record;
4747 DECL_FIELD_CONTEXT (f_gtop) = record;
4748 DECL_FIELD_CONTEXT (f_ftop) = record;
4749 DECL_FIELD_CONTEXT (f_goff) = record;
4750 DECL_FIELD_CONTEXT (f_foff) = record;
4751 DECL_FIELD_CONTEXT (f_res) = record;
4753 TYPE_FIELDS (record) = f_ovfl;
4754 TREE_CHAIN (f_ovfl) = f_gtop;
4755 TREE_CHAIN (f_gtop) = f_ftop;
4756 TREE_CHAIN (f_ftop) = f_goff;
4757 TREE_CHAIN (f_goff) = f_foff;
4758 TREE_CHAIN (f_foff) = f_res;
4760 layout_type (record);
4761 return record;
4763 else if (TARGET_IRIX && TARGET_IRIX6)
4764 /* On IRIX 6, this type is 'char *'. */
4765 return build_pointer_type (char_type_node);
4766 else
4767 /* Otherwise, we use 'void *'. */
4768 return ptr_type_node;
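/* Editor's note: an illustrative sketch, not part of the original source.
   The EABI va_list built above corresponds roughly to the following C
   type; the field names come from the build_decl calls:

       typedef struct {
         void *__overflow_argptr;    // next stack (overflow) argument
         void *__gpr_top;            // just past the GPR save area
         void *__fpr_top;            // just past the FPR save area
         unsigned char __gpr_offset; // unread bytes left in GPR area
         unsigned char __fpr_offset; // unread bytes left in FPR area
         unsigned char __reserved[sizeof (void *) - 2];  // explicit pad
       } va_list;

   The next GPR argument lives at __gpr_top - __gpr_offset, and likewise
   for the FPR area, with both offsets counting down to zero.  */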
4771 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
4773 static void
4774 mips_va_start (tree valist, rtx nextarg)
4776 if (EABI_FLOAT_VARARGS_P)
4778 const CUMULATIVE_ARGS *cum;
4779 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4780 tree ovfl, gtop, ftop, goff, foff;
4781 tree t;
4782 int gpr_save_area_size;
4783 int fpr_save_area_size;
4784 int fpr_offset;
4786 cum = &crtl->args.info;
4787 gpr_save_area_size
4788 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4789 fpr_save_area_size
4790 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4792 f_ovfl = TYPE_FIELDS (va_list_type_node);
4793 f_gtop = TREE_CHAIN (f_ovfl);
4794 f_ftop = TREE_CHAIN (f_gtop);
4795 f_goff = TREE_CHAIN (f_ftop);
4796 f_foff = TREE_CHAIN (f_goff);
4798 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4799 NULL_TREE);
4800 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4801 NULL_TREE);
4802 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4803 NULL_TREE);
4804 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4805 NULL_TREE);
4806 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4807 NULL_TREE);
4809 /* Emit code to initialize OVFL, which points to the next varargs
4810 stack argument. CUM->STACK_WORDS gives the number of stack
4811 words used by named arguments. */
4812 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4813 if (cum->stack_words > 0)
4814 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4815 size_int (cum->stack_words * UNITS_PER_WORD));
4816 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4817 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4819 /* Emit code to initialize GTOP, the top of the GPR save area. */
4820 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4821 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4822 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4824 /* Emit code to initialize FTOP, the top of the FPR save area.
4825 This address is gpr_save_area_bytes below GTOP, rounded
4826 down to the next fp-aligned boundary. */
4827 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4828 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4829 fpr_offset &= -UNITS_PER_FPVALUE;
4830 if (fpr_offset)
4831 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4832 size_int (-fpr_offset));
4833 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4834 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4836 /* Emit code to initialize GOFF, the offset from GTOP of the
4837 next GPR argument. */
4838 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4839 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
4840 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4842 /* Likewise emit code to initialize FOFF, the offset from FTOP
4843 of the next FPR argument. */
4844 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4845 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
4846 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4848 else
4850 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4851 std_expand_builtin_va_start (valist, nextarg);
4855 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
4857 static tree
4858 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4860 tree addr;
4861 bool indirect_p;
4863 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4864 if (indirect_p)
4865 type = build_pointer_type (type);
4867 if (!EABI_FLOAT_VARARGS_P)
4868 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4869 else
4871 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4872 tree ovfl, top, off, align;
4873 HOST_WIDE_INT size, rsize, osize;
4874 tree t, u;
4876 f_ovfl = TYPE_FIELDS (va_list_type_node);
4877 f_gtop = TREE_CHAIN (f_ovfl);
4878 f_ftop = TREE_CHAIN (f_gtop);
4879 f_goff = TREE_CHAIN (f_ftop);
4880 f_foff = TREE_CHAIN (f_goff);
4882 /* Let:
4884 TOP be the top of the GPR or FPR save area;
4885 OFF be the offset from TOP of the next register;
4886 ADDR_RTX be the address of the argument;
4887 SIZE be the number of bytes in the argument type;
4888 RSIZE be the number of bytes used to store the argument
4889 when it's in the register save area; and
4890 OSIZE be the number of bytes used to store it when it's
4891 in the stack overflow area.
4893 The code we want is:
4895 1: off &= -rsize; // round down
4896 2: if (off != 0)
4897 3: {
4898 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
4899 5: off -= rsize;
4900 6: }
4901 7: else
4902 8: {
4903 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
4904 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
4905 11: ovfl += osize;
4906 12: }
4908 [1] and [9] can sometimes be optimized away. */
4910 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4911 NULL_TREE);
4912 size = int_size_in_bytes (type);
4914 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4915 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4917 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4918 NULL_TREE);
4919 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4920 NULL_TREE);
4922 /* When va_start saves FPR arguments to the stack, each slot
4923 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
4924 argument's precision. */
4925 rsize = UNITS_PER_HWFPVALUE;
4927 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4928 (= PARM_BOUNDARY bits). This can be different from RSIZE
4929 in two cases:
4931 (1) On 32-bit targets when TYPE is a structure such as:
4933 struct s { float f; };
4935 Such structures are passed in paired FPRs, so RSIZE
4936 will be 8 bytes. However, the structure only takes
4937 up 4 bytes of memory, so OSIZE will only be 4.
4939 (2) In combinations such as -mgp64 -msingle-float
4940 -fshort-double. Doubles passed in registers will then take
4941 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
4942 stack take up UNITS_PER_WORD bytes. */
4943 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4945 else
4947 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4948 NULL_TREE);
4949 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4950 NULL_TREE);
4951 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4952 if (rsize > UNITS_PER_WORD)
4954 /* [1] Emit code for: off &= -rsize. */
4955 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4956 build_int_cst (NULL_TREE, -rsize));
4957 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4958 gimplify_and_add (t, pre_p);
4960 osize = rsize;
4963 /* [2] Emit code to branch if off == 0. */
4964 t = build2 (NE_EXPR, boolean_type_node, off,
4965 build_int_cst (TREE_TYPE (off), 0));
4966 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4968 /* [5] Emit code for: off -= rsize. We do this as a form of
4969 post-decrement not available to C. */
4970 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4971 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4973 /* [4] Emit code for:
4974 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
4975 t = fold_convert (sizetype, t);
4976 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4977 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4978 if (BYTES_BIG_ENDIAN && rsize > size)
4980 u = size_int (rsize - size);
4981 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4983 COND_EXPR_THEN (addr) = t;
4985 if (osize > UNITS_PER_WORD)
4987 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
4988 u = size_int (osize - 1);
4989 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4990 t = fold_convert (sizetype, t);
4991 u = size_int (-osize);
4992 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4993 t = fold_convert (TREE_TYPE (ovfl), t);
4994 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4996 else
4997 align = NULL;
4999 /* [10, 11] Emit code for:
5000 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5001 ovfl += osize. */
5002 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5003 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5004 if (BYTES_BIG_ENDIAN && osize > size)
5006 u = size_int (osize - size);
5007 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5010 /* String [9] and [10, 11] together. */
5011 if (align)
5012 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5013 COND_EXPR_ELSE (addr) = t;
5015 addr = fold_convert (build_pointer_type (type), addr);
5016 addr = build_va_arg_indirect_ref (addr);
5019 if (indirect_p)
5020 addr = build_va_arg_indirect_ref (addr);
5022 return addr;
5025 /* A chained list of functions for which mips16_build_call_stub has already
5026 generated a stub. NAME is the name of the function and FP_RET_P is true
5027 if the function returns a value in floating-point registers. */
5028 struct mips16_stub {
5029 struct mips16_stub *next;
5030 char *name;
5031 bool fp_ret_p;
5032 };
5033 static struct mips16_stub *mips16_stubs;
5035 /* Return the two-character string that identifies floating-point
5036 return mode MODE in the name of a MIPS16 function stub. */
5038 static const char *
5039 mips16_call_stub_mode_suffix (enum machine_mode mode)
5041 if (mode == SFmode)
5042 return "sf";
5043 else if (mode == DFmode)
5044 return "df";
5045 else if (mode == SCmode)
5046 return "sc";
5047 else if (mode == DCmode)
5048 return "dc";
5049 else if (mode == V2SFmode)
5050 return "df";
5051 else
5052 gcc_unreachable ();
5055 /* Write instructions to move a 32-bit value between general register
5056 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5057 from GPREG to FPREG and 'f' to move in the opposite direction. */
5059 static void
5060 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5062 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5063 reg_names[gpreg], reg_names[fpreg]);
5066 /* Likewise for 64-bit values. */
5068 static void
5069 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5071 if (TARGET_64BIT)
5072 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5073 reg_names[gpreg], reg_names[fpreg]);
5074 else if (TARGET_FLOAT64)
5076 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5077 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5078 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5079 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5081 else
5083 /* Move the least-significant word. */
5084 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5085 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5086 /* ...then the most significant word. */
5087 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5088 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
5092 /* Write out code to move floating-point arguments into or out of
5093 general registers. FP_CODE is the code describing which arguments
5094 are present (see the comment above the definition of CUMULATIVE_ARGS
5095 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5097 static void
5098 mips_output_args_xfer (int fp_code, char direction)
5100 unsigned int gparg, fparg, f;
5101 CUMULATIVE_ARGS cum;
5103 /* This code only works for o32 and o64. */
5104 gcc_assert (TARGET_OLDABI);
5106 mips_init_cumulative_args (&cum, NULL);
5108 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5110 enum machine_mode mode;
5111 struct mips_arg_info info;
5113 if ((f & 3) == 1)
5114 mode = SFmode;
5115 else if ((f & 3) == 2)
5116 mode = DFmode;
5117 else
5118 gcc_unreachable ();
5120 mips_get_arg_info (&info, &cum, mode, NULL, true);
5121 gparg = mips_arg_regno (&info, false);
5122 fparg = mips_arg_regno (&info, true);
5124 if (mode == SFmode)
5125 mips_output_32bit_xfer (direction, gparg, fparg);
5126 else
5127 mips_output_64bit_xfer (direction, gparg, fparg);
5129 mips_function_arg_advance (&cum, mode, NULL, true);
5133 /* Write a MIPS16 stub for the current function. This stub is used
5134 for functions which take arguments in the floating-point registers.
5135 It is normal-mode code that moves the floating-point arguments
5136 into the general registers and then jumps to the MIPS16 code. */
5138 static void
5139 mips16_build_function_stub (void)
5141 const char *fnname, *separator;
5142 char *secname, *stubname;
5143 tree stubdecl;
5144 unsigned int f;
5146 /* Create the name of the stub, and its unique section. */
5147 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5148 fnname = targetm.strip_name_encoding (fnname);
5149 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5150 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5152 /* Build a decl for the stub. */
5153 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5154 build_function_type (void_type_node, NULL_TREE));
5155 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5156 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5158 /* Output a comment. */
5159 fprintf (asm_out_file, "\t# Stub function for %s (",
5160 current_function_name ());
5161 separator = "";
5162 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5164 fprintf (asm_out_file, "%s%s", separator,
5165 (f & 3) == 1 ? "float" : "double");
5166 separator = ", ";
5168 fprintf (asm_out_file, ")\n");
5170 /* Write the preamble leading up to the function declaration. */
5171 fprintf (asm_out_file, "\t.set\tnomips16\n");
5172 switch_to_section (function_section (stubdecl));
5173 ASM_OUTPUT_ALIGN (asm_out_file,
5174 floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5176 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5177 within a .ent, and we cannot emit another .ent. */
5178 if (!FUNCTION_NAME_ALREADY_DECLARED)
5180 fputs ("\t.ent\t", asm_out_file);
5181 assemble_name (asm_out_file, stubname);
5182 fputs ("\n", asm_out_file);
5185 /* Start the definition proper. */
5186 assemble_name (asm_out_file, stubname);
5187 fputs (":\n", asm_out_file);
5189 /* Load the address of the MIPS16 function into $at. Do this first so
5190 that targets with coprocessor interlocks can use an MFC1 to fill the
5191 delay slot. */
5192 fprintf (asm_out_file, "\t.set\tnoat\n");
5193 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5194 assemble_name (asm_out_file, fnname);
5195 fprintf (asm_out_file, "\n");
5197 /* Move the arguments from floating-point registers to general registers. */
5198 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5200 /* Jump to the MIPS16 function. */
5201 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5202 fprintf (asm_out_file, "\t.set\tat\n");
5204 if (!FUNCTION_NAME_ALREADY_DECLARED)
5206 fputs ("\t.end\t", asm_out_file);
5207 assemble_name (asm_out_file, stubname);
5208 fputs ("\n", asm_out_file);
5211 switch_to_section (function_section (current_function_decl));
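/* Editor's note: an illustrative sketch, not part of the original source.
   For a MIPS16 function f (float, double) on a big-endian o32 target
   with 32-bit FPRs, the stub emitted above looks roughly like:

       .section .mips16.fn.f
       .set    nomips16
   __fn_stub_f:
       .set    noat
       la      $1, f            # address of the MIPS16 body
       mfc1    $4, $f12         # float argument:  $f12 -> $4
       mfc1    $7, $f14         # double argument: $f14/$f15 -> $6/$7
       mfc1    $6, $f15
       jr      $1
       .set    at
*/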
5214 /* The current function is a MIPS16 function that returns a value in an FPR.
5215 Copy the return value from its soft-float to its hard-float location.
5216 libgcc2 has special non-MIPS16 helper functions for each case. */
5218 static void
5219 mips16_copy_fpr_return_value (void)
5221 rtx fn, insn, arg, call;
5222 tree id, return_type;
5223 enum machine_mode return_mode;
5225 return_type = DECL_RESULT (current_function_decl);
5226 return_mode = DECL_MODE (return_type);
5228 id = get_identifier (ACONCAT (("__mips16_ret_",
5229 mips16_call_stub_mode_suffix (return_mode),
5230 NULL)));
5231 fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5232 arg = gen_rtx_REG (return_mode, GP_RETURN);
5233 call = gen_call_value_internal (arg, fn, const0_rtx);
5234 insn = mips_emit_call_insn (call, false);
5235 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
5238 /* Consider building a stub for a MIPS16 call to function FN.
5239 RETVAL is the location of the return value, or null if this is
5240 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5241 arguments and FP_CODE is the code built by mips_function_arg;
5242 see the comment above CUMULATIVE_ARGS for details.
5244 If a stub was needed, emit the call and return the call insn itself.
5245 Return null otherwise.
5247 A stub is needed for calls to functions that, in normal mode,
5248 receive arguments in FPRs or return values in FPRs. The stub
5249 copies the arguments from their soft-float positions to their
5250 hard-float positions, calls the real function, then copies the
5251 return value from its hard-float position to its soft-float
5252 position.
5254 We emit a JAL to FN even when FN might need a stub. If FN turns out
5255 to be to a non-MIPS16 function, the linker automatically redirects
5256 the JAL to the stub, otherwise the JAL continues to call FN directly. */
5258 static rtx
5259 mips16_build_call_stub (rtx retval, rtx fn, rtx args_size, int fp_code)
5261 const char *fnname;
5262 bool fp_ret_p;
5263 struct mips16_stub *l;
5264 rtx insn;
5266 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5267 we were invoked with the -msoft-float option. */
5268 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5269 return NULL_RTX;
5271 /* Figure out whether the value might come back in a floating-point
5272 register. */
5273 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5275 /* We don't need to do anything if there were no floating-point
5276 arguments and the value will not be returned in a floating-point
5277 register. */
5278 if (fp_code == 0 && !fp_ret_p)
5279 return NULL_RTX;
5281 /* We don't need to do anything if this is a call to a special
5282 MIPS16 support function. */
5283 if (GET_CODE (fn) == SYMBOL_REF
5284 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5285 return NULL_RTX;
5287 /* This code will only work for the o32 and o64 ABIs. The other ABIs
5288 require more sophisticated support. */
5289 gcc_assert (TARGET_OLDABI);
5291 /* If we're calling via a function pointer, use one of the magic
5292 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5293 Each stub expects the function address to arrive in register $2. */
5294 if (GET_CODE (fn) != SYMBOL_REF)
5296 char buf[30];
5297 tree id;
5298 rtx stub_fn, insn;
5300 /* Create a SYMBOL_REF for the libgcc.a function. */
5301 if (fp_ret_p)
5302 sprintf (buf, "__mips16_call_stub_%s_%d",
5303 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5304 fp_code);
5305 else
5306 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5307 id = get_identifier (buf);
5308 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5310 /* Load the target function into $2. */
5311 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5313 /* Emit the call. */
5314 if (retval == NULL_RTX)
5315 insn = gen_call_internal (stub_fn, args_size);
5316 else
5317 insn = gen_call_value_internal (retval, stub_fn, args_size);
5318 insn = mips_emit_call_insn (insn, false);
5320 /* Tell GCC that this call does indeed use the value of $2. */
5321 CALL_INSN_FUNCTION_USAGE (insn) =
5322 gen_rtx_EXPR_LIST (VOIDmode,
5323 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5324 CALL_INSN_FUNCTION_USAGE (insn));
5326 /* If we are handling a floating-point return value, we need to
5327 save $18 in the function prologue. Putting a note on the
5328 call will mean that df_regs_ever_live_p ($18) will be true if the
5329 call is not eliminated, and we can check that in the prologue
5330 code. */
5331 if (fp_ret_p)
5332 CALL_INSN_FUNCTION_USAGE (insn) =
5333 gen_rtx_EXPR_LIST (VOIDmode,
5334 gen_rtx_USE (VOIDmode,
5335 gen_rtx_REG (word_mode, 18)),
5336 CALL_INSN_FUNCTION_USAGE (insn));
5338 return insn;
5341 /* We know the function we are going to call. If we have already
5342 built a stub, we don't need to do anything further. */
5343 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5344 for (l = mips16_stubs; l != NULL; l = l->next)
5345 if (strcmp (l->name, fnname) == 0)
5346 break;
5348 if (l == NULL)
5350 const char *separator;
5351 char *secname, *stubname;
5352 tree stubid, stubdecl;
5353 unsigned int f;
5355 /* If the function does not return in FPRs, the special stub
5356 section is named
5357 .mips16.call.FNNAME
5359 If the function does return in FPRs, the stub section is named
5360 .mips16.call.fp.FNNAME
5362 Build a decl for the stub. */
5363 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5364 fnname, NULL));
5365 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5366 fnname, NULL));
5367 stubid = get_identifier (stubname);
5368 stubdecl = build_decl (FUNCTION_DECL, stubid,
5369 build_function_type (void_type_node, NULL_TREE));
5370 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5371 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5372 void_type_node);
5374 /* Output a comment. */
5375 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5376 (fp_ret_p
5377 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5378 : ""),
5379 fnname);
5380 separator = "";
5381 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5383 fprintf (asm_out_file, "%s%s", separator,
5384 (f & 3) == 1 ? "float" : "double");
5385 separator = ", ";
5387 fprintf (asm_out_file, ")\n");
5389 /* Write the preamble leading up to the function declaration. */
5390 fprintf (asm_out_file, "\t.set\tnomips16\n");
5391 assemble_start_function (stubdecl, stubname);
5393 if (!FUNCTION_NAME_ALREADY_DECLARED)
5395 fputs ("\t.ent\t", asm_out_file);
5396 assemble_name (asm_out_file, stubname);
5397 fputs ("\n", asm_out_file);
5399 assemble_name (asm_out_file, stubname);
5400 fputs (":\n", asm_out_file);
5403 if (!fp_ret_p)
5405 /* Load the address of the MIPS16 function into $at. Do this
5406 first so that targets with coprocessor interlocks can use
5407 an MFC1 to fill the delay slot. */
5408 fprintf (asm_out_file, "\t.set\tnoat\n");
5409 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5410 fnname);
5413 /* Move the arguments from general registers to floating-point
5414 registers. */
5415 mips_output_args_xfer (fp_code, 't');
5417 if (!fp_ret_p)
5419 /* Jump to the previously-loaded address. */
5420 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5421 fprintf (asm_out_file, "\t.set\tat\n");
5423 else
5425 /* Save the return address in $18 and call the non-MIPS16 function.
5426 The stub's caller knows that $18 might be clobbered, even though
5427 $18 is usually a call-saved register. */
5428 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5429 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5430 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5432 /* Move the result from floating-point registers to
5433 general registers. */
5434 switch (GET_MODE (retval))
5436 case SCmode:
5437 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5438 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5439 /* Fall through. */
5440 case SFmode:
5441 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5442 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5444 /* On 64-bit targets, complex floats are returned in
5445 a single GPR, such that "sd" on a suitably-aligned
5446 target would store the value correctly. */
5447 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5448 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5449 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5450 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5451 reg_names[GP_RETURN],
5452 reg_names[GP_RETURN],
5453 reg_names[GP_RETURN + 1]);
5455 break;
5457 case DCmode:
5458 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5459 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5460 /* Fall through. */
5461 case DFmode:
5462 case V2SFmode:
5463 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5464 break;
5466 default:
5467 gcc_unreachable ();
5469 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
5472 #ifdef ASM_DECLARE_FUNCTION_SIZE
5473 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5474 #endif
5476 if (!FUNCTION_NAME_ALREADY_DECLARED)
5478 fputs ("\t.end\t", asm_out_file);
5479 assemble_name (asm_out_file, stubname);
5480 fputs ("\n", asm_out_file);
5483 /* Record this stub. */
5484 l = XNEW (struct mips16_stub);
5485 l->name = xstrdup (fnname);
5486 l->fp_ret_p = fp_ret_p;
5487 l->next = mips16_stubs;
5488 mips16_stubs = l;
5491 /* If we expect a floating-point return value, but we've built a
5492 stub which does not expect one, then we're in trouble. We can't
5493 use the existing stub, because it won't handle the floating-point
5494 value. We can't build a new stub, because the linker won't know
5495 which stub to use for the various calls in this object file.
5496 Fortunately, this case is illegal, since it means that a function
5497 was declared in two different ways in a single compilation. */
5498 if (fp_ret_p && !l->fp_ret_p)
5499 error ("cannot handle inconsistent calls to %qs", fnname);
5501 if (retval == NULL_RTX)
5502 insn = gen_call_internal_direct (fn, args_size);
5503 else
5504 insn = gen_call_value_internal_direct (retval, fn, args_size);
5505 insn = mips_emit_call_insn (insn, false);
5507 /* If we are calling a stub which handles a floating-point return
5508 value, we need to arrange to save $18 in the prologue. We do this
5509 by marking the function call as using the register. The prologue
5510 will later see that it is used, and emit code to save it. */
5511 if (fp_ret_p)
5512 CALL_INSN_FUNCTION_USAGE (insn) =
5513 gen_rtx_EXPR_LIST (VOIDmode,
5514 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5515 CALL_INSN_FUNCTION_USAGE (insn));
5517 return insn;
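/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   It models on the host what the dsll/or pair above achieves: packing
   an SCmode return value into one 64-bit GPR so that a doubleword "sd"
   store would put the real part at offset 0 and the imaginary part at
   offset 4 for either endianness.  The model_* names are the editor's,
   and the sign extension performed by mfc1 is ignored; only the final
   layout is modelled.  */
#include <assert.h>
#include <stdint.h>

static uint64_t
model_pack_scmode (uint32_t real, uint32_t imag, int target_little_endian)
{
  uint64_t r = real;            /* GP_RETURN, transferred from $f0     */
  uint64_t i = imag;            /* GP_RETURN + 1, from the second FPR  */

  /* "dsll reg,reg,32" is applied to GP_RETURN + TARGET_LITTLE_ENDIAN:
     the real half on big-endian targets, the imaginary half on
     little-endian ones.  */
  if (target_little_endian)
    i <<= 32;
  else
    r <<= 32;

  /* "or GP_RETURN,GP_RETURN,GP_RETURN+1".  */
  return r | i;
}

int
main (void)
{
  /* A big-endian sd stores the high 32 bits at the lower address, so
     the real part must end up in the high half; little-endian is the
     mirror image.  */
  assert (model_pack_scmode (0x11111111, 0x22222222, 0) >> 32 == 0x11111111);
  assert ((uint32_t) model_pack_scmode (0x11111111, 0x22222222, 1) == 0x11111111);
  return 0;
}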
5520 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5522 static bool
5523 mips_ok_for_lazy_binding_p (rtx x)
5525 return (TARGET_USE_GOT
5526 && GET_CODE (x) == SYMBOL_REF
5527 && !mips_symbol_binds_local_p (x));
5530 /* Load function address ADDR into register DEST. SIBCALL_P is true
5531 if the address is needed for a sibling call. Return true if we
5532 used an explicit lazy-binding sequence. */
5534 static bool
5535 mips_load_call_address (rtx dest, rtx addr, bool sibcall_p)
5537 /* If we're generating PIC, and this call is to a global function,
5538 try to allow its address to be resolved lazily. This isn't
5539 possible for sibcalls when $gp is call-saved because the value
5540 of $gp on entry to the stub would be our caller's gp, not ours. */
5541 if (TARGET_EXPLICIT_RELOCS
5542 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5543 && mips_ok_for_lazy_binding_p (addr))
5545 rtx high, lo_sum_symbol;
5547 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5548 addr, SYMBOL_GOTOFF_CALL);
5549 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5550 if (Pmode == SImode)
5551 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5552 else
5553 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5554 return true;
5556 else
5558 mips_emit_move (dest, addr);
5559 return false;
5563 /* Expand a "call", "sibcall", "call_value" or "sibcall_value" instruction.
5564 RESULT is where the result will go (null for "call"s and "sibcall"s),
5565 ADDR is the address of the function, ARGS_SIZE is the size of the
5566 arguments and AUX is the value passed to us by mips_function_arg.
5567 SIBCALL_P is true if we are expanding a sibling call, false if we're
5568 expanding a normal call.
5570 Return the call itself. */
5572 rtx
5573 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, bool sibcall_p)
5575 rtx orig_addr, pattern, insn;
5576 bool lazy_p;
5578 orig_addr = addr;
5579 lazy_p = false;
5580 if (!call_insn_operand (addr, VOIDmode))
5582 addr = gen_reg_rtx (Pmode);
5583 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5586 insn = mips16_build_call_stub (result, addr, args_size,
5587 aux == 0 ? 0 : (int) GET_MODE (aux));
5588 if (insn)
5590 gcc_assert (!sibcall_p && !lazy_p);
5591 return insn;
5594 if (result == 0)
5595 pattern = (sibcall_p
5596 ? gen_sibcall_internal (addr, args_size)
5597 : gen_call_internal (addr, args_size));
5598 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5600 /* Handle return values created by mips_return_fpr_pair. */
5601 rtx reg1, reg2;
5603 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5604 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5605 pattern =
5606 (sibcall_p
5607 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5608 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5610 else
5612 /* Handle return values created by mips_return_fpr_single. */
5613 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
5614 result = XEXP (XVECEXP (result, 0, 0), 0);
5615 pattern = (sibcall_p
5616 ? gen_sibcall_value_internal (result, addr, args_size)
5617 : gen_call_value_internal (result, addr, args_size));
5620 return mips_emit_call_insn (pattern, lazy_p);
5623 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5625 static bool
5626 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5628 if (!TARGET_SIBCALLS)
5629 return false;
5631 /* We can't do a sibcall if the called function is a MIPS16 function
5632 because there is no direct "jx" instruction equivalent to "jalx" to
5633 switch the ISA mode. We only care about cases where the sibling
5634 and normal calls would both be direct. */
5635 if (mips_use_mips16_mode_p (decl)
5636 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5637 return false;
5639 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
5640 functions could be MIPS16 ones unless an attribute explicitly tells
5641 us otherwise. */
5642 if (TARGET_INTERLINK_MIPS16
5643 && decl
5644 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
5645 && !mips_nomips16_decl_p (decl)
5646 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5647 return false;
5649 /* Otherwise OK. */
5650 return true;
5653 /* Emit code to move general operand SRC into condition-code
5654 register DEST given that SCRATCH is a scratch TFmode FPR.
5655 The sequence is:
5657 FP1 = SRC
5658 FP2 = 0.0f
5659 DEST = FP2 < FP1
5661 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
5663 void
5664 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
5666 rtx fp1, fp2;
5668 /* Change the source to SFmode. */
5669 if (MEM_P (src))
5670 src = adjust_address (src, SFmode, 0);
5671 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5672 src = gen_rtx_REG (SFmode, true_regnum (src));
5674 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5675 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5677 mips_emit_move (copy_rtx (fp1), src);
5678 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5679 emit_insn (gen_slt_sf (dest, fp2, fp1));
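/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   A host-side model of the reload trick above: the condition value is
   reinterpreted as SFmode bits, so 0 becomes +0.0 and 1 becomes a tiny
   positive denormal, and "0.0 < x" then recovers the boolean.  This
   assumes the reloaded value is 0 or 1, as a spilled condition code
   would be, and that denormals are not flushed to zero on the host;
   model_fcc_reload is the editor's name.  */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int
model_fcc_reload (uint32_t cc_bits)     /* 0 or 1 */
{
  float fp1, fp2 = 0.0f;

  memcpy (&fp1, &cc_bits, sizeof fp1);  /* "FP1 = SRC" is a bit copy */
  return fp2 < fp1;                     /* "DEST = FP2 < FP1"        */
}

int
main (void)
{
  assert (model_fcc_reload (0) == 0);
  assert (model_fcc_reload (1) == 1);
  return 0;
}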
5682 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5683 Assume that the areas do not overlap. */
5685 static void
5686 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5688 HOST_WIDE_INT offset, delta;
5689 unsigned HOST_WIDE_INT bits;
5690 int i;
5691 enum machine_mode mode;
5692 rtx *regs;
5694 /* Work out how many bits to move at a time. If both operands have
5695 half-word alignment, it is usually better to move in half words.
5696 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5697 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5698 Otherwise move word-sized chunks. */
5699 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5700 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5701 bits = BITS_PER_WORD / 2;
5702 else
5703 bits = BITS_PER_WORD;
5705 mode = mode_for_size (bits, MODE_INT, 0);
5706 delta = bits / BITS_PER_UNIT;
5708 /* Allocate a buffer for the temporary registers. */
5709 regs = alloca (sizeof (rtx) * length / delta);
5711 /* Load as many BITS-sized chunks as possible. Use a normal load if
5712 the source has enough alignment, otherwise use left/right pairs. */
5713 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5715 regs[i] = gen_reg_rtx (mode);
5716 if (MEM_ALIGN (src) >= bits)
5717 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5718 else
5720 rtx part = adjust_address (src, BLKmode, offset);
5721 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
5722 gcc_unreachable ();
5726 /* Copy the chunks to the destination. */
5727 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5728 if (MEM_ALIGN (dest) >= bits)
5729 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5730 else
5732 rtx part = adjust_address (dest, BLKmode, offset);
5733 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
5734 gcc_unreachable ();
5737 /* Mop up any left-over bytes. */
5738 if (offset < length)
5740 src = adjust_address (src, BLKmode, offset);
5741 dest = adjust_address (dest, BLKmode, offset);
5742 move_by_pieces (dest, src, length - offset,
5743 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
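/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   A host-side model of the straight-line strategy above: load as many
   fixed-size chunks as possible into temporaries (all loads before all
   stores, as the generated code does), then mop up the tail byte by
   byte.  The 4-byte chunk and 16-chunk cap stand in for BITS_PER_WORD
   and MIPS_MAX_MOVE_BYTES_STRAIGHT; the halfword-alignment special
   case and the lwl/lwr fallback are omitted.  The model_* names are
   the editor's.  */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MODEL_CHUNK 4
#define MODEL_MAX_CHUNKS 16

static void
model_block_move_straight (unsigned char *dest, const unsigned char *src,
                           size_t length)
{
  uint32_t regs[MODEL_MAX_CHUNKS];
  size_t offset, i;

  assert (length <= MODEL_CHUNK * MODEL_MAX_CHUNKS);

  /* Load as many chunks as possible...  */
  for (offset = 0, i = 0; offset + MODEL_CHUNK <= length; offset += MODEL_CHUNK, i++)
    memcpy (&regs[i], src + offset, MODEL_CHUNK);

  /* ...then copy the chunks to the destination.  */
  for (offset = 0, i = 0; offset + MODEL_CHUNK <= length; offset += MODEL_CHUNK, i++)
    memcpy (dest + offset, &regs[i], MODEL_CHUNK);

  /* Mop up any left-over bytes.  */
  for (; offset < length; offset++)
    dest[offset] = src[offset];
}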
5747 /* Helper function for doing a loop-based block operation on memory
5748 reference MEM. Each iteration of the loop will operate on LENGTH
5749 bytes of MEM.
5751 Create a new base register for use within the loop and point it to
5752 the start of MEM. Create a new memory reference that uses this
5753 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5755 static void
5756 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5757 rtx *loop_reg, rtx *loop_mem)
5759 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5761 /* Although the new mem does not refer to a known location,
5762 it does keep up to LENGTH bytes of alignment. */
5763 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5764 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5767 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
5768 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
5769 the memory regions do not overlap. */
5771 static void
5772 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
5773 HOST_WIDE_INT bytes_per_iter)
5775 rtx label, src_reg, dest_reg, final_src;
5776 HOST_WIDE_INT leftover;
5778 leftover = length % bytes_per_iter;
5779 length -= leftover;
5781 /* Create registers and memory references for use within the loop. */
5782 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
5783 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
5785 /* Calculate the value that SRC_REG should have after the last iteration
5786 of the loop. */
5787 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5788 0, 0, OPTAB_WIDEN);
5790 /* Emit the start of the loop. */
5791 label = gen_label_rtx ();
5792 emit_label (label);
5794 /* Emit the loop body. */
5795 mips_block_move_straight (dest, src, bytes_per_iter);
5797 /* Move on to the next block. */
5798 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
5799 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
5801 /* Emit the loop condition. */
5802 if (Pmode == DImode)
5803 emit_insn (gen_cmpdi (src_reg, final_src));
5804 else
5805 emit_insn (gen_cmpsi (src_reg, final_src));
5806 emit_jump_insn (gen_bne (label));
5808 /* Mop up any left-over bytes. */
5809 if (leftover)
5810 mips_block_move_straight (dest, src, leftover);
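/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The loop emitted above has the shape of this host-side model: peel
   off the remainder, advance both pointers by BYTES_PER_ITER per
   iteration until the source pointer reaches its precomputed final
   value, then copy the remainder straight-line.  memcpy stands in for
   the mips_block_move_straight body; LENGTH must be at least
   BYTES_PER_ITER, as in the real code.  The model_* name is the
   editor's.  */
#include <stddef.h>
#include <string.h>

static void
model_block_move_loop (unsigned char *dest, const unsigned char *src,
                       size_t length, size_t bytes_per_iter)
{
  size_t leftover = length % bytes_per_iter;
  const unsigned char *final_src = src + (length - leftover);

  do
    {
      memcpy (dest, src, bytes_per_iter);
      src += bytes_per_iter;
      dest += bytes_per_iter;
    }
  while (src != final_src);     /* "bne src_reg, final_src, label" */

  if (leftover)
    memcpy (dest, src, leftover);
}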
5813 /* Expand a movmemsi instruction, which copies LENGTH bytes from
5814 memory reference SRC to memory reference DEST. */
5816 bool
5817 mips_expand_block_move (rtx dest, rtx src, rtx length)
5819 if (GET_CODE (length) == CONST_INT)
5821 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
5823 mips_block_move_straight (dest, src, INTVAL (length));
5824 return true;
5826 else if (optimize)
5828 mips_block_move_loop (dest, src, INTVAL (length),
5829 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
5830 return true;
5833 return false;
5836 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5838 void
5839 mips_expand_synci_loop (rtx begin, rtx end)
5841 rtx inc, label, cmp, cmp_result;
5843 /* Load INC with the cache line size (rdhwr INC,$1). */
5844 inc = gen_reg_rtx (SImode);
5845 emit_insn (gen_rdhwr (inc, const1_rtx));
5847 /* Loop back to here. */
5848 label = gen_label_rtx ();
5849 emit_label (label);
5851 emit_insn (gen_synci (begin));
5853 cmp = gen_reg_rtx (Pmode);
5854 mips_emit_binary (GTU, cmp, begin, end);
5856 mips_emit_binary (PLUS, begin, begin, inc);
5858 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
5859 emit_jump_insn (gen_condjump (cmp_result, label));
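/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The RTL emitted above behaves like this host-side loop: the line
   size comes from "rdhwr inc,$1", one SYNCI is issued per line, and
   the termination test uses the address that was just synced,
   mirroring the compare-then-advance order of the generated code.
   model_synci is a hypothetical stand-in for the instruction; the
   model_* names are the editor's.  */
#include <stdint.h>

static void
model_synci (uintptr_t addr)
{
  (void) addr;                  /* would flush one cache line */
}

static void
model_synci_loop (uintptr_t begin, uintptr_t end, uintptr_t line_size)
{
  int past_end;

  do
    {
      model_synci (begin);          /* "synci 0(begin)"           */
      past_end = begin > end;       /* GTU compare into "cmp"     */
      begin += line_size;           /* "begin += inc"             */
    }
  while (!past_end);                /* branch back while cmp == 0 */
}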
5862 /* Return true if it is possible to use left/right accesses for a
5863 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5864 returning true, update *OP, *LEFT and *RIGHT as follows:
5866 *OP is a BLKmode reference to the whole field.
5868 *LEFT is a QImode reference to the first byte if big endian or
5869 the last byte if little endian. This address can be used in the
5870 left-side instructions (LWL, SWL, LDL, SDL).
5872 *RIGHT is a QImode reference to the opposite end of the field and
5873 can be used in the partnering right-side instructions (LWR, SWR, LDR, SDR). */
5875 static bool
5876 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
5877 rtx *left, rtx *right)
5879 rtx first, last;
5881 /* Check that the operand really is a MEM. Not all the extv and
5882 extzv predicates are checked. */
5883 if (!MEM_P (*op))
5884 return false;
5886 /* Check that the size is valid. */
5887 if (width != 32 && (!TARGET_64BIT || width != 64))
5888 return false;
5890 /* We can only access byte-aligned values. Since we are always passed
5891 a reference to the first byte of the field, it is not necessary to
5892 do anything with BITPOS after this check. */
5893 if (bitpos % BITS_PER_UNIT != 0)
5894 return false;
5896 /* Reject aligned bitfields: we want to use a normal load or store
5897 instead of a left/right pair. */
5898 if (MEM_ALIGN (*op) >= width)
5899 return false;
5901 /* Adjust *OP to refer to the whole field. This also has the effect
5902 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5903 *op = adjust_address (*op, BLKmode, 0);
5904 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5906 /* Get references to both ends of the field. We deliberately don't
5907 use the original QImode *OP for FIRST since the new BLKmode one
5908 might have a simpler address. */
5909 first = adjust_address (*op, QImode, 0);
5910 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5912 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5913 correspond to the MSB and RIGHT to the LSB. */
5914 if (TARGET_BIG_ENDIAN)
5915 *left = first, *right = last;
5916 else
5917 *left = last, *right = first;
5919 return true;
5922 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
5923 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
5924 the operation is the equivalent of:
5926 (set DEST (*_extract SRC WIDTH BITPOS))
5928 Return true on success. */
5930 bool
5931 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
5932 HOST_WIDE_INT bitpos)
5934 rtx left, right, temp;
5936 /* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
5937 be a paradoxical word_mode subreg. This is the only case in which
5938 we allow the destination to be larger than the source. */
5939 if (GET_CODE (dest) == SUBREG
5940 && GET_MODE (dest) == DImode
5941 && GET_MODE (SUBREG_REG (dest)) == SImode)
5942 dest = SUBREG_REG (dest);
5944 /* After the above adjustment, the destination must be the same
5945 width as the source. */
5946 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5947 return false;
5949 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5950 return false;
5952 temp = gen_reg_rtx (GET_MODE (dest));
5953 if (GET_MODE (dest) == DImode)
5955 emit_insn (gen_mov_ldl (temp, src, left));
5956 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5958 else
5960 emit_insn (gen_mov_lwl (temp, src, left));
5961 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5963 return true;
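/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The lwl/lwr (or ldl/ldr) pair emitted above amounts to a plain
   unaligned load: LWL fills the register from its most significant
   end starting at *LEFT, LWR fills the rest from *RIGHT, and the
   combined result is simply the bytes of the field in target order.
   This host model shows the net effect for a 32-bit big-endian field;
   the model_* name is the editor's.  */
#include <stdint.h>

static uint32_t
model_lwl_lwr_load_be (const unsigned char *left)
{
  /* LEFT points at the first (most significant) byte of the field,
     exactly as mips_get_unaligned_mem arranges for big-endian.  */
  return ((uint32_t) left[0] << 24) | ((uint32_t) left[1] << 16)
         | ((uint32_t) left[2] << 8) | (uint32_t) left[3];
}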
5966 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
5967 BITPOS and SRC are the operands passed to the expander; the operation
5968 is the equivalent of:
5970 (set (zero_extract DEST WIDTH BITPOS) SRC)
5972 Return true on success. */
5974 bool
5975 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
5976 HOST_WIDE_INT bitpos)
5978 rtx left, right;
5979 enum machine_mode mode;
5981 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5982 return false;
5984 mode = mode_for_size (width, MODE_INT, 0);
5985 src = gen_lowpart (mode, src);
5986 if (mode == DImode)
5988 emit_insn (gen_mov_sdl (dest, src, left));
5989 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5991 else
5993 emit_insn (gen_mov_swl (dest, src, left));
5994 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5996 return true;
5999 /* Return true if X is a MEM with the same size as MODE. */
6001 bool
6002 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6004 rtx size;
6006 if (!MEM_P (x))
6007 return false;
6009 size = MEM_SIZE (x);
6010 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6013 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6014 source of an "ext" instruction or the destination of an "ins"
6015 instruction. OP must be a register operand and the following
6016 conditions must hold:
6018 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6019 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6020 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6022 Also reject lengths equal to a word as they are better handled
6023 by the move patterns. */
6025 bool
6026 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6028 if (!ISA_HAS_EXT_INS
6029 || !register_operand (op, VOIDmode)
6030 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6031 return false;
6033 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6034 return false;
6036 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6037 return false;
6039 return true;
6042 /* Return true if -msplit-addresses is selected and should be honored.
6044 -msplit-addresses is a half-way house between explicit relocations
6045 and the traditional assembler macros. It can split absolute 32-bit
6046 symbolic constants into a high/lo_sum pair but uses macros for other
6047 sorts of access.
6049 Like explicit relocation support for REL targets, it relies
6050 on GNU extensions in the assembler and the linker.
6052 Although this code should work for -O0, it has traditionally
6053 been treated as an optimization. */
6055 static bool
6056 mips_split_addresses_p (void)
6058 return (TARGET_SPLIT_ADDRESSES
6059 && optimize
6060 && !TARGET_MIPS16
6061 && !flag_pic
6062 && !ABI_HAS_64BIT_SYMBOLS);
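/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   When an absolute address is split into a %hi/%lo pair (lui followed
   by addiu or a lo_sum memory access), the low half is sign-extended
   by the add, so the high half is computed with a +0x8000 rounding
   term.  A host-side round trip; the model_* names are the editor's.  */
#include <assert.h>
#include <stdint.h>

static uint32_t
model_hi (uint32_t addr)
{
  return (addr + 0x8000) >> 16;         /* what %hi() resolves to */
}

static int32_t
model_lo (uint32_t addr)
{
  return (int16_t) (addr & 0xffff);     /* what %lo() resolves to */
}

int
main (void)
{
  uint32_t addr = 0x1000fff0;           /* low half >= 0x8000     */

  /* "lui r,%hi(addr); addiu r,r,%lo(addr)" reconstructs addr.  */
  assert ((model_hi (addr) << 16) + model_lo (addr) == addr);
  return 0;
}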
6065 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6067 static void
6068 mips_init_relocs (void)
6070 memset (mips_split_p, '\0', sizeof (mips_split_p));
6071 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6072 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6074 if (ABI_HAS_64BIT_SYMBOLS)
6076 if (TARGET_EXPLICIT_RELOCS)
6078 mips_split_p[SYMBOL_64_HIGH] = true;
6079 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6080 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6082 mips_split_p[SYMBOL_64_MID] = true;
6083 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6084 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6086 mips_split_p[SYMBOL_64_LOW] = true;
6087 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6088 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6090 mips_split_p[SYMBOL_ABSOLUTE] = true;
6091 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6094 else
6096 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6098 mips_split_p[SYMBOL_ABSOLUTE] = true;
6099 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6100 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6102 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6106 if (TARGET_MIPS16)
6108 /* The high part is provided by a pseudo copy of $gp. */
6109 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6110 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6113 if (TARGET_EXPLICIT_RELOCS)
6115 /* Small data constants are kept whole until after reload,
6116 then lowered by mips_rewrite_small_data. */
6117 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6119 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6120 if (TARGET_NEWABI)
6122 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6123 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6125 else
6127 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6128 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6131 if (TARGET_XGOT)
6133 /* The HIGH and LO_SUM are matched by special .md patterns. */
6134 mips_split_p[SYMBOL_GOT_DISP] = true;
6136 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6137 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6138 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6140 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6141 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6142 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6144 else
6146 if (TARGET_NEWABI)
6147 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6148 else
6149 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6150 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6154 if (TARGET_NEWABI)
6156 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6157 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6158 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6161 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6162 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6164 mips_split_p[SYMBOL_DTPREL] = true;
6165 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6166 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6168 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6170 mips_split_p[SYMBOL_TPREL] = true;
6171 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6172 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6174 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6177 /* If OP is an UNSPEC address, return the address to which it refers,
6178 otherwise return OP itself. */
6180 static rtx
6181 mips_strip_unspec_address (rtx op)
6183 rtx base, offset;
6185 split_const (op, &base, &offset);
6186 if (UNSPEC_ADDRESS_P (base))
6187 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6188 return op;
6191 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6192 in context CONTEXT. RELOCS is the array of relocations to use. */
6194 static void
6195 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6196 const char **relocs)
6198 enum mips_symbol_type symbol_type;
6199 const char *p;
6201 symbol_type = mips_classify_symbolic_expression (op, context);
6202 gcc_assert (relocs[symbol_type]);
6204 fputs (relocs[symbol_type], file);
6205 output_addr_const (file, mips_strip_unspec_address (op));
6206 for (p = relocs[symbol_type]; *p != 0; p++)
6207 if (*p == '(')
6208 fputc (')', file);
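/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The closing loop above just balances the parentheses opened by the
   relocation prefix, which matters for composed operators such as
   "%hi(%neg(%gp_rel(".  A host-side model; model_print_reloc is the
   editor's name and "foo" is a placeholder symbol.  */
#include <stdio.h>

static void
model_print_reloc (FILE *f, const char *reloc, const char *symbol)
{
  const char *p;

  fputs (reloc, f);                     /* e.g. "%hi(%neg(%gp_rel("  */
  fputs (symbol, f);                    /* the unwrapped address     */
  for (p = reloc; *p != 0; p++)         /* one ')' per '(' opened    */
    if (*p == '(')
      fputc (')', f);
}

int
main (void)
{
  /* Prints "%hi(%neg(%gp_rel(foo)))".  */
  model_print_reloc (stdout, "%hi(%neg(%gp_rel(", "foo");
  return 0;
}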
6211 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6212 The punctuation characters are:
6214 '(' Start a nested ".set noreorder" block.
6215 ')' End a nested ".set noreorder" block.
6216 '[' Start a nested ".set noat" block.
6217 ']' End a nested ".set noat" block.
6218 '<' Start a nested ".set nomacro" block.
6219 '>' End a nested ".set nomacro" block.
6220 '*' Behave like %(%< if generating a delayed-branch sequence.
6221 '#' Print a nop if in a ".set noreorder" block.
6222 '/' Like '#', but do nothing within a delayed-branch sequence.
6223 '?' Print "l" if mips_branch_likely is true
6224 '.' Print the name of the register with a hard-wired zero (zero or $0).
6225 '@' Print the name of the assembler temporary register (at or $1).
6226 '^' Print the name of the pic call-through register (t9 or $25).
6227 '+' Print the name of the gp register (usually gp or $28).
6228 '$' Print the name of the stack pointer register (sp or $29).
6229 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6230 '-' Print ".set pop" under the same conditions as '|'.
6232 See also mips_init_print_operand_punct. */
6234 static void
6235 mips_print_operand_punctuation (FILE *file, int ch)
6237 switch (ch)
6239 case '(':
6240 if (set_noreorder++ == 0)
6241 fputs (".set\tnoreorder\n\t", file);
6242 break;
6244 case ')':
6245 gcc_assert (set_noreorder > 0);
6246 if (--set_noreorder == 0)
6247 fputs ("\n\t.set\treorder", file);
6248 break;
6250 case '[':
6251 if (set_noat++ == 0)
6252 fputs (".set\tnoat\n\t", file);
6253 break;
6255 case ']':
6256 gcc_assert (set_noat > 0);
6257 if (--set_noat == 0)
6258 fputs ("\n\t.set\tat", file);
6259 break;
6261 case '<':
6262 if (set_nomacro++ == 0)
6263 fputs (".set\tnomacro\n\t", file);
6264 break;
6266 case '>':
6267 gcc_assert (set_nomacro > 0);
6268 if (--set_nomacro == 0)
6269 fputs ("\n\t.set\tmacro", file);
6270 break;
6272 case '*':
6273 if (final_sequence != 0)
6275 mips_print_operand_punctuation (file, '(');
6276 mips_print_operand_punctuation (file, '<');
6278 break;
6280 case '#':
6281 if (set_noreorder != 0)
6282 fputs ("\n\tnop", file);
6283 break;
6285 case '/':
6286 /* Print an extra newline so that the delayed insn is separated
6287 from the following ones. This looks neater and is consistent
6288 with non-nop delayed sequences. */
6289 if (set_noreorder != 0 && final_sequence == 0)
6290 fputs ("\n\tnop\n", file);
6291 break;
6293 case '?':
6294 if (mips_branch_likely)
6295 putc ('l', file);
6296 break;
6298 case '.':
6299 fputs (reg_names[GP_REG_FIRST + 0], file);
6300 break;
6302 case '@':
6303 fputs (reg_names[GP_REG_FIRST + 1], file);
6304 break;
6306 case '^':
6307 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6308 break;
6310 case '+':
6311 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6312 break;
6314 case '$':
6315 fputs (reg_names[STACK_POINTER_REGNUM], file);
6316 break;
6318 case '|':
6319 if (!ISA_HAS_LL_SC)
6320 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6321 break;
6323 case '-':
6324 if (!ISA_HAS_LL_SC)
6325 fputs ("\n\t.set\tpop", file);
6326 break;
6328 default:
6329 gcc_unreachable ();
6330 break;
6334 /* Initialize mips_print_operand_punct. */
6336 static void
6337 mips_init_print_operand_punct (void)
6339 const char *p;
6341 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6342 mips_print_operand_punct[(unsigned char) *p] = true;
6345 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6346 associated with condition CODE. Print the condition part of the
6347 opcode to FILE. */
6349 static void
6350 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6352 switch (code)
6354 case EQ:
6355 case NE:
6356 case GT:
6357 case GE:
6358 case LT:
6359 case LE:
6360 case GTU:
6361 case GEU:
6362 case LTU:
6363 case LEU:
6364 /* Conveniently, the MIPS names for these conditions are the same
6365 as their RTL equivalents. */
6366 fputs (GET_RTX_NAME (code), file);
6367 break;
6369 default:
6370 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6371 break;
6375 /* Likewise floating-point branches. */
6377 static void
6378 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6380 switch (code)
6382 case EQ:
6383 fputs ("c1f", file);
6384 break;
6386 case NE:
6387 fputs ("c1t", file);
6388 break;
6390 default:
6391 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6392 break;
6396 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6398 'X' Print CONST_INT OP in hexadecimal format.
6399 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6400 'd' Print CONST_INT OP in decimal.
6401 'h' Print the high-part relocation associated with OP, after stripping
6402 any outermost HIGH.
6403 'R' Print the low-part relocation associated with OP.
6404 'C' Print the integer branch condition for comparison OP.
6405 'N' Print the inverse of the integer branch condition for comparison OP.
6406 'F' Print the FPU branch condition for comparison OP.
6407 'W' Print the inverse of the FPU branch condition for comparison OP.
6408 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6409 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6410 't' Like 'T', but with the EQ/NE cases reversed
6411 'Y' Print mips_fp_conditions[INTVAL (OP)]
6412 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6413 'q' Print a DSP accumulator register.
6414 'D' Print the second part of a double-word register or memory operand.
6415 'L' Print the low-order register in a double-word register operand.
6416 'M' Print high-order register in a double-word register operand.
6417 'z' Print $0 if OP is zero, otherwise print OP normally. */
6419 void
6420 mips_print_operand (FILE *file, rtx op, int letter)
6422 enum rtx_code code;
6424 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6426 mips_print_operand_punctuation (file, letter);
6427 return;
6430 gcc_assert (op);
6431 code = GET_CODE (op);
6433 switch (letter)
6435 case 'X':
6436 if (GET_CODE (op) == CONST_INT)
6437 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6438 else
6439 output_operand_lossage ("invalid use of '%%%c'", letter);
6440 break;
6442 case 'x':
6443 if (GET_CODE (op) == CONST_INT)
6444 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6445 else
6446 output_operand_lossage ("invalid use of '%%%c'", letter);
6447 break;
6449 case 'd':
6450 if (GET_CODE (op) == CONST_INT)
6451 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6452 else
6453 output_operand_lossage ("invalid use of '%%%c'", letter);
6454 break;
6456 case 'h':
6457 if (code == HIGH)
6458 op = XEXP (op, 0);
6459 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6460 break;
6462 case 'R':
6463 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6464 break;
6466 case 'C':
6467 mips_print_int_branch_condition (file, code, letter);
6468 break;
6470 case 'N':
6471 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6472 break;
6474 case 'F':
6475 mips_print_float_branch_condition (file, code, letter);
6476 break;
6478 case 'W':
6479 mips_print_float_branch_condition (file, reverse_condition (code),
6480 letter);
6481 break;
6483 case 'T':
6484 case 't':
6486 int truth = (code == NE) == (letter == 'T');
6487 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6489 break;
6491 case 'Y':
6492 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6493 fputs (mips_fp_conditions[UINTVAL (op)], file);
6494 else
6495 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6496 letter);
6497 break;
6499 case 'Z':
6500 if (ISA_HAS_8CC)
6502 mips_print_operand (file, op, 0);
6503 fputc (',', file);
6505 break;
6507 case 'q':
6508 if (code == REG && MD_REG_P (REGNO (op)))
6509 fprintf (file, "$ac0");
6510 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6511 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6512 else
6513 output_operand_lossage ("invalid use of '%%%c'", letter);
6514 break;
6516 default:
6517 switch (code)
6519 case REG:
6521 unsigned int regno = REGNO (op);
6522 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6523 || (letter == 'L' && TARGET_BIG_ENDIAN)
6524 || letter == 'D')
6525 regno++;
6526 fprintf (file, "%s", reg_names[regno]);
6528 break;
6530 case MEM:
6531 if (letter == 'D')
6532 output_address (plus_constant (XEXP (op, 0), 4));
6533 else
6534 output_address (XEXP (op, 0));
6535 break;
6537 default:
6538 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6539 fputs (reg_names[GP_REG_FIRST], file);
6540 else if (CONST_GP_P (op))
6541 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6542 else
6543 output_addr_const (file, mips_strip_unspec_address (op));
6544 break;
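/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The "zfnt" indexing in the 'T'/'t' case above encodes the table
   documented in the comment before mips_print_operand.  A host-side
   check; model_T_t is the editor's name.  */
#include <assert.h>

static char
model_T_t (int is_ne, int is_ccmode, char letter)
{
  int truth = (is_ne != 0) == (letter == 'T');
  return "zfnt"[truth * 2 + (is_ccmode != 0)];
}

int
main (void)
{
  /* 'T': 'f' for (eq:CC ...), 't' for (ne:CC ...),
          'z' for (eq:?I ...), 'n' for (ne:?I ...).  */
  assert (model_T_t (0, 1, 'T') == 'f' && model_T_t (1, 1, 'T') == 't');
  assert (model_T_t (0, 0, 'T') == 'z' && model_T_t (1, 0, 'T') == 'n');

  /* 't' is the same table with the EQ and NE rows swapped.  */
  assert (model_T_t (0, 1, 't') == 't' && model_T_t (1, 1, 't') == 'f');
  assert (model_T_t (0, 0, 't') == 'n' && model_T_t (1, 0, 't') == 'z');
  return 0;
}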
6549 /* Output address operand X to FILE. */
6551 void
6552 mips_print_operand_address (FILE *file, rtx x)
6554 struct mips_address_info addr;
6556 if (mips_classify_address (&addr, x, word_mode, true))
6557 switch (addr.type)
6559 case ADDRESS_REG:
6560 mips_print_operand (file, addr.offset, 0);
6561 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6562 return;
6564 case ADDRESS_LO_SUM:
6565 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6566 mips_lo_relocs);
6567 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6568 return;
6570 case ADDRESS_CONST_INT:
6571 output_addr_const (file, x);
6572 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
6573 return;
6575 case ADDRESS_SYMBOLIC:
6576 output_addr_const (file, mips_strip_unspec_address (x));
6577 return;
6579 gcc_unreachable ();
6582 /* Implement TARGET_ENCODE_SECTION_INFO. */
6584 static void
6585 mips_encode_section_info (tree decl, rtx rtl, int first)
6587 default_encode_section_info (decl, rtl, first);
6589 if (TREE_CODE (decl) == FUNCTION_DECL)
6591 rtx symbol = XEXP (rtl, 0);
6592 tree type = TREE_TYPE (decl);
6594 /* Encode whether the symbol is short or long. */
6595 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6596 || mips_far_type_p (type))
6597 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6601 /* Implement TARGET_SELECT_RTX_SECTION. */
6603 static section *
6604 mips_select_rtx_section (enum machine_mode mode, rtx x,
6605 unsigned HOST_WIDE_INT align)
6607 /* ??? Consider using mergeable small data sections. */
6608 if (mips_rtx_constant_in_small_data_p (mode))
6609 return get_named_section (NULL, ".sdata", 0);
6611 return default_elf_select_rtx_section (mode, x, align);
6614 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6616 The complication here is that, with the combination TARGET_ABICALLS
6617 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6618 therefore not be included in the read-only part of a DSO. Handle such
6619 cases by selecting a normal data section instead of a read-only one.
6620 The logic apes that in default_function_rodata_section. */
6622 static section *
6623 mips_function_rodata_section (tree decl)
6625 if (!TARGET_ABICALLS || TARGET_GPWORD)
6626 return default_function_rodata_section (decl);
6628 if (decl && DECL_SECTION_NAME (decl))
6630 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6631 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6633 char *rname = ASTRDUP (name);
6634 rname[14] = 'd';
6635 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6637 else if (flag_function_sections
6638 && flag_data_sections
6639 && strncmp (name, ".text.", 6) == 0)
6641 char *rname = ASTRDUP (name);
6642 memcpy (rname + 1, "data", 4);
6643 return get_section (rname, SECTION_WRITE, decl);
6646 return data_section;
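/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The two in-place edits above turn a text section name into the
   matching writable data section name: index 14 is the 't' in
   ".gnu.linkonce.t.", and bytes 1..4 of ".text.<fn>" are "text".
   "foo" is a placeholder function name.  */
#include <assert.h>
#include <string.h>

int
main (void)
{
  char one_only[] = ".gnu.linkonce.t.foo";
  char per_function[] = ".text.foo";

  one_only[14] = 'd';                   /* rname[14] = 'd';              */
  memcpy (per_function + 1, "data", 4); /* memcpy (rname + 1, "data", 4) */

  assert (strcmp (one_only, ".gnu.linkonce.d.foo") == 0);
  assert (strcmp (per_function, ".data.foo") == 0);
  return 0;
}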
6649 /* Implement TARGET_IN_SMALL_DATA_P. */
6651 static bool
6652 mips_in_small_data_p (const_tree decl)
6654 unsigned HOST_WIDE_INT size;
6656 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6657 return false;
6659 /* We don't yet generate small-data references for -mabicalls
6660 or VxWorks RTP code. See the related -G handling in
6661 mips_override_options. */
6662 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6663 return false;
6665 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6667 const char *name;
6669 /* Reject anything that isn't in a known small-data section. */
6670 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6671 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6672 return false;
6674 /* If a symbol is defined externally, the assembler will use the
6675 usual -G rules when deciding how to implement macros. */
6676 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6677 return true;
6679 else if (TARGET_EMBEDDED_DATA)
6681 /* Don't put constants into the small data section: we want them
6682 to be in ROM rather than RAM. */
6683 if (TREE_CODE (decl) != VAR_DECL)
6684 return false;
6686 if (TREE_READONLY (decl)
6687 && !TREE_SIDE_EFFECTS (decl)
6688 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6689 return false;
6692 /* Enforce -mlocal-sdata. */
6693 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6694 return false;
6696 /* Enforce -mextern-sdata. */
6697 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6699 if (DECL_EXTERNAL (decl))
6700 return false;
6701 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6702 return false;
6705 /* We have traditionally not treated zero-sized objects as small data,
6706 so this is now effectively part of the ABI. */
6707 size = int_size_in_bytes (TREE_TYPE (decl));
6708 return size > 0 && size <= mips_small_data_threshold;
6711 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6712 anchors for small data: the GP register acts as an anchor in that
6713 case. We also don't want to use them for PC-relative accesses,
6714 where the PC acts as an anchor. */
6716 static bool
6717 mips_use_anchors_for_symbol_p (const_rtx symbol)
6719 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6721 case SYMBOL_PC_RELATIVE:
6722 case SYMBOL_GP_RELATIVE:
6723 return false;
6725 default:
6726 return default_use_anchors_for_symbol_p (symbol);
6730 /* The MIPS debug format wants all automatic variables and arguments
6731 to be in terms of the virtual frame pointer (stack pointer before
6732 any adjustment in the function), while the MIPS 3.0 linker wants
6733 the frame pointer to be the stack pointer after the initial
6734 adjustment. So, we do the adjustment here. The arg pointer (which
6735 is eliminated) points to the virtual frame pointer, while the frame
6736 pointer (which may be eliminated) points to the stack pointer after
6737 the initial adjustments. */
6739 HOST_WIDE_INT
6740 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6742 rtx offset2 = const0_rtx;
6743 rtx reg = eliminate_constant_term (addr, &offset2);
6745 if (offset == 0)
6746 offset = INTVAL (offset2);
6748 if (reg == stack_pointer_rtx
6749 || reg == frame_pointer_rtx
6750 || reg == hard_frame_pointer_rtx)
6752 offset -= cfun->machine->frame.total_size;
6753 if (reg == hard_frame_pointer_rtx)
6754 offset += cfun->machine->frame.hard_frame_pointer_offset;
6757 /* sdbout_parms does not want this to crash for unrecognized cases. */
6758 #if 0
6759 else if (reg != arg_pointer_rtx)
6760 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6761 addr);
6762 #endif
6764 return offset;
6767 /* Implement ASM_OUTPUT_EXTERNAL. */
6769 void
6770 mips_output_external (FILE *file, tree decl, const char *name)
6772 default_elf_asm_output_external (file, decl, name);
6774 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6775 set in order to avoid putting out names that are never really
6776 used. */
6777 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6779 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6781 /* When using assembler macros, emit .extern directives for
6782 all small-data externs so that the assembler knows how
6783 big they are.
6785 In most cases it would be safe (though pointless) to emit
6786 .externs for other symbols too. One exception is when an
6787 object is within the -G limit but declared by the user to
6788 be in a section other than .sbss or .sdata. */
6789 fputs ("\t.extern\t", file);
6790 assemble_name (file, name);
6791 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6792 int_size_in_bytes (TREE_TYPE (decl)));
6794 else if (TARGET_IRIX
6795 && mips_abi == ABI_32
6796 && TREE_CODE (decl) == FUNCTION_DECL)
6798 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6799 `.global name .text' directive for every used but
6800 undefined function. If we don't, the linker may perform
6801 an optimization (skipping over the insns that set $gp)
6802 when it is unsafe. */
6803 fputs ("\t.globl ", file);
6804 assemble_name (file, name);
6805 fputs (" .text\n", file);
6810 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
6812 void
6813 mips_output_filename (FILE *stream, const char *name)
6815 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6816 directives. */
6817 if (write_symbols == DWARF2_DEBUG)
6818 return;
6819 else if (mips_output_filename_first_time)
6821 mips_output_filename_first_time = 0;
6822 num_source_filenames += 1;
6823 current_function_file = name;
6824 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6825 output_quoted_string (stream, name);
6826 putc ('\n', stream);
6828 /* If we are emitting stabs, let dbxout.c handle this (except for
6829 the mips_output_filename_first_time case). */
6830 else if (write_symbols == DBX_DEBUG)
6831 return;
6832 else if (name != current_function_file
6833 && strcmp (name, current_function_file) != 0)
6835 num_source_filenames += 1;
6836 current_function_file = name;
6837 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6838 output_quoted_string (stream, name);
6839 putc ('\n', stream);
6843 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
6845 static void ATTRIBUTE_UNUSED
6846 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
6848 switch (size)
6850 case 4:
6851 fputs ("\t.dtprelword\t", file);
6852 break;
6854 case 8:
6855 fputs ("\t.dtpreldword\t", file);
6856 break;
6858 default:
6859 gcc_unreachable ();
6861 output_addr_const (file, x);
6862 fputs ("+0x8000", file);
6865 /* Implement TARGET_DWARF_REGISTER_SPAN. */
6867 static rtx
6868 mips_dwarf_register_span (rtx reg)
6870 rtx high, low;
6871 enum machine_mode mode;
6873 /* By default, GCC maps increasing register numbers to increasing
6874 memory locations, but paired FPRs are always little-endian,
6875 regardless of the prevailing endianness. */
6876 mode = GET_MODE (reg);
6877 if (FP_REG_P (REGNO (reg))
6878 && TARGET_BIG_ENDIAN
6879 && MAX_FPRS_PER_FMT > 1
6880 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
6882 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
6883 high = mips_subword (reg, true);
6884 low = mips_subword (reg, false);
6885 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
6888 return NULL_RTX;
6891 /* Implement ASM_OUTPUT_ASCII. */
6893 void
6894 mips_output_ascii (FILE *stream, const char *string, size_t len)
6896 size_t i;
6897 int cur_pos;
6899 cur_pos = 17;
6900 fprintf (stream, "\t.ascii\t\"");
6901 for (i = 0; i < len; i++)
6903 int c;
6905 c = (unsigned char) string[i];
6906 if (ISPRINT (c))
6908 if (c == '\\' || c == '\"')
6910 putc ('\\', stream);
6911 cur_pos++;
6913 putc (c, stream);
6914 cur_pos++;
6916 else
6918 fprintf (stream, "\\%03o", c);
6919 cur_pos += 4;
6922 if (cur_pos > 72 && i+1 < len)
6924 cur_pos = 17;
6925 fprintf (stream, "\"\n\t.ascii\t\"");
6928 fprintf (stream, "\"\n");
6931 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6932 macros, mark the symbol as written so that mips_asm_output_external
6933 won't emit an .extern for it. STREAM is the output file, NAME is the
6934 name of the symbol, INIT_STRING is the string that should be written
6935 before the symbol and FINAL_STRING is the string that should be
6936 written after it. FINAL_STRING is a printf format that consumes the
6937 remaining arguments. */
6939 void
6940 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6941 const char *final_string, ...)
6943 va_list ap;
6945 fputs (init_string, stream);
6946 assemble_name (stream, name);
6947 va_start (ap, final_string);
6948 vfprintf (stream, final_string, ap);
6949 va_end (ap);
6951 if (!TARGET_EXPLICIT_RELOCS)
6953 tree name_tree = get_identifier (name);
6954 TREE_ASM_WRITTEN (name_tree) = 1;
6958 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6959 NAME is the name of the object and ALIGN is the required alignment
6960 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6961 alignment argument. */
6963 void
6964 mips_declare_common_object (FILE *stream, const char *name,
6965 const char *init_string,
6966 unsigned HOST_WIDE_INT size,
6967 unsigned int align, bool takes_alignment_p)
6969 if (!takes_alignment_p)
6971 size += (align / BITS_PER_UNIT) - 1;
6972 size -= size % (align / BITS_PER_UNIT);
6973 mips_declare_object (stream, name, init_string,
6974 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6976 else
6977 mips_declare_object (stream, name, init_string,
6978 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6979 size, align / BITS_PER_UNIT);
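/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   When the directive takes no alignment argument, the size itself is
   rounded up to a multiple of the alignment, using the add-then-
   truncate idiom above.  The model works in bytes rather than bits;
   model_round_size is the editor's name.  */
#include <assert.h>

static unsigned long
model_round_size (unsigned long size, unsigned long align_bytes)
{
  size += align_bytes - 1;      /* size += (align / BITS_PER_UNIT) - 1;    */
  size -= size % align_bytes;   /* size -= size % (align / BITS_PER_UNIT); */
  return size;
}

int
main (void)
{
  assert (model_round_size (10, 8) == 16);
  assert (model_round_size (16, 8) == 16);
  return 0;
}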
6982 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6983 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6985 void
6986 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6987 unsigned HOST_WIDE_INT size,
6988 unsigned int align)
6990 /* If the target wants uninitialized const declarations in
6991 .rdata then don't put them in .comm. */
6992 if (TARGET_EMBEDDED_DATA
6993 && TARGET_UNINIT_CONST_IN_RODATA
6994 && TREE_CODE (decl) == VAR_DECL
6995 && TREE_READONLY (decl)
6996 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6998 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6999 targetm.asm_out.globalize_label (stream, name);
7001 switch_to_section (readonly_data_section);
7002 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7003 mips_declare_object (stream, name, "",
7004 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7005 size);
7007 else
7008 mips_declare_common_object (stream, name, "\n\t.comm\t",
7009 size, align, true);
7012 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7013 extern int size_directive_output;
7015 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7016 definitions except that it uses mips_declare_object to emit the label. */
7018 void
7019 mips_declare_object_name (FILE *stream, const char *name,
7020 tree decl ATTRIBUTE_UNUSED)
7022 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7023 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7024 #endif
7026 size_directive_output = 0;
7027 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7029 HOST_WIDE_INT size;
7031 size_directive_output = 1;
7032 size = int_size_in_bytes (TREE_TYPE (decl));
7033 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7036 mips_declare_object (stream, name, "", ":\n");
7039 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7041 void
7042 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7044 const char *name;
7046 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7047 if (!flag_inhibit_size_directive
7048 && DECL_SIZE (decl) != 0
7049 && !at_end
7050 && top_level
7051 && DECL_INITIAL (decl) == error_mark_node
7052 && !size_directive_output)
7054 HOST_WIDE_INT size;
7056 size_directive_output = 1;
7057 size = int_size_in_bytes (TREE_TYPE (decl));
7058 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7061 #endif
7063 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7064 with the current ABI. */
7066 static const char *
7067 mips_mdebug_abi_name (void)
7069 switch (mips_abi)
7071 case ABI_32:
7072 return "abi32";
7073 case ABI_O64:
7074 return "abiO64";
7075 case ABI_N32:
7076 return "abiN32";
7077 case ABI_64:
7078 return "abiN64";
7079 case ABI_EABI:
7080 return TARGET_64BIT ? "eabi64" : "eabi32";
7081 default:
7082 gcc_unreachable ();
7086 /* Implement TARGET_ASM_FILE_START. */
7088 static void
7089 mips_file_start (void)
7091 default_file_start ();
7093 /* Generate a special section to describe the ABI switches used to
7094 produce the resultant binary. This is unnecessary on IRIX and
7095 causes unwanted warnings from the native linker. */
7096 if (!TARGET_IRIX)
7098 /* Record the ABI itself. Modern versions of binutils encode
7099 this information in the ELF header flags, but GDB needs the
7100 information in order to correctly debug binaries produced by
7101 older binutils. See the function mips_gdbarch_init in
7102 gdb/mips-tdep.c. */
7103 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7104 mips_mdebug_abi_name ());
7106 /* There is no ELF header flag to distinguish long32 forms of the
7107 EABI from long64 forms. Emit a special section to help tools
7108 such as GDB. Do the same for o64, which is sometimes used with
7109 -mlong64. */
7110 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7111 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7112 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7114 #ifdef HAVE_AS_GNU_ATTRIBUTE
7115 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7116 (TARGET_HARD_FLOAT_ABI
7117 ? (TARGET_DOUBLE_FLOAT
7118 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7119 #endif
7122 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7123 if (TARGET_ABICALLS)
7124 fprintf (asm_out_file, "\t.abicalls\n");
7126 if (flag_verbose_asm)
7127 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7128 ASM_COMMENT_START,
7129 mips_small_data_threshold, mips_arch_info->name, mips_isa);
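/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   The nested conditional above picks the .gnu_attribute 4 value that
   describes the floating-point ABI.  Rewriting it as a function makes
   the cases easier to read; the parameter names and the trailing
   comments are the editor's descriptions, not GCC identifiers.  */
static int
model_fp_abi_attribute (int hard_float_abi, int double_float,
                        int target_64bit, int float64)
{
  if (!hard_float_abi)
    return 3;                   /* soft-float ABI                        */
  if (!double_float)
    return 2;                   /* hard float, single precision only     */
  if (!target_64bit && float64)
    return 4;                   /* 32-bit GPRs with 64-bit FPRs (-mfp64) */
  return 1;                     /* conventional hard-float double        */
}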
7132 /* Make the last instruction frame-related and note that it performs
7133 the operation described by FRAME_PATTERN. */
7135 static void
7136 mips_set_frame_expr (rtx frame_pattern)
7138 rtx insn;
7140 insn = get_last_insn ();
7141 RTX_FRAME_RELATED_P (insn) = 1;
7142 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7143 frame_pattern,
7144 REG_NOTES (insn));
7147 /* Return a frame-related rtx that stores REG at MEM.
7148 REG must be a single register. */
7150 static rtx
7151 mips_frame_set (rtx mem, rtx reg)
7153 rtx set;
7155 /* If we're saving the return address register and the DWARF return
7156 address column differs from the hard register number, adjust the
7157 note reg to refer to the former. */
7158 if (REGNO (reg) == GP_REG_FIRST + 31
7159 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7160 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7162 set = gen_rtx_SET (VOIDmode, mem, reg);
7163 RTX_FRAME_RELATED_P (set) = 1;
7165 return set;
7168 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7169 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7170 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7171 static const unsigned char mips16e_s2_s8_regs[] = {
7172 30, 23, 22, 21, 20, 19, 18
7174 static const unsigned char mips16e_a0_a3_regs[] = {
7175 4, 5, 6, 7
7178 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7179 ordered from the uppermost in memory to the lowest in memory. */
7180 static const unsigned char mips16e_save_restore_regs[] = {
7181 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7184 /* Return the index of the lowest X in the range [0, SIZE) for which
7185 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7187 static unsigned int
7188 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7189 unsigned int size)
7191 unsigned int i;
7193 for (i = 0; i < size; i++)
7194 if (BITSET_P (mask, regs[i]))
7195 break;
7197 return i;
7200 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7201 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7202 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7203 is true for all indexes (X, SIZE). */
7205 static void
7206 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7207 unsigned int size, unsigned int *num_regs_ptr)
7209 unsigned int i;
7211 i = mips16e_find_first_register (*mask_ptr, regs, size);
7212 for (i++; i < size; i++)
7213 if (!BITSET_P (*mask_ptr, regs[i]))
7215 *num_regs_ptr += 1;
7216 *mask_ptr |= 1 << regs[i];
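/* EDITOR'S NOTE: illustrative sketch, not part of the original mips.c.
   A host-side rerun of mips16e_mask_registers on the s2-s8 list above:
   once any register in the list is requested, every register that
   follows it in the list is forced on as well, because SAVE/RESTORE
   encode these registers as contiguous ranges.  The model_* names are
   the editor's.  */
#include <assert.h>

#define MODEL_BITSET_P(mask, bit) (((mask) >> (bit)) & 1)

static void
model_mask_registers (unsigned int *mask, const unsigned char *regs,
                      unsigned int size, unsigned int *num_regs)
{
  unsigned int i;

  for (i = 0; i < size; i++)            /* find the first listed register */
    if (MODEL_BITSET_P (*mask, regs[i]))
      break;

  for (i++; i < size; i++)              /* force the rest of the range on */
    if (!MODEL_BITSET_P (*mask, regs[i]))
      {
        *num_regs += 1;
        *mask |= 1u << regs[i];
      }
}

int
main (void)
{
  static const unsigned char s2_s8[] = { 30, 23, 22, 21, 20, 19, 18 };
  unsigned int mask = 1u << 20;         /* caller asked for $20 only */
  unsigned int nregs = 1;

  model_mask_registers (&mask, s2_s8, 7, &nregs);
  assert (mask == ((1u << 20) | (1u << 19) | (1u << 18)));
  assert (nregs == 3);
  return 0;
}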
7220 /* Return a simplified form of X using the register values in REG_VALUES.
7221 REG_VALUES[R] is the last value assigned to hard register R, or null
7222 if R has not been modified.
7224 This function is rather limited, but is good enough for our purposes. */
7226 static rtx
7227 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7229 x = avoid_constant_pool_reference (x);
7231 if (UNARY_P (x))
7233 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7234 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7235 x0, GET_MODE (XEXP (x, 0)));
7238 if (ARITHMETIC_P (x))
7240 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7241 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7242 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7245 if (REG_P (x)
7246 && reg_values[REGNO (x)]
7247 && !rtx_unstable_p (reg_values[REGNO (x)]))
7248 return reg_values[REGNO (x)];
7250 return x;
7253 /* Return true if (set DEST SRC) stores an argument register into its
7254 caller-allocated save slot, storing the number of that argument
7255 register in *REGNO_PTR if so. REG_VALUES is as for
7256 mips16e_collect_propagate_value. */
7258 static bool
7259 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7260 unsigned int *regno_ptr)
7262 unsigned int argno, regno;
7263 HOST_WIDE_INT offset, required_offset;
7264 rtx addr, base;
7266 /* Check that this is a word-mode store. */
7267 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7268 return false;
7270 /* Check that the register being saved is an unmodified argument
7271 register. */
7272 regno = REGNO (src);
7273 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7274 return false;
7275 argno = regno - GP_ARG_FIRST;
7277 /* Check whether the address is an appropriate stack-pointer or
7278 frame-pointer access. */
7279 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7280 mips_split_plus (addr, &base, &offset);
7281 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7282 if (base == hard_frame_pointer_rtx)
7283 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7284 else if (base != stack_pointer_rtx)
7285 return false;
7286 if (offset != required_offset)
7287 return false;
7289 *regno_ptr = regno;
7290 return true;
7293 /* A subroutine of mips_expand_prologue, called only when generating
7294 MIPS16e SAVE instructions. Search the start of the function for any
7295 instructions that save argument registers into their caller-allocated
7296 save slots. Delete such instructions and return a value N such that
7297 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7298 instructions redundant. */
7300 static unsigned int
7301 mips16e_collect_argument_saves (void)
7303 rtx reg_values[FIRST_PSEUDO_REGISTER];
7304 rtx insn, next, set, dest, src;
7305 unsigned int nargs, regno;
7307 push_topmost_sequence ();
7308 nargs = 0;
7309 memset (reg_values, 0, sizeof (reg_values));
7310 for (insn = get_insns (); insn; insn = next)
7312 next = NEXT_INSN (insn);
7313 if (NOTE_P (insn))
7314 continue;
7316 if (!INSN_P (insn))
7317 break;
7319 set = PATTERN (insn);
7320 if (GET_CODE (set) != SET)
7321 break;
7323 dest = SET_DEST (set);
7324 src = SET_SRC (set);
7325 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7327 if (!BITSET_P (cfun->machine->frame.mask, regno))
7329 delete_insn (insn);
7330 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7333 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7334 reg_values[REGNO (dest)]
7335 = mips16e_collect_propagate_value (src, reg_values);
7336 else
7337 break;
7339 pop_topmost_sequence ();
7341 return nargs;
7344 /* Return a move between register REGNO and memory location SP + OFFSET.
7345 Make the move a load if RESTORE_P, otherwise make it a frame-related
7346 store. */
7348 static rtx
7349 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7350 unsigned int regno)
7352 rtx reg, mem;
7354 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7355 reg = gen_rtx_REG (SImode, regno);
7356 return (restore_p
7357 ? gen_rtx_SET (VOIDmode, reg, mem)
7358 : mips_frame_set (mem, reg));
7361 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7362 The instruction must:
7364 - Allocate or deallocate SIZE bytes in total; SIZE is known
7365 to be nonzero.
7367 - Save or restore as many registers in *MASK_PTR as possible.
7368 The instruction saves the first registers at the top of the
7369 allocated area, with the other registers below it.
7371 - Save NARGS argument registers above the allocated area.
7373 (NARGS is always zero if RESTORE_P.)
7375 The SAVE and RESTORE instructions cannot save and restore all general
7376 registers, so there may be some registers left over for the caller to
7377 handle. Destructively modify *MASK_PTR so that it contains the registers
7378 that still need to be saved or restored. The caller can save these
7379 registers in the memory immediately below *OFFSET_PTR, which is a
7380 byte offset from the bottom of the allocated stack area. */
7382 static rtx
7383 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7384 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7385 HOST_WIDE_INT size)
7387 rtx pattern, set;
7388 HOST_WIDE_INT offset, top_offset;
7389 unsigned int i, regno;
7390 int n;
7392 gcc_assert (cfun->machine->frame.num_fp == 0);
7394 /* Calculate the number of elements in the PARALLEL. We need one element
7395 for the stack adjustment, one for each argument register save, and one
7396 for each additional register move. */
7397 n = 1 + nargs;
7398 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7399 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7400 n++;
7402 /* Create the final PARALLEL. */
7403 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7404 n = 0;
7406 /* Add the stack pointer adjustment. */
7407 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7408 plus_constant (stack_pointer_rtx,
7409 restore_p ? size : -size));
7410 RTX_FRAME_RELATED_P (set) = 1;
7411 XVECEXP (pattern, 0, n++) = set;
7413 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7414 top_offset = restore_p ? size : 0;
7416 /* Save the arguments. */
7417 for (i = 0; i < nargs; i++)
7419 offset = top_offset + i * UNITS_PER_WORD;
7420 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7421 XVECEXP (pattern, 0, n++) = set;
7424 /* Then fill in the other register moves. */
7425 offset = top_offset;
7426 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7428 regno = mips16e_save_restore_regs[i];
7429 if (BITSET_P (*mask_ptr, regno))
7431 offset -= UNITS_PER_WORD;
7432 set = mips16e_save_restore_reg (restore_p, offset, regno);
7433 XVECEXP (pattern, 0, n++) = set;
7434 *mask_ptr &= ~(1 << regno);
7438 /* Tell the caller what offset it should use for the remaining registers. */
7439 *offset_ptr = size + (offset - top_offset);
7441 gcc_assert (n == XVECLEN (pattern, 0));
7443 return pattern;
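/* Illustrative sketch, assuming a SAVE (RESTORE_P == false) of
   SIZE == 32 with NARGS == 0 and a *MASK_PTR containing $16, $17
   and $31.  With stack offsets taken relative to the old $sp, the
   PARALLEL built above would look roughly like:

       (parallel [(set $sp (plus $sp -32))
                  (set (mem ($sp - 4))  (reg $31))
                  (set (mem ($sp - 8))  (reg $17))
                  (set (mem ($sp - 12)) (reg $16))])

   The exact store order depends on mips16e_save_restore_regs, which is
   defined elsewhere; on return, *OFFSET_PTR would be 32 - 12 = 20, the
   offset below which the caller saves any leftover registers.  */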
7446 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7447 pointer. Return true if PATTERN matches the kind of instruction
7448 generated by mips16e_build_save_restore. If INFO is nonnull,
7449 initialize it when returning true. */
7451 bool
7452 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7453 struct mips16e_save_restore_info *info)
7455 unsigned int i, nargs, mask, extra;
7456 HOST_WIDE_INT top_offset, save_offset, offset;
7457 rtx set, reg, mem, base;
7458 int n;
7460 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7461 return false;
7463 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7464 top_offset = adjust > 0 ? adjust : 0;
7466 /* Interpret all other members of the PARALLEL. */
7467 save_offset = top_offset - UNITS_PER_WORD;
7468 mask = 0;
7469 nargs = 0;
7470 i = 0;
7471 for (n = 1; n < XVECLEN (pattern, 0); n++)
7473 /* Check that we have a SET. */
7474 set = XVECEXP (pattern, 0, n);
7475 if (GET_CODE (set) != SET)
7476 return false;
7478 /* Check that the SET is a load (if restoring) or a store
7479 (if saving). */
7480 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7481 if (!MEM_P (mem))
7482 return false;
7484 /* Check that the address is the sum of the stack pointer and a
7485 possibly-zero constant offset. */
7486 mips_split_plus (XEXP (mem, 0), &base, &offset);
7487 if (base != stack_pointer_rtx)
7488 return false;
7490 /* Check that SET's other operand is a register. */
7491 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7492 if (!REG_P (reg))
7493 return false;
7495 /* Check for argument saves. */
7496 if (offset == top_offset + nargs * UNITS_PER_WORD
7497 && REGNO (reg) == GP_ARG_FIRST + nargs)
7498 nargs++;
7499 else if (offset == save_offset)
7501 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7502 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7503 return false;
7505 mask |= 1 << REGNO (reg);
7506 save_offset -= UNITS_PER_WORD;
7508 else
7509 return false;
7512 /* Check that the restrictions on register ranges are met. */
7513 extra = 0;
7514 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7515 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7516 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7517 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7518 if (extra != 0)
7519 return false;
7521 /* Make sure that the topmost argument register is not saved twice.
7522 The checks above ensure that the same is then true for the other
7523 argument registers. */
7524 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7525 return false;
7527 /* Pass back information, if requested. */
7528 if (info)
7530 info->nargs = nargs;
7531 info->mask = mask;
7532 info->size = (adjust > 0 ? adjust : -adjust);
7535 return true;
7538 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7539 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7540 the null terminator. */
7542 static char *
7543 mips16e_add_register_range (char *s, unsigned int min_reg,
7544 unsigned int max_reg)
7546 if (min_reg != max_reg)
7547 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7548 else
7549 s += sprintf (s, ",%s", reg_names[min_reg]);
7550 return s;
7553 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7554 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7556 const char *
7557 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7559 static char buffer[300];
7561 struct mips16e_save_restore_info info;
7562 unsigned int i, end;
7563 char *s;
7565 /* Parse the pattern. */
7566 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7567 gcc_unreachable ();
7569 /* Add the mnemonic. */
7570 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7571 s += strlen (s);
7573 /* Save the arguments. */
7574 if (info.nargs > 1)
7575 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7576 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7577 else if (info.nargs == 1)
7578 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7580 /* Emit the amount of stack space to allocate or deallocate. */
7581 s += sprintf (s, "%d", (int) info.size);
7583 /* Save or restore $16. */
7584 if (BITSET_P (info.mask, 16))
7585 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7587 /* Save or restore $17. */
7588 if (BITSET_P (info.mask, 17))
7589 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7591 /* Save or restore registers in the range $s2...$s8, which
7592 mips16e_s2_s8_regs lists in decreasing order. Note that this
7593 is a software register range; the hardware registers are not
7594 numbered consecutively. */
7595 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7596 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7597 if (i < end)
7598 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7599 mips16e_s2_s8_regs[i]);
7601 /* Save or restore registers in the range $a0...$a3. */
7602 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7603 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7604 if (i < end)
7605 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7606 mips16e_a0_a3_regs[end - 1]);
7608 /* Save or restore $31. */
7609 if (BITSET_P (info.mask, 31))
7610 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7612 return buffer;
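/* Illustrative sketch, assuming a 32-byte frame that saves $16, $17
   and $31 with no argument registers: the buffer built above would
   read roughly

       save	32,$16,$17,$31

   and the corresponding epilogue instruction would be

       restore	32,$16,$17,$31

   A register-range argument such as ",$18-$20" is appended by
   mips16e_add_register_range when the matching mask bits are set.  */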
7615 /* Return true if the current function has an insn that implicitly
7616 refers to $gp. */
7618 static bool
7619 mips_function_has_gp_insn (void)
7621 /* Don't bother rechecking if we found one last time. */
7622 if (!cfun->machine->has_gp_insn_p)
7624 rtx insn;
7626 push_topmost_sequence ();
7627 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7628 if (USEFUL_INSN_P (insn)
7629 && (get_attr_got (insn) != GOT_UNSET
7630 || mips_small_data_pattern_p (PATTERN (insn))))
7632 cfun->machine->has_gp_insn_p = true;
7633 break;
7635 pop_topmost_sequence ();
7637 return cfun->machine->has_gp_insn_p;
7640 /* Return the register that should be used as the global pointer
7641 within this function. Return 0 if the function doesn't need
7642 a global pointer. */
7644 static unsigned int
7645 mips_global_pointer (void)
7647 unsigned int regno;
7649 /* $gp is always available unless we're using a GOT. */
7650 if (!TARGET_USE_GOT)
7651 return GLOBAL_POINTER_REGNUM;
7653 /* We must always provide $gp when it is used implicitly. */
7654 if (!TARGET_EXPLICIT_RELOCS)
7655 return GLOBAL_POINTER_REGNUM;
7657 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7658 a valid gp. */
7659 if (crtl->profile)
7660 return GLOBAL_POINTER_REGNUM;
7662 /* If the function has a nonlocal goto, $gp must hold the correct
7663 global pointer for the target function. */
7664 if (crtl->has_nonlocal_goto)
7665 return GLOBAL_POINTER_REGNUM;
7667 /* If the gp is never referenced, there's no need to initialize it.
7668 Note that reload can sometimes introduce constant pool references
7669 into a function that otherwise didn't need them. For example,
7670 suppose we have an instruction like:
7672 (set (reg:DF R1) (float:DF (reg:SI R2)))
7674 If R2 turns out to be a constant such as 1, the instruction may have a
7675 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7676 using this constant if R2 doesn't get allocated to a register.
7678 In cases like these, reload will have added the constant to the pool
7679 but no instruction will yet refer to it. */
7680 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7681 && !crtl->uses_const_pool
7682 && !mips_function_has_gp_insn ())
7683 return 0;
7685 /* We need a global pointer, but perhaps we can use a call-clobbered
7686 register instead of $gp. */
7687 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7688 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7689 if (!df_regs_ever_live_p (regno)
7690 && call_really_used_regs[regno]
7691 && !fixed_regs[regno]
7692 && regno != PIC_FUNCTION_ADDR_REGNUM)
7693 return regno;
7695 return GLOBAL_POINTER_REGNUM;
7698 /* Return true if the current function returns its value in a floating-point
7699 register in MIPS16 mode. */
7701 static bool
7702 mips16_cfun_returns_in_fpr_p (void)
7704 tree return_type = DECL_RESULT (current_function_decl);
7705 return (TARGET_MIPS16
7706 && TARGET_HARD_FLOAT_ABI
7707 && !aggregate_value_p (return_type, current_function_decl)
7708 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7711 /* Return true if the current function must save register REGNO. */
7713 static bool
7714 mips_save_reg_p (unsigned int regno)
7716 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7717 if we have not chosen a call-clobbered substitute. */
7718 if (regno == GLOBAL_POINTER_REGNUM)
7719 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7721 /* Check call-saved registers. */
7722 if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
7723 && !call_really_used_regs[regno])
7724 return true;
7726 /* Save both registers in an FPR pair if either one is used. This is
7727 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7728 register to be used without the even register. */
7729 if (FP_REG_P (regno)
7730 && MAX_FPRS_PER_FMT == 2
7731 && df_regs_ever_live_p (regno + 1)
7732 && !call_really_used_regs[regno + 1])
7733 return true;
7735 /* We need to save the old frame pointer before setting up a new one. */
7736 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7737 return true;
7739 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7740 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7741 return true;
7743 /* We need to save the incoming return address if it is ever clobbered
7744 within the function, if __builtin_eh_return is being used to set a
7745 different return address, or if a stub is being used to return a
7746 value in FPRs. */
7747 if (regno == GP_REG_FIRST + 31
7748 && (df_regs_ever_live_p (regno)
7749 || crtl->calls_eh_return
7750 || mips16_cfun_returns_in_fpr_p ()))
7751 return true;
7753 return false;
7756 /* Populate the current function's mips_frame_info structure.
7758 MIPS stack frames look like:
7760      +-------------------------------+
7762      |  incoming stack arguments     |
7764      +-------------------------------+
7766      |  caller-allocated save area   |
7767    A |  for register arguments       |
7769      +-------------------------------+ <-- incoming stack pointer
7771      |  callee-allocated save area   |
7772    B |  for arguments that are       |
7773      |  split between registers and  |
7774      |  the stack                    |
7776      +-------------------------------+ <-- arg_pointer_rtx
7778    C |  callee-allocated save area   |
7779      |  for register varargs         |
7781      +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
7782      |                               |       + UNITS_PER_HWFPVALUE
7783      |  FPR save area                |
7785      +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
7786      |                               |       + UNITS_PER_WORD
7787      |  GPR save area                |
7789      +-------------------------------+
7790      |                               | \
7791      |  local variables              |  | var_size
7792      |                               | /
7793      +-------------------------------+
7794      |                               | \
7795      |  $gp save area                |  | cprestore_size
7796      |                               | /
7797    P +-------------------------------+ <-- hard_frame_pointer_rtx for
7798      |                               |       MIPS16 code
7799      |  outgoing stack arguments     |
7801      +-------------------------------+
7803      |  caller-allocated save area   |
7804      |  for register arguments       |
7806      +-------------------------------+ <-- stack_pointer_rtx
7807                                            frame_pointer_rtx
7808                                            hard_frame_pointer_rtx for
7809                                              non-MIPS16 code.
7811 At least two of A, B and C will be empty.
7813 Dynamic stack allocations such as alloca insert data at point P.
7814 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
7815 hard_frame_pointer_rtx unchanged. */
7817 static void
7818 mips_compute_frame_info (void)
7820 struct mips_frame_info *frame;
7821 HOST_WIDE_INT offset, size;
7822 unsigned int regno, i;
7824 frame = &cfun->machine->frame;
7825 memset (frame, 0, sizeof (*frame));
7826 size = get_frame_size ();
7828 cfun->machine->global_pointer = mips_global_pointer ();
7830 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
7831 area and the $gp save slot. This area isn't needed in leaf functions,
7832 but if the target-independent frame size is nonzero, we're committed
7833 to allocating it anyway. */
7834 if (size == 0 && current_function_is_leaf)
7836 /* The MIPS 3.0 linker does not like functions that dynamically
7837 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7838 looks like we are trying to create a second frame pointer to the
7839 function, so allocate some stack space to make it happy. */
7840 if (cfun->calls_alloca)
7841 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
7842 else
7843 frame->args_size = 0;
7844 frame->cprestore_size = 0;
7846 else
7848 frame->args_size = crtl->outgoing_args_size;
7849 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
7851 offset = frame->args_size + frame->cprestore_size;
7853 /* Move above the local variables. */
7854 frame->var_size = MIPS_STACK_ALIGN (size);
7855 offset += frame->var_size;
7857 /* Find out which GPRs we need to save. */
7858 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7859 if (mips_save_reg_p (regno))
7861 frame->num_gp++;
7862 frame->mask |= 1 << (regno - GP_REG_FIRST);
7865 /* If this function calls eh_return, we must also save and restore the
7866 EH data registers. */
7867 if (crtl->calls_eh_return)
7868 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
7870 frame->num_gp++;
7871 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
7874 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7875 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7876 save all later registers too. */
7877 if (GENERATE_MIPS16E_SAVE_RESTORE)
7879 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
7880 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
7881 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
7882 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
7885 /* Move above the GPR save area. */
7886 if (frame->num_gp > 0)
7888 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
7889 frame->gp_sp_offset = offset - UNITS_PER_WORD;
7892 /* Find out which FPRs we need to save. This loop must iterate over
7893 the same space as its companion in mips_for_each_saved_reg. */
7894 if (TARGET_HARD_FLOAT)
7895 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
7896 if (mips_save_reg_p (regno))
7898 frame->num_fp += MAX_FPRS_PER_FMT;
7899 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
7902 /* Move above the FPR save area. */
7903 if (frame->num_fp > 0)
7905 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
7906 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
7909 /* Move above the callee-allocated varargs save area. */
7910 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7911 frame->arg_pointer_offset = offset;
7913 /* Move above the callee-allocated area for pretend stack arguments. */
7914 offset += crtl->args.pretend_args_size;
7915 frame->total_size = offset;
7917 /* Work out the offsets of the save areas from the top of the frame. */
7918 if (frame->gp_sp_offset > 0)
7919 frame->gp_save_offset = frame->gp_sp_offset - offset;
7920 if (frame->fp_sp_offset > 0)
7921 frame->fp_save_offset = frame->fp_sp_offset - offset;
7923 /* MIPS16 code offsets the frame pointer by the size of the outgoing
7924 arguments. This tends to increase the chances of using unextended
7925 instructions for local variables and incoming arguments. */
7926 if (TARGET_MIPS16)
7927 frame->hard_frame_pointer_offset = frame->args_size;
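/* Worked example (illustrative only), assuming o32 with 4-byte words,
   8-byte stack alignment and an 8-byte cprestore slot: a non-leaf
   function with 16 bytes of outgoing arguments, 32 bytes of aligned
   locals and three saved GPRs ($16, $17 and $31) gives

       offset  = 16 + 8                   =  24   (args + cprestore)
       offset += 32                       =  56   (locals)
       offset += MIPS_STACK_ALIGN (3 * 4) =  72   (GPR save area, 12 -> 16)

   so total_size == 72, gp_sp_offset == 72 - 4 == 68 and
   gp_save_offset == 68 - 72 == -4.  */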
7930 /* Return the style of GP load sequence that is being used for the
7931 current function. */
7933 enum mips_loadgp_style
7934 mips_current_loadgp_style (void)
7936 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7937 return LOADGP_NONE;
7939 if (TARGET_RTP_PIC)
7940 return LOADGP_RTP;
7942 if (TARGET_ABSOLUTE_ABICALLS)
7943 return LOADGP_ABSOLUTE;
7945 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7948 /* Implement FRAME_POINTER_REQUIRED. */
7950 bool
7951 mips_frame_pointer_required (void)
7953 /* If the function contains dynamic stack allocations, we need to
7954 use the frame pointer to access the static parts of the frame. */
7955 if (cfun->calls_alloca)
7956 return true;
7958 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
7959 reload may be unable to compute the address of a local variable,
7960 since there is no way to add a large constant to the stack pointer
7961 without using a second temporary register. */
7962 if (TARGET_MIPS16)
7964 mips_compute_frame_info ();
7965 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
7966 return true;
7969 return false;
7972 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
7973 or argument pointer. TO is either the stack pointer or hard frame
7974 pointer. */
7976 HOST_WIDE_INT
7977 mips_initial_elimination_offset (int from, int to)
7979 HOST_WIDE_INT offset;
7981 mips_compute_frame_info ();
7983 /* Set OFFSET to the offset from the soft frame pointer, which is also
7984 the offset from the end-of-prologue stack pointer. */
7985 switch (from)
7987 case FRAME_POINTER_REGNUM:
7988 offset = 0;
7989 break;
7991 case ARG_POINTER_REGNUM:
7992 offset = cfun->machine->frame.arg_pointer_offset;
7993 break;
7995 default:
7996 gcc_unreachable ();
7999 if (to == HARD_FRAME_POINTER_REGNUM)
8000 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8002 return offset;
8005 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8007 static void
8008 mips_extra_live_on_entry (bitmap regs)
8010 if (TARGET_USE_GOT)
8012 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8013 the global pointer. */
8014 if (!TARGET_ABSOLUTE_ABICALLS)
8015 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8017 /* See the comment above load_call<mode> for details. */
8018 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8022 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8023 previous frame. */
8026 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8028 if (count != 0)
8029 return const0_rtx;
8031 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8034 /* Emit code to change the current function's return address to
8035 ADDRESS. SCRATCH is available as a scratch register, if needed.
8036 ADDRESS and SCRATCH are both word-mode GPRs. */
8038 void
8039 mips_set_return_address (rtx address, rtx scratch)
8041 rtx slot_address;
8043 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8044 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8045 cfun->machine->frame.gp_sp_offset);
8046 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8049 /* Restore $gp from its save slot. Valid only when using o32 or
8050 o64 abicalls. */
8052 void
8053 mips_restore_gp (void)
8055 rtx base, address;
8057 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8059 base = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
8060 address = mips_add_offset (pic_offset_table_rtx, base,
8061 crtl->outgoing_args_size);
8062 mips_emit_move (pic_offset_table_rtx, gen_frame_mem (Pmode, address));
8063 if (!TARGET_EXPLICIT_RELOCS)
8064 emit_insn (gen_blockage ());
8067 /* A function to save or store a register. The first argument is the
8068 register and the second is the stack slot. */
8069 typedef void (*mips_save_restore_fn) (rtx, rtx);
8071 /* Use FN to save or restore register REGNO. MODE is the register's
8072 mode and OFFSET is the offset of its save slot from the current
8073 stack pointer. */
8075 static void
8076 mips_save_restore_reg (enum machine_mode mode, int regno,
8077 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8079 rtx mem;
8081 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8082 fn (gen_rtx_REG (mode, regno), mem);
8085 /* Call FN for each register that is saved by the current function.
8086 SP_OFFSET is the offset of the current stack pointer from the start
8087 of the frame. */
8089 static void
8090 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8092 enum machine_mode fpr_mode;
8093 HOST_WIDE_INT offset;
8094 int regno;
8096 /* Save registers starting from high to low. Debuggers prefer at least
8097 the return register to be stored at func+4; this also lets us avoid a
8098 nop in the epilogue if at least one register is reloaded in addition
8099 to the return address. */
8100 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8101 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8102 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8104 mips_save_restore_reg (word_mode, regno, offset, fn);
8105 offset -= UNITS_PER_WORD;
8108 /* This loop must iterate over the same space as its companion in
8109 mips_compute_frame_info. */
8110 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8111 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8112 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8113 regno >= FP_REG_FIRST;
8114 regno -= MAX_FPRS_PER_FMT)
8115 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8117 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8118 offset -= GET_MODE_SIZE (fpr_mode);
8122 /* If we're generating n32 or n64 abicalls, and the current function
8123 does not use $28 as its global pointer, emit a cplocal directive.
8124 Use pic_offset_table_rtx as the argument to the directive. */
8126 static void
8127 mips_output_cplocal (void)
8129 if (!TARGET_EXPLICIT_RELOCS
8130 && cfun->machine->global_pointer > 0
8131 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8132 output_asm_insn (".cplocal %+", 0);
8135 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8137 static void
8138 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8140 const char *fnname;
8142 #ifdef SDB_DEBUGGING_INFO
8143 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8144 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8145 #endif
8147 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8148 floating-point arguments. */
8149 if (TARGET_MIPS16
8150 && TARGET_HARD_FLOAT_ABI
8151 && crtl->args.info.fp_code != 0)
8152 mips16_build_function_stub ();
8154 /* Select the MIPS16 mode for this function. */
8155 if (TARGET_MIPS16)
8156 fprintf (file, "\t.set\tmips16\n");
8157 else
8158 fprintf (file, "\t.set\tnomips16\n");
8160 if (!FUNCTION_NAME_ALREADY_DECLARED)
8162 /* Get the function name the same way that toplev.c does before calling
8163 assemble_start_function. This is needed so that the name used here
8164 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8165 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8167 if (!flag_inhibit_size_directive)
8169 fputs ("\t.ent\t", file);
8170 assemble_name (file, fnname);
8171 fputs ("\n", file);
8174 assemble_name (file, fnname);
8175 fputs (":\n", file);
8178 /* Stop mips_file_end from treating this function as external. */
8179 if (TARGET_IRIX && mips_abi == ABI_32)
8180 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8182 /* Output MIPS-specific frame information. */
8183 if (!flag_inhibit_size_directive)
8185 const struct mips_frame_info *frame;
8187 frame = &cfun->machine->frame;
8189 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8190 fprintf (file,
8191 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8192 "# vars= " HOST_WIDE_INT_PRINT_DEC
8193 ", regs= %d/%d"
8194 ", args= " HOST_WIDE_INT_PRINT_DEC
8195 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8196 reg_names[frame_pointer_needed
8197 ? HARD_FRAME_POINTER_REGNUM
8198 : STACK_POINTER_REGNUM],
8199 (frame_pointer_needed
8200 ? frame->total_size - frame->hard_frame_pointer_offset
8201 : frame->total_size),
8202 reg_names[GP_REG_FIRST + 31],
8203 frame->var_size,
8204 frame->num_gp, frame->num_fp,
8205 frame->args_size,
8206 frame->cprestore_size);
8208 /* .mask MASK, OFFSET. */
8209 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8210 frame->mask, frame->gp_save_offset);
8212 /* .fmask MASK, OFFSET. */
8213 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8214 frame->fmask, frame->fp_save_offset);
8217 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8218 Also emit the ".set noreorder; .set nomacro" sequence for functions
8219 that need it. */
8220 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8222 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
8223 if (!cfun->machine->all_noreorder_p)
8224 output_asm_insn ("%(.cpload\t%^%)", 0);
8225 else
8226 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8228 else if (cfun->machine->all_noreorder_p)
8229 output_asm_insn ("%(%<", 0);
8231 /* Tell the assembler which register we're using as the global
8232 pointer. This is needed for thunks, since they can use either
8233 explicit relocs or assembler macros. */
8234 mips_output_cplocal ();
8237 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
8239 static void
8240 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8241 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8243 /* Reinstate the normal $gp. */
8244 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8245 mips_output_cplocal ();
8247 if (cfun->machine->all_noreorder_p)
8249 /* Avoid using %>%) since it adds excess whitespace. */
8250 output_asm_insn (".set\tmacro", 0);
8251 output_asm_insn (".set\treorder", 0);
8252 set_noreorder = set_nomacro = 0;
8255 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8257 const char *fnname;
8259 /* Get the function name the same way that toplev.c does before calling
8260 assemble_start_function. This is needed so that the name used here
8261 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8262 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8263 fputs ("\t.end\t", file);
8264 assemble_name (file, fnname);
8265 fputs ("\n", file);
8269 /* Save register REG to MEM. Make the instruction frame-related. */
8271 static void
8272 mips_save_reg (rtx reg, rtx mem)
8274 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8276 rtx x1, x2;
8278 if (mips_split_64bit_move_p (mem, reg))
8279 mips_split_doubleword_move (mem, reg);
8280 else
8281 mips_emit_move (mem, reg);
8283 x1 = mips_frame_set (mips_subword (mem, false),
8284 mips_subword (reg, false));
8285 x2 = mips_frame_set (mips_subword (mem, true),
8286 mips_subword (reg, true));
8287 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8289 else
8291 if (TARGET_MIPS16
8292 && REGNO (reg) != GP_REG_FIRST + 31
8293 && !M16_REG_P (REGNO (reg)))
8295 /* Save a non-MIPS16 register by moving it through a temporary.
8296 We don't need to do this for $31 since there's a special
8297 instruction for it. */
8298 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8299 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8301 else
8302 mips_emit_move (mem, reg);
8304 mips_set_frame_expr (mips_frame_set (mem, reg));
8308 /* The __gnu_local_gp symbol. */
8310 static GTY(()) rtx mips_gnu_local_gp;
8312 /* If we're generating n32 or n64 abicalls, emit instructions
8313 to set up the global pointer. */
8315 static void
8316 mips_emit_loadgp (void)
8318 rtx addr, offset, incoming_address, base, index, pic_reg;
8320 pic_reg = pic_offset_table_rtx;
8321 switch (mips_current_loadgp_style ())
8323 case LOADGP_ABSOLUTE:
8324 if (mips_gnu_local_gp == NULL)
8326 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8327 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8329 emit_insn (Pmode == SImode
8330 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
8331 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
8332 break;
8334 case LOADGP_NEWABI:
8335 addr = XEXP (DECL_RTL (current_function_decl), 0);
8336 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8337 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8338 emit_insn (Pmode == SImode
8339 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
8340 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
8341 if (!TARGET_EXPLICIT_RELOCS)
8342 emit_insn (gen_loadgp_blockage ());
8343 break;
8345 case LOADGP_RTP:
8346 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8347 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8348 emit_insn (Pmode == SImode
8349 ? gen_loadgp_rtp_si (pic_reg, base, index)
8350 : gen_loadgp_rtp_di (pic_reg, base, index));
8351 if (!TARGET_EXPLICIT_RELOCS)
8352 emit_insn (gen_loadgp_blockage ());
8353 break;
8355 default:
8356 break;
8360 /* Expand the "prologue" pattern. */
8362 void
8363 mips_expand_prologue (void)
8365 const struct mips_frame_info *frame;
8366 HOST_WIDE_INT size;
8367 unsigned int nargs;
8368 rtx insn;
8370 if (cfun->machine->global_pointer > 0)
8371 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8373 frame = &cfun->machine->frame;
8374 size = frame->total_size;
8376 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8377 bytes beforehand; this is enough to cover the register save area
8378 without going out of range. */
8379 if ((frame->mask | frame->fmask) != 0)
8381 HOST_WIDE_INT step1;
8383 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8384 if (GENERATE_MIPS16E_SAVE_RESTORE)
8386 HOST_WIDE_INT offset;
8387 unsigned int mask, regno;
8389 /* Try to merge argument stores into the save instruction. */
8390 nargs = mips16e_collect_argument_saves ();
8392 /* Build the save instruction. */
8393 mask = frame->mask;
8394 insn = mips16e_build_save_restore (false, &mask, &offset,
8395 nargs, step1);
8396 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8397 size -= step1;
8399 /* Check if we need to save other registers. */
8400 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8401 if (BITSET_P (mask, regno - GP_REG_FIRST))
8403 offset -= UNITS_PER_WORD;
8404 mips_save_restore_reg (word_mode, regno,
8405 offset, mips_save_reg);
8408 else
8410 insn = gen_add3_insn (stack_pointer_rtx,
8411 stack_pointer_rtx,
8412 GEN_INT (-step1));
8413 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8414 size -= step1;
8415 mips_for_each_saved_reg (size, mips_save_reg);
8419 /* Allocate the rest of the frame. */
8420 if (size > 0)
8422 if (SMALL_OPERAND (-size))
8423 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8424 stack_pointer_rtx,
8425 GEN_INT (-size)))) = 1;
8426 else
8428 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8429 if (TARGET_MIPS16)
8431 /* There are no instructions to add a register to or subtract
8432 one from the stack pointer, so use the frame pointer as a
8433 temporary. We should always be using a frame pointer
8434 in this case anyway. */
8435 gcc_assert (frame_pointer_needed);
8436 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8437 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8438 hard_frame_pointer_rtx,
8439 MIPS_PROLOGUE_TEMP (Pmode)));
8440 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8442 else
8443 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8444 stack_pointer_rtx,
8445 MIPS_PROLOGUE_TEMP (Pmode)));
8447 /* Describe the combined effect of the previous instructions. */
8448 mips_set_frame_expr
8449 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8450 plus_constant (stack_pointer_rtx, -size)));
8454 /* Set up the frame pointer, if we're using one. */
8455 if (frame_pointer_needed)
8457 HOST_WIDE_INT offset;
8459 offset = frame->hard_frame_pointer_offset;
8460 if (offset == 0)
8462 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8463 RTX_FRAME_RELATED_P (insn) = 1;
8465 else if (SMALL_OPERAND (offset))
8467 insn = gen_add3_insn (hard_frame_pointer_rtx,
8468 stack_pointer_rtx, GEN_INT (offset));
8469 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8471 else
8473 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8474 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8475 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8476 hard_frame_pointer_rtx,
8477 MIPS_PROLOGUE_TEMP (Pmode)));
8478 mips_set_frame_expr
8479 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8480 plus_constant (stack_pointer_rtx, offset)));
8484 mips_emit_loadgp ();
8486 /* Initialize the $gp save slot. */
8487 if (frame->cprestore_size > 0)
8488 emit_insn (gen_cprestore (GEN_INT (crtl->outgoing_args_size)));
8490 /* If we are profiling, make sure no instructions are scheduled before
8491 the call to mcount. */
8492 if (crtl->profile)
8493 emit_insn (gen_blockage ());
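/* Illustrative sketch, assuming a 72-byte o32 frame that saves $16,
   $17 and $31 and needs no $gp setup: the prologue expanded above
   would typically assemble to something like

       addiu	$sp,$sp,-72
       sw	$31,68($sp)
       sw	$17,64($sp)
       sw	$16,60($sp)

   Larger frames split the adjustment: the first step is capped at
   MIPS_MAX_FIRST_STACK_STEP and the remainder is subtracted afterwards,
   via a temporary register when it is not a SMALL_OPERAND.  */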
8496 /* Emit instructions to restore register REG from slot MEM. */
8498 static void
8499 mips_restore_reg (rtx reg, rtx mem)
8501 /* There's no MIPS16 instruction to load $31 directly. Load into
8502 $7 instead and adjust the return insn appropriately. */
8503 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8504 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
8506 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8508 /* Can't restore directly; move through a temporary. */
8509 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8510 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8512 else
8513 mips_emit_move (reg, mem);
8516 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
8517 says which. */
8519 void
8520 mips_expand_epilogue (bool sibcall_p)
8522 const struct mips_frame_info *frame;
8523 HOST_WIDE_INT step1, step2;
8524 rtx base, target;
8526 if (!sibcall_p && mips_can_use_return_insn ())
8528 emit_jump_insn (gen_return ());
8529 return;
8532 /* In MIPS16 mode, if the return value should go into a floating-point
8533 register, we need to call a helper routine to copy it over. */
8534 if (mips16_cfun_returns_in_fpr_p ())
8535 mips16_copy_fpr_return_value ();
8537 /* Split the frame into two. STEP1 is the amount of stack we should
8538 deallocate before restoring the registers. STEP2 is the amount we
8539 should deallocate afterwards.
8541 Start off by assuming that no registers need to be restored. */
8542 frame = &cfun->machine->frame;
8543 step1 = frame->total_size;
8544 step2 = 0;
8546 /* Work out which register holds the frame address. */
8547 if (!frame_pointer_needed)
8548 base = stack_pointer_rtx;
8549 else
8551 base = hard_frame_pointer_rtx;
8552 step1 -= frame->hard_frame_pointer_offset;
8555 /* If we need to restore registers, deallocate as much stack as
8556 possible in the second step without going out of range. */
8557 if ((frame->mask | frame->fmask) != 0)
8559 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8560 step1 -= step2;
8563 /* Set TARGET to BASE + STEP1. */
8564 target = base;
8565 if (step1 > 0)
8567 rtx adjust;
8569 /* Get an rtx for STEP1 that we can add to BASE. */
8570 adjust = GEN_INT (step1);
8571 if (!SMALL_OPERAND (step1))
8573 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8574 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8577 /* Normal mode code can copy the result straight into $sp. */
8578 if (!TARGET_MIPS16)
8579 target = stack_pointer_rtx;
8581 emit_insn (gen_add3_insn (target, base, adjust));
8584 /* Copy TARGET into the stack pointer. */
8585 if (target != stack_pointer_rtx)
8586 mips_emit_move (stack_pointer_rtx, target);
8588 /* If we're using addressing macros, $gp is implicitly used by all
8589 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8590 from the stack. */
8591 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8592 emit_insn (gen_blockage ());
8594 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
8596 unsigned int regno, mask;
8597 HOST_WIDE_INT offset;
8598 rtx restore;
8600 /* Generate the restore instruction. */
8601 mask = frame->mask;
8602 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8604 /* Restore any other registers manually. */
8605 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8606 if (BITSET_P (mask, regno - GP_REG_FIRST))
8608 offset -= UNITS_PER_WORD;
8609 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
8612 /* Restore the remaining registers and deallocate the final bit
8613 of the frame. */
8614 emit_insn (restore);
8616 else
8618 /* Restore the registers. */
8619 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
8621 /* Deallocate the final bit of the frame. */
8622 if (step2 > 0)
8623 emit_insn (gen_add3_insn (stack_pointer_rtx,
8624 stack_pointer_rtx,
8625 GEN_INT (step2)));
8628 /* Add in the __builtin_eh_return stack adjustment. We need to
8629 use a temporary in MIPS16 code. */
8630 if (crtl->calls_eh_return)
8632 if (TARGET_MIPS16)
8634 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8635 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8636 MIPS_EPILOGUE_TEMP (Pmode),
8637 EH_RETURN_STACKADJ_RTX));
8638 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8640 else
8641 emit_insn (gen_add3_insn (stack_pointer_rtx,
8642 stack_pointer_rtx,
8643 EH_RETURN_STACKADJ_RTX));
8646 if (!sibcall_p)
8648 unsigned int regno;
8650 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8651 path will restore the return address into $7 rather than $31. */
8652 if (TARGET_MIPS16
8653 && !GENERATE_MIPS16E_SAVE_RESTORE
8654 && BITSET_P (frame->mask, 31))
8655 regno = GP_REG_FIRST + 7;
8656 else
8657 regno = GP_REG_FIRST + 31;
8658 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
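/* Illustrative sketch, assuming the same 72-byte o32 frame with $16,
   $17 and $31 saved, no frame pointer and no __builtin_eh_return
   adjustment: the epilogue expanded above would typically assemble to

       lw	$31,68($sp)
       lw	$17,64($sp)
       lw	$16,60($sp)
       addiu	$sp,$sp,72
       jr	$31

   In MIPS16 mode without SAVE/RESTORE, the return address is reloaded
   into $7 and the jump uses that register instead, as noted above.  */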
8662 /* Return nonzero if this function is known to have a null epilogue.
8663 This allows the optimizer to omit jumps to jumps if no stack
8664 was created. */
8666 bool
8667 mips_can_use_return_insn (void)
8669 if (!reload_completed)
8670 return false;
8672 if (crtl->profile)
8673 return false;
8675 /* In MIPS16 mode, a function that returns a floating-point value
8676 needs to arrange to copy the return value into the floating-point
8677 registers. */
8678 if (mips16_cfun_returns_in_fpr_p ())
8679 return false;
8681 return cfun->machine->frame.total_size == 0;
8684 /* Return true if register REGNO can store a value of mode MODE.
8685 The result of this function is cached in mips_hard_regno_mode_ok. */
8687 static bool
8688 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
8690 unsigned int size;
8691 enum mode_class class;
8693 if (mode == CCV2mode)
8694 return (ISA_HAS_8CC
8695 && ST_REG_P (regno)
8696 && (regno - ST_REG_FIRST) % 2 == 0);
8698 if (mode == CCV4mode)
8699 return (ISA_HAS_8CC
8700 && ST_REG_P (regno)
8701 && (regno - ST_REG_FIRST) % 4 == 0);
8703 if (mode == CCmode)
8705 if (!ISA_HAS_8CC)
8706 return regno == FPSW_REGNUM;
8708 return (ST_REG_P (regno)
8709 || GP_REG_P (regno)
8710 || FP_REG_P (regno));
8713 size = GET_MODE_SIZE (mode);
8714 class = GET_MODE_CLASS (mode);
8716 if (GP_REG_P (regno))
8717 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
8719 if (FP_REG_P (regno)
8720 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
8721 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
8723 /* Allow TFmode for CCmode reloads. */
8724 if (mode == TFmode && ISA_HAS_8CC)
8725 return true;
8727 if (class == MODE_FLOAT
8728 || class == MODE_COMPLEX_FLOAT
8729 || class == MODE_VECTOR_FLOAT)
8730 return size <= UNITS_PER_FPVALUE;
8732 /* Allow integer modes that fit into a single register. We need
8733 to put integers into FPRs when using instructions like CVT
8734 and TRUNC. There's no point allowing sizes smaller than a word,
8735 because the FPU has no appropriate load/store instructions. */
8736 if (class == MODE_INT)
8737 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
8740 if (ACC_REG_P (regno)
8741 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
8743 if (size <= UNITS_PER_WORD)
8744 return true;
8746 if (size <= UNITS_PER_WORD * 2)
8747 return (DSP_ACC_REG_P (regno)
8748 ? ((regno - DSP_ACC_REG_FIRST) & 1) == 0
8749 : regno == MD_REG_FIRST);
8752 if (ALL_COP_REG_P (regno))
8753 return class == MODE_INT && size <= UNITS_PER_WORD;
8755 if (regno == GOT_VERSION_REGNUM)
8756 return mode == SImode;
8758 return false;
8761 /* Implement HARD_REGNO_NREGS. */
8763 unsigned int
8764 mips_hard_regno_nregs (int regno, enum machine_mode mode)
8766 if (ST_REG_P (regno))
8767 /* The size of FP status registers is always 4, because they only hold
8768 CCmode values, and CCmode is always considered to be 4 bytes wide. */
8769 return (GET_MODE_SIZE (mode) + 3) / 4;
8771 if (FP_REG_P (regno))
8772 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8774 /* All other registers are word-sized. */
8775 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
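/* Worked example (illustrative only): with 32-bit GPRs and 32-bit FPRs
   (UNITS_PER_WORD == UNITS_PER_FPREG == 4), DFmode needs (8 + 3) / 4
   == 2 registers whether it lives in GPRs or FPRs, SImode needs 1, and
   a CCmode value in a status register needs (4 + 3) / 4 == 1.  With
   64-bit FPRs, a DFmode value fits in a single FPR.  */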
8778 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
8779 in mips_hard_regno_nregs. */
8782 mips_class_max_nregs (enum reg_class class, enum machine_mode mode)
8784 int size;
8785 HARD_REG_SET left;
8787 size = 0x8000;
8788 COPY_HARD_REG_SET (left, reg_class_contents[(int) class]);
8789 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
8791 size = MIN (size, 4);
8792 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
8794 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
8796 size = MIN (size, UNITS_PER_FPREG);
8797 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
8799 if (!hard_reg_set_empty_p (left))
8800 size = MIN (size, UNITS_PER_WORD);
8801 return (GET_MODE_SIZE (mode) + size - 1) / size;
8804 /* Implement CANNOT_CHANGE_MODE_CLASS. */
8806 bool
8807 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
8808 enum machine_mode to ATTRIBUTE_UNUSED,
8809 enum reg_class class)
8811 /* There are several problems with changing the modes of values
8812 in floating-point registers:
8814 - When a multi-word value is stored in paired floating-point
8815 registers, the first register always holds the low word.
8816 We therefore can't allow FPRs to change between single-word
8817 and multi-word modes on big-endian targets.
8819 - GCC assumes that each word of a multiword register can be accessed
8820 individually using SUBREGs. This is not true for floating-point
8821 registers if they are bigger than a word.
8823 - Loading a 32-bit value into a 64-bit floating-point register
8824 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8825 We can't allow FPRs to change from SImode to a wider mode on
8826 64-bit targets.
8828 - If the FPU has already interpreted a value in one format, we must
8829 not ask it to treat the value as having a different format.
8831 We therefore disallow all mode changes involving FPRs. */
8832 return reg_classes_intersect_p (FP_REGS, class);
8835 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
8837 static bool
8838 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
8840 switch (mode)
8842 case SFmode:
8843 return TARGET_HARD_FLOAT;
8845 case DFmode:
8846 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
8848 case V2SFmode:
8849 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
8851 default:
8852 return false;
8856 /* Implement MODES_TIEABLE_P. */
8858 bool
8859 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
8861 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
8862 prefer to put one of them in FPRs. */
8863 return (mode1 == mode2
8864 || (!mips_mode_ok_for_mov_fmt_p (mode1)
8865 && !mips_mode_ok_for_mov_fmt_p (mode2)));
8868 /* Implement PREFERRED_RELOAD_CLASS. */
8870 enum reg_class
8871 mips_preferred_reload_class (rtx x, enum reg_class class)
8873 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8874 return LEA_REGS;
8876 if (reg_class_subset_p (FP_REGS, class)
8877 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
8878 return FP_REGS;
8880 if (reg_class_subset_p (GR_REGS, class))
8881 class = GR_REGS;
8883 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8884 class = M16_REGS;
8886 return class;
8889 /* Implement REGISTER_MOVE_COST. */
8892 mips_register_move_cost (enum machine_mode mode,
8893 enum reg_class to, enum reg_class from)
8895 if (TARGET_MIPS16)
8897 /* ??? We cannot move general registers into HI and LO because
8898 MIPS16 has no MTHI and MTLO instructions. Make the cost of
8899 moves in the opposite direction just as high, which stops the
8900 register allocators from using HI and LO for pseudos. */
8901 if (reg_class_subset_p (from, GENERAL_REGS)
8902 && reg_class_subset_p (to, GENERAL_REGS))
8904 if (reg_class_subset_p (from, M16_REGS)
8905 || reg_class_subset_p (to, M16_REGS))
8906 return 2;
8907 /* Two MOVEs. */
8908 return 4;
8911 else if (reg_class_subset_p (from, GENERAL_REGS))
8913 if (reg_class_subset_p (to, GENERAL_REGS))
8914 return 2;
8915 if (reg_class_subset_p (to, FP_REGS))
8916 return 4;
8917 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
8918 return 5;
8919 if (reg_class_subset_p (to, ACC_REGS))
8920 return 6;
8922 else if (reg_class_subset_p (to, GENERAL_REGS))
8924 if (reg_class_subset_p (from, FP_REGS))
8925 return 4;
8926 if (reg_class_subset_p (from, ST_REGS))
8927 /* LUI followed by MOVF. */
8928 return 4;
8929 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
8930 return 5;
8931 if (reg_class_subset_p (from, ACC_REGS))
8932 return 6;
8934 else if (reg_class_subset_p (from, FP_REGS))
8936 if (reg_class_subset_p (to, FP_REGS)
8937 && mips_mode_ok_for_mov_fmt_p (mode))
8938 return 4;
8939 if (reg_class_subset_p (to, ST_REGS))
8940 /* An expensive sequence. */
8941 return 8;
8944 return 12;
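/* Illustrative reading of the costs above for non-MIPS16 code: a
   GPR-to-GPR move costs 2, GPR<->FPR moves (mtc1/mfc1) cost 4, moves
   between GPRs and the HI/LO accumulators cost 6, an FPR-to-FPR
   mov.fmt costs 4, and anything not matched falls through to the
   worst-case cost of 12.  */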
8947 /* Return the register class required for a secondary register when
8948 copying between one of the registers in CLASS and value X, which
8949 has mode MODE. X is the source of the move if IN_P, otherwise it
8950 is the destination. Return NO_REGS if no secondary register is
8951 needed. */
8953 enum reg_class
8954 mips_secondary_reload_class (enum reg_class class,
8955 enum machine_mode mode, rtx x, bool in_p)
8957 int regno;
8959 /* If X is a constant that cannot be loaded into $25, it must be loaded
8960 into some other GPR. No other register class allows a direct move. */
8961 if (mips_dangerous_for_la25_p (x))
8962 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
8964 regno = true_regnum (x);
8965 if (TARGET_MIPS16)
8967 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
8968 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
8969 return M16_REGS;
8971 /* We can't really copy to HI or LO at all in MIPS16 mode. */
8972 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
8973 return M16_REGS;
8975 return NO_REGS;
8978 /* Copying from accumulator registers to anywhere other than a general
8979 register requires a temporary general register. */
8980 if (reg_class_subset_p (class, ACC_REGS))
8981 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
8982 if (ACC_REG_P (regno))
8983 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
8985 /* We can only copy a value to a condition code register from a
8986 floating-point register, and even then we require a scratch
8987 floating-point register. We can only copy a value out of a
8988 condition-code register into a general register. */
8989 if (reg_class_subset_p (class, ST_REGS))
8991 if (in_p)
8992 return FP_REGS;
8993 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
8995 if (ST_REG_P (regno))
8997 if (!in_p)
8998 return FP_REGS;
8999 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9002 if (reg_class_subset_p (class, FP_REGS))
9004 if (MEM_P (x)
9005 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9006 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9007 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9008 return NO_REGS;
9010 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9011 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9012 return NO_REGS;
9014 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9015 /* We can force the constant to memory and use lwc1
9016 and ldc1. As above, we will use pairs of lwc1s if
9017 ldc1 is not supported. */
9018 return NO_REGS;
9020 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9021 /* In this case we can use mov.fmt. */
9022 return NO_REGS;
9024 /* Otherwise, we need to reload through an integer register. */
9025 return GR_REGS;
9027 if (FP_REG_P (regno))
9028 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9030 return NO_REGS;
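/* Illustrative applications of the rules above: copying HI or LO to
   memory needs a GPR scratch (GR_REGS); loading a 4- or 8-byte value
   from memory into an FPR needs no scratch, since lwc1/ldc1 (or paired
   lwc1s) handle it directly; moving a value into a condition-code
   register always goes through a scratch FPR; and in MIPS16 mode every
   move must involve a member of M16_REGS.  */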
9033 /* Implement TARGET_MODE_REP_EXTENDED. */
9035 static int
9036 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9038 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9039 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9040 return SIGN_EXTEND;
9042 return UNKNOWN;
9045 /* Implement TARGET_VALID_POINTER_MODE. */
9047 static bool
9048 mips_valid_pointer_mode (enum machine_mode mode)
9050 return mode == SImode || (TARGET_64BIT && mode == DImode);
9053 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
9055 static bool
9056 mips_vector_mode_supported_p (enum machine_mode mode)
9058 switch (mode)
9060 case V2SFmode:
9061 return TARGET_PAIRED_SINGLE_FLOAT;
9063 case V2HImode:
9064 case V4QImode:
9065 case V2HQmode:
9066 case V2UHQmode:
9067 case V2HAmode:
9068 case V2UHAmode:
9069 case V4QQmode:
9070 case V4UQQmode:
9071 return TARGET_DSP;
9073 default:
9074 return false;
9078 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9080 static bool
9081 mips_scalar_mode_supported_p (enum machine_mode mode)
9083 if (ALL_FIXED_POINT_MODE_P (mode)
9084 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9085 return true;
9087 return default_scalar_mode_supported_p (mode);
9090 /* Implement TARGET_INIT_LIBFUNCS. */
9092 #include "config/gofast.h"
9094 static void
9095 mips_init_libfuncs (void)
9097 if (TARGET_FIX_VR4120)
9099 /* Register the special divsi3 and modsi3 functions needed to work
9100 around VR4120 division errata. */
9101 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9102 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9105 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9107 /* Register the MIPS16 -mhard-float stubs. */
9108 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9109 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9110 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9111 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9113 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9114 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9115 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9116 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9117 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9118 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9119 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9121 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9122 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9123 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9125 if (TARGET_DOUBLE_FLOAT)
9127 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9128 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9129 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9130 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9132 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9133 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9134 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9135 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9136 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9137 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9138 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9140 set_conv_libfunc (sext_optab, DFmode, SFmode,
9141 "__mips16_extendsfdf2");
9142 set_conv_libfunc (trunc_optab, SFmode, DFmode,
9143 "__mips16_truncdfsf2");
9144 set_conv_libfunc (sfix_optab, SImode, DFmode,
9145 "__mips16_fix_truncdfsi");
9146 set_conv_libfunc (sfloat_optab, DFmode, SImode,
9147 "__mips16_floatsidf");
9148 set_conv_libfunc (ufloat_optab, DFmode, SImode,
9149 "__mips16_floatunsidf");
9152 else
9153 /* Register the gofast functions if selected using --enable-gofast. */
9154 gofast_maybe_init_libfuncs ();
9157 /* Return the length of INSN. LENGTH is the initial length computed by
9158 attributes in the machine-description file. */
9161 mips_adjust_insn_length (rtx insn, int length)
9163 /* An unconditional jump has an unfilled delay slot if it is not part
9164 of a sequence. A conditional jump normally has a delay slot, but
9165 does not on MIPS16. */
9166 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9167 length += 4;
9169 /* See how many nops might be needed to avoid hardware hazards. */
9170 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9171 switch (get_attr_hazard (insn))
9173 case HAZARD_NONE:
9174 break;
9176 case HAZARD_DELAY:
9177 length += 4;
9178 break;
9180 case HAZARD_HILO:
9181 length += 8;
9182 break;
9185 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
9186 the .md file length attributes are 4-based for both modes.
9187 Adjust the MIPS16 ones here. */
9188 if (TARGET_MIPS16)
9189 length /= 2;
9191 return length;
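/* Worked example (illustrative only): a non-MIPS16 conditional branch
   whose .md length attribute is 4 gains 4 bytes for its delay slot,
   giving 8; a HAZARD_HILO workaround adds two nops for a total of 16.
   A MIPS16 conditional branch has no delay slot, so only the final
   halving of its 4-based .md length applies.  */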
9194 /* Return an asm sequence to start a noat block and load the address
9195 of a label into $1. */
9197 const char *
9198 mips_output_load_label (void)
9200 if (TARGET_EXPLICIT_RELOCS)
9201 switch (mips_abi)
9203 case ABI_N32:
9204 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9206 case ABI_64:
9207 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9209 default:
9210 if (ISA_HAS_LOAD_DELAY)
9211 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9212 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9214 else
9216 if (Pmode == DImode)
9217 return "%[dla\t%@,%0";
9218 else
9219 return "%[la\t%@,%0";
9223 /* Return the assembly code for INSN, which has the operands given by
9224 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9225 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9226 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9227 version of BRANCH_IF_TRUE. */
9229 const char *
9230 mips_output_conditional_branch (rtx insn, rtx *operands,
9231 const char *branch_if_true,
9232 const char *branch_if_false)
9234 unsigned int length;
9235 rtx taken, not_taken;
9237 length = get_attr_length (insn);
9238 if (length <= 8)
9240 /* Just a simple conditional branch. */
9241 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9242 return branch_if_true;
9245 /* Generate a reversed branch around a direct jump. This fallback does
9246 not use branch-likely instructions. */
9247 mips_branch_likely = false;
9248 not_taken = gen_label_rtx ();
9249 taken = operands[1];
9251 /* Generate the reversed branch to NOT_TAKEN. */
9252 operands[1] = not_taken;
9253 output_asm_insn (branch_if_false, operands);
9255 /* If INSN has a delay slot, we must provide delay slots for both the
9256 branch to NOT_TAKEN and the conditional jump. We must also ensure
9257 that INSN's delay slot is executed in the appropriate cases. */
9258 if (final_sequence)
9260 /* This first delay slot will always be executed, so use INSN's
9261 delay slot if it is not annulled. */
9262 if (!INSN_ANNULLED_BRANCH_P (insn))
9264 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9265 asm_out_file, optimize, 1, NULL);
9266 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9268 else
9269 output_asm_insn ("nop", 0);
9270 fprintf (asm_out_file, "\n");
9273 /* Output the unconditional branch to TAKEN. */
9274 if (length <= 16)
9275 output_asm_insn ("j\t%0%/", &taken);
9276 else
9278 output_asm_insn (mips_output_load_label (), &taken);
9279 output_asm_insn ("jr\t%@%]%/", 0);
9282 /* Now deal with its delay slot; see above. */
9283 if (final_sequence)
9285 /* This delay slot will only be executed if the branch is taken.
9286 Use INSN's delay slot if it is annulled. */
9287 if (INSN_ANNULLED_BRANCH_P (insn))
9289 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9290 asm_out_file, optimize, 1, NULL);
9291 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9293 else
9294 output_asm_insn ("nop", 0);
9295 fprintf (asm_out_file, "\n");
9298 /* Output NOT_TAKEN. */
9299 targetm.asm_out.internal_label (asm_out_file, "L",
9300 CODE_LABEL_NUMBER (not_taken));
9301 return "";
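
/* Illustrative example (hand-written, so treat the exact registers and
   labels as assumptions): for an out-of-range conditional branch such as
   "bne $2,$0,target", the code above emits something along the lines of

	beq	$2,$0,1f	# reversed condition, short branch
	 nop			# delay slot (or INSN's own delay insn)
	j	target		# or a load of the label plus "jr $1"
	 nop			# when even the jump is out of range
   1:

   with INSN's delay-slot instruction reused after whichever branch matches
   its annul status, as described in the comments above.  */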
9304 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9305 if some ordering condition is true. The condition is given by
9306 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9307 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9308 its second is always zero. */
9310 const char *
9311 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9313 const char *branch[2];
9315 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9316 Make BRANCH[0] branch on the inverse condition. */
9317 switch (GET_CODE (operands[0]))
9319 /* These cases are equivalent to comparisons against zero. */
9320 case LEU:
9321 inverted_p = !inverted_p;
9322 /* Fall through. */
9323 case GTU:
9324 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9325 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9326 break;
9328 /* These cases are always true or always false. */
9329 case LTU:
9330 inverted_p = !inverted_p;
9331 /* Fall through. */
9332 case GEU:
9333 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9334 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9335 break;
9337 default:
9338 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9339 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9340 break;
9342 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9345 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
9346 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
9348 When working around R4000 and R4400 errata, we need to make sure that
9349 the division is not immediately followed by a shift[1][2]. We also
9350 need to stop the division from being put into a branch delay slot[3].
9351 The easiest way to avoid both problems is to add a nop after the
9352 division. When a divide-by-zero check is needed, this nop can be
9353 used to fill the branch delay slot.
9355 [1] If a double-word or a variable shift executes immediately
9356 after starting an integer division, the shift may give an
9357 incorrect result. See quotations of errata #16 and #28 from
9358 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9359 in mips.md for details.
9361 [2] A similar bug to [1] exists for all revisions of the
9362 R4000 and the R4400 when run in an MC configuration.
9363 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9365 "19. In this following sequence:
9367 ddiv (or ddivu or div or divu)
9368 dsll32 (or dsrl32, dsra32)
9370 if an MPT stall occurs, while the divide is slipping the cpu
9371 pipeline, then the following double shift would end up with an
9372 incorrect result.
9374 Workaround: The compiler needs to avoid generating any
9375 sequence with divide followed by extended double shift."
9377 This erratum is also present in "MIPS R4400MC Errata, Processor
9378 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9379 & 3.0" as errata #10 and #4, respectively.
9381 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9382 (also valid for MIPS R4000MC processors):
9384 "52. R4000SC: This bug does not apply for the R4000PC.
9386 There are two flavors of this bug:
9388 1) If the instruction just after divide takes an RF exception
9389 (tlb-refill, tlb-invalid) and gets an instruction cache
9390 miss (both primary and secondary) and the line which is
9391 currently in secondary cache at this index had the first
9392 data word, where the bits 5..2 are set, then R4000 would
9393 get a wrong result for the div.
9397 div r8, r9
9398 ------------------- # end-of page. -tlb-refill
9402 div r8, r9
9403 ------------------- # end-of page. -tlb-invalid
9406 2) If the divide is in the taken branch delay slot, where the
9407 target takes RF exception and gets an I-cache miss for the
9408 exception vector or where I-cache miss occurs for the
9409 target address, under the above mentioned scenarios, the
9410 div would get wrong results.
9413 j r2 # to next page mapped or unmapped
9414 div r8,r9 # this bug would be there as long
9415 # as there is an ICache miss and
9416 nop # the "data pattern" is present
9419 beq r0, r0, NextPage # to Next page
9420 div r8,r9
9423 This bug is present for div, divu, ddiv, and ddivu
9424 instructions.
9426 Workaround: For item 1), OS could make sure that the next page
9427 after the divide instruction is also mapped. For item 2), the
9428 compiler could make sure that the divide instruction is not in
9429 the branch delay slot."
9431 These processors have PRId values of 0x00004220 and 0x00004300 for
9432 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9434 const char *
9435 mips_output_division (const char *division, rtx *operands)
9437 const char *s;
9439 s = division;
9440 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9442 output_asm_insn (s, operands);
9443 s = "nop";
9445 if (TARGET_CHECK_ZERO_DIV)
9447 if (TARGET_MIPS16)
9449 output_asm_insn (s, operands);
9450 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9452 else if (GENERATE_DIVIDE_TRAPS)
9454 output_asm_insn (s, operands);
9455 s = "teq\t%2,%.,7";
9457 else
9459 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9460 output_asm_insn (s, operands);
9461 s = "break\t7%)\n1:";
9464 return s;
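
/* Illustrative examples (assumed output based on the logic above, not
   captured from a real build), for a word division of $4 by $5:

     -mcheck-zero-division with trap instructions available:
	div	$0,$4,$5
	teq	$5,$0,7

     -mcheck-zero-division without trap instructions:
	bne	$5,$0,1f
	div	$0,$4,$5	# fills the branch delay slot
	break	7
   1:

   With -mfix-r4000 or -mfix-r4400, a nop is emitted straight after the div;
   in the no-trap case it is that nop, rather than the div itself, which
   ends up in the branch delay slot.  */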
9467 /* Return true if IN_INSN is a multiply-add or multiply-subtract
9468 instruction and if OUT_INSN assigns to the accumulator operand. */
9470 bool
9471 mips_linked_madd_p (rtx out_insn, rtx in_insn)
9473 rtx x;
9475 x = single_set (in_insn);
9476 if (x == 0)
9477 return false;
9479 x = SET_SRC (x);
9481 if (GET_CODE (x) == PLUS
9482 && GET_CODE (XEXP (x, 0)) == MULT
9483 && reg_set_p (XEXP (x, 1), out_insn))
9484 return true;
9486 if (GET_CODE (x) == MINUS
9487 && GET_CODE (XEXP (x, 1)) == MULT
9488 && reg_set_p (XEXP (x, 0), out_insn))
9489 return true;
9491 return false;
9494 /* True if the dependency between OUT_INSN and IN_INSN is on the store
9495 data rather than the address. We need this because the cprestore
9496 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
9497 which causes the default routine to abort. We just return false
9498 for that case. */
9500 bool
9501 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9503 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9504 return false;
9506 return !store_data_bypass_p (out_insn, in_insn);
9509 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9510 dependencies have no cost, except on the 20Kc where output-dependence
9511 is treated like input-dependence. */
9513 static int
9514 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9515 rtx dep ATTRIBUTE_UNUSED, int cost)
9517 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9518 && TUNE_20KC)
9519 return cost;
9520 if (REG_NOTE_KIND (link) != 0)
9521 return 0;
9522 return cost;
9525 /* Return the number of instructions that can be issued per cycle. */
9527 static int
9528 mips_issue_rate (void)
9530 switch (mips_tune)
9532 case PROCESSOR_74KC:
9533 case PROCESSOR_74KF2_1:
9534 case PROCESSOR_74KF1_1:
9535 case PROCESSOR_74KF3_2:
9536 /* The 74k is not strictly a quad-issue CPU, but it can be seen as one
9537 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9538 but in reality only a maximum of 3 insns can be issued as
9539 floating-point loads and stores also require a slot in the
9540 AGEN pipe. */
9541 return 4;
9543 case PROCESSOR_20KC:
9544 case PROCESSOR_R4130:
9545 case PROCESSOR_R5400:
9546 case PROCESSOR_R5500:
9547 case PROCESSOR_R7000:
9548 case PROCESSOR_R9000:
9549 return 2;
9551 case PROCESSOR_SB1:
9552 case PROCESSOR_SB1A:
9553 /* This is actually 4, but we get better performance if we claim 3.
9554 This is partly because of unwanted speculative code motion with the
9555 larger number, and partly because in most common cases we can't
9556 reach the theoretical max of 4. */
9557 return 3;
9559 default:
9560 return 1;
9564 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9565 be as wide as the scheduling freedom in the DFA. */
9567 static int
9568 mips_multipass_dfa_lookahead (void)
9570 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9571 if (TUNE_SB1)
9572 return 4;
9574 return 0;
9577 /* Remove the instruction at index LOWER from ready queue READY and
9578 reinsert it in front of the instruction at index HIGHER. LOWER must
9579 be <= HIGHER. */
9581 static void
9582 mips_promote_ready (rtx *ready, int lower, int higher)
9584 rtx new_head;
9585 int i;
9587 new_head = ready[lower];
9588 for (i = lower; i < higher; i++)
9589 ready[i] = ready[i + 1];
9590 ready[i] = new_head;
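
/* Illustrative sketch (a hypothetical helper on a plain int array, not part
   of this file): the loop above is a left rotation of the slice
   READY[LOWER..HIGHER], leaving the promoted entry at index HIGHER.  */

static void
sketch_rotate_slice_up (int *slot, int lower, int higher)
{
  int promoted = slot[lower];
  int i;

  for (i = lower; i < higher; i++)
    slot[i] = slot[i + 1];	/* slide the intervening entries down */
  slot[higher] = promoted;	/* the promoted entry now issues earlier */
}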
9593 /* If the priority of the instruction at POS2 in the ready queue READY
9594 is within LIMIT units of that of the instruction at POS1, swap the
9595 instructions if POS2 is not already less than POS1. */
9597 static void
9598 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
9600 if (pos1 < pos2
9601 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
9603 rtx temp;
9605 temp = ready[pos1];
9606 ready[pos1] = ready[pos2];
9607 ready[pos2] = temp;
9611 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9612 that may clobber hi or lo. */
9613 static rtx mips_macc_chains_last_hilo;
9615 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9616 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9618 static void
9619 mips_macc_chains_record (rtx insn)
9621 if (get_attr_may_clobber_hilo (insn))
9622 mips_macc_chains_last_hilo = insn;
9625 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9626 has NREADY elements, looking for a multiply-add or multiply-subtract
9627 instruction that is cumulative with mips_macc_chains_last_hilo.
9628 If there is one, promote it ahead of anything else that might
9629 clobber hi or lo. */
9631 static void
9632 mips_macc_chains_reorder (rtx *ready, int nready)
9634 int i, j;
9636 if (mips_macc_chains_last_hilo != 0)
9637 for (i = nready - 1; i >= 0; i--)
9638 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9640 for (j = nready - 1; j > i; j--)
9641 if (recog_memoized (ready[j]) >= 0
9642 && get_attr_may_clobber_hilo (ready[j]))
9644 mips_promote_ready (ready, i, j);
9645 break;
9647 break;
9651 /* The last instruction to be scheduled. */
9652 static rtx vr4130_last_insn;
9654 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9655 points to an rtx that is initially an instruction. Nullify the rtx
9656 if the instruction uses the value of register X. */
9658 static void
9659 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
9660 void *data)
9662 rtx *insn_ptr;
9664 insn_ptr = (rtx *) data;
9665 if (REG_P (x)
9666 && *insn_ptr != 0
9667 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9668 *insn_ptr = 0;
9671 /* Return true if there is true register dependence between vr4130_last_insn
9672 and INSN. */
9674 static bool
9675 vr4130_true_reg_dependence_p (rtx insn)
9677 note_stores (PATTERN (vr4130_last_insn),
9678 vr4130_true_reg_dependence_p_1, &insn);
9679 return insn == 0;
9682 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9683 the ready queue and that INSN2 is the instruction after it, return
9684 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9685 in which INSN1 and INSN2 can probably issue in parallel, but for
9686 which (INSN2, INSN1) should be less sensitive to instruction
9687 alignment than (INSN1, INSN2). See 4130.md for more details. */
9689 static bool
9690 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9692 sd_iterator_def sd_it;
9693 dep_t dep;
9695 /* Check for the following case:
9697 1) there is some other instruction X with an anti dependence on INSN1;
9698 2) X has a higher priority than INSN2; and
9699 3) X is an arithmetic instruction (and thus has no unit restrictions).
9701 If INSN1 is the last instruction blocking X, it would be better to
9702 choose (INSN1, X) over (INSN2, INSN1). */
9703 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
9704 if (DEP_TYPE (dep) == REG_DEP_ANTI
9705 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
9706 && recog_memoized (DEP_CON (dep)) >= 0
9707 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
9708 return false;
9710 if (vr4130_last_insn != 0
9711 && recog_memoized (insn1) >= 0
9712 && recog_memoized (insn2) >= 0)
9714 /* See whether INSN1 and INSN2 use different execution units,
9715 or if they are both ALU-type instructions. If so, they can
9716 probably execute in parallel. */
9717 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9718 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9719 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9721 /* If only one of the instructions has a dependence on
9722 vr4130_last_insn, prefer to schedule the other one first. */
9723 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
9724 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
9725 if (dep1_p != dep2_p)
9726 return dep1_p;
9728 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9729 is not an ALU-type instruction and if INSN1 uses the same
9730 execution unit. (Note that if this condition holds, we already
9731 know that INSN2 uses a different execution unit.) */
9732 if (class1 != VR4130_CLASS_ALU
9733 && recog_memoized (vr4130_last_insn) >= 0
9734 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9735 return true;
9738 return false;
9741 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9742 queue with at least two instructions. Swap the first two if
9743 vr4130_swap_insns_p says that it could be worthwhile. */
9745 static void
9746 vr4130_reorder (rtx *ready, int nready)
9748 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9749 mips_promote_ready (ready, nready - 2, nready - 1);
9752 /* Record whether the last 74k AGEN instruction was a load or a store. */
9753 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
9755 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
9756 resets the state to TYPE_UNKNOWN. */
9758 static void
9759 mips_74k_agen_init (rtx insn)
9761 if (!insn || !NONJUMP_INSN_P (insn))
9762 mips_last_74k_agen_insn = TYPE_UNKNOWN;
9763 else
9765 enum attr_type type = get_attr_type (insn);
9766 if (type == TYPE_LOAD || type == TYPE_STORE)
9767 mips_last_74k_agen_insn = type;
9771 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
9772 loads to be grouped together, and multiple stores to be grouped
9773 together. Swap things around in the ready queue to make this happen. */
9775 static void
9776 mips_74k_agen_reorder (rtx *ready, int nready)
9778 int i;
9779 int store_pos, load_pos;
9781 store_pos = -1;
9782 load_pos = -1;
9784 for (i = nready - 1; i >= 0; i--)
9786 rtx insn = ready[i];
9787 if (USEFUL_INSN_P (insn))
9788 switch (get_attr_type (insn))
9790 case TYPE_STORE:
9791 if (store_pos == -1)
9792 store_pos = i;
9793 break;
9795 case TYPE_LOAD:
9796 if (load_pos == -1)
9797 load_pos = i;
9798 break;
9800 default:
9801 break;
9805 if (load_pos == -1 || store_pos == -1)
9806 return;
9808 switch (mips_last_74k_agen_insn)
9810 case TYPE_UNKNOWN:
9811 /* Prefer to schedule loads since they have a higher latency. */
9812 case TYPE_LOAD:
9813 /* Swap loads to the front of the queue. */
9814 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
9815 break;
9816 case TYPE_STORE:
9817 /* Swap stores to the front of the queue. */
9818 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
9819 break;
9820 default:
9821 break;
9825 /* Implement TARGET_SCHED_INIT. */
9827 static void
9828 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9829 int max_ready ATTRIBUTE_UNUSED)
9831 mips_macc_chains_last_hilo = 0;
9832 vr4130_last_insn = 0;
9833 mips_74k_agen_init (NULL_RTX);
9836 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
9838 static int
9839 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9840 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
9842 if (!reload_completed
9843 && TUNE_MACC_CHAINS
9844 && *nreadyp > 0)
9845 mips_macc_chains_reorder (ready, *nreadyp);
9847 if (reload_completed
9848 && TUNE_MIPS4130
9849 && !TARGET_VR4130_ALIGN
9850 && *nreadyp > 1)
9851 vr4130_reorder (ready, *nreadyp);
9853 if (TUNE_74K)
9854 mips_74k_agen_reorder (ready, *nreadyp);
9856 return mips_issue_rate ();
9859 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9861 static int
9862 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9863 rtx insn, int more)
9865 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
9866 if (USEFUL_INSN_P (insn))
9868 more--;
9869 if (!reload_completed && TUNE_MACC_CHAINS)
9870 mips_macc_chains_record (insn);
9871 vr4130_last_insn = insn;
9872 if (TUNE_74K)
9873 mips_74k_agen_init (insn);
9875 return more;
9878 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9879 return the first operand of the associated PREF or PREFX insn. */
9882 mips_prefetch_cookie (rtx write, rtx locality)
9884 /* store_streamed / load_streamed. */
9885 if (INTVAL (locality) <= 0)
9886 return GEN_INT (INTVAL (write) + 4);
9888 /* store / load. */
9889 if (INTVAL (locality) <= 2)
9890 return write;
9892 /* store_retained / load_retained. */
9893 return GEN_INT (INTVAL (write) + 6);
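
/* Illustrative sketch (plain ints standing in for the rtx operands; the hint
   numbers follow the architectural PREF encoding of 0/1 for load/store,
   4/5 for streamed and 6/7 for retained):  */

static int
sketch_pref_hint (int write_p, int locality)
{
  if (locality <= 0)
    return write_p + 4;		/* load_streamed / store_streamed */
  if (locality <= 2)
    return write_p;		/* plain load / store */
  return write_p + 6;		/* load_retained / store_retained */
}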
9896 /* This structure describes a single built-in function. */
9897 struct mips_builtin_description {
9898 /* The code of the main .md file instruction. See mips_builtin_type
9899 for more information. */
9900 enum insn_code icode;
9902 /* The floating-point comparison code to use with ICODE, if any. */
9903 enum mips_fp_condition cond;
9905 /* The name of the built-in function. */
9906 const char *name;
9908 /* Specifies how the function should be expanded. */
9909 enum mips_builtin_type builtin_type;
9911 /* The function's prototype. */
9912 enum mips_function_type function_type;
9914 /* The target flags required for this function. */
9915 int target_flags;
9918 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9919 FUNCTION_TYPE and TARGET_FLAGS are mips_builtin_description fields. */
9920 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9921 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9922 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9924 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
9925 require TARGET_FLAGS. */
9926 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9927 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9928 "__builtin_mips_" #INSN "_" #COND "_s", \
9929 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9930 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9931 "__builtin_mips_" #INSN "_" #COND "_d", \
9932 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9934 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9935 The lower and upper forms require TARGET_FLAGS while the any and all
9936 forms require MASK_MIPS3D. */
9937 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9938 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9939 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9940 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9941 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9942 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9943 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9944 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9945 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9946 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9947 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9948 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9949 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9951 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9952 require MASK_MIPS3D. */
9953 #define CMP_4S_BUILTINS(INSN, COND) \
9954 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9955 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9956 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9957 MASK_MIPS3D }, \
9958 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9959 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9960 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9961 MASK_MIPS3D }
9963 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9964 instruction requires TARGET_FLAGS. */
9965 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9966 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9967 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9968 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9969 TARGET_FLAGS }, \
9970 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9971 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9972 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9973 TARGET_FLAGS }
9975 /* Define all the built-in functions related to C.cond.fmt condition COND. */
9976 #define CMP_BUILTINS(COND) \
9977 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9978 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9979 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9980 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9981 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9982 CMP_4S_BUILTINS (c, COND), \
9983 CMP_4S_BUILTINS (cabs, COND)
9985 static const struct mips_builtin_description mips_ps_bdesc[] = {
9986 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9987 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9988 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9989 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9990 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
9991 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9992 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9993 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9995 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
9996 MASK_PAIRED_SINGLE_FLOAT),
9997 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9998 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9999 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10000 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10002 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10003 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10004 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10005 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10006 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10007 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10009 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10010 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10011 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10012 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10013 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10014 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10016 MIPS_FP_CONDITIONS (CMP_BUILTINS)
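
/* Illustrative expansion (done by hand, so treat it as approximate): the
   first DIRECT_BUILTIN line above becomes one table entry such as

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }

   and each CMP_BUILTINS invocation contributes a whole family of scalar,
   paired-single, movt/movf, any/all and upper/lower comparison entries for
   that condition.  */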
10019 /* Built-in functions for the SB-1 processor. */
10021 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10023 static const struct mips_builtin_description mips_sb1_bdesc[] = {
10024 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10027 /* Built-in functions for the DSP ASE. */
10029 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10030 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10031 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10032 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10033 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10035 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10036 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10037 mips_builtin_description fields. */
10038 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10039 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10040 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10042 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10043 branch instruction. TARGET_FLAGS is a mips_builtin_description field. */
10044 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10045 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10046 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10048 static const struct mips_builtin_description mips_dsp_bdesc[] = {
10049 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10050 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10051 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10052 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10053 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10054 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10055 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10056 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10057 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10058 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10059 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10060 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10061 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10062 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10063 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10064 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10065 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10066 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10067 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10068 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10069 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10070 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10071 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10072 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10073 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10074 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10075 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10076 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10077 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10078 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10079 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10080 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10081 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10082 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10083 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10084 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10085 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10086 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10087 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10088 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10089 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10090 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10091 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10092 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10093 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10094 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10095 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10096 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10097 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10098 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10099 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10100 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10101 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10102 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10103 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10104 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10105 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10106 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10107 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10108 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10109 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10110 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10111 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10112 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10113 BPOSGE_BUILTIN (32, MASK_DSP),
10115 /* The following are for the MIPS DSP ASE REV 2. */
10116 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
10117 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10118 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10119 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10120 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10121 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10122 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10123 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10124 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10125 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10126 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10127 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10128 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10129 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10130 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10131 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10132 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10133 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10134 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10135 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10136 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10137 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
10138 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10139 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10140 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10141 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10142 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10143 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10144 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10145 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10146 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10147 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10148 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10149 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
10152 static const struct mips_builtin_description mips_dsp_32only_bdesc[] = {
10153 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10154 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10155 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10156 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10157 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10158 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10159 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10160 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10161 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10162 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10163 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10164 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10165 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10166 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10167 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10168 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10169 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10170 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10171 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10172 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10173 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10175 /* The following are for the MIPS DSP ASE REV 2. */
10176 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10177 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10178 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10179 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10180 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10181 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10182 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10183 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
10184 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
10185 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10186 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10187 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10188 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10189 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10190 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
10193 /* This structure describes an array of mips_builtin_description entries. */
10194 struct mips_bdesc_map {
10195 /* The array that this entry describes. */
10196 const struct mips_builtin_description *bdesc;
10198 /* The number of entries in BDESC. */
10199 unsigned int size;
10201 /* The target processor that supports the functions in BDESC.
10202 PROCESSOR_MAX means we enable them for all processors. */
10203 enum processor_type proc;
10205 /* The functions in BDESC are not supported if any of these
10206 target flags are set. */
10207 int unsupported_target_flags;
10210 /* All MIPS-specific built-in functions. */
10211 static const struct mips_bdesc_map mips_bdesc_arrays[] = {
10212 { mips_ps_bdesc, ARRAY_SIZE (mips_ps_bdesc), PROCESSOR_MAX, 0 },
10213 { mips_sb1_bdesc, ARRAY_SIZE (mips_sb1_bdesc), PROCESSOR_SB1, 0 },
10214 { mips_dsp_bdesc, ARRAY_SIZE (mips_dsp_bdesc), PROCESSOR_MAX, 0 },
10215 { mips_dsp_32only_bdesc, ARRAY_SIZE (mips_dsp_32only_bdesc),
10216 PROCESSOR_MAX, MASK_64BIT }
10219 /* MODE is a vector mode whose elements have type TYPE. Return the type
10220 of the vector itself. */
10222 static tree
10223 mips_builtin_vector_type (tree type, enum machine_mode mode)
10225 static tree types[(int) MAX_MACHINE_MODE];
10227 if (types[(int) mode] == NULL_TREE)
10228 types[(int) mode] = build_vector_type_for_mode (type, mode);
10229 return types[(int) mode];
10232 /* Source-level argument types. */
10233 #define MIPS_ATYPE_VOID void_type_node
10234 #define MIPS_ATYPE_INT integer_type_node
10235 #define MIPS_ATYPE_POINTER ptr_type_node
10237 /* Standard mode-based argument types. */
10238 #define MIPS_ATYPE_SI intSI_type_node
10239 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10240 #define MIPS_ATYPE_DI intDI_type_node
10241 #define MIPS_ATYPE_SF float_type_node
10242 #define MIPS_ATYPE_DF double_type_node
10244 /* Vector argument types. */
10245 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10246 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10247 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10249 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10250 their associated MIPS_ATYPEs. */
10251 #define MIPS_FTYPE_ATYPES1(A, B) \
10252 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10254 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10255 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10257 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10258 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10260 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10261 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10262 MIPS_ATYPE_##E
10264 /* Return the function type associated with function prototype TYPE. */
10266 static tree
10267 mips_build_function_type (enum mips_function_type type)
10269 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10271 if (types[(int) type] == NULL_TREE)
10272 switch (type)
10274 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10275 case MIPS_FTYPE_NAME##NUM ARGS: \
10276 types[(int) type] \
10277 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10278 NULL_TREE); \
10279 break;
10280 #include "config/mips/mips-ftypes.def"
10281 #undef DEF_MIPS_FTYPE
10282 default:
10283 gcc_unreachable ();
10286 return types[(int) type];
10289 /* Implement TARGET_INIT_BUILTINS. */
10291 static void
10292 mips_init_builtins (void)
10294 const struct mips_builtin_description *d;
10295 const struct mips_bdesc_map *m;
10296 unsigned int offset;
10298 /* Iterate through all of the bdesc arrays, initializing all of the
10299 builtin functions. */
10300 offset = 0;
10301 for (m = mips_bdesc_arrays;
10302 m < &mips_bdesc_arrays[ARRAY_SIZE (mips_bdesc_arrays)];
10303 m++)
10305 if ((m->proc == PROCESSOR_MAX || m->proc == mips_arch)
10306 && (m->unsupported_target_flags & target_flags) == 0)
10307 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10308 if ((d->target_flags & target_flags) == d->target_flags)
10309 add_builtin_function (d->name,
10310 mips_build_function_type (d->function_type),
10311 d - m->bdesc + offset,
10312 BUILT_IN_MD, NULL, NULL);
10313 offset += m->size;
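
/* Illustrative note (the sizes below are hypothetical): if the arrays had
   10, 1, 20 and 5 entries, a built-in at index I of a given array would be
   assigned function code I plus the total size of the preceding arrays
   (0, 10, 11 and 31 respectively), whether or not every entry is actually
   registered; mips_expand_builtin below inverts this numbering by
   subtracting each array's size in turn.  */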
10317 /* Take argument ARGNO from EXP's argument list and convert it into a
10318 form suitable for input operand OPNO of instruction ICODE. Return the
10319 value. */
10321 static rtx
10322 mips_prepare_builtin_arg (enum insn_code icode,
10323 unsigned int opno, tree exp, unsigned int argno)
10325 rtx value;
10326 enum machine_mode mode;
10328 value = expand_normal (CALL_EXPR_ARG (exp, argno));
10329 mode = insn_data[icode].operand[opno].mode;
10330 if (!insn_data[icode].operand[opno].predicate (value, mode))
10332 value = copy_to_mode_reg (mode, value);
10333 /* Check the predicate again. */
10334 if (!insn_data[icode].operand[opno].predicate (value, mode))
10336 error ("invalid argument to built-in function");
10337 return const0_rtx;
10341 return value;
10344 /* Return an rtx suitable for output operand OP of instruction ICODE.
10345 If TARGET is non-null, try to use it where possible. */
10347 static rtx
10348 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10350 enum machine_mode mode;
10352 mode = insn_data[icode].operand[op].mode;
10353 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10354 target = gen_reg_rtx (mode);
10356 return target;
10359 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
10360 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
10361 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
10362 suggests a good place to put the result. */
10364 static rtx
10365 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10366 bool has_target_p)
10368 rtx ops[MAX_RECOG_OPERANDS];
10369 int opno, argno;
10371 /* Map any target to operand 0. */
10372 opno = 0;
10373 if (has_target_p)
10375 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
10376 opno++;
10379 /* Map the arguments to the other operands. The n_operands value
10380 for an expander includes match_dups and match_scratches as well as
10381 match_operands, so n_operands is only an upper bound on the number
10382 of arguments to the expander function. */
10383 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
10384 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
10385 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
10387 switch (opno)
10389 case 2:
10390 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10391 break;
10393 case 3:
10394 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10395 break;
10397 case 4:
10398 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10399 break;
10401 default:
10402 gcc_unreachable ();
10404 return target;
10407 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
10408 function; TYPE says which. EXP is the CALL_EXPR that calls the
10409 function, ICODE is the instruction that should be used to compare
10410 the first two arguments, and COND is the condition it should test.
10411 TARGET, if nonnull, suggests a good place to put the result. */
10413 static rtx
10414 mips_expand_builtin_movtf (enum mips_builtin_type type,
10415 enum insn_code icode, enum mips_fp_condition cond,
10416 rtx target, tree exp)
10418 rtx cmp_result, op0, op1;
10420 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10421 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10422 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10423 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10425 icode = CODE_FOR_mips_cond_move_tf_ps;
10426 target = mips_prepare_builtin_target (icode, 0, target);
10427 if (type == MIPS_BUILTIN_MOVT)
10429 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10430 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10432 else
10434 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10435 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10437 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10438 return target;
10441 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10442 into TARGET otherwise. Return TARGET. */
10444 static rtx
10445 mips_builtin_branch_and_move (rtx condition, rtx target,
10446 rtx value_if_true, rtx value_if_false)
10448 rtx true_label, done_label;
10450 true_label = gen_label_rtx ();
10451 done_label = gen_label_rtx ();
10453 /* First assume that CONDITION is false. */
10454 mips_emit_move (target, value_if_false);
10456 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10457 emit_jump_insn (gen_condjump (condition, true_label));
10458 emit_jump_insn (gen_jump (done_label));
10459 emit_barrier ();
10461 /* Fix TARGET if CONDITION is true. */
10462 emit_label (true_label);
10463 mips_emit_move (target, value_if_true);
10465 emit_label (done_label);
10466 return target;
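
/* Illustrative note (a restatement of the code above, not new behavior):
   the RTL emitted here is the branching form of

     target = value_if_false;
     if (condition)
       target = value_if_true;

   which is how the 0/1 (or 0/-1) results of the comparison built-ins are
   materialized.  */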
10469 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
10470 the CALL_EXPR that calls the function, ICODE is the code of the
10471 comparison instruction, and COND is the condition it should test.
10472 TARGET, if nonnull, suggests a good place to put the boolean result. */
10474 static rtx
10475 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10476 enum insn_code icode, enum mips_fp_condition cond,
10477 rtx target, tree exp)
10479 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
10480 int argno;
10482 if (target == 0 || GET_MODE (target) != SImode)
10483 target = gen_reg_rtx (SImode);
10485 /* The instruction should have a target operand, an operand for each
10486 argument, and an operand for COND. */
10487 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
10489 /* Prepare the operands to the comparison. */
10490 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10491 for (argno = 0; argno < call_expr_nargs (exp); argno++)
10492 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
10494 switch (insn_data[icode].n_operands)
10496 case 4:
10497 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
10498 GEN_INT (cond)));
10499 break;
10501 case 6:
10502 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
10503 args[2], args[3], GEN_INT (cond)));
10504 break;
10506 default:
10507 gcc_unreachable ();
10510 /* If the comparison sets more than one register, we define the result
10511 to be 0 if all registers are false and -1 if all registers are true.
10512 The value of the complete result is indeterminate otherwise. */
10513 switch (builtin_type)
10515 case MIPS_BUILTIN_CMP_ALL:
10516 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10517 return mips_builtin_branch_and_move (condition, target,
10518 const0_rtx, const1_rtx);
10520 case MIPS_BUILTIN_CMP_UPPER:
10521 case MIPS_BUILTIN_CMP_LOWER:
10522 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10523 condition = gen_single_cc (cmp_result, offset);
10524 return mips_builtin_branch_and_move (condition, target,
10525 const1_rtx, const0_rtx);
10527 default:
10528 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10529 return mips_builtin_branch_and_move (condition, target,
10530 const1_rtx, const0_rtx);
10534 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
10535 if nonnull, suggests a good place to put the boolean result. */
10537 static rtx
10538 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10540 rtx condition, cmp_result;
10541 int cmp_value;
10543 if (target == 0 || GET_MODE (target) != SImode)
10544 target = gen_reg_rtx (SImode);
10546 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10548 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10549 cmp_value = 32;
10550 else
10551 gcc_assert (0);
10553 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10554 return mips_builtin_branch_and_move (condition, target,
10555 const1_rtx, const0_rtx);
10558 /* EXP is a CALL_EXPR that calls the function described by BDESC.
10559 Expand the call and return an rtx for its return value.
10560 TARGET, if nonnull, suggests a good place to put this value. */
10562 static rtx
10563 mips_expand_builtin_1 (const struct mips_builtin_description *bdesc,
10564 tree exp, rtx target)
10566 switch (bdesc->builtin_type)
10568 case MIPS_BUILTIN_DIRECT:
10569 return mips_expand_builtin_direct (bdesc->icode, target, exp, true);
10571 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10572 return mips_expand_builtin_direct (bdesc->icode, target, exp, false);
10574 case MIPS_BUILTIN_MOVT:
10575 case MIPS_BUILTIN_MOVF:
10576 return mips_expand_builtin_movtf (bdesc->builtin_type, bdesc->icode,
10577 bdesc->cond, target, exp);
10579 case MIPS_BUILTIN_CMP_ANY:
10580 case MIPS_BUILTIN_CMP_ALL:
10581 case MIPS_BUILTIN_CMP_UPPER:
10582 case MIPS_BUILTIN_CMP_LOWER:
10583 case MIPS_BUILTIN_CMP_SINGLE:
10584 return mips_expand_builtin_compare (bdesc->builtin_type, bdesc->icode,
10585 bdesc->cond, target, exp);
10587 case MIPS_BUILTIN_BPOSGE32:
10588 return mips_expand_builtin_bposge (bdesc->builtin_type, target);
10590 gcc_unreachable ();
10593 /* Implement TARGET_EXPAND_BUILTIN. */
10595 static rtx
10596 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10597 enum machine_mode mode ATTRIBUTE_UNUSED,
10598 int ignore ATTRIBUTE_UNUSED)
10600 tree fndecl;
10601 unsigned int fcode;
10602 const struct mips_bdesc_map *m;
10604 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10605 fcode = DECL_FUNCTION_CODE (fndecl);
10606 if (TARGET_MIPS16)
10608 error ("built-in function %qs not supported for MIPS16",
10609 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
10610 return const0_rtx;
10613 for (m = mips_bdesc_arrays;
10614 m < &mips_bdesc_arrays[ARRAY_SIZE (mips_bdesc_arrays)];
10615 m++)
10617 if (fcode < m->size)
10618 return mips_expand_builtin_1 (m->bdesc + fcode, exp, target);
10619 fcode -= m->size;
10621 gcc_unreachable ();
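
/* Illustrative sketch (hypothetical names; ints stand in for the descriptor
   tables): the lookup above walks the concatenated tables, peeling off each
   table's size until the code falls inside one of them.  */

static const int *
sketch_find_descriptor (const int *const *tables, const unsigned int *sizes,
			unsigned int ntables, unsigned int fcode)
{
  unsigned int i;

  for (i = 0; i < ntables; i++)
    {
      if (fcode < sizes[i])
	return &tables[i][fcode];	/* FCODE belongs to this table */
      fcode -= sizes[i];
    }
  return 0;				/* FCODE out of range */
}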
10624 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
10625 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10626 struct mips16_constant {
10627 struct mips16_constant *next;
10628 rtx value;
10629 rtx label;
10630 enum machine_mode mode;
10633 /* Information about an incomplete MIPS16 constant pool. FIRST is the
10634 first constant, HIGHEST_ADDRESS is the highest address that the first
10635 byte of the pool can have, and INSN_ADDRESS is the current instruction
10636 address. */
10637 struct mips16_constant_pool {
10638 struct mips16_constant *first;
10639 int highest_address;
10640 int insn_address;
10643 /* Add constant VALUE to POOL and return its label. MODE is the
10644 value's mode (used for CONST_INTs, etc.). */
10646 static rtx
10647 mips16_add_constant (struct mips16_constant_pool *pool,
10648 rtx value, enum machine_mode mode)
10650 struct mips16_constant **p, *c;
10651 bool first_of_size_p;
10653 /* See whether the constant is already in the pool. If so, return the
10654 existing label, otherwise leave P pointing to the place where the
10655 constant should be added.
10657 Keep the pool sorted in increasing order of mode size so that we can
10658 reduce the number of alignments needed. */
10659 first_of_size_p = true;
10660 for (p = &pool->first; *p != 0; p = &(*p)->next)
10662 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10663 return (*p)->label;
10664 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10665 break;
10666 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10667 first_of_size_p = false;
10670 /* In the worst case, the constant needed by the earliest instruction
10671 will end up at the end of the pool. The entire pool must then be
10672 accessible from that instruction.
10674 When adding the first constant, set the pool's highest address to
10675 the address of the first out-of-range byte. Adjust this address
10676 downwards each time a new constant is added. */
10677 if (pool->first == 0)
10678 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
10679 of the instruction with the lowest two bits clear. The base PC
10680 value for LDPC has the lowest three bits clear. Assume the worst
10681 case here; namely that the PC-relative instruction occupies the
10682 last 2 bytes in an aligned word. */
10683 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10684 pool->highest_address -= GET_MODE_SIZE (mode);
10685 if (first_of_size_p)
10686 /* Take into account the worst possible padding due to alignment. */
10687 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10689 /* Create a new entry. */
10690 c = XNEW (struct mips16_constant);
10691 c->value = value;
10692 c->mode = mode;
10693 c->label = gen_label_rtx ();
10694 c->next = *p;
10695 *p = c;
10697 return c->label;
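
/* Illustrative worked example (all numbers invented): with 4-byte words,
   adding a first, 8-byte constant when insn_address is 100 gives
   highest_address = 100 - 2 + 0x8000 - 8 - 7 = 0x8053; each further constant
   lowers highest_address by its own size, plus the worst-case alignment
   padding when it is the first constant of a new size.
   mips16_lay_out_constants dumps the pool once insn_address threatens to
   pass this limit.  */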
10700 /* Output constant VALUE after instruction INSN and return the last
10701 instruction emitted. MODE is the mode of the constant. */
10703 static rtx
10704 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10706 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
10708 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10709 return emit_insn_after (gen_consttable_int (value, size), insn);
10712 if (SCALAR_FLOAT_MODE_P (mode))
10713 return emit_insn_after (gen_consttable_float (value), insn);
10715 if (VECTOR_MODE_P (mode))
10717 int i;
10719 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10720 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
10721 CONST_VECTOR_ELT (value, i), insn);
10722 return insn;
10725 gcc_unreachable ();
10728 /* Dump out the constants in CONSTANTS after INSN. */
10730 static void
10731 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
10733 struct mips16_constant *c, *next;
10734 int align;
10736 align = 0;
10737 for (c = constants; c != NULL; c = next)
10739 /* If necessary, increase the alignment of PC. */
10740 if (align < GET_MODE_SIZE (c->mode))
10742 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10743 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10745 align = GET_MODE_SIZE (c->mode);
10747 insn = emit_label_after (c->label, insn);
10748 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
10750 next = c->next;
10751 free (c);
10754 emit_barrier_after (insn);
10757 /* Return the length of instruction INSN. */
10759 static int
10760 mips16_insn_length (rtx insn)
10762 if (JUMP_P (insn))
10764 rtx body = PATTERN (insn);
10765 if (GET_CODE (body) == ADDR_VEC)
10766 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10767 if (GET_CODE (body) == ADDR_DIFF_VEC)
10768 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10770 return get_attr_length (insn);
10773 /* If *X is a symbolic constant that refers to the constant pool, add
10774 the constant to POOL and rewrite *X to use the constant's label. */
10776 static void
10777 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10779 rtx base, offset, label;
10781 split_const (*x, &base, &offset);
10782 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10784 label = mips16_add_constant (pool, get_pool_constant (base),
10785 get_pool_mode (base));
10786 base = gen_rtx_LABEL_REF (Pmode, label);
10787 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10791 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10792 INSN is the instruction we're rewriting and POOL points to the current
10793 constant pool. */
10794 struct mips16_rewrite_pool_refs_info {
10795 rtx insn;
10796 struct mips16_constant_pool *pool;
10799 /* Rewrite *X so that constant pool references refer to the constant's
10800 label instead. DATA points to a mips16_rewrite_pool_refs_info
10801 structure. */
10803 static int
10804 mips16_rewrite_pool_refs (rtx *x, void *data)
10806 struct mips16_rewrite_pool_refs_info *info = data;
10808 if (force_to_mem_operand (*x, Pmode))
10810 rtx mem = force_const_mem (GET_MODE (*x), *x);
10811 validate_change (info->insn, x, mem, false);
10814 if (MEM_P (*x))
10816 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10817 return -1;
10820 if (TARGET_MIPS16_TEXT_LOADS)
10821 mips16_rewrite_pool_constant (info->pool, x);
10823 return GET_CODE (*x) == CONST ? -1 : 0;
10826 /* Build MIPS16 constant pools. */
10828 static void
10829 mips16_lay_out_constants (void)
10831 struct mips16_constant_pool pool;
10832 struct mips16_rewrite_pool_refs_info info;
10833 rtx insn, barrier;
10835 if (!TARGET_MIPS16_PCREL_LOADS)
10836 return;
10838 barrier = 0;
10839 memset (&pool, 0, sizeof (pool));
10840 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10842 /* Rewrite constant pool references in INSN. */
10843 if (INSN_P (insn))
10845 info.insn = insn;
10846 info.pool = &pool;
10847 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10850 pool.insn_address += mips16_insn_length (insn);
10852 if (pool.first != NULL)
10854 /* If there are no natural barriers between the first user of
10855 the pool and the highest acceptable address, we'll need to
10856 create a new instruction to jump around the constant pool.
10857 In the worst case, this instruction will be 4 bytes long.
10859 If it's too late to do this transformation after INSN,
10860 do it immediately before INSN. */
10861 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10863 rtx label, jump;
10865 label = gen_label_rtx ();
10867 jump = emit_jump_insn_before (gen_jump (label), insn);
10868 JUMP_LABEL (jump) = label;
10869 LABEL_NUSES (label) = 1;
10870 barrier = emit_barrier_after (jump);
10872 emit_label_after (label, barrier);
10873 pool.insn_address += 4;
10876 /* See whether the constant pool is now out of range of the first
10877 user. If so, output the constants after the previous barrier.
10878 Note that any instructions between BARRIER and INSN (inclusive)
10879 will use negative offsets to refer to the pool. */
10880 if (pool.insn_address > pool.highest_address)
10882 mips16_emit_constants (pool.first, barrier);
10883 pool.first = NULL;
10884 barrier = 0;
10886 else if (BARRIER_P (insn))
10887 barrier = insn;
10890 mips16_emit_constants (pool.first, get_last_insn ());
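/* As a rough illustration, when the pool has to be dumped mid-function
   the code above arranges a layout along these lines:

	...
	j	1f		# up to 4 bytes to jump around the pool
	<barrier>
	<pool constants>
   1:
	...			# refers back to the pool with negative
				# PC-relative offsets

   If a natural barrier (for example, one after an unconditional jump)
   already lies between the first pool user and the overflow point, it
   is reused and no extra jump is emitted.  */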
10893 /* A temporary variable used by for_each_rtx callbacks, etc. */
10894 static rtx mips_sim_insn;
10896 /* A structure representing the state of the processor pipeline.
10897 Used by the mips_sim_* family of functions. */
10898 struct mips_sim {
10899 /* The maximum number of instructions that can be issued in a cycle.
10900 (Caches mips_issue_rate.) */
10901 unsigned int issue_rate;
10903 /* The current simulation time. */
10904 unsigned int time;
10906 /* How many more instructions can be issued in the current cycle. */
10907 unsigned int insns_left;
10909 /* LAST_SET[X].INSN is the last instruction to set register X.
10910 LAST_SET[X].TIME is the time at which that instruction was issued.
10911 INSN is null if no instruction has yet set register X. */
10912 struct {
10913 rtx insn;
10914 unsigned int time;
10915 } last_set[FIRST_PSEUDO_REGISTER];
10917 /* The pipeline's current DFA state. */
10918 state_t dfa_state;
10921 /* Reset STATE to the initial simulation state. */
10923 static void
10924 mips_sim_reset (struct mips_sim *state)
10926 state->time = 0;
10927 state->insns_left = state->issue_rate;
10928 memset (&state->last_set, 0, sizeof (state->last_set));
10929 state_reset (state->dfa_state);
10932 /* Initialize STATE before its first use. DFA_STATE points to an
10933 allocated but uninitialized DFA state. */
10935 static void
10936 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10938 state->issue_rate = mips_issue_rate ();
10939 state->dfa_state = dfa_state;
10940 mips_sim_reset (state);
10943 /* Advance STATE by one clock cycle. */
10945 static void
10946 mips_sim_next_cycle (struct mips_sim *state)
10948 state->time++;
10949 state->insns_left = state->issue_rate;
10950 state_transition (state->dfa_state, 0);
10953 /* Advance simulation state STATE until instruction INSN can read
10954 register REG. */
10956 static void
10957 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10959 unsigned int regno, end_regno;
10961 end_regno = END_REGNO (reg);
10962 for (regno = REGNO (reg); regno < end_regno; regno++)
10963 if (state->last_set[regno].insn != 0)
10965 unsigned int t;
10967 t = (state->last_set[regno].time
10968 + insn_latency (state->last_set[regno].insn, insn));
10969 while (state->time < t)
10970 mips_sim_next_cycle (state);
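/* For example, if $2 was last set at time 10 by an instruction whose
   result reaches INSN with a latency of 3 cycles, the loop above keeps
   calling mips_sim_next_cycle until STATE->time reaches 13.  */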
10974 /* A for_each_rtx callback. If *X is a register, advance simulation state
10975 DATA until mips_sim_insn can read the register's value. */
10977 static int
10978 mips_sim_wait_regs_2 (rtx *x, void *data)
10980 if (REG_P (*x))
10981 mips_sim_wait_reg (data, mips_sim_insn, *x);
10982 return 0;
10985 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10987 static void
10988 mips_sim_wait_regs_1 (rtx *x, void *data)
10990 for_each_rtx (x, mips_sim_wait_regs_2, data);
10993 /* Advance simulation state STATE until all of INSN's register
10994 dependencies are satisfied. */
10996 static void
10997 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10999 mips_sim_insn = insn;
11000 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11003 /* Advance simulation state STATE until the units required by
11004 instruction INSN are available. */
11006 static void
11007 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11009 state_t tmp_state;
11011 tmp_state = alloca (state_size ());
11012 while (state->insns_left == 0
11013 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11014 state_transition (tmp_state, insn) >= 0))
11015 mips_sim_next_cycle (state);
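/* The DFA interface has no non-destructive "could INSN issue now?"
   query, so the loop above probes a scratch copy of the state instead:
   a nonnegative state_transition result on the copy means INSN cannot
   be issued in the current cycle, in which case we advance a cycle and
   try again.  The real state is only updated by mips_sim_issue_insn.  */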
11018 /* Advance simulation state STATE until INSN is ready to issue. */
11020 static void
11021 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11023 mips_sim_wait_regs (state, insn);
11024 mips_sim_wait_units (state, insn);
11027 /* mips_sim_insn has just set X. Update the LAST_SET array
11028 in simulation state DATA. */
11030 static void
11031 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11033 struct mips_sim *state;
11035 state = data;
11036 if (REG_P (x))
11038 unsigned int regno, end_regno;
11040 end_regno = END_REGNO (x);
11041 for (regno = REGNO (x); regno < end_regno; regno++)
11043 state->last_set[regno].insn = mips_sim_insn;
11044 state->last_set[regno].time = state->time;
11049 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11050 can issue immediately (i.e., that mips_sim_wait_insn has already
11051 been called). */
11053 static void
11054 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11056 state_transition (state->dfa_state, insn);
11057 state->insns_left--;
11059 mips_sim_insn = insn;
11060 note_stores (PATTERN (insn), mips_sim_record_set, state);
11063 /* Simulate issuing a NOP in state STATE. */
11065 static void
11066 mips_sim_issue_nop (struct mips_sim *state)
11068 if (state->insns_left == 0)
11069 mips_sim_next_cycle (state);
11070 state->insns_left--;
11073 /* Update simulation state STATE so that it's ready to accept the instruction
11074 after INSN. INSN should be part of the main rtl chain, not a member of a
11075 SEQUENCE. */
11077 static void
11078 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11080 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11081 if (JUMP_P (insn))
11082 mips_sim_issue_nop (state);
11084 switch (GET_CODE (SEQ_BEGIN (insn)))
11086 case CODE_LABEL:
11087 case CALL_INSN:
11088 /* We can't predict the processor state after a call or label. */
11089 mips_sim_reset (state);
11090 break;
11092 case JUMP_INSN:
11093 /* The delay slots of branch likely instructions are only executed
11094 when the branch is taken. Therefore, if the caller has simulated
11095 the delay slot instruction, STATE does not really reflect the state
11096 of the pipeline for the instruction after the delay slot. Also,
11097 branch likely instructions tend to incur a penalty when not taken,
11098 so there will probably be an extra delay between the branch and
11099 the instruction after the delay slot. */
11100 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11101 mips_sim_reset (state);
11102 break;
11104 default:
11105 break;
11109 /* The VR4130 pipeline issues aligned pairs of instructions together,
11110 but it stalls the second instruction if it depends on the first.
11111 In order to cut down the amount of logic required, this dependence
11112 check is not based on a full instruction decode. Instead, any non-SPECIAL
11113 instruction is assumed to modify the register specified by bits 20-16
11114 (which is usually the "rt" field).
11116 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
11117 input, so we can end up with a false dependence between the branch
11118 and its delay slot. If this situation occurs in instruction INSN,
11119 try to avoid it by swapping rs and rt. */
11121 static void
11122 vr4130_avoid_branch_rt_conflict (rtx insn)
11124 rtx first, second;
11126 first = SEQ_BEGIN (insn);
11127 second = SEQ_END (insn);
11128 if (JUMP_P (first)
11129 && NONJUMP_INSN_P (second)
11130 && GET_CODE (PATTERN (first)) == SET
11131 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11132 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11134 /* Check for the right kind of condition. */
11135 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11136 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11137 && REG_P (XEXP (cond, 0))
11138 && REG_P (XEXP (cond, 1))
11139 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11140 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11142 /* SECOND mentions the rt register but not the rs register. */
11143 rtx tmp = XEXP (cond, 0);
11144 XEXP (cond, 0) = XEXP (cond, 1);
11145 XEXP (cond, 1) = tmp;
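/* As a rough example, given

	beq	$4,$5,label
	 addiu	$5,$5,1		# delay slot reads the rt operand ($5)

   the simplified VR4130 decode treats the branch as a writer of $5 and
   would stall the pair, so the code above rewrites the branch as
   "beq $5,$4,label", which tests the same condition but puts $4 in the
   rt field.  */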
11150 /* Implement -mvr4130-align. Go through each basic block and simulate the
11151 processor pipeline. If we find that a pair of instructions could execute
11152 in parallel, and the first of those instructions is not 8-byte aligned,
11153 insert a nop to make it aligned. */
11155 static void
11156 vr4130_align_insns (void)
11158 struct mips_sim state;
11159 rtx insn, subinsn, last, last2, next;
11160 bool aligned_p;
11162 dfa_start ();
11164 /* LAST is the last instruction before INSN to have a nonzero length.
11165 LAST2 is the last such instruction before LAST. */
11166 last = 0;
11167 last2 = 0;
11169 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11170 aligned_p = true;
11172 mips_sim_init (&state, alloca (state_size ()));
11173 for (insn = get_insns (); insn != 0; insn = next)
11175 unsigned int length;
11177 next = NEXT_INSN (insn);
11179 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11180 This isn't really related to the alignment pass, but we do it on
11181 the fly to avoid a separate instruction walk. */
11182 vr4130_avoid_branch_rt_conflict (insn);
11184 if (USEFUL_INSN_P (insn))
11185 FOR_EACH_SUBINSN (subinsn, insn)
11187 mips_sim_wait_insn (&state, subinsn);
11189 /* If we want this instruction to issue in parallel with the
11190 previous one, make sure that the previous instruction is
11191 aligned. There are several reasons why this isn't worthwhile
11192 when the second instruction is a call:
11194 - Calls are less likely to be performance critical.
11195 - There's a good chance that the delay slot can execute
11196 in parallel with the call.
11197 - The return address would then be unaligned.
11199 In general, if we're going to insert a nop between instructions
11200 X and Y, it's better to insert it immediately after X. That
11201 way, if the nop makes Y aligned, it will also align any labels
11202 between X and Y. */
11203 if (state.insns_left != state.issue_rate
11204 && !CALL_P (subinsn))
11206 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11208 /* SUBINSN is the first instruction in INSN and INSN is
11209 aligned. We want to align the previous instruction
11210 instead, so insert a nop between LAST2 and LAST.
11212 Note that LAST could be either a single instruction
11213 or a branch with a delay slot. In the latter case,
11214 LAST, like INSN, is already aligned, but the delay
11215 slot must have some extra delay that stops it from
11216 issuing at the same time as the branch. We therefore
11217 insert a nop before the branch in order to align its
11218 delay slot. */
11219 emit_insn_after (gen_nop (), last2);
11220 aligned_p = false;
11222 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11224 /* SUBINSN is the delay slot of INSN, but INSN is
11225 currently unaligned. Insert a nop between
11226 LAST and INSN to align it. */
11227 emit_insn_after (gen_nop (), last);
11228 aligned_p = true;
11231 mips_sim_issue_insn (&state, subinsn);
11233 mips_sim_finish_insn (&state, insn);
11235 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11236 length = get_attr_length (insn);
11237 if (length > 0)
11239 /* If the instruction is an asm statement or multi-instruction
11240 mips.md pattern, the length is only an estimate. Insert an
11241 8-byte alignment after it so that the following instructions
11242 can be handled correctly. */
11243 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11244 && (recog_memoized (insn) < 0 || length >= 8))
11246 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11247 next = NEXT_INSN (next);
11248 mips_sim_next_cycle (&state);
11249 aligned_p = true;
11251 else if (length & 4)
11252 aligned_p = !aligned_p;
11253 last2 = last;
11254 last = insn;
11257 /* See whether INSN is an aligned label. */
11258 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11259 aligned_p = true;
11261 dfa_finish ();
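/* A note on the gen_align call above: its operand is a log2 byte count,
   so GEN_INT (3) requests 8-byte alignment.  This matches the VR4130's
   aligned issue pairs and re-establishes a known alignment after an asm
   statement or multi-instruction pattern of uncertain length.  */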
11264 /* This structure records that the current function has a LO_SUM
11265 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
11266 the largest offset applied to BASE by all such LO_SUMs. */
11267 struct mips_lo_sum_offset {
11268 rtx base;
11269 HOST_WIDE_INT offset;
11272 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
11274 static hashval_t
11275 mips_hash_base (rtx base)
11277 int do_not_record_p;
11279 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
11282 /* Hash-table callbacks for mips_lo_sum_offsets. */
11284 static hashval_t
11285 mips_lo_sum_offset_hash (const void *entry)
11287 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
11290 static int
11291 mips_lo_sum_offset_eq (const void *entry, const void *value)
11293 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
11294 (const_rtx) value);
11297 /* Look up symbolic constant X in HTAB, which is a hash table of
11298 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
11299 paired with a recorded LO_SUM, otherwise record X in the table. */
11301 static bool
11302 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
11304 rtx base, offset;
11305 void **slot;
11306 struct mips_lo_sum_offset *entry;
11308 /* Split X into a base and offset. */
11309 split_const (x, &base, &offset);
11310 if (UNSPEC_ADDRESS_P (base))
11311 base = UNSPEC_ADDRESS (base);
11313 /* Look up the base in the hash table. */
11314 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
11315 if (slot == NULL)
11316 return false;
11318 entry = (struct mips_lo_sum_offset *) *slot;
11319 if (option == INSERT)
11321 if (entry == NULL)
11323 entry = XNEW (struct mips_lo_sum_offset);
11324 entry->base = base;
11325 entry->offset = INTVAL (offset);
11326 *slot = entry;
11328 else
11330 if (INTVAL (offset) > entry->offset)
11331 entry->offset = INTVAL (offset);
11334 return INTVAL (offset) <= entry->offset;
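/* For example, if the current function contains LO_SUMs for (foo + 4)
   and (foo + 12), the table ends up recording BASE = foo with
   MAX_OFFSET = 12.  A later NO_INSERT query for foo + 8 is then treated
   as pairable, whereas a query for foo + 16 is not.  */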
11337 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
11338 Record every LO_SUM in *LOC. */
11340 static int
11341 mips_record_lo_sum (rtx *loc, void *data)
11343 if (GET_CODE (*loc) == LO_SUM)
11344 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
11345 return 0;
11348 /* Return true if INSN is a SET of an orphaned high-part relocation.
11349 HTAB is a hash table of mips_lo_sum_offsets that describes all the
11350 LO_SUMs in the current function. */
11352 static bool
11353 mips_orphaned_high_part_p (htab_t htab, rtx insn)
11355 enum mips_symbol_type type;
11356 rtx x, set;
11358 set = single_set (insn);
11359 if (set)
11361 /* Check for %his. */
11362 x = SET_SRC (set);
11363 if (GET_CODE (x) == HIGH
11364 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
11365 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
11367 /* Check for local %gots (and %got_pages, which is redundant but OK). */
11368 if (GET_CODE (x) == UNSPEC
11369 && XINT (x, 1) == UNSPEC_LOAD_GOT
11370 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
11371 SYMBOL_CONTEXT_LEA, &type)
11372 && type == SYMBOL_GOTOFF_PAGE)
11373 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
11375 return false;
11378 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
11379 INSN and a previous instruction, avoid it by inserting nops after
11380 instruction AFTER.
11382 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
11383 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
11384 before using the value of that register. *HILO_DELAY counts the
11385 number of instructions since the last hilo hazard (that is,
11386 the number of instructions since the last MFLO or MFHI).
11388 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
11389 for the next instruction.
11391 LO_REG is an rtx for the LO register, used in dependence checking. */
11393 static void
11394 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
11395 rtx *delayed_reg, rtx lo_reg)
11397 rtx pattern, set;
11398 int nops, ninsns;
11400 pattern = PATTERN (insn);
11402 /* Do not put the whole function in .set noreorder if it contains
11403 an asm statement. We don't know whether there will be hazards
11404 between the asm statement and the gcc-generated code. */
11405 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
11406 cfun->machine->all_noreorder_p = false;
11408 /* Ignore zero-length instructions (barriers and the like). */
11409 ninsns = get_attr_length (insn) / 4;
11410 if (ninsns == 0)
11411 return;
11413 /* Work out how many nops are needed. Note that we only care about
11414 registers that are explicitly mentioned in the instruction's pattern.
11415 It doesn't matter that calls use the argument registers or that they
11416 clobber hi and lo. */
11417 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
11418 nops = 2 - *hilo_delay;
11419 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
11420 nops = 1;
11421 else
11422 nops = 0;
11424 /* Insert the nops between this instruction and the previous one.
11425 Each new nop takes us further from the last hilo hazard. */
11426 *hilo_delay += nops;
11427 while (nops-- > 0)
11428 emit_insn_after (gen_hazard_nop (), after);
11430 /* Set up the state for the next instruction. */
11431 *hilo_delay += ninsns;
11432 *delayed_reg = 0;
11433 if (INSN_CODE (insn) >= 0)
11434 switch (get_attr_hazard (insn))
11436 case HAZARD_NONE:
11437 break;
11439 case HAZARD_HILO:
11440 *hilo_delay = 0;
11441 break;
11443 case HAZARD_DELAY:
11444 set = single_set (insn);
11445 gcc_assert (set);
11446 *delayed_reg = SET_DEST (set);
11447 break;
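/* Two worked examples of the nop insertion above:

	mflo	$2		# HAZARD_HILO: *hilo_delay becomes 0
	mult	$4,$5		# writes LO while *hilo_delay < 2, so
				# 2 - 0 = 2 nops go between the two insns

   and, on targets whose loads carry a delay hazard (HAZARD_DELAY):

	lw	$2,0($sp)	# *delayed_reg becomes $2
	addu	$3,$2,$4	# reads $2 in the very next slot, so one
				# nop is inserted before the ADDU.  */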
11451 /* Go through the instruction stream and insert nops where necessary.
11452 Also delete any high-part relocations whose partnering low parts
11453 are now all dead. See if the whole function can then be put into
11454 .set noreorder and .set nomacro. */
11456 static void
11457 mips_reorg_process_insns (void)
11459 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
11460 int hilo_delay;
11461 htab_t htab;
11463 /* Force all instructions to be split into their final form. */
11464 split_all_insns_noflow ();
11466 /* Recalculate instruction lengths without taking nops into account. */
11467 cfun->machine->ignore_hazard_length_p = true;
11468 shorten_branches (get_insns ());
11470 cfun->machine->all_noreorder_p = true;
11472 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
11473 if (!TARGET_EXPLICIT_RELOCS)
11474 cfun->machine->all_noreorder_p = false;
11476 /* Profiled functions can't be all noreorder because the profiler
11477 support uses assembler macros. */
11478 if (crtl->profile)
11479 cfun->machine->all_noreorder_p = false;
11481 /* Code compiled with -mfix-vr4120 can't be all noreorder because
11482 we rely on the assembler to work around some errata. */
11483 if (TARGET_FIX_VR4120)
11484 cfun->machine->all_noreorder_p = false;
11486 /* The same is true for -mfix-vr4130 if we might generate MFLO or
11487 MFHI instructions. Note that we avoid using MFLO and MFHI if
11488 the VR4130 MACC and DMACC instructions are available instead;
11489 see the *mfhilo_{si,di}_macc patterns. */
11490 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
11491 cfun->machine->all_noreorder_p = false;
11493 htab = htab_create (37, mips_lo_sum_offset_hash,
11494 mips_lo_sum_offset_eq, free);
11496 /* Make a first pass over the instructions, recording all the LO_SUMs. */
11497 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
11498 FOR_EACH_SUBINSN (subinsn, insn)
11499 if (INSN_P (subinsn))
11500 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
11502 last_insn = 0;
11503 hilo_delay = 2;
11504 delayed_reg = 0;
11505 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
11507 /* Make a second pass over the instructions. Delete orphaned
11508 high-part relocations or turn them into NOPs. Avoid hazards
11509 by inserting NOPs. */
11510 for (insn = get_insns (); insn != 0; insn = next_insn)
11512 next_insn = NEXT_INSN (insn);
11513 if (INSN_P (insn))
11515 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11517 /* If we find an orphaned high-part relocation in a delay
11518 slot, it's easier to turn that instruction into a NOP than
11519 to delete it. The delay slot will be a NOP either way. */
11520 FOR_EACH_SUBINSN (subinsn, insn)
11521 if (INSN_P (subinsn))
11523 if (mips_orphaned_high_part_p (htab, subinsn))
11525 PATTERN (subinsn) = gen_nop ();
11526 INSN_CODE (subinsn) = CODE_FOR_nop;
11528 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
11529 &delayed_reg, lo_reg);
11531 last_insn = insn;
11533 else
11535 /* INSN is a single instruction. Delete it if it's an
11536 orphaned high-part relocation. */
11537 if (mips_orphaned_high_part_p (htab, insn))
11538 delete_insn (insn);
11539 else
11541 mips_avoid_hazard (last_insn, insn, &hilo_delay,
11542 &delayed_reg, lo_reg);
11543 last_insn = insn;
11549 htab_delete (htab);
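/* For example, if earlier passes delete

	lw	$3,%lo(foo)($2)

   as dead code but leave its partnering

	lui	$2,%hi(foo)

   behind, the second pass above removes the orphaned LUI outright, or
   turns it into a nop when it occupies a delay slot.  */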
11552 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
11554 static void
11555 mips_reorg (void)
11557 mips16_lay_out_constants ();
11558 if (mips_base_delayed_branch)
11559 dbr_schedule (get_insns ());
11560 mips_reorg_process_insns ();
11561 if (TARGET_EXPLICIT_RELOCS && TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
11562 vr4130_align_insns ();
11565 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
11566 in order to avoid duplicating too much logic from elsewhere. */
11568 static void
11569 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11570 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11571 tree function)
11573 rtx this, temp1, temp2, insn, fnaddr;
11574 bool use_sibcall_p;
11576 /* Pretend to be a post-reload pass while generating rtl. */
11577 reload_completed = 1;
11579 /* Mark the end of the (empty) prologue. */
11580 emit_note (NOTE_INSN_PROLOGUE_END);
11582 /* Determine if we can use a sibcall to call FUNCTION directly. */
11583 fnaddr = XEXP (DECL_RTL (function), 0);
11584 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
11585 && const_call_insn_operand (fnaddr, Pmode));
11587 /* Determine if we need to load FNADDR from the GOT. */
11588 if (!use_sibcall_p)
11589 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
11591 case SYMBOL_GOT_PAGE_OFST:
11592 case SYMBOL_GOT_DISP:
11593 /* Pick a global pointer. Use a call-clobbered register if
11594 TARGET_CALL_SAVED_GP. */
11595 cfun->machine->global_pointer =
11596 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
11597 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11599 /* Set up the global pointer for n32 or n64 abicalls. */
11600 mips_emit_loadgp ();
11601 break;
11603 default:
11604 break;
11607 /* We need two temporary registers in some cases. */
11608 temp1 = gen_rtx_REG (Pmode, 2);
11609 temp2 = gen_rtx_REG (Pmode, 3);
11611 /* Find out which register contains the "this" pointer. */
11612 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11613 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
11614 else
11615 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
11617 /* Add DELTA to THIS. */
11618 if (delta != 0)
11620 rtx offset = GEN_INT (delta);
11621 if (!SMALL_OPERAND (delta))
11623 mips_emit_move (temp1, offset);
11624 offset = temp1;
11626 emit_insn (gen_add3_insn (this, this, offset));
11629 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
11630 if (vcall_offset != 0)
11632 rtx addr;
11634 /* Set TEMP1 to *THIS. */
11635 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
11637 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
11638 addr = mips_add_offset (temp2, temp1, vcall_offset);
11640 /* Load the offset and add it to THIS. */
11641 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
11642 emit_insn (gen_add3_insn (this, this, temp1));
11645 /* Jump to the target function. Use a sibcall if direct jumps are
11646 allowed, otherwise load the address into a register first. */
11647 if (use_sibcall_p)
11649 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
11650 SIBLING_CALL_P (insn) = 1;
11652 else
11654 /* This is messy. GAS treats "la $25,foo" as part of a call
11655 sequence and may allow a global "foo" to be lazily bound.
11656 The general move patterns therefore reject this combination.
11658 In this context, lazy binding would actually be OK
11659 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
11660 TARGET_CALL_SAVED_GP; see mips_load_call_address.
11661 We must therefore load the address via a temporary
11662 register if mips_dangerous_for_la25_p.
11664 If we jump to the temporary register rather than $25, the assembler
11665 can use the move insn to fill the jump's delay slot. */
11666 if (TARGET_USE_PIC_FN_ADDR_REG
11667 && !mips_dangerous_for_la25_p (fnaddr))
11668 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
11669 mips_load_call_address (temp1, fnaddr, true);
11671 if (TARGET_USE_PIC_FN_ADDR_REG
11672 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
11673 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
11674 emit_jump_insn (gen_indirect_jump (temp1));
11677 /* Run just enough of rest_of_compilation. This sequence was
11678 "borrowed" from alpha.c. */
11679 insn = get_insns ();
11680 insn_locators_alloc ();
11681 split_all_insns_noflow ();
11682 mips16_lay_out_constants ();
11683 shorten_branches (insn);
11684 final_start_function (insn, file, 1);
11685 final (insn, file, 1);
11686 final_end_function ();
11687 free_after_compilation (cfun);
11689 /* Clean up the vars set above. Note that final_end_function resets
11690 the global pointer for us. */
11691 reload_completed = 0;
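/* Schematically, for a small DELTA, a zero VCALL_OFFSET and a directly
   reachable target, the thunk generated above amounts to little more
   than:

	addiu	$4,$4,DELTA	# adjust the "this" pointer
	j	function	# tail-call the real function

   with $5 standing in for $4 when the function returns its result in
   memory, and with an indirect jump through $25 (or a temporary) in
   the PIC cases handled above.  */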
11694 /* The last argument passed to mips_set_mips16_mode, or negative if the
11695 function hasn't been called yet. */
11696 static GTY(()) int was_mips16_p = -1;
11698 /* Set up the target-dependent global state so that it matches the
11699 current function's ISA mode. */
11701 static void
11702 mips_set_mips16_mode (int mips16_p)
11704 if (mips16_p == was_mips16_p)
11705 return;
11707 /* Restore base settings of various flags. */
11708 target_flags = mips_base_target_flags;
11709 flag_schedule_insns = mips_base_schedule_insns;
11710 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
11711 flag_move_loop_invariants = mips_base_move_loop_invariants;
11712 align_loops = mips_base_align_loops;
11713 align_jumps = mips_base_align_jumps;
11714 align_functions = mips_base_align_functions;
11716 if (mips16_p)
11718 /* Switch to MIPS16 mode. */
11719 target_flags |= MASK_MIPS16;
11721 /* Don't run the scheduler before reload, since it tends to
11722 increase register pressure. */
11723 flag_schedule_insns = 0;
11725 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
11726 the whole function to be in a single section. */
11727 flag_reorder_blocks_and_partition = 0;
11729 /* Don't move loop invariants, because it tends to increase
11730 register pressure. It also introduces an extra move in cases
11731 where the constant is the first operand in a two-operand binary
11732 instruction, or when it forms a register argument to a function
11733 call. */
11734 flag_move_loop_invariants = 0;
11736 /* Silently disable -mexplicit-relocs since it doesn't apply
11737 to MIPS16 code. Even so, it would be overly pedantic to warn
11738 about "-mips16 -mexplicit-relocs", especially given that
11739 we use a %gprel() operator. */
11740 target_flags &= ~MASK_EXPLICIT_RELOCS;
11742 /* Experiments suggest we get the best overall section-anchor
11743 results from using the range of an unextended LW or SW. Code
11744 that makes heavy use of byte or short accesses can do better
11745 with ranges of 0...31 and 0...63 respectively, but most code is
11746 sensitive to the range of LW and SW instead. */
11747 targetm.min_anchor_offset = 0;
11748 targetm.max_anchor_offset = 127;
11750 if (flag_pic || TARGET_ABICALLS)
11751 sorry ("MIPS16 PIC");
11753 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
11754 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
11756 else
11758 /* Switch to normal (non-MIPS16) mode. */
11759 target_flags &= ~MASK_MIPS16;
11761 /* Provide default values for align_* for 64-bit targets. */
11762 if (TARGET_64BIT)
11764 if (align_loops == 0)
11765 align_loops = 8;
11766 if (align_jumps == 0)
11767 align_jumps = 8;
11768 if (align_functions == 0)
11769 align_functions = 8;
11772 targetm.min_anchor_offset = -32768;
11773 targetm.max_anchor_offset = 32767;
11776 /* (Re)initialize MIPS target internals for new ISA. */
11777 mips_init_relocs ();
11779 if (was_mips16_p >= 0)
11780 /* Reinitialize target-dependent state. */
11781 target_reinit ();
11783 was_mips16_p = mips16_p;
11786 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
11787 function should use the MIPS16 ISA and switch modes accordingly. */
11789 static void
11790 mips_set_current_function (tree fndecl)
11792 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
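/* For example, when most of a file is compiled with -mips16, a
   time-critical routine can opt out with

	int __attribute__ ((nomips16)) hot_loop (int *p, int n);

   and the hook above switches the back end to normal mode while that
   function is compiled, then back to MIPS16 mode for the next function
   that needs it.  */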
11795 /* Allocate a chunk of memory for per-function machine-dependent data. */
11797 static struct machine_function *
11798 mips_init_machine_status (void)
11800 return ((struct machine_function *)
11801 ggc_alloc_cleared (sizeof (struct machine_function)));
11804 /* Return the processor associated with the given ISA level, or null
11805 if the ISA isn't valid. */
11807 static const struct mips_cpu_info *
11808 mips_cpu_info_from_isa (int isa)
11810 unsigned int i;
11812 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11813 if (mips_cpu_info_table[i].isa == isa)
11814 return mips_cpu_info_table + i;
11816 return NULL;
11819 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11820 with a final "000" replaced by "k". Ignore case.
11822 Note: this function is shared between GCC and GAS. */
11824 static bool
11825 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11827 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11828 given++, canonical++;
11830 return ((*given == 0 && *canonical == 0)
11831 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11834 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11835 CPU name. We've traditionally allowed a lot of variation here.
11837 Note: this function is shared between GCC and GAS. */
11839 static bool
11840 mips_matching_cpu_name_p (const char *canonical, const char *given)
11842 /* First see if the name matches exactly, or with a final "000"
11843 turned into "k". */
11844 if (mips_strict_matching_cpu_name_p (canonical, given))
11845 return true;
11847 /* If not, try comparing based on numerical designation alone.
11848 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11849 if (TOLOWER (*given) == 'r')
11850 given++;
11851 if (!ISDIGIT (*given))
11852 return false;
11854 /* Skip over some well-known prefixes in the canonical name,
11855 hoping to find a number there too. */
11856 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11857 canonical += 2;
11858 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11859 canonical += 2;
11860 else if (TOLOWER (canonical[0]) == 'r')
11861 canonical += 1;
11863 return mips_strict_matching_cpu_name_p (canonical, given);
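/* For example, the canonical name "r4000" is matched by "r4000", "4000"
   and "r4k", and the canonical "vr4130" by "vr4130", "r4130" and
   "4130", thanks to the prefix skipping and "000" -> "k" rules above.  */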
11866 /* Return the mips_cpu_info entry for the processor or ISA given
11867 by CPU_STRING. Return null if the string isn't recognized.
11869 A similar function exists in GAS. */
11871 static const struct mips_cpu_info *
11872 mips_parse_cpu (const char *cpu_string)
11874 unsigned int i;
11875 const char *s;
11877 /* In the past, we allowed upper-case CPU names, but it doesn't
11878 work well with the multilib machinery. */
11879 for (s = cpu_string; *s != 0; s++)
11880 if (ISUPPER (*s))
11882 warning (0, "CPU names must be lower case");
11883 break;
11886 /* 'from-abi' selects the most compatible architecture for the given
11887 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11888 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11889 version. */
11890 if (strcasecmp (cpu_string, "from-abi") == 0)
11891 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11892 : ABI_NEEDS_64BIT_REGS ? 3
11893 : (TARGET_64BIT ? 3 : 1));
11895 /* 'default' has traditionally been a no-op. Probably not very useful. */
11896 if (strcasecmp (cpu_string, "default") == 0)
11897 return NULL;
11899 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11900 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
11901 return mips_cpu_info_table + i;
11903 return NULL;
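/* For example, "-march=from-abi" selects MIPS I (ISA level 1) for
   -mabi=32 and MIPS III (ISA level 3) for the 64-bit-register ABIs such
   as n32 and n64; for the EABIs the choice follows -mgp32/-mgp64.  */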
11906 /* Set up globals to generate code for the ISA or processor
11907 described by INFO. */
11909 static void
11910 mips_set_architecture (const struct mips_cpu_info *info)
11912 if (info != 0)
11914 mips_arch_info = info;
11915 mips_arch = info->cpu;
11916 mips_isa = info->isa;
11920 /* Likewise for tuning. */
11922 static void
11923 mips_set_tune (const struct mips_cpu_info *info)
11925 if (info != 0)
11927 mips_tune_info = info;
11928 mips_tune = info->cpu;
11932 /* Implement TARGET_HANDLE_OPTION. */
11934 static bool
11935 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
11937 switch (code)
11939 case OPT_mabi_:
11940 if (strcmp (arg, "32") == 0)
11941 mips_abi = ABI_32;
11942 else if (strcmp (arg, "o64") == 0)
11943 mips_abi = ABI_O64;
11944 else if (strcmp (arg, "n32") == 0)
11945 mips_abi = ABI_N32;
11946 else if (strcmp (arg, "64") == 0)
11947 mips_abi = ABI_64;
11948 else if (strcmp (arg, "eabi") == 0)
11949 mips_abi = ABI_EABI;
11950 else
11951 return false;
11952 return true;
11954 case OPT_march_:
11955 case OPT_mtune_:
11956 return mips_parse_cpu (arg) != 0;
11958 case OPT_mips:
11959 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
11960 return mips_isa_option_info != 0;
11962 case OPT_mno_flush_func:
11963 mips_cache_flush_func = NULL;
11964 return true;
11966 case OPT_mcode_readable_:
11967 if (strcmp (arg, "yes") == 0)
11968 mips_code_readable = CODE_READABLE_YES;
11969 else if (strcmp (arg, "pcrel") == 0)
11970 mips_code_readable = CODE_READABLE_PCREL;
11971 else if (strcmp (arg, "no") == 0)
11972 mips_code_readable = CODE_READABLE_NO;
11973 else
11974 return false;
11975 return true;
11977 default:
11978 return true;
11982 /* Implement OVERRIDE_OPTIONS. */
11984 void
11985 mips_override_options (void)
11987 int i, start, regno, mode;
11989 #ifdef SUBTARGET_OVERRIDE_OPTIONS
11990 SUBTARGET_OVERRIDE_OPTIONS;
11991 #endif
11993 /* Set the small data limit. */
11994 mips_small_data_threshold = (g_switch_set
11995 ? g_switch_value
11996 : MIPS_DEFAULT_GVALUE);
11998 /* The following code determines the architecture and register size.
11999 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
12000 The GAS and GCC code should be kept in sync as much as possible. */
12002 if (mips_arch_string != 0)
12003 mips_set_architecture (mips_parse_cpu (mips_arch_string));
12005 if (mips_isa_option_info != 0)
12007 if (mips_arch_info == 0)
12008 mips_set_architecture (mips_isa_option_info);
12009 else if (mips_arch_info->isa != mips_isa_option_info->isa)
12010 error ("%<-%s%> conflicts with the other architecture options, "
12011 "which specify a %s processor",
12012 mips_isa_option_info->name,
12013 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
12016 if (mips_arch_info == 0)
12018 #ifdef MIPS_CPU_STRING_DEFAULT
12019 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
12020 #else
12021 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
12022 #endif
12025 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
12026 error ("%<-march=%s%> is not compatible with the selected ABI",
12027 mips_arch_info->name);
12029 /* Optimize for mips_arch, unless -mtune selects a different processor. */
12030 if (mips_tune_string != 0)
12031 mips_set_tune (mips_parse_cpu (mips_tune_string));
12033 if (mips_tune_info == 0)
12034 mips_set_tune (mips_arch_info);
12036 if ((target_flags_explicit & MASK_64BIT) != 0)
12038 /* The user specified the size of the integer registers. Make sure
12039 it agrees with the ABI and ISA. */
12040 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
12041 error ("%<-mgp64%> used with a 32-bit processor");
12042 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
12043 error ("%<-mgp32%> used with a 64-bit ABI");
12044 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
12045 error ("%<-mgp64%> used with a 32-bit ABI");
12047 else
12049 /* Infer the integer register size from the ABI and processor.
12050 Restrict ourselves to 32-bit registers if that's all the
12051 processor has, or if the ABI cannot handle 64-bit registers. */
12052 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
12053 target_flags &= ~MASK_64BIT;
12054 else
12055 target_flags |= MASK_64BIT;
12058 if ((target_flags_explicit & MASK_FLOAT64) != 0)
12060 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
12061 error ("unsupported combination: %s", "-mfp64 -msingle-float");
12062 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
12063 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
12064 else if (!TARGET_64BIT && TARGET_FLOAT64)
12066 if (!ISA_HAS_MXHC1)
12067 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
12068 " the target supports the mfhc1 and mthc1 instructions");
12069 else if (mips_abi != ABI_32)
12070 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
12071 " the o32 ABI");
12074 else
12076 /* -msingle-float selects 32-bit float registers. Otherwise the
12077 float registers should be the same size as the integer ones. */
12078 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12079 target_flags |= MASK_FLOAT64;
12080 else
12081 target_flags &= ~MASK_FLOAT64;
12084 /* End of code shared with GAS. */
12086 /* If no -mlong* option was given, infer it from the other options. */
12087 if ((target_flags_explicit & MASK_LONG64) == 0)
12089 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12090 target_flags |= MASK_LONG64;
12091 else
12092 target_flags &= ~MASK_LONG64;
12095 if (!TARGET_OLDABI)
12096 flag_pcc_struct_return = 0;
12098 /* Decide which rtx_costs structure to use. */
12099 if (optimize_size)
12100 mips_cost = &mips_rtx_cost_optimize_size;
12101 else
12102 mips_cost = &mips_rtx_cost_data[mips_tune];
12104 /* If the user hasn't specified a branch cost, use the processor's
12105 default. */
12106 if (mips_branch_cost == 0)
12107 mips_branch_cost = mips_cost->branch_cost;
12109 /* If neither -mbranch-likely nor -mno-branch-likely was given
12110 on the command line, set MASK_BRANCHLIKELY based on the target
12111 architecture and tuning flags. Annulled delay slots are a
12112 size win, so we only consider the processor-specific tuning
12113 for !optimize_size. */
12114 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12116 if (ISA_HAS_BRANCHLIKELY
12117 && (optimize_size
12118 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12119 target_flags |= MASK_BRANCHLIKELY;
12120 else
12121 target_flags &= ~MASK_BRANCHLIKELY;
12123 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12124 warning (0, "the %qs architecture does not support branch-likely"
12125 " instructions", mips_arch_info->name);
12127 /* The effect of -mabicalls isn't defined for the EABI. */
12128 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12130 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12131 target_flags &= ~MASK_ABICALLS;
12134 /* MIPS16 cannot generate PIC yet. */
12135 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
12137 sorry ("MIPS16 PIC");
12138 target_flags &= ~MASK_ABICALLS;
12139 flag_pic = flag_pie = flag_shlib = 0;
12142 if (TARGET_ABICALLS)
12143 /* We need to set flag_pic for executables as well as DSOs
12144 because we may reference symbols that are not defined in
12145 the final executable. (MIPS does not use things like
12146 copy relocs, for example.)
12148 Also, there is a body of code that uses __PIC__ to distinguish
12149 between -mabicalls and -mno-abicalls code. */
12150 flag_pic = 1;
12152 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12153 faster code, but at the expense of more nops. Enable it at -O3 and
12154 above. */
12155 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12156 target_flags |= MASK_VR4130_ALIGN;
12158 /* Prefer a call to memcpy over inline code when optimizing for size,
12159 though see MOVE_RATIO in mips.h. */
12160 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12161 target_flags |= MASK_MEMCPY;
12163 /* If we have a nonzero small-data limit, check that the -mgpopt
12164 setting is consistent with the other target flags. */
12165 if (mips_small_data_threshold > 0)
12167 if (!TARGET_GPOPT)
12169 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
12170 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12172 TARGET_LOCAL_SDATA = false;
12173 TARGET_EXTERN_SDATA = false;
12175 else
12177 if (TARGET_VXWORKS_RTP)
12178 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12180 if (TARGET_ABICALLS)
12181 warning (0, "cannot use small-data accesses for %qs",
12182 "-mabicalls");
12186 #ifdef MIPS_TFMODE_FORMAT
12187 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12188 #endif
12190 /* Make sure that the user didn't turn off paired single support when
12191 MIPS-3D support is requested. */
12192 if (TARGET_MIPS3D
12193 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12194 && !TARGET_PAIRED_SINGLE_FLOAT)
12195 error ("%<-mips3d%> requires %<-mpaired-single%>");
12197 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12198 if (TARGET_MIPS3D)
12199 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12201 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12202 and TARGET_HARD_FLOAT_ABI are both true. */
12203 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12204 error ("%qs must be used with %qs",
12205 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
12206 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
12208 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12209 enabled. */
12210 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
12211 warning (0, "the %qs architecture does not support paired-single"
12212 " instructions", mips_arch_info->name);
12214 /* If TARGET_DSPR2, enable MASK_DSP. */
12215 if (TARGET_DSPR2)
12216 target_flags |= MASK_DSP;
12218 mips_init_print_operand_punct ();
12220 /* Set up array to map GCC register number to debug register number.
12221 Ignore the special purpose register numbers. */
12223 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12225 mips_dbx_regno[i] = INVALID_REGNUM;
12226 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12227 mips_dwarf_regno[i] = i;
12228 else
12229 mips_dwarf_regno[i] = INVALID_REGNUM;
12232 start = GP_DBX_FIRST - GP_REG_FIRST;
12233 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12234 mips_dbx_regno[i] = i + start;
12236 start = FP_DBX_FIRST - FP_REG_FIRST;
12237 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12238 mips_dbx_regno[i] = i + start;
12240 /* Accumulator debug registers use big-endian ordering. */
12241 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12242 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12243 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12244 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12245 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12247 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12248 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12251 /* Set up mips_hard_regno_mode_ok. */
12252 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
12253 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12254 mips_hard_regno_mode_ok[(int)mode][regno]
12255 = mips_hard_regno_mode_ok_p (regno, mode);
12257 /* Function to allocate machine-dependent function status. */
12258 init_machine_status = &mips_init_machine_status;
12260 /* Default to working around R4000 errata only if the processor
12261 was selected explicitly. */
12262 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12263 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12264 target_flags |= MASK_FIX_R4000;
12266 /* Default to working around R4400 errata only if the processor
12267 was selected explicitly. */
12268 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12269 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12270 target_flags |= MASK_FIX_R4400;
12272 /* Save base state of options. */
12273 mips_base_mips16 = TARGET_MIPS16;
12274 mips_base_target_flags = target_flags;
12275 mips_base_delayed_branch = flag_delayed_branch;
12276 mips_base_schedule_insns = flag_schedule_insns;
12277 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12278 mips_base_move_loop_invariants = flag_move_loop_invariants;
12279 mips_base_align_loops = align_loops;
12280 mips_base_align_jumps = align_jumps;
12281 mips_base_align_functions = align_functions;
12283 /* Now select the ISA mode. */
12284 mips_set_mips16_mode (mips_base_mips16);
12286 /* We call dbr_schedule from within mips_reorg. */
12287 flag_delayed_branch = 0;
12290 /* Swap the register information for registers I and I + 1, which
12291 currently have the wrong endianness. Note that the registers'
12292 fixedness and call-clobberedness might have been set on the
12293 command line. */
12295 static void
12296 mips_swap_registers (unsigned int i)
12298 int tmpi;
12299 const char *tmps;
12301 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12302 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12304 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12305 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12306 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12307 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12309 #undef SWAP_STRING
12310 #undef SWAP_INT
12313 /* Implement CONDITIONAL_REGISTER_USAGE. */
12315 void
12316 mips_conditional_register_usage (void)
12318 if (!ISA_HAS_DSP)
12320 int regno;
12322 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12323 fixed_regs[regno] = call_used_regs[regno] = 1;
12325 if (!TARGET_HARD_FLOAT)
12327 int regno;
12329 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12330 fixed_regs[regno] = call_used_regs[regno] = 1;
12331 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12332 fixed_regs[regno] = call_used_regs[regno] = 1;
12334 else if (! ISA_HAS_8CC)
12336 int regno;
12338 /* We only have a single condition-code register. We implement
12339 this by fixing all the condition-code registers and generating
12340 RTL that refers directly to ST_REG_FIRST. */
12341 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12342 fixed_regs[regno] = call_used_regs[regno] = 1;
12344 /* In MIPS16 mode, we permit the $t temporary registers to be used
12345 for reload. We prohibit the unused $s registers, since they
12346 are call-saved, and saving them via a MIPS16 register would
12347 probably waste more time than just reloading the value. */
12348 if (TARGET_MIPS16)
12350 fixed_regs[18] = call_used_regs[18] = 1;
12351 fixed_regs[19] = call_used_regs[19] = 1;
12352 fixed_regs[20] = call_used_regs[20] = 1;
12353 fixed_regs[21] = call_used_regs[21] = 1;
12354 fixed_regs[22] = call_used_regs[22] = 1;
12355 fixed_regs[23] = call_used_regs[23] = 1;
12356 fixed_regs[26] = call_used_regs[26] = 1;
12357 fixed_regs[27] = call_used_regs[27] = 1;
12358 fixed_regs[30] = call_used_regs[30] = 1;
12360 /* $f20-$f23 are call-clobbered for n64. */
12361 if (mips_abi == ABI_64)
12363 int regno;
12364 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
12365 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12367 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
12368 for n32. */
12369 if (mips_abi == ABI_N32)
12371 int regno;
12372 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
12373 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12375 /* Make sure that double-register accumulator values are correctly
12376 ordered for the current endianness. */
12377 if (TARGET_LITTLE_ENDIAN)
12379 unsigned int regno;
12381 mips_swap_registers (MD_REG_FIRST);
12382 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
12383 mips_swap_registers (regno);
12387 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
12388 other registers for instructions for which it is possible. This
12389 encourages the compiler to use CMP in cases where an XOR would
12390 require some register shuffling. */
12392 void
12393 mips_order_regs_for_local_alloc (void)
12395 int i;
12397 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12398 reg_alloc_order[i] = i;
12400 if (TARGET_MIPS16)
12402 /* It really doesn't matter where we put register 0, since it is
12403 a fixed register anyhow. */
12404 reg_alloc_order[0] = 24;
12405 reg_alloc_order[24] = 0;
12409 /* Initialize the GCC target structure. */
12410 #undef TARGET_ASM_ALIGNED_HI_OP
12411 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
12412 #undef TARGET_ASM_ALIGNED_SI_OP
12413 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
12414 #undef TARGET_ASM_ALIGNED_DI_OP
12415 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
12417 #undef TARGET_ASM_FUNCTION_PROLOGUE
12418 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
12419 #undef TARGET_ASM_FUNCTION_EPILOGUE
12420 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
12421 #undef TARGET_ASM_SELECT_RTX_SECTION
12422 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
12423 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
12424 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
12426 #undef TARGET_SCHED_INIT
12427 #define TARGET_SCHED_INIT mips_sched_init
12428 #undef TARGET_SCHED_REORDER
12429 #define TARGET_SCHED_REORDER mips_sched_reorder
12430 #undef TARGET_SCHED_REORDER2
12431 #define TARGET_SCHED_REORDER2 mips_sched_reorder
12432 #undef TARGET_SCHED_VARIABLE_ISSUE
12433 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
12434 #undef TARGET_SCHED_ADJUST_COST
12435 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
12436 #undef TARGET_SCHED_ISSUE_RATE
12437 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
12438 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12439 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
12440 mips_multipass_dfa_lookahead
12442 #undef TARGET_DEFAULT_TARGET_FLAGS
12443 #define TARGET_DEFAULT_TARGET_FLAGS \
12444 (TARGET_DEFAULT \
12445 | TARGET_CPU_DEFAULT \
12446 | TARGET_ENDIAN_DEFAULT \
12447 | TARGET_FP_EXCEPTIONS_DEFAULT \
12448 | MASK_CHECK_ZERO_DIV \
12449 | MASK_FUSED_MADD)
12450 #undef TARGET_HANDLE_OPTION
12451 #define TARGET_HANDLE_OPTION mips_handle_option
12453 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12454 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
12456 #undef TARGET_INSERT_ATTRIBUTES
12457 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
12458 #undef TARGET_MERGE_DECL_ATTRIBUTES
12459 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
12460 #undef TARGET_SET_CURRENT_FUNCTION
12461 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
12463 #undef TARGET_VALID_POINTER_MODE
12464 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
12465 #undef TARGET_RTX_COSTS
12466 #define TARGET_RTX_COSTS mips_rtx_costs
12467 #undef TARGET_ADDRESS_COST
12468 #define TARGET_ADDRESS_COST mips_address_cost
12470 #undef TARGET_IN_SMALL_DATA_P
12471 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
12473 #undef TARGET_MACHINE_DEPENDENT_REORG
12474 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
12476 #undef TARGET_ASM_FILE_START
12477 #define TARGET_ASM_FILE_START mips_file_start
12478 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
12479 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
12481 #undef TARGET_INIT_LIBFUNCS
12482 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
12484 #undef TARGET_BUILD_BUILTIN_VA_LIST
12485 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
12486 #undef TARGET_EXPAND_BUILTIN_VA_START
12487 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
12488 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12489 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
12491 #undef TARGET_PROMOTE_FUNCTION_ARGS
12492 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
12493 #undef TARGET_PROMOTE_FUNCTION_RETURN
12494 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
12495 #undef TARGET_PROMOTE_PROTOTYPES
12496 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
12498 #undef TARGET_RETURN_IN_MEMORY
12499 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
12500 #undef TARGET_RETURN_IN_MSB
12501 #define TARGET_RETURN_IN_MSB mips_return_in_msb
12503 #undef TARGET_ASM_OUTPUT_MI_THUNK
12504 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
12505 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12506 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12508 #undef TARGET_SETUP_INCOMING_VARARGS
12509 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
12510 #undef TARGET_STRICT_ARGUMENT_NAMING
12511 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
12512 #undef TARGET_MUST_PASS_IN_STACK
12513 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
12514 #undef TARGET_PASS_BY_REFERENCE
12515 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
12516 #undef TARGET_CALLEE_COPIES
12517 #define TARGET_CALLEE_COPIES mips_callee_copies
12518 #undef TARGET_ARG_PARTIAL_BYTES
12519 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
12521 #undef TARGET_MODE_REP_EXTENDED
12522 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
12524 #undef TARGET_VECTOR_MODE_SUPPORTED_P
12525 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
12527 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12528 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
12530 #undef TARGET_INIT_BUILTINS
12531 #define TARGET_INIT_BUILTINS mips_init_builtins
12532 #undef TARGET_EXPAND_BUILTIN
12533 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
12535 #undef TARGET_HAVE_TLS
12536 #define TARGET_HAVE_TLS HAVE_AS_TLS
12538 #undef TARGET_CANNOT_FORCE_CONST_MEM
12539 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
12541 #undef TARGET_ENCODE_SECTION_INFO
12542 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
12544 #undef TARGET_ATTRIBUTE_TABLE
12545 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
12546 /* All our function attributes are related to how out-of-line copies should
12547 be compiled or called. They don't in themselves prevent inlining. */
12548 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12549 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12551 #undef TARGET_EXTRA_LIVE_ON_ENTRY
12552 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
12554 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
12555 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
12556 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
12557 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
12559 #undef TARGET_COMP_TYPE_ATTRIBUTES
12560 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
12562 #ifdef HAVE_AS_DTPRELWORD
12563 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12564 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
12565 #endif
12566 #undef TARGET_DWARF_REGISTER_SPAN
12567 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
12569 struct gcc_target targetm = TARGET_INITIALIZER;
12571 #include "gt-mips.h"