mips.c (BUILTIN_AVAIL_NON_MIPS16): New macro.
[official-gcc.git] / gcc / config / mips / mips.c
blob69f8258e8f01283bf5d41283f2fee4fd004e3e92
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60 #include "diagnostic.h"
62 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
70 XVECEXP (X, 0, 0)
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
91 part of the frame. */
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
99 (INSN_P (INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
110 : (INSN))
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
116 : (INSN))
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
130 ADDRESS_REG
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
134 ADDRESS_LO_SUM
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
138 ADDRESS_CONST_INT
139 A signed 16-bit constant address.
141 ADDRESS_SYMBOLIC:
142 A constant symbolic address. */
143 enum mips_address_type {
144 ADDRESS_REG,
145 ADDRESS_LO_SUM,
146 ADDRESS_CONST_INT,
147 ADDRESS_SYMBOLIC
150 /* Macros to create an enumeration identifier for a function prototype. */
151 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
152 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
153 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
154 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
156 /* Classifies the prototype of a built-in function. */
157 enum mips_function_type {
158 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
159 #include "config/mips/mips-ftypes.def"
160 #undef DEF_MIPS_FTYPE
161 MIPS_MAX_FTYPE_MAX
164 /* Specifies how a built-in function should be converted into rtl. */
165 enum mips_builtin_type {
166 /* The function corresponds directly to an .md pattern. The return
167 value is mapped to operand 0 and the arguments are mapped to
168 operands 1 and above. */
169 MIPS_BUILTIN_DIRECT,
171 /* The function corresponds directly to an .md pattern. There is no return
172 value and the arguments are mapped to operands 0 and above. */
173 MIPS_BUILTIN_DIRECT_NO_TARGET,
175 /* The function corresponds to a comparison instruction followed by
176 a mips_cond_move_tf_ps pattern. The first two arguments are the
177 values to compare and the second two arguments are the vector
178 operands for the movt.ps or movf.ps instruction (in assembly order). */
179 MIPS_BUILTIN_MOVF,
180 MIPS_BUILTIN_MOVT,
182 /* The function corresponds to a V2SF comparison instruction. Operand 0
183 of this instruction is the result of the comparison, which has mode
184 CCV2 or CCV4. The function arguments are mapped to operands 1 and
185 above. The function's return value is an SImode boolean that is
186 true under the following conditions:
188 MIPS_BUILTIN_CMP_ANY: one of the registers is true
189 MIPS_BUILTIN_CMP_ALL: all of the registers are true
190 MIPS_BUILTIN_CMP_LOWER: the first register is true
191 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
192 MIPS_BUILTIN_CMP_ANY,
193 MIPS_BUILTIN_CMP_ALL,
194 MIPS_BUILTIN_CMP_UPPER,
195 MIPS_BUILTIN_CMP_LOWER,
197 /* As above, but the instruction only sets a single $fcc register. */
198 MIPS_BUILTIN_CMP_SINGLE,
200 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
201 MIPS_BUILTIN_BPOSGE32
204 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
205 #define MIPS_FP_CONDITIONS(MACRO) \
206 MACRO (f), \
207 MACRO (un), \
208 MACRO (eq), \
209 MACRO (ueq), \
210 MACRO (olt), \
211 MACRO (ult), \
212 MACRO (ole), \
213 MACRO (ule), \
214 MACRO (sf), \
215 MACRO (ngle), \
216 MACRO (seq), \
217 MACRO (ngl), \
218 MACRO (lt), \
219 MACRO (nge), \
220 MACRO (le), \
221 MACRO (ngt)
223 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
224 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
225 enum mips_fp_condition {
226 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
229 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
230 #define STRINGIFY(X) #X
231 static const char *const mips_fp_conditions[] = {
232 MIPS_FP_CONDITIONS (STRINGIFY)
235 /* Information about a function's frame layout. */
236 struct mips_frame_info GTY(()) {
237 /* The size of the frame in bytes. */
238 HOST_WIDE_INT total_size;
240 /* The number of bytes allocated to variables. */
241 HOST_WIDE_INT var_size;
243 /* The number of bytes allocated to outgoing function arguments. */
244 HOST_WIDE_INT args_size;
246 /* The number of bytes allocated to the .cprestore slot, or 0 if there
247 is no such slot. */
248 HOST_WIDE_INT cprestore_size;
250 /* Bit X is set if the function saves or restores GPR X. */
251 unsigned int mask;
253 /* Likewise FPR X. */
254 unsigned int fmask;
256 /* The number of GPRs and FPRs saved. */
257 unsigned int num_gp;
258 unsigned int num_fp;
260 /* The offset of the topmost GPR and FPR save slots from the top of
261 the frame, or zero if no such slots are needed. */
262 HOST_WIDE_INT gp_save_offset;
263 HOST_WIDE_INT fp_save_offset;
265 /* Likewise, but giving offsets from the bottom of the frame. */
266 HOST_WIDE_INT gp_sp_offset;
267 HOST_WIDE_INT fp_sp_offset;
269 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
270 HOST_WIDE_INT arg_pointer_offset;
272 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
273 HOST_WIDE_INT hard_frame_pointer_offset;
276 struct machine_function GTY(()) {
277 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
278 rtx mips16_gp_pseudo_rtx;
280 /* The number of extra stack bytes taken up by register varargs.
281 This area is allocated by the callee at the very top of the frame. */
282 int varargs_size;
284 /* The current frame information, calculated by mips_compute_frame_info. */
285 struct mips_frame_info frame;
287 /* The register to use as the function's global pointer. */
288 unsigned int global_pointer;
290 /* True if mips_adjust_insn_length should ignore an instruction's
291 hazard attribute. */
292 bool ignore_hazard_length_p;
294 /* True if the whole function is suitable for .set noreorder and
295 .set nomacro. */
296 bool all_noreorder_p;
298 /* True if the function is known to have an instruction that needs $gp. */
299 bool has_gp_insn_p;
301 /* True if we have emitted an instruction to initialize
302 mips16_gp_pseudo_rtx. */
303 bool initialized_mips16_gp_pseudo_p;
306 /* Information about a single argument. */
307 struct mips_arg_info {
308 /* True if the argument is passed in a floating-point register, or
309 would have been if we hadn't run out of registers. */
310 bool fpr_p;
312 /* The number of words passed in registers, rounded up. */
313 unsigned int reg_words;
315 /* For EABI, the offset of the first register from GP_ARG_FIRST or
316 FP_ARG_FIRST. For other ABIs, the offset of the first register from
317 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
318 comment for details).
320 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
321 on the stack. */
322 unsigned int reg_offset;
324 /* The number of words that must be passed on the stack, rounded up. */
325 unsigned int stack_words;
327 /* The offset from the start of the stack overflow area of the argument's
328 first stack word. Only meaningful when STACK_WORDS is nonzero. */
329 unsigned int stack_offset;
332 /* Information about an address described by mips_address_type.
334 ADDRESS_CONST_INT
335 No fields are used.
337 ADDRESS_REG
338 REG is the base register and OFFSET is the constant offset.
340 ADDRESS_LO_SUM
341 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
342 is the type of symbol it references.
344 ADDRESS_SYMBOLIC
345 SYMBOL_TYPE is the type of symbol that the address references. */
346 struct mips_address_info {
347 enum mips_address_type type;
348 rtx reg;
349 rtx offset;
350 enum mips_symbol_type symbol_type;
353 /* One stage in a constant building sequence. These sequences have
354 the form:
356 A = VALUE[0]
357 A = A CODE[1] VALUE[1]
358 A = A CODE[2] VALUE[2]
361 where A is an accumulator, each CODE[i] is a binary rtl operation
362 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
363 struct mips_integer_op {
364 enum rtx_code code;
365 unsigned HOST_WIDE_INT value;
368 /* The largest number of operations needed to load an integer constant.
369 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
370 When the lowest bit is clear, we can try, but reject a sequence with
371 an extra SLL at the end. */
372 #define MIPS_MAX_INTEGER_OPS 7
374 /* Information about a MIPS16e SAVE or RESTORE instruction. */
375 struct mips16e_save_restore_info {
376 /* The number of argument registers saved by a SAVE instruction.
377 0 for RESTORE instructions. */
378 unsigned int nargs;
380 /* Bit X is set if the instruction saves or restores GPR X. */
381 unsigned int mask;
383 /* The total number of bytes to allocate. */
384 HOST_WIDE_INT size;
387 /* Global variables for machine-dependent things. */
389 /* The -G setting, or the configuration's default small-data limit if
390 no -G option is given. */
391 static unsigned int mips_small_data_threshold;
393 /* The number of file directives written by mips_output_filename. */
394 int num_source_filenames;
396 /* The name that appeared in the last .file directive written by
397 mips_output_filename, or "" if mips_output_filename hasn't
398 written anything yet. */
399 const char *current_function_file = "";
401 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
402 int sdb_label_count;
404 /* Arrays that map GCC register numbers to debugger register numbers. */
405 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
406 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
408 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
409 int set_noreorder;
410 int set_nomacro;
411 static int set_noat;
413 /* True if we're writing out a branch-likely instruction rather than a
414 normal branch. */
415 static bool mips_branch_likely;
417 /* The operands passed to the last cmpMM expander. */
418 rtx cmp_operands[2];
420 /* The current instruction-set architecture. */
421 enum processor_type mips_arch;
422 const struct mips_cpu_info *mips_arch_info;
424 /* The processor that we should tune the code for. */
425 enum processor_type mips_tune;
426 const struct mips_cpu_info *mips_tune_info;
428 /* The ISA level associated with mips_arch. */
429 int mips_isa;
431 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
432 static const struct mips_cpu_info *mips_isa_option_info;
434 /* Which ABI to use. */
435 int mips_abi = MIPS_ABI_DEFAULT;
437 /* Which cost information to use. */
438 const struct mips_rtx_cost_data *mips_cost;
440 /* The ambient target flags, excluding MASK_MIPS16. */
441 static int mips_base_target_flags;
443 /* True if MIPS16 is the default mode. */
444 static bool mips_base_mips16;
446 /* The ambient values of other global variables. */
447 static int mips_base_delayed_branch; /* flag_delayed_branch */
448 static int mips_base_schedule_insns; /* flag_schedule_insns */
449 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
450 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
451 static int mips_base_align_loops; /* align_loops */
452 static int mips_base_align_jumps; /* align_jumps */
453 static int mips_base_align_functions; /* align_functions */
455 /* The -mcode-readable setting. */
456 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
458 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
459 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
461 /* Index C is true if character C is a valid PRINT_OPERAND punctation
462 character. */
463 bool mips_print_operand_punct[256];
465 static GTY (()) int mips_output_filename_first_time = 1;
467 /* mips_split_p[X] is true if symbols of type X can be split by
468 mips_split_symbol. */
469 bool mips_split_p[NUM_SYMBOL_TYPES];
471 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
472 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
473 if they are matched by a special .md file pattern. */
474 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
476 /* Likewise for HIGHs. */
477 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
479 /* Index R is the smallest register class that contains register R. */
480 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
481 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
482 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
483 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
484 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
485 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
486 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
487 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
488 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
489 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
490 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
491 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
492 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
493 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
494 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
495 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
496 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
497 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
498 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
499 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
500 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
501 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
502 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
503 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
504 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
505 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
506 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
507 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
508 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
509 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
510 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
511 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
512 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
513 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
514 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
515 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
516 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
517 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
518 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
519 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
520 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
521 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
522 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
523 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
524 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
525 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
526 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
527 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
530 /* The value of TARGET_ATTRIBUTE_TABLE. */
531 const struct attribute_spec mips_attribute_table[] = {
532 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
533 { "long_call", 0, 0, false, true, true, NULL },
534 { "far", 0, 0, false, true, true, NULL },
535 { "near", 0, 0, false, true, true, NULL },
536 /* We would really like to treat "mips16" and "nomips16" as type
537 attributes, but GCC doesn't provide the hooks we need to support
538 the right conversion rules. As declaration attributes, they affect
539 code generation but don't carry other semantics. */
540 { "mips16", 0, 0, true, false, false, NULL },
541 { "nomips16", 0, 0, true, false, false, NULL },
542 { NULL, 0, 0, false, false, false, NULL }
545 /* A table describing all the processors GCC knows about. Names are
546 matched in the order listed. The first mention of an ISA level is
547 taken as the canonical name for that ISA.
549 To ease comparison, please keep this table in the same order
550 as GAS's mips_cpu_info_table. Please also make sure that
551 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
552 options correctly. */
553 static const struct mips_cpu_info mips_cpu_info_table[] = {
554 /* Entries for generic ISAs. */
555 { "mips1", PROCESSOR_R3000, 1, 0 },
556 { "mips2", PROCESSOR_R6000, 2, 0 },
557 { "mips3", PROCESSOR_R4000, 3, 0 },
558 { "mips4", PROCESSOR_R8000, 4, 0 },
559 /* Prefer not to use branch-likely instructions for generic MIPS32rX
560 and MIPS64rX code. The instructions were officially deprecated
561 in revisions 2 and earlier, but revision 3 is likely to downgrade
562 that to a recommendation to avoid the instructions in code that
563 isn't tuned to a specific processor. */
564 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
565 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
566 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
568 /* MIPS I processors. */
569 { "r3000", PROCESSOR_R3000, 1, 0 },
570 { "r2000", PROCESSOR_R3000, 1, 0 },
571 { "r3900", PROCESSOR_R3900, 1, 0 },
573 /* MIPS II processors. */
574 { "r6000", PROCESSOR_R6000, 2, 0 },
576 /* MIPS III processors. */
577 { "r4000", PROCESSOR_R4000, 3, 0 },
578 { "vr4100", PROCESSOR_R4100, 3, 0 },
579 { "vr4111", PROCESSOR_R4111, 3, 0 },
580 { "vr4120", PROCESSOR_R4120, 3, 0 },
581 { "vr4130", PROCESSOR_R4130, 3, 0 },
582 { "vr4300", PROCESSOR_R4300, 3, 0 },
583 { "r4400", PROCESSOR_R4000, 3, 0 },
584 { "r4600", PROCESSOR_R4600, 3, 0 },
585 { "orion", PROCESSOR_R4600, 3, 0 },
586 { "r4650", PROCESSOR_R4650, 3, 0 },
587 /* ST Loongson 2E/2F processors. */
588 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
589 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
591 /* MIPS IV processors. */
592 { "r8000", PROCESSOR_R8000, 4, 0 },
593 { "vr5000", PROCESSOR_R5000, 4, 0 },
594 { "vr5400", PROCESSOR_R5400, 4, 0 },
595 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
596 { "rm7000", PROCESSOR_R7000, 4, 0 },
597 { "rm9000", PROCESSOR_R9000, 4, 0 },
599 /* MIPS32 processors. */
600 { "4kc", PROCESSOR_4KC, 32, 0 },
601 { "4km", PROCESSOR_4KC, 32, 0 },
602 { "4kp", PROCESSOR_4KP, 32, 0 },
603 { "4ksc", PROCESSOR_4KC, 32, 0 },
605 /* MIPS32 Release 2 processors. */
606 { "m4k", PROCESSOR_M4K, 33, 0 },
607 { "4kec", PROCESSOR_4KC, 33, 0 },
608 { "4kem", PROCESSOR_4KC, 33, 0 },
609 { "4kep", PROCESSOR_4KP, 33, 0 },
610 { "4ksd", PROCESSOR_4KC, 33, 0 },
612 { "24kc", PROCESSOR_24KC, 33, 0 },
613 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
614 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
615 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
616 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
617 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
619 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
620 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
621 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
622 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
623 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
624 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
626 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
627 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
628 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
629 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
630 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
631 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
633 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
634 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
635 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
636 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
637 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
638 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
639 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
641 /* MIPS64 processors. */
642 { "5kc", PROCESSOR_5KC, 64, 0 },
643 { "5kf", PROCESSOR_5KF, 64, 0 },
644 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
645 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
646 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
647 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
648 { "xlr", PROCESSOR_XLR, 64, 0 }
651 /* Default costs. If these are used for a processor we should look
652 up the actual costs. */
653 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
654 COSTS_N_INSNS (7), /* fp_mult_sf */ \
655 COSTS_N_INSNS (8), /* fp_mult_df */ \
656 COSTS_N_INSNS (23), /* fp_div_sf */ \
657 COSTS_N_INSNS (36), /* fp_div_df */ \
658 COSTS_N_INSNS (10), /* int_mult_si */ \
659 COSTS_N_INSNS (10), /* int_mult_di */ \
660 COSTS_N_INSNS (69), /* int_div_si */ \
661 COSTS_N_INSNS (69), /* int_div_di */ \
662 2, /* branch_cost */ \
663 4 /* memory_latency */
665 /* Floating-point costs for processors without an FPU. Just assume that
666 all floating-point libcalls are very expensive. */
667 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
668 COSTS_N_INSNS (256), /* fp_mult_sf */ \
669 COSTS_N_INSNS (256), /* fp_mult_df */ \
670 COSTS_N_INSNS (256), /* fp_div_sf */ \
671 COSTS_N_INSNS (256) /* fp_div_df */
673 /* Costs to use when optimizing for size. */
674 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
675 COSTS_N_INSNS (1), /* fp_add */
676 COSTS_N_INSNS (1), /* fp_mult_sf */
677 COSTS_N_INSNS (1), /* fp_mult_df */
678 COSTS_N_INSNS (1), /* fp_div_sf */
679 COSTS_N_INSNS (1), /* fp_div_df */
680 COSTS_N_INSNS (1), /* int_mult_si */
681 COSTS_N_INSNS (1), /* int_mult_di */
682 COSTS_N_INSNS (1), /* int_div_si */
683 COSTS_N_INSNS (1), /* int_div_di */
684 2, /* branch_cost */
685 4 /* memory_latency */
688 /* Costs to use when optimizing for speed, indexed by processor. */
689 static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
690 { /* R3000 */
691 COSTS_N_INSNS (2), /* fp_add */
692 COSTS_N_INSNS (4), /* fp_mult_sf */
693 COSTS_N_INSNS (5), /* fp_mult_df */
694 COSTS_N_INSNS (12), /* fp_div_sf */
695 COSTS_N_INSNS (19), /* fp_div_df */
696 COSTS_N_INSNS (12), /* int_mult_si */
697 COSTS_N_INSNS (12), /* int_mult_di */
698 COSTS_N_INSNS (35), /* int_div_si */
699 COSTS_N_INSNS (35), /* int_div_di */
700 1, /* branch_cost */
701 4 /* memory_latency */
703 { /* 4KC */
704 SOFT_FP_COSTS,
705 COSTS_N_INSNS (6), /* int_mult_si */
706 COSTS_N_INSNS (6), /* int_mult_di */
707 COSTS_N_INSNS (36), /* int_div_si */
708 COSTS_N_INSNS (36), /* int_div_di */
709 1, /* branch_cost */
710 4 /* memory_latency */
712 { /* 4KP */
713 SOFT_FP_COSTS,
714 COSTS_N_INSNS (36), /* int_mult_si */
715 COSTS_N_INSNS (36), /* int_mult_di */
716 COSTS_N_INSNS (37), /* int_div_si */
717 COSTS_N_INSNS (37), /* int_div_di */
718 1, /* branch_cost */
719 4 /* memory_latency */
721 { /* 5KC */
722 SOFT_FP_COSTS,
723 COSTS_N_INSNS (4), /* int_mult_si */
724 COSTS_N_INSNS (11), /* int_mult_di */
725 COSTS_N_INSNS (36), /* int_div_si */
726 COSTS_N_INSNS (68), /* int_div_di */
727 1, /* branch_cost */
728 4 /* memory_latency */
730 { /* 5KF */
731 COSTS_N_INSNS (4), /* fp_add */
732 COSTS_N_INSNS (4), /* fp_mult_sf */
733 COSTS_N_INSNS (5), /* fp_mult_df */
734 COSTS_N_INSNS (17), /* fp_div_sf */
735 COSTS_N_INSNS (32), /* fp_div_df */
736 COSTS_N_INSNS (4), /* int_mult_si */
737 COSTS_N_INSNS (11), /* int_mult_di */
738 COSTS_N_INSNS (36), /* int_div_si */
739 COSTS_N_INSNS (68), /* int_div_di */
740 1, /* branch_cost */
741 4 /* memory_latency */
743 { /* 20KC */
744 COSTS_N_INSNS (4), /* fp_add */
745 COSTS_N_INSNS (4), /* fp_mult_sf */
746 COSTS_N_INSNS (5), /* fp_mult_df */
747 COSTS_N_INSNS (17), /* fp_div_sf */
748 COSTS_N_INSNS (32), /* fp_div_df */
749 COSTS_N_INSNS (4), /* int_mult_si */
750 COSTS_N_INSNS (7), /* int_mult_di */
751 COSTS_N_INSNS (42), /* int_div_si */
752 COSTS_N_INSNS (72), /* int_div_di */
753 1, /* branch_cost */
754 4 /* memory_latency */
756 { /* 24KC */
757 SOFT_FP_COSTS,
758 COSTS_N_INSNS (5), /* int_mult_si */
759 COSTS_N_INSNS (5), /* int_mult_di */
760 COSTS_N_INSNS (41), /* int_div_si */
761 COSTS_N_INSNS (41), /* int_div_di */
762 1, /* branch_cost */
763 4 /* memory_latency */
765 { /* 24KF2_1 */
766 COSTS_N_INSNS (8), /* fp_add */
767 COSTS_N_INSNS (8), /* fp_mult_sf */
768 COSTS_N_INSNS (10), /* fp_mult_df */
769 COSTS_N_INSNS (34), /* fp_div_sf */
770 COSTS_N_INSNS (64), /* fp_div_df */
771 COSTS_N_INSNS (5), /* int_mult_si */
772 COSTS_N_INSNS (5), /* int_mult_di */
773 COSTS_N_INSNS (41), /* int_div_si */
774 COSTS_N_INSNS (41), /* int_div_di */
775 1, /* branch_cost */
776 4 /* memory_latency */
778 { /* 24KF1_1 */
779 COSTS_N_INSNS (4), /* fp_add */
780 COSTS_N_INSNS (4), /* fp_mult_sf */
781 COSTS_N_INSNS (5), /* fp_mult_df */
782 COSTS_N_INSNS (17), /* fp_div_sf */
783 COSTS_N_INSNS (32), /* fp_div_df */
784 COSTS_N_INSNS (5), /* int_mult_si */
785 COSTS_N_INSNS (5), /* int_mult_di */
786 COSTS_N_INSNS (41), /* int_div_si */
787 COSTS_N_INSNS (41), /* int_div_di */
788 1, /* branch_cost */
789 4 /* memory_latency */
791 { /* 74KC */
792 SOFT_FP_COSTS,
793 COSTS_N_INSNS (5), /* int_mult_si */
794 COSTS_N_INSNS (5), /* int_mult_di */
795 COSTS_N_INSNS (41), /* int_div_si */
796 COSTS_N_INSNS (41), /* int_div_di */
797 1, /* branch_cost */
798 4 /* memory_latency */
800 { /* 74KF2_1 */
801 COSTS_N_INSNS (8), /* fp_add */
802 COSTS_N_INSNS (8), /* fp_mult_sf */
803 COSTS_N_INSNS (10), /* fp_mult_df */
804 COSTS_N_INSNS (34), /* fp_div_sf */
805 COSTS_N_INSNS (64), /* fp_div_df */
806 COSTS_N_INSNS (5), /* int_mult_si */
807 COSTS_N_INSNS (5), /* int_mult_di */
808 COSTS_N_INSNS (41), /* int_div_si */
809 COSTS_N_INSNS (41), /* int_div_di */
810 1, /* branch_cost */
811 4 /* memory_latency */
813 { /* 74KF1_1 */
814 COSTS_N_INSNS (4), /* fp_add */
815 COSTS_N_INSNS (4), /* fp_mult_sf */
816 COSTS_N_INSNS (5), /* fp_mult_df */
817 COSTS_N_INSNS (17), /* fp_div_sf */
818 COSTS_N_INSNS (32), /* fp_div_df */
819 COSTS_N_INSNS (5), /* int_mult_si */
820 COSTS_N_INSNS (5), /* int_mult_di */
821 COSTS_N_INSNS (41), /* int_div_si */
822 COSTS_N_INSNS (41), /* int_div_di */
823 1, /* branch_cost */
824 4 /* memory_latency */
826 { /* 74KF3_2 */
827 COSTS_N_INSNS (6), /* fp_add */
828 COSTS_N_INSNS (6), /* fp_mult_sf */
829 COSTS_N_INSNS (7), /* fp_mult_df */
830 COSTS_N_INSNS (25), /* fp_div_sf */
831 COSTS_N_INSNS (48), /* fp_div_df */
832 COSTS_N_INSNS (5), /* int_mult_si */
833 COSTS_N_INSNS (5), /* int_mult_di */
834 COSTS_N_INSNS (41), /* int_div_si */
835 COSTS_N_INSNS (41), /* int_div_di */
836 1, /* branch_cost */
837 4 /* memory_latency */
839 { /* Loongson-2E */
840 DEFAULT_COSTS
842 { /* Loongson-2F */
843 DEFAULT_COSTS
845 { /* M4k */
846 DEFAULT_COSTS
848 { /* R3900 */
849 COSTS_N_INSNS (2), /* fp_add */
850 COSTS_N_INSNS (4), /* fp_mult_sf */
851 COSTS_N_INSNS (5), /* fp_mult_df */
852 COSTS_N_INSNS (12), /* fp_div_sf */
853 COSTS_N_INSNS (19), /* fp_div_df */
854 COSTS_N_INSNS (2), /* int_mult_si */
855 COSTS_N_INSNS (2), /* int_mult_di */
856 COSTS_N_INSNS (35), /* int_div_si */
857 COSTS_N_INSNS (35), /* int_div_di */
858 1, /* branch_cost */
859 4 /* memory_latency */
861 { /* R6000 */
862 COSTS_N_INSNS (3), /* fp_add */
863 COSTS_N_INSNS (5), /* fp_mult_sf */
864 COSTS_N_INSNS (6), /* fp_mult_df */
865 COSTS_N_INSNS (15), /* fp_div_sf */
866 COSTS_N_INSNS (16), /* fp_div_df */
867 COSTS_N_INSNS (17), /* int_mult_si */
868 COSTS_N_INSNS (17), /* int_mult_di */
869 COSTS_N_INSNS (38), /* int_div_si */
870 COSTS_N_INSNS (38), /* int_div_di */
871 2, /* branch_cost */
872 6 /* memory_latency */
874 { /* R4000 */
875 COSTS_N_INSNS (6), /* fp_add */
876 COSTS_N_INSNS (7), /* fp_mult_sf */
877 COSTS_N_INSNS (8), /* fp_mult_df */
878 COSTS_N_INSNS (23), /* fp_div_sf */
879 COSTS_N_INSNS (36), /* fp_div_df */
880 COSTS_N_INSNS (10), /* int_mult_si */
881 COSTS_N_INSNS (10), /* int_mult_di */
882 COSTS_N_INSNS (69), /* int_div_si */
883 COSTS_N_INSNS (69), /* int_div_di */
884 2, /* branch_cost */
885 6 /* memory_latency */
887 { /* R4100 */
888 DEFAULT_COSTS
890 { /* R4111 */
891 DEFAULT_COSTS
893 { /* R4120 */
894 DEFAULT_COSTS
896 { /* R4130 */
897 /* The only costs that appear to be updated here are
898 integer multiplication. */
899 SOFT_FP_COSTS,
900 COSTS_N_INSNS (4), /* int_mult_si */
901 COSTS_N_INSNS (6), /* int_mult_di */
902 COSTS_N_INSNS (69), /* int_div_si */
903 COSTS_N_INSNS (69), /* int_div_di */
904 1, /* branch_cost */
905 4 /* memory_latency */
907 { /* R4300 */
908 DEFAULT_COSTS
910 { /* R4600 */
911 DEFAULT_COSTS
913 { /* R4650 */
914 DEFAULT_COSTS
916 { /* R5000 */
917 COSTS_N_INSNS (6), /* fp_add */
918 COSTS_N_INSNS (4), /* fp_mult_sf */
919 COSTS_N_INSNS (5), /* fp_mult_df */
920 COSTS_N_INSNS (23), /* fp_div_sf */
921 COSTS_N_INSNS (36), /* fp_div_df */
922 COSTS_N_INSNS (5), /* int_mult_si */
923 COSTS_N_INSNS (5), /* int_mult_di */
924 COSTS_N_INSNS (36), /* int_div_si */
925 COSTS_N_INSNS (36), /* int_div_di */
926 1, /* branch_cost */
927 4 /* memory_latency */
929 { /* R5400 */
930 COSTS_N_INSNS (6), /* fp_add */
931 COSTS_N_INSNS (5), /* fp_mult_sf */
932 COSTS_N_INSNS (6), /* fp_mult_df */
933 COSTS_N_INSNS (30), /* fp_div_sf */
934 COSTS_N_INSNS (59), /* fp_div_df */
935 COSTS_N_INSNS (3), /* int_mult_si */
936 COSTS_N_INSNS (4), /* int_mult_di */
937 COSTS_N_INSNS (42), /* int_div_si */
938 COSTS_N_INSNS (74), /* int_div_di */
939 1, /* branch_cost */
940 4 /* memory_latency */
942 { /* R5500 */
943 COSTS_N_INSNS (6), /* fp_add */
944 COSTS_N_INSNS (5), /* fp_mult_sf */
945 COSTS_N_INSNS (6), /* fp_mult_df */
946 COSTS_N_INSNS (30), /* fp_div_sf */
947 COSTS_N_INSNS (59), /* fp_div_df */
948 COSTS_N_INSNS (5), /* int_mult_si */
949 COSTS_N_INSNS (9), /* int_mult_di */
950 COSTS_N_INSNS (42), /* int_div_si */
951 COSTS_N_INSNS (74), /* int_div_di */
952 1, /* branch_cost */
953 4 /* memory_latency */
955 { /* R7000 */
956 /* The only costs that are changed here are
957 integer multiplication. */
958 COSTS_N_INSNS (6), /* fp_add */
959 COSTS_N_INSNS (7), /* fp_mult_sf */
960 COSTS_N_INSNS (8), /* fp_mult_df */
961 COSTS_N_INSNS (23), /* fp_div_sf */
962 COSTS_N_INSNS (36), /* fp_div_df */
963 COSTS_N_INSNS (5), /* int_mult_si */
964 COSTS_N_INSNS (9), /* int_mult_di */
965 COSTS_N_INSNS (69), /* int_div_si */
966 COSTS_N_INSNS (69), /* int_div_di */
967 1, /* branch_cost */
968 4 /* memory_latency */
970 { /* R8000 */
971 DEFAULT_COSTS
973 { /* R9000 */
974 /* The only costs that are changed here are
975 integer multiplication. */
976 COSTS_N_INSNS (6), /* fp_add */
977 COSTS_N_INSNS (7), /* fp_mult_sf */
978 COSTS_N_INSNS (8), /* fp_mult_df */
979 COSTS_N_INSNS (23), /* fp_div_sf */
980 COSTS_N_INSNS (36), /* fp_div_df */
981 COSTS_N_INSNS (3), /* int_mult_si */
982 COSTS_N_INSNS (8), /* int_mult_di */
983 COSTS_N_INSNS (69), /* int_div_si */
984 COSTS_N_INSNS (69), /* int_div_di */
985 1, /* branch_cost */
986 4 /* memory_latency */
988 { /* SB1 */
989 /* These costs are the same as the SB-1A below. */
990 COSTS_N_INSNS (4), /* fp_add */
991 COSTS_N_INSNS (4), /* fp_mult_sf */
992 COSTS_N_INSNS (4), /* fp_mult_df */
993 COSTS_N_INSNS (24), /* fp_div_sf */
994 COSTS_N_INSNS (32), /* fp_div_df */
995 COSTS_N_INSNS (3), /* int_mult_si */
996 COSTS_N_INSNS (4), /* int_mult_di */
997 COSTS_N_INSNS (36), /* int_div_si */
998 COSTS_N_INSNS (68), /* int_div_di */
999 1, /* branch_cost */
1000 4 /* memory_latency */
1002 { /* SB1-A */
1003 /* These costs are the same as the SB-1 above. */
1004 COSTS_N_INSNS (4), /* fp_add */
1005 COSTS_N_INSNS (4), /* fp_mult_sf */
1006 COSTS_N_INSNS (4), /* fp_mult_df */
1007 COSTS_N_INSNS (24), /* fp_div_sf */
1008 COSTS_N_INSNS (32), /* fp_div_df */
1009 COSTS_N_INSNS (3), /* int_mult_si */
1010 COSTS_N_INSNS (4), /* int_mult_di */
1011 COSTS_N_INSNS (36), /* int_div_si */
1012 COSTS_N_INSNS (68), /* int_div_di */
1013 1, /* branch_cost */
1014 4 /* memory_latency */
1016 { /* SR71000 */
1017 DEFAULT_COSTS
1019 { /* XLR */
1020 /* Need to replace first five with the costs of calling the appropriate
1021 libgcc routine. */
1022 COSTS_N_INSNS (256), /* fp_add */
1023 COSTS_N_INSNS (256), /* fp_mult_sf */
1024 COSTS_N_INSNS (256), /* fp_mult_df */
1025 COSTS_N_INSNS (256), /* fp_div_sf */
1026 COSTS_N_INSNS (256), /* fp_div_df */
1027 COSTS_N_INSNS (8), /* int_mult_si */
1028 COSTS_N_INSNS (8), /* int_mult_di */
1029 COSTS_N_INSNS (72), /* int_div_si */
1030 COSTS_N_INSNS (72), /* int_div_di */
1031 1, /* branch_cost */
1032 4 /* memory_latency */
1036 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1037 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1038 struct mflip_mips16_entry GTY (()) {
1039 const char *name;
1040 bool mips16_p;
1042 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1044 /* Hash table callbacks for mflip_mips16_htab. */
1046 static hashval_t
1047 mflip_mips16_htab_hash (const void *entry)
1049 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1052 static int
1053 mflip_mips16_htab_eq (const void *entry, const void *name)
1055 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1056 (const char *) name) == 0;
1059 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1060 mode, false if it should next add an attribute for the opposite mode. */
1061 static GTY(()) bool mips16_flipper;
1063 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1064 for -mflip-mips16. Return true if it should use "mips16" and false if
1065 it should use "nomips16". */
1067 static bool
1068 mflip_mips16_use_mips16_p (tree decl)
1070 struct mflip_mips16_entry *entry;
1071 const char *name;
1072 hashval_t hash;
1073 void **slot;
1075 /* Use the opposite of the command-line setting for anonymous decls. */
1076 if (!DECL_NAME (decl))
1077 return !mips_base_mips16;
1079 if (!mflip_mips16_htab)
1080 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1081 mflip_mips16_htab_eq, NULL);
1083 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1084 hash = htab_hash_string (name);
1085 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1086 entry = (struct mflip_mips16_entry *) *slot;
1087 if (!entry)
1089 mips16_flipper = !mips16_flipper;
1090 entry = GGC_NEW (struct mflip_mips16_entry);
1091 entry->name = name;
1092 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1093 *slot = entry;
1095 return entry->mips16_p;
1098 /* Predicates to test for presence of "near" and "far"/"long_call"
1099 attributes on the given TYPE. */
1101 static bool
1102 mips_near_type_p (const_tree type)
1104 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1107 static bool
1108 mips_far_type_p (const_tree type)
1110 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1111 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1114 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1116 static bool
1117 mips_mips16_decl_p (const_tree decl)
1119 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1122 static bool
1123 mips_nomips16_decl_p (const_tree decl)
1125 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1128 /* Return true if function DECL is a MIPS16 function. Return the ambient
1129 setting if DECL is null. */
1131 static bool
1132 mips_use_mips16_mode_p (tree decl)
1134 if (decl)
1136 /* Nested functions must use the same frame pointer as their
1137 parent and must therefore use the same ISA mode. */
1138 tree parent = decl_function_context (decl);
1139 if (parent)
1140 decl = parent;
1141 if (mips_mips16_decl_p (decl))
1142 return true;
1143 if (mips_nomips16_decl_p (decl))
1144 return false;
1146 return mips_base_mips16;
1149 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1151 static int
1152 mips_comp_type_attributes (const_tree type1, const_tree type2)
1154 /* Disallow mixed near/far attributes. */
1155 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1156 return 0;
1157 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1158 return 0;
1159 return 1;
1162 /* Implement TARGET_INSERT_ATTRIBUTES. */
1164 static void
1165 mips_insert_attributes (tree decl, tree *attributes)
1167 const char *name;
1168 bool mips16_p, nomips16_p;
1170 /* Check for "mips16" and "nomips16" attributes. */
1171 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1172 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1173 if (TREE_CODE (decl) != FUNCTION_DECL)
1175 if (mips16_p)
1176 error ("%qs attribute only applies to functions", "mips16");
1177 if (nomips16_p)
1178 error ("%qs attribute only applies to functions", "nomips16");
1180 else
1182 mips16_p |= mips_mips16_decl_p (decl);
1183 nomips16_p |= mips_nomips16_decl_p (decl);
1184 if (mips16_p || nomips16_p)
1186 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1187 if (mips16_p && nomips16_p)
1188 error ("%qs cannot have both %<mips16%> and "
1189 "%<nomips16%> attributes",
1190 IDENTIFIER_POINTER (DECL_NAME (decl)));
1192 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1194 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1195 "mips16" attribute, arbitrarily pick one. We must pick the same
1196 setting for duplicate declarations of a function. */
1197 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1198 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1203 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1205 static tree
1206 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1208 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1209 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1210 error ("%qs redeclared with conflicting %qs attributes",
1211 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1212 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1213 error ("%qs redeclared with conflicting %qs attributes",
1214 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1216 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1217 DECL_ATTRIBUTES (newdecl));
1220 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1221 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1223 static void
1224 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1226 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1228 *base_ptr = XEXP (x, 0);
1229 *offset_ptr = INTVAL (XEXP (x, 1));
1231 else
1233 *base_ptr = x;
1234 *offset_ptr = 0;
1238 static unsigned int mips_build_integer (struct mips_integer_op *,
1239 unsigned HOST_WIDE_INT);
1241 /* A subroutine of mips_build_integer, with the same interface.
1242 Assume that the final action in the sequence should be a left shift. */
1244 static unsigned int
1245 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1247 unsigned int i, shift;
1249 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1250 since signed numbers are easier to load than unsigned ones. */
1251 shift = 0;
1252 while ((value & 1) == 0)
1253 value /= 2, shift++;
1255 i = mips_build_integer (codes, value);
1256 codes[i].code = ASHIFT;
1257 codes[i].value = shift;
1258 return i + 1;
1261 /* As for mips_build_shift, but assume that the final action will be
1262 an IOR or PLUS operation. */
1264 static unsigned int
1265 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1267 unsigned HOST_WIDE_INT high;
1268 unsigned int i;
1270 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1271 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1273 /* The constant is too complex to load with a simple LUI/ORI pair,
1274 so we want to give the recursive call as many trailing zeros as
1275 possible. In this case, we know bit 16 is set and that the
1276 low 16 bits form a negative number. If we subtract that number
1277 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1278 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1279 codes[i].code = PLUS;
1280 codes[i].value = CONST_LOW_PART (value);
1282 else
1284 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1285 bits gives a value with at least 17 trailing zeros. */
1286 i = mips_build_integer (codes, high);
1287 codes[i].code = IOR;
1288 codes[i].value = value & 0xffff;
1290 return i + 1;
1293 /* Fill CODES with a sequence of rtl operations to load VALUE.
1294 Return the number of operations needed. */
1296 static unsigned int
1297 mips_build_integer (struct mips_integer_op *codes,
1298 unsigned HOST_WIDE_INT value)
1300 if (SMALL_OPERAND (value)
1301 || SMALL_OPERAND_UNSIGNED (value)
1302 || LUI_OPERAND (value))
1304 /* The value can be loaded with a single instruction. */
1305 codes[0].code = UNKNOWN;
1306 codes[0].value = value;
1307 return 1;
1309 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1311 /* Either the constant is a simple LUI/ORI combination or its
1312 lowest bit is set. We don't want to shift in this case. */
1313 return mips_build_lower (codes, value);
1315 else if ((value & 0xffff) == 0)
1317 /* The constant will need at least three actions. The lowest
1318 16 bits are clear, so the final action will be a shift. */
1319 return mips_build_shift (codes, value);
1321 else
1323 /* The final action could be a shift, add or inclusive OR.
1324 Rather than use a complex condition to select the best
1325 approach, try both mips_build_shift and mips_build_lower
1326 and pick the one that gives the shortest sequence.
1327 Note that this case is only used once per constant. */
1328 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1329 unsigned int cost, alt_cost;
1331 cost = mips_build_shift (codes, value);
1332 alt_cost = mips_build_lower (alt_codes, value);
1333 if (alt_cost < cost)
1335 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1336 cost = alt_cost;
1338 return cost;
1342 /* Return true if X is a thread-local symbol. */
1344 static bool
1345 mips_tls_symbol_p (rtx x)
1347 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1350 /* Return true if SYMBOL_REF X is associated with a global symbol
1351 (in the STB_GLOBAL sense). */
1353 static bool
1354 mips_global_symbol_p (const_rtx x)
1356 const_tree decl = SYMBOL_REF_DECL (x);
1358 if (!decl)
1359 return !SYMBOL_REF_LOCAL_P (x);
1361 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1362 or weak symbols. Relocations in the object file will be against
1363 the target symbol, so it's that symbol's binding that matters here. */
1364 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1367 /* Return true if SYMBOL_REF X binds locally. */
1369 static bool
1370 mips_symbol_binds_local_p (const_rtx x)
1372 return (SYMBOL_REF_DECL (x)
1373 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1374 : SYMBOL_REF_LOCAL_P (x));
1377 /* Return true if rtx constants of mode MODE should be put into a small
1378 data section. */
1380 static bool
1381 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1383 return (!TARGET_EMBEDDED_DATA
1384 && TARGET_LOCAL_SDATA
1385 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1388 /* Return true if X should not be moved directly into register $25.
1389 We need this because many versions of GAS will treat "la $25,foo" as
1390 part of a call sequence and so allow a global "foo" to be lazily bound. */
1392 bool
1393 mips_dangerous_for_la25_p (rtx x)
1395 return (!TARGET_EXPLICIT_RELOCS
1396 && TARGET_USE_GOT
1397 && GET_CODE (x) == SYMBOL_REF
1398 && mips_global_symbol_p (x));
1401 /* Return the method that should be used to access SYMBOL_REF or
1402 LABEL_REF X in context CONTEXT. */
1404 static enum mips_symbol_type
1405 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1407 if (TARGET_RTP_PIC)
1408 return SYMBOL_GOT_DISP;
1410 if (GET_CODE (x) == LABEL_REF)
1412 /* LABEL_REFs are used for jump tables as well as text labels.
1413 Only return SYMBOL_PC_RELATIVE if we know the label is in
1414 the text section. */
1415 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1416 return SYMBOL_PC_RELATIVE;
1418 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1419 return SYMBOL_GOT_PAGE_OFST;
1421 return SYMBOL_ABSOLUTE;
1424 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1426 if (SYMBOL_REF_TLS_MODEL (x))
1427 return SYMBOL_TLS;
1429 if (CONSTANT_POOL_ADDRESS_P (x))
1431 if (TARGET_MIPS16_TEXT_LOADS)
1432 return SYMBOL_PC_RELATIVE;
1434 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1435 return SYMBOL_PC_RELATIVE;
1437 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1438 return SYMBOL_GP_RELATIVE;
1441 /* Do not use small-data accesses for weak symbols; they may end up
1442 being zero. */
1443 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1444 return SYMBOL_GP_RELATIVE;
1446 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1447 is in effect. */
1448 if (TARGET_ABICALLS
1449 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1451 /* There are three cases to consider:
1453 - o32 PIC (either with or without explicit relocs)
1454 - n32/n64 PIC without explicit relocs
1455 - n32/n64 PIC with explicit relocs
1457 In the first case, both local and global accesses will use an
1458 R_MIPS_GOT16 relocation. We must correctly predict which of
1459 the two semantics (local or global) the assembler and linker
1460 will apply. The choice depends on the symbol's binding rather
1461 than its visibility.
1463 In the second case, the assembler will not use R_MIPS_GOT16
1464 relocations, but it chooses between local and global accesses
1465 in the same way as for o32 PIC.
1467 In the third case we have more freedom since both forms of
1468 access will work for any kind of symbol. However, there seems
1469 little point in doing things differently. */
1470 if (mips_global_symbol_p (x))
1471 return SYMBOL_GOT_DISP;
1473 return SYMBOL_GOT_PAGE_OFST;
1476 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1477 return SYMBOL_FORCE_TO_MEM;
1479 return SYMBOL_ABSOLUTE;
1482 /* Classify the base of symbolic expression X, given that X appears in
1483 context CONTEXT. */
1485 static enum mips_symbol_type
1486 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1488 rtx offset;
1490 split_const (x, &x, &offset);
1491 if (UNSPEC_ADDRESS_P (x))
1492 return UNSPEC_ADDRESS_TYPE (x);
1494 return mips_classify_symbol (x, context);
1497 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1498 is the alignment in bytes of SYMBOL_REF X. */
1500 static bool
1501 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1503 HOST_WIDE_INT align;
1505 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1506 return IN_RANGE (offset, 0, align - 1);
1509 /* Return true if X is a symbolic constant that can be used in context
1510 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1512 bool
1513 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1514 enum mips_symbol_type *symbol_type)
1516 rtx offset;
1518 split_const (x, &x, &offset);
1519 if (UNSPEC_ADDRESS_P (x))
1521 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1522 x = UNSPEC_ADDRESS (x);
1524 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1526 *symbol_type = mips_classify_symbol (x, context);
1527 if (*symbol_type == SYMBOL_TLS)
1528 return false;
1530 else
1531 return false;
1533 if (offset == const0_rtx)
1534 return true;
1536 /* Check whether a nonzero offset is valid for the underlying
1537 relocations. */
1538 switch (*symbol_type)
1540 case SYMBOL_ABSOLUTE:
1541 case SYMBOL_FORCE_TO_MEM:
1542 case SYMBOL_32_HIGH:
1543 case SYMBOL_64_HIGH:
1544 case SYMBOL_64_MID:
1545 case SYMBOL_64_LOW:
1546 /* If the target has 64-bit pointers and the object file only
1547 supports 32-bit symbols, the values of those symbols will be
1548 sign-extended. In this case we can't allow an arbitrary offset
1549 in case the 32-bit value X + OFFSET has a different sign from X. */
1550 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1551 return offset_within_block_p (x, INTVAL (offset));
1553 /* In other cases the relocations can handle any offset. */
1554 return true;
1556 case SYMBOL_PC_RELATIVE:
1557 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1558 In this case, we no longer have access to the underlying constant,
1559 but the original symbol-based access was known to be valid. */
1560 if (GET_CODE (x) == LABEL_REF)
1561 return true;
1563 /* Fall through. */
1565 case SYMBOL_GP_RELATIVE:
1566 /* Make sure that the offset refers to something within the
1567 same object block. This should guarantee that the final
1568 PC- or GP-relative offset is within the 16-bit limit. */
1569 return offset_within_block_p (x, INTVAL (offset));
1571 case SYMBOL_GOT_PAGE_OFST:
1572 case SYMBOL_GOTOFF_PAGE:
1573 /* If the symbol is global, the GOT entry will contain the symbol's
1574 address, and we will apply a 16-bit offset after loading it.
1575 If the symbol is local, the linker should provide enough local
1576 GOT entries for a 16-bit offset, but larger offsets may lead
1577 to GOT overflow. */
1578 return SMALL_INT (offset);
1580 case SYMBOL_TPREL:
1581 case SYMBOL_DTPREL:
1582 /* There is no carry between the HI and LO REL relocations, so the
1583 offset is only valid if we know it won't lead to such a carry. */
1584 return mips_offset_within_alignment_p (x, INTVAL (offset));
1586 case SYMBOL_GOT_DISP:
1587 case SYMBOL_GOTOFF_DISP:
1588 case SYMBOL_GOTOFF_CALL:
1589 case SYMBOL_GOTOFF_LOADGP:
1590 case SYMBOL_TLSGD:
1591 case SYMBOL_TLSLDM:
1592 case SYMBOL_GOTTPREL:
1593 case SYMBOL_TLS:
1594 case SYMBOL_HALF:
1595 return false;
1597 gcc_unreachable ();
1600 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1601 single instruction. We rely on the fact that, in the worst case,
1602 all instructions involved in a MIPS16 address calculation are usually
1603 extended ones. */
1605 static int
1606 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1608 switch (type)
1610 case SYMBOL_ABSOLUTE:
1611 /* When using 64-bit symbols, we need 5 preparatory instructions,
1612 such as:
1614 lui $at,%highest(symbol)
1615 daddiu $at,$at,%higher(symbol)
1616 dsll $at,$at,16
1617 daddiu $at,$at,%hi(symbol)
1618 dsll $at,$at,16
1620 The final address is then $at + %lo(symbol). With 32-bit
1621 symbols we just need a preparatory LUI for normal mode and
1622 a preparatory LI and SLL for MIPS16. */
1623 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1625 case SYMBOL_GP_RELATIVE:
1626 /* Treat GP-relative accesses as taking a single instruction on
1627 MIPS16 too; the copy of $gp can often be shared. */
1628 return 1;
1630 case SYMBOL_PC_RELATIVE:
1631 /* PC-relative constants can be only be used with ADDIUPC,
1632 DADDIUPC, LWPC and LDPC. */
1633 if (mode == MAX_MACHINE_MODE
1634 || GET_MODE_SIZE (mode) == 4
1635 || GET_MODE_SIZE (mode) == 8)
1636 return 1;
1638 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1639 return 0;
1641 case SYMBOL_FORCE_TO_MEM:
1642 /* LEAs will be converted into constant-pool references by
1643 mips_reorg. */
1644 if (mode == MAX_MACHINE_MODE)
1645 return 1;
1647 /* The constant must be loaded and then dereferenced. */
1648 return 0;
1650 case SYMBOL_GOT_DISP:
1651 /* The constant will have to be loaded from the GOT before it
1652 is used in an address. */
1653 if (mode != MAX_MACHINE_MODE)
1654 return 0;
1656 /* Fall through. */
1658 case SYMBOL_GOT_PAGE_OFST:
1659 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1660 local/global classification is accurate. The worst cases are:
1662 (1) For local symbols when generating o32 or o64 code. The assembler
1663 will use:
1665 lw $at,%got(symbol)
1668 ...and the final address will be $at + %lo(symbol).
1670 (2) For global symbols when -mxgot. The assembler will use:
1672 lui $at,%got_hi(symbol)
1673 (d)addu $at,$at,$gp
1675 ...and the final address will be $at + %got_lo(symbol). */
1676 return 3;
1678 case SYMBOL_GOTOFF_PAGE:
1679 case SYMBOL_GOTOFF_DISP:
1680 case SYMBOL_GOTOFF_CALL:
1681 case SYMBOL_GOTOFF_LOADGP:
1682 case SYMBOL_32_HIGH:
1683 case SYMBOL_64_HIGH:
1684 case SYMBOL_64_MID:
1685 case SYMBOL_64_LOW:
1686 case SYMBOL_TLSGD:
1687 case SYMBOL_TLSLDM:
1688 case SYMBOL_DTPREL:
1689 case SYMBOL_GOTTPREL:
1690 case SYMBOL_TPREL:
1691 case SYMBOL_HALF:
1692 /* A 16-bit constant formed by a single relocation, or a 32-bit
1693 constant formed from a high 16-bit relocation and a low 16-bit
1694 relocation. Use mips_split_p to determine which. 32-bit
1695 constants need an "lui; addiu" sequence for normal mode and
1696 an "li; sll; addiu" sequence for MIPS16 mode. */
1697 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1699 case SYMBOL_TLS:
1700 /* We don't treat a bare TLS symbol as a constant. */
1701 return 0;
1703 gcc_unreachable ();
1706 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1707 to load symbols of type TYPE into a register. Return 0 if the given
1708 type of symbol cannot be used as an immediate operand.
1710 Otherwise, return the number of instructions needed to load or store
1711 values of mode MODE to or from addresses of type TYPE. Return 0 if
1712 the given type of symbol is not valid in addresses.
1714 In both cases, treat extended MIPS16 instructions as two instructions. */
1716 static int
1717 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1719 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1722 /* A for_each_rtx callback. Stop the search if *X references a
1723 thread-local symbol. */
1725 static int
1726 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1728 return mips_tls_symbol_p (*x);
1731 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1733 static bool
1734 mips_cannot_force_const_mem (rtx x)
1736 rtx base, offset;
1738 if (!TARGET_MIPS16)
1740 /* As an optimization, reject constants that mips_legitimize_move
1741 can expand inline.
1743 Suppose we have a multi-instruction sequence that loads constant C
1744 into register R. If R does not get allocated a hard register, and
1745 R is used in an operand that allows both registers and memory
1746 references, reload will consider forcing C into memory and using
1747 one of the instruction's memory alternatives. Returning false
1748 here will force it to use an input reload instead. */
1749 if (GET_CODE (x) == CONST_INT)
1750 return true;
1752 split_const (x, &base, &offset);
1753 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1754 return true;
1757 /* TLS symbols must be computed by mips_legitimize_move. */
1758 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1759 return true;
1761 return false;
1764 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1765 constants when we're using a per-function constant pool. */
1767 static bool
1768 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1769 const_rtx x ATTRIBUTE_UNUSED)
1771 return !TARGET_MIPS16_PCREL_LOADS;
1774 /* Return true if register REGNO is a valid base register for mode MODE.
1775 STRICT_P is true if REG_OK_STRICT is in effect. */
1778 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1779 bool strict_p)
1781 if (!HARD_REGISTER_NUM_P (regno))
1783 if (!strict_p)
1784 return true;
1785 regno = reg_renumber[regno];
1788 /* These fake registers will be eliminated to either the stack or
1789 hard frame pointer, both of which are usually valid base registers.
1790 Reload deals with the cases where the eliminated form isn't valid. */
1791 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1792 return true;
1794 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1795 values, nothing smaller. There are two problems here:
1797 (a) Instantiating virtual registers can introduce new uses of the
1798 stack pointer. If these virtual registers are valid addresses,
1799 the stack pointer should be too.
1801 (b) Most uses of the stack pointer are not made explicit until
1802 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1803 We don't know until that stage whether we'll be eliminating to the
1804 stack pointer (which needs the restriction) or the hard frame
1805 pointer (which doesn't).
1807 All in all, it seems more consistent to only enforce this restriction
1808 during and after reload. */
1809 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1810 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1812 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1815 /* Return true if X is a valid base register for mode MODE.
1816 STRICT_P is true if REG_OK_STRICT is in effect. */
1818 static bool
1819 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1821 if (!strict_p && GET_CODE (x) == SUBREG)
1822 x = SUBREG_REG (x);
1824 return (REG_P (x)
1825 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1828 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1829 can address a value of mode MODE. */
1831 static bool
1832 mips_valid_offset_p (rtx x, enum machine_mode mode)
1834 /* Check that X is a signed 16-bit number. */
1835 if (!const_arith_operand (x, Pmode))
1836 return false;
1838 /* We may need to split multiword moves, so make sure that every word
1839 is accessible. */
1840 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1841 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1842 return false;
1844 return true;
1847 /* Return true if a LO_SUM can address a value of mode MODE when the
1848 LO_SUM symbol has type SYMBOL_TYPE. */
1850 static bool
1851 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
1853 /* Check that symbols of type SYMBOL_TYPE can be used to access values
1854 of mode MODE. */
1855 if (mips_symbol_insns (symbol_type, mode) == 0)
1856 return false;
1858 /* Check that there is a known low-part relocation. */
1859 if (mips_lo_relocs[symbol_type] == NULL)
1860 return false;
1862 /* We may need to split multiword moves, so make sure that each word
1863 can be accessed without inducing a carry. This is mainly needed
1864 for o64, which has historically only guaranteed 64-bit alignment
1865 for 128-bit types. */
1866 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1867 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1868 return false;
1870 return true;
1873 /* Return true if X is a valid address for machine mode MODE. If it is,
1874 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
1875 effect. */
1877 static bool
1878 mips_classify_address (struct mips_address_info *info, rtx x,
1879 enum machine_mode mode, bool strict_p)
1881 switch (GET_CODE (x))
1883 case REG:
1884 case SUBREG:
1885 info->type = ADDRESS_REG;
1886 info->reg = x;
1887 info->offset = const0_rtx;
1888 return mips_valid_base_register_p (info->reg, mode, strict_p);
1890 case PLUS:
1891 info->type = ADDRESS_REG;
1892 info->reg = XEXP (x, 0);
1893 info->offset = XEXP (x, 1);
1894 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1895 && mips_valid_offset_p (info->offset, mode));
1897 case LO_SUM:
1898 info->type = ADDRESS_LO_SUM;
1899 info->reg = XEXP (x, 0);
1900 info->offset = XEXP (x, 1);
1901 /* We have to trust the creator of the LO_SUM to do something vaguely
1902 sane. Target-independent code that creates a LO_SUM should also
1903 create and verify the matching HIGH. Target-independent code that
1904 adds an offset to a LO_SUM must prove that the offset will not
1905 induce a carry. Failure to do either of these things would be
1906 a bug, and we are not required to check for it here. The MIPS
1907 backend itself should only create LO_SUMs for valid symbolic
1908 constants, with the high part being either a HIGH or a copy
1909 of _gp. */
1910 info->symbol_type
1911 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1912 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1913 && mips_valid_lo_sum_p (info->symbol_type, mode));
1915 case CONST_INT:
1916 /* Small-integer addresses don't occur very often, but they
1917 are legitimate if $0 is a valid base register. */
1918 info->type = ADDRESS_CONST_INT;
1919 return !TARGET_MIPS16 && SMALL_INT (x);
1921 case CONST:
1922 case LABEL_REF:
1923 case SYMBOL_REF:
1924 info->type = ADDRESS_SYMBOLIC;
1925 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1926 &info->symbol_type)
1927 && mips_symbol_insns (info->symbol_type, mode) > 0
1928 && !mips_split_p[info->symbol_type]);
1930 default:
1931 return false;
1935 /* Return true if X is a legitimate address for a memory operand of mode
1936 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
1938 bool
1939 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1941 struct mips_address_info addr;
1943 return mips_classify_address (&addr, x, mode, strict_p);
1946 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1948 bool
1949 mips_stack_address_p (rtx x, enum machine_mode mode)
1951 struct mips_address_info addr;
1953 return (mips_classify_address (&addr, x, mode, false)
1954 && addr.type == ADDRESS_REG
1955 && addr.reg == stack_pointer_rtx);
1958 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
1959 address instruction. Note that such addresses are not considered
1960 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
1961 is so restricted. */
1963 static bool
1964 mips_lwxs_address_p (rtx addr)
1966 if (ISA_HAS_LWXS
1967 && GET_CODE (addr) == PLUS
1968 && REG_P (XEXP (addr, 1)))
1970 rtx offset = XEXP (addr, 0);
1971 if (GET_CODE (offset) == MULT
1972 && REG_P (XEXP (offset, 0))
1973 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1974 && INTVAL (XEXP (offset, 1)) == 4)
1975 return true;
1977 return false;
1980 /* Return true if a value at OFFSET bytes from base register BASE can be
1981 accessed using an unextended MIPS16 instruction. MODE is the mode of
1982 the value.
1984 Usually the offset in an unextended instruction is a 5-bit field.
1985 The offset is unsigned and shifted left once for LH and SH, twice
1986 for LW and SW, and so on. An exception is LWSP and SWSP, which have
1987 an 8-bit immediate field that's shifted left twice. */
1989 static bool
1990 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
1991 unsigned HOST_WIDE_INT offset)
1993 if (offset % GET_MODE_SIZE (mode) == 0)
1995 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1996 return offset < 256U * GET_MODE_SIZE (mode);
1997 return offset < 32U * GET_MODE_SIZE (mode);
1999 return false;
2002 /* Return the number of instructions needed to load or store a value
2003 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2004 Assume that multiword moves may need to be split into word moves
2005 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2006 enough.
2008 For MIPS16 code, count extended instructions as two instructions. */
2011 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2013 struct mips_address_info addr;
2014 int factor;
2016 /* BLKmode is used for single unaligned loads and stores and should
2017 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2018 meaningless, so we have to single it out as a special case one way
2019 or the other.) */
2020 if (mode != BLKmode && might_split_p)
2021 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2022 else
2023 factor = 1;
2025 if (mips_classify_address (&addr, x, mode, false))
2026 switch (addr.type)
2028 case ADDRESS_REG:
2029 if (TARGET_MIPS16
2030 && !mips16_unextended_reference_p (mode, addr.reg,
2031 UINTVAL (addr.offset)))
2032 return factor * 2;
2033 return factor;
2035 case ADDRESS_LO_SUM:
2036 return TARGET_MIPS16 ? factor * 2 : factor;
2038 case ADDRESS_CONST_INT:
2039 return factor;
2041 case ADDRESS_SYMBOLIC:
2042 return factor * mips_symbol_insns (addr.symbol_type, mode);
2044 return 0;
2047 /* Return the number of instructions needed to load constant X.
2048 Return 0 if X isn't a valid constant. */
2051 mips_const_insns (rtx x)
2053 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2054 enum mips_symbol_type symbol_type;
2055 rtx offset;
2057 switch (GET_CODE (x))
2059 case HIGH:
2060 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2061 &symbol_type)
2062 || !mips_split_p[symbol_type])
2063 return 0;
2065 /* This is simply an LUI for normal mode. It is an extended
2066 LI followed by an extended SLL for MIPS16. */
2067 return TARGET_MIPS16 ? 4 : 1;
2069 case CONST_INT:
2070 if (TARGET_MIPS16)
2071 /* Unsigned 8-bit constants can be loaded using an unextended
2072 LI instruction. Unsigned 16-bit constants can be loaded
2073 using an extended LI. Negative constants must be loaded
2074 using LI and then negated. */
2075 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2076 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2077 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2078 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2079 : 0);
2081 return mips_build_integer (codes, INTVAL (x));
2083 case CONST_DOUBLE:
2084 case CONST_VECTOR:
2085 /* Allow zeros for normal mode, where we can use $0. */
2086 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2088 case CONST:
2089 if (CONST_GP_P (x))
2090 return 1;
2092 /* See if we can refer to X directly. */
2093 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2094 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2096 /* Otherwise try splitting the constant into a base and offset.
2097 16-bit offsets can be added using an extra ADDIU. Larger offsets
2098 must be calculated separately and then added to the base. */
2099 split_const (x, &x, &offset);
2100 if (offset != 0)
2102 int n = mips_const_insns (x);
2103 if (n != 0)
2105 if (SMALL_INT (offset))
2106 return n + 1;
2107 else
2108 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2111 return 0;
2113 case SYMBOL_REF:
2114 case LABEL_REF:
2115 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2116 MAX_MACHINE_MODE);
2118 default:
2119 return 0;
2123 /* Return the number of instructions needed to implement INSN,
2124 given that it loads from or stores to MEM. Count extended
2125 MIPS16 instructions as two instructions. */
2128 mips_load_store_insns (rtx mem, rtx insn)
2130 enum machine_mode mode;
2131 bool might_split_p;
2132 rtx set;
2134 gcc_assert (MEM_P (mem));
2135 mode = GET_MODE (mem);
2137 /* Try to prove that INSN does not need to be split. */
2138 might_split_p = true;
2139 if (GET_MODE_BITSIZE (mode) == 64)
2141 set = single_set (insn);
2142 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2143 might_split_p = false;
2146 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2149 /* Return the number of instructions needed for an integer division. */
2152 mips_idiv_insns (void)
2154 int count;
2156 count = 1;
2157 if (TARGET_CHECK_ZERO_DIV)
2159 if (GENERATE_DIVIDE_TRAPS)
2160 count++;
2161 else
2162 count += 2;
2165 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2166 count++;
2167 return count;
2170 /* Emit a move from SRC to DEST. Assume that the move expanders can
2171 handle all moves if !can_create_pseudo_p (). The distinction is
2172 important because, unlike emit_move_insn, the move expanders know
2173 how to force Pmode objects into the constant pool even when the
2174 constant pool address is not itself legitimate. */
2177 mips_emit_move (rtx dest, rtx src)
2179 return (can_create_pseudo_p ()
2180 ? emit_move_insn (dest, src)
2181 : emit_move_insn_1 (dest, src));
2184 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2186 static void
2187 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2189 emit_insn (gen_rtx_SET (VOIDmode, target,
2190 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2193 /* Compute (CODE OP0 OP1) and store the result in a new register
2194 of mode MODE. Return that new register. */
2196 static rtx
2197 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2199 rtx reg;
2201 reg = gen_reg_rtx (mode);
2202 mips_emit_binary (code, reg, op0, op1);
2203 return reg;
2206 /* Copy VALUE to a register and return that register. If new pseudos
2207 are allowed, copy it into a new register, otherwise use DEST. */
2209 static rtx
2210 mips_force_temporary (rtx dest, rtx value)
2212 if (can_create_pseudo_p ())
2213 return force_reg (Pmode, value);
2214 else
2216 mips_emit_move (dest, value);
2217 return dest;
2221 /* Emit a call sequence with call pattern PATTERN and return the call
2222 instruction itself (which is not necessarily the last instruction
2223 emitted). LAZY_P is true if the call address is lazily-bound. */
2225 static rtx
2226 mips_emit_call_insn (rtx pattern, bool lazy_p)
2228 rtx insn;
2230 insn = emit_call_insn (pattern);
2232 /* Lazy-binding stubs require $gp to be valid on entry. */
2233 if (lazy_p)
2234 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2236 if (TARGET_USE_GOT)
2238 /* See the comment above load_call<mode> for details. */
2239 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2240 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2241 emit_insn (gen_update_got_version ());
2243 return insn;
2246 /* Return an instruction that copies $gp into register REG. We want
2247 GCC to treat the register's value as constant, so that its value
2248 can be rematerialized on demand. */
2250 static rtx
2251 gen_load_const_gp (rtx reg)
2253 return (Pmode == SImode
2254 ? gen_load_const_gp_si (reg)
2255 : gen_load_const_gp_di (reg));
2258 /* Return a pseudo register that contains the value of $gp throughout
2259 the current function. Such registers are needed by MIPS16 functions,
2260 for which $gp itself is not a valid base register or addition operand. */
2262 static rtx
2263 mips16_gp_pseudo_reg (void)
2265 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2266 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2268 /* Don't emit an instruction to initialize the pseudo register if
2269 we are being called from the tree optimizers' cost-calculation
2270 routines. */
2271 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2272 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2274 rtx insn, scan, after;
2276 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2278 push_topmost_sequence ();
2279 /* We need to emit the initialization after the FUNCTION_BEG
2280 note, so that it will be integrated. */
2281 after = get_insns ();
2282 for (scan = after; scan != NULL_RTX; scan = NEXT_INSN (scan))
2283 if (NOTE_P (scan) && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2285 after = scan;
2286 break;
2288 insn = emit_insn_after (insn, after);
2289 pop_topmost_sequence ();
2291 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2294 return cfun->machine->mips16_gp_pseudo_rtx;
2297 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2298 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2299 constant in that context and can be split into a high part and a LO_SUM.
2300 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2301 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2303 TEMP is as for mips_force_temporary and is used to load the high
2304 part into a register. */
2306 bool
2307 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2309 enum mips_symbol_context context;
2310 enum mips_symbol_type symbol_type;
2311 rtx high;
2313 context = (mode == MAX_MACHINE_MODE
2314 ? SYMBOL_CONTEXT_LEA
2315 : SYMBOL_CONTEXT_MEM);
2316 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2317 || mips_symbol_insns (symbol_type, mode) == 0
2318 || !mips_split_p[symbol_type])
2319 return false;
2321 if (lo_sum_out)
2323 if (symbol_type == SYMBOL_GP_RELATIVE)
2325 if (!can_create_pseudo_p ())
2327 emit_insn (gen_load_const_gp (temp));
2328 high = temp;
2330 else
2331 high = mips16_gp_pseudo_reg ();
2333 else
2335 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2336 high = mips_force_temporary (temp, high);
2338 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2340 return true;
2343 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2344 then add CONST_INT OFFSET to the result. */
2346 static rtx
2347 mips_unspec_address_offset (rtx base, rtx offset,
2348 enum mips_symbol_type symbol_type)
2350 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2351 UNSPEC_ADDRESS_FIRST + symbol_type);
2352 if (offset != const0_rtx)
2353 base = gen_rtx_PLUS (Pmode, base, offset);
2354 return gen_rtx_CONST (Pmode, base);
2357 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2358 type SYMBOL_TYPE. */
2361 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2363 rtx base, offset;
2365 split_const (address, &base, &offset);
2366 return mips_unspec_address_offset (base, offset, symbol_type);
2369 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2370 high part to BASE and return the result. Just return BASE otherwise.
2371 TEMP is as for mips_force_temporary.
2373 The returned expression can be used as the first operand to a LO_SUM. */
2375 static rtx
2376 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2377 enum mips_symbol_type symbol_type)
2379 if (mips_split_p[symbol_type])
2381 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2382 addr = mips_force_temporary (temp, addr);
2383 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2385 return base;
2388 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2389 mips_force_temporary; it is only needed when OFFSET is not a
2390 SMALL_OPERAND. */
2392 static rtx
2393 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2395 if (!SMALL_OPERAND (offset))
2397 rtx high;
2399 if (TARGET_MIPS16)
2401 /* Load the full offset into a register so that we can use
2402 an unextended instruction for the address itself. */
2403 high = GEN_INT (offset);
2404 offset = 0;
2406 else
2408 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2409 high = GEN_INT (CONST_HIGH_PART (offset));
2410 offset = CONST_LOW_PART (offset);
2412 high = mips_force_temporary (temp, high);
2413 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2415 return plus_constant (reg, offset);
2418 /* The __tls_get_attr symbol. */
2419 static GTY(()) rtx mips_tls_symbol;
2421 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2422 the TLS symbol we are referencing and TYPE is the symbol type to use
2423 (either global dynamic or local dynamic). V0 is an RTX for the
2424 return value location. */
2426 static rtx
2427 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2429 rtx insn, loc, a0;
2431 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2433 if (!mips_tls_symbol)
2434 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2436 loc = mips_unspec_address (sym, type);
2438 start_sequence ();
2440 emit_insn (gen_rtx_SET (Pmode, a0,
2441 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2442 insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false);
2443 RTL_CONST_CALL_P (insn) = 1;
2444 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2445 insn = get_insns ();
2447 end_sequence ();
2449 return insn;
2452 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2453 its address. The return value will be both a valid address and a valid
2454 SET_SRC (either a REG or a LO_SUM). */
2456 static rtx
2457 mips_legitimize_tls_address (rtx loc)
2459 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2460 enum tls_model model;
2462 if (TARGET_MIPS16)
2464 sorry ("MIPS16 TLS");
2465 return gen_reg_rtx (Pmode);
2468 model = SYMBOL_REF_TLS_MODEL (loc);
2469 /* Only TARGET_ABICALLS code can have more than one module; other
2470 code must be be static and should not use a GOT. All TLS models
2471 reduce to local exec in this situation. */
2472 if (!TARGET_ABICALLS)
2473 model = TLS_MODEL_LOCAL_EXEC;
2475 switch (model)
2477 case TLS_MODEL_GLOBAL_DYNAMIC:
2478 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2479 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2480 dest = gen_reg_rtx (Pmode);
2481 emit_libcall_block (insn, dest, v0, loc);
2482 break;
2484 case TLS_MODEL_LOCAL_DYNAMIC:
2485 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2486 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2487 tmp1 = gen_reg_rtx (Pmode);
2489 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2490 share the LDM result with other LD model accesses. */
2491 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2492 UNSPEC_TLS_LDM);
2493 emit_libcall_block (insn, tmp1, v0, eqv);
2495 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2496 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2497 mips_unspec_address (loc, SYMBOL_DTPREL));
2498 break;
2500 case TLS_MODEL_INITIAL_EXEC:
2501 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2502 tmp1 = gen_reg_rtx (Pmode);
2503 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2504 if (Pmode == DImode)
2506 emit_insn (gen_tls_get_tp_di (v1));
2507 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2509 else
2511 emit_insn (gen_tls_get_tp_si (v1));
2512 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2514 dest = gen_reg_rtx (Pmode);
2515 emit_insn (gen_add3_insn (dest, tmp1, v1));
2516 break;
2518 case TLS_MODEL_LOCAL_EXEC:
2519 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2520 if (Pmode == DImode)
2521 emit_insn (gen_tls_get_tp_di (v1));
2522 else
2523 emit_insn (gen_tls_get_tp_si (v1));
2525 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2526 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2527 mips_unspec_address (loc, SYMBOL_TPREL));
2528 break;
2530 default:
2531 gcc_unreachable ();
2533 return dest;
2536 /* If X is not a valid address for mode MODE, force it into a register. */
2538 static rtx
2539 mips_force_address (rtx x, enum machine_mode mode)
2541 if (!mips_legitimate_address_p (mode, x, false))
2542 x = force_reg (Pmode, x);
2543 return x;
2546 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2547 be legitimized in a way that the generic machinery might not expect,
2548 put the new address in *XLOC and return true. MODE is the mode of
2549 the memory being accessed. */
2551 bool
2552 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2554 rtx base, addr;
2555 HOST_WIDE_INT offset;
2557 if (mips_tls_symbol_p (*xloc))
2559 *xloc = mips_legitimize_tls_address (*xloc);
2560 return true;
2563 /* See if the address can split into a high part and a LO_SUM. */
2564 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2566 *xloc = mips_force_address (addr, mode);
2567 return true;
2570 /* Handle BASE + OFFSET using mips_add_offset. */
2571 mips_split_plus (*xloc, &base, &offset);
2572 if (offset != 0)
2574 if (!mips_valid_base_register_p (base, mode, false))
2575 base = copy_to_mode_reg (Pmode, base);
2576 addr = mips_add_offset (NULL, base, offset);
2577 *xloc = mips_force_address (addr, mode);
2578 return true;
2580 return false;
2583 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2585 void
2586 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2588 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2589 enum machine_mode mode;
2590 unsigned int i, num_ops;
2591 rtx x;
2593 mode = GET_MODE (dest);
2594 num_ops = mips_build_integer (codes, value);
2596 /* Apply each binary operation to X. Invariant: X is a legitimate
2597 source operand for a SET pattern. */
2598 x = GEN_INT (codes[0].value);
2599 for (i = 1; i < num_ops; i++)
2601 if (!can_create_pseudo_p ())
2603 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2604 x = temp;
2606 else
2607 x = force_reg (mode, x);
2608 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2611 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2614 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2615 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2616 move_operand. */
2618 static void
2619 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2621 rtx base, offset;
2623 /* Split moves of big integers into smaller pieces. */
2624 if (splittable_const_int_operand (src, mode))
2626 mips_move_integer (dest, dest, INTVAL (src));
2627 return;
2630 /* Split moves of symbolic constants into high/low pairs. */
2631 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2633 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2634 return;
2637 /* Generate the appropriate access sequences for TLS symbols. */
2638 if (mips_tls_symbol_p (src))
2640 mips_emit_move (dest, mips_legitimize_tls_address (src));
2641 return;
2644 /* If we have (const (plus symbol offset)), and that expression cannot
2645 be forced into memory, load the symbol first and add in the offset.
2646 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2647 forced into memory, as it usually produces better code. */
2648 split_const (src, &base, &offset);
2649 if (offset != const0_rtx
2650 && (targetm.cannot_force_const_mem (src)
2651 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2653 base = mips_force_temporary (dest, base);
2654 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2655 return;
2658 src = force_const_mem (mode, src);
2660 /* When using explicit relocs, constant pool references are sometimes
2661 not legitimate addresses. */
2662 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2663 mips_emit_move (dest, src);
2666 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2667 sequence that is valid. */
2669 bool
2670 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2672 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2674 mips_emit_move (dest, force_reg (mode, src));
2675 return true;
2678 /* We need to deal with constants that would be legitimate
2679 immediate_operands but aren't legitimate move_operands. */
2680 if (CONSTANT_P (src) && !move_operand (src, mode))
2682 mips_legitimize_const_move (mode, dest, src);
2683 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2684 return true;
2686 return false;
2689 /* Return true if value X in context CONTEXT is a small-data address
2690 that can be rewritten as a LO_SUM. */
2692 static bool
2693 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2695 enum mips_symbol_type symbol_type;
2697 return (TARGET_EXPLICIT_RELOCS
2698 && mips_symbolic_constant_p (x, context, &symbol_type)
2699 && symbol_type == SYMBOL_GP_RELATIVE);
2702 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2703 containing MEM, or null if none. */
2705 static int
2706 mips_small_data_pattern_1 (rtx *loc, void *data)
2708 enum mips_symbol_context context;
2710 if (GET_CODE (*loc) == LO_SUM)
2711 return -1;
2713 if (MEM_P (*loc))
2715 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2716 return 1;
2717 return -1;
2720 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2721 return mips_rewrite_small_data_p (*loc, context);
2724 /* Return true if OP refers to small data symbols directly, not through
2725 a LO_SUM. */
2727 bool
2728 mips_small_data_pattern_p (rtx op)
2730 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2733 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2734 DATA is the containing MEM, or null if none. */
2736 static int
2737 mips_rewrite_small_data_1 (rtx *loc, void *data)
2739 enum mips_symbol_context context;
2741 if (MEM_P (*loc))
2743 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2744 return -1;
2747 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2748 if (mips_rewrite_small_data_p (*loc, context))
2749 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2751 if (GET_CODE (*loc) == LO_SUM)
2752 return -1;
2754 return 0;
2757 /* Rewrite instruction pattern PATTERN so that it refers to small data
2758 using explicit relocations. */
2761 mips_rewrite_small_data (rtx pattern)
2763 pattern = copy_insn (pattern);
2764 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
2765 return pattern;
2768 /* We need a lot of little routines to check the range of MIPS16 immediate
2769 operands. */
2771 static int
2772 m16_check_op (rtx op, int low, int high, int mask)
2774 return (GET_CODE (op) == CONST_INT
2775 && IN_RANGE (INTVAL (op), low, high)
2776 && (INTVAL (op) & mask) == 0);
2780 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2782 return m16_check_op (op, 0x1, 0x8, 0);
2786 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2788 return m16_check_op (op, -0x8, 0x7, 0);
2792 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2794 return m16_check_op (op, -0x7, 0x8, 0);
2798 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2800 return m16_check_op (op, -0x10, 0xf, 0);
2804 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2806 return m16_check_op (op, -0xf, 0x10, 0);
2810 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2812 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
2816 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2818 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
2822 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2824 return m16_check_op (op, -0x80, 0x7f, 0);
2828 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2830 return m16_check_op (op, -0x7f, 0x80, 0);
2834 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2836 return m16_check_op (op, 0x0, 0xff, 0);
2840 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2842 return m16_check_op (op, -0xff, 0x0, 0);
2846 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2848 return m16_check_op (op, -0x1, 0xfe, 0);
2852 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2854 return m16_check_op (op, 0x0, 0xff << 2, 3);
2858 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2860 return m16_check_op (op, -0xff << 2, 0x0, 3);
2864 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2866 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
2870 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2872 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
2875 /* The cost of loading values from the constant pool. It should be
2876 larger than the cost of any constant we want to synthesize inline. */
2877 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2879 /* Return the cost of X when used as an operand to the MIPS16 instruction
2880 that implements CODE. Return -1 if there is no such instruction, or if
2881 X is not a valid immediate operand for it. */
2883 static int
2884 mips16_constant_cost (int code, HOST_WIDE_INT x)
2886 switch (code)
2888 case ASHIFT:
2889 case ASHIFTRT:
2890 case LSHIFTRT:
2891 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2892 other shifts are extended. The shift patterns truncate the shift
2893 count to the right size, so there are no out-of-range values. */
2894 if (IN_RANGE (x, 1, 8))
2895 return 0;
2896 return COSTS_N_INSNS (1);
2898 case PLUS:
2899 if (IN_RANGE (x, -128, 127))
2900 return 0;
2901 if (SMALL_OPERAND (x))
2902 return COSTS_N_INSNS (1);
2903 return -1;
2905 case LEU:
2906 /* Like LE, but reject the always-true case. */
2907 if (x == -1)
2908 return -1;
2909 case LE:
2910 /* We add 1 to the immediate and use SLT. */
2911 x += 1;
2912 case XOR:
2913 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2914 case LT:
2915 case LTU:
2916 if (IN_RANGE (x, 0, 255))
2917 return 0;
2918 if (SMALL_OPERAND_UNSIGNED (x))
2919 return COSTS_N_INSNS (1);
2920 return -1;
2922 case EQ:
2923 case NE:
2924 /* Equality comparisons with 0 are cheap. */
2925 if (x == 0)
2926 return 0;
2927 return -1;
2929 default:
2930 return -1;
2934 /* Return true if there is a non-MIPS16 instruction that implements CODE
2935 and if that instruction accepts X as an immediate operand. */
2937 static int
2938 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2940 switch (code)
2942 case ASHIFT:
2943 case ASHIFTRT:
2944 case LSHIFTRT:
2945 /* All shift counts are truncated to a valid constant. */
2946 return true;
2948 case ROTATE:
2949 case ROTATERT:
2950 /* Likewise rotates, if the target supports rotates at all. */
2951 return ISA_HAS_ROR;
2953 case AND:
2954 case IOR:
2955 case XOR:
2956 /* These instructions take 16-bit unsigned immediates. */
2957 return SMALL_OPERAND_UNSIGNED (x);
2959 case PLUS:
2960 case LT:
2961 case LTU:
2962 /* These instructions take 16-bit signed immediates. */
2963 return SMALL_OPERAND (x);
2965 case EQ:
2966 case NE:
2967 case GT:
2968 case GTU:
2969 /* The "immediate" forms of these instructions are really
2970 implemented as comparisons with register 0. */
2971 return x == 0;
2973 case GE:
2974 case GEU:
2975 /* Likewise, meaning that the only valid immediate operand is 1. */
2976 return x == 1;
2978 case LE:
2979 /* We add 1 to the immediate and use SLT. */
2980 return SMALL_OPERAND (x + 1);
2982 case LEU:
2983 /* Likewise SLTU, but reject the always-true case. */
2984 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2986 case SIGN_EXTRACT:
2987 case ZERO_EXTRACT:
2988 /* The bit position and size are immediate operands. */
2989 return ISA_HAS_EXT_INS;
2991 default:
2992 /* By default assume that $0 can be used for 0. */
2993 return x == 0;
2997 /* Return the cost of binary operation X, given that the instruction
2998 sequence for a word-sized or smaller operation has cost SINGLE_COST
2999 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3001 static int
3002 mips_binary_cost (rtx x, int single_cost, int double_cost)
3004 int cost;
3006 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3007 cost = double_cost;
3008 else
3009 cost = single_cost;
3010 return (cost
3011 + rtx_cost (XEXP (x, 0), 0)
3012 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3015 /* Return the cost of floating-point multiplications of mode MODE. */
3017 static int
3018 mips_fp_mult_cost (enum machine_mode mode)
3020 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3023 /* Return the cost of floating-point divisions of mode MODE. */
3025 static int
3026 mips_fp_div_cost (enum machine_mode mode)
3028 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3031 /* Return the cost of sign-extending OP to mode MODE, not including the
3032 cost of OP itself. */
3034 static int
3035 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3037 if (MEM_P (op))
3038 /* Extended loads are as cheap as unextended ones. */
3039 return 0;
3041 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3042 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3043 return 0;
3045 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3046 /* We can use SEB or SEH. */
3047 return COSTS_N_INSNS (1);
3049 /* We need to use a shift left and a shift right. */
3050 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3053 /* Return the cost of zero-extending OP to mode MODE, not including the
3054 cost of OP itself. */
3056 static int
3057 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3059 if (MEM_P (op))
3060 /* Extended loads are as cheap as unextended ones. */
3061 return 0;
3063 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3064 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3065 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3067 if (GENERATE_MIPS16E)
3068 /* We can use ZEB or ZEH. */
3069 return COSTS_N_INSNS (1);
3071 if (TARGET_MIPS16)
3072 /* We need to load 0xff or 0xffff into a register and use AND. */
3073 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3075 /* We can use ANDI. */
3076 return COSTS_N_INSNS (1);
3079 /* Implement TARGET_RTX_COSTS. */
3081 static bool
3082 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3084 enum machine_mode mode = GET_MODE (x);
3085 bool float_mode_p = FLOAT_MODE_P (mode);
3086 int cost;
3087 rtx addr;
3089 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3090 appear in the instruction stream, and the cost of a comparison is
3091 really the cost of the branch or scc condition. At the time of
3092 writing, GCC only uses an explicit outer COMPARE code when optabs
3093 is testing whether a constant is expensive enough to force into a
3094 register. We want optabs to pass such constants through the MIPS
3095 expanders instead, so make all constants very cheap here. */
3096 if (outer_code == COMPARE)
3098 gcc_assert (CONSTANT_P (x));
3099 *total = 0;
3100 return true;
3103 switch (code)
3105 case CONST_INT:
3106 /* Treat *clear_upper32-style ANDs as having zero cost in the
3107 second operand. The cost is entirely in the first operand.
3109 ??? This is needed because we would otherwise try to CSE
3110 the constant operand. Although that's the right thing for
3111 instructions that continue to be a register operation throughout
3112 compilation, it is disastrous for instructions that could
3113 later be converted into a memory operation. */
3114 if (TARGET_64BIT
3115 && outer_code == AND
3116 && UINTVAL (x) == 0xffffffff)
3118 *total = 0;
3119 return true;
3122 if (TARGET_MIPS16)
3124 cost = mips16_constant_cost (outer_code, INTVAL (x));
3125 if (cost >= 0)
3127 *total = cost;
3128 return true;
3131 else
3133 /* When not optimizing for size, we care more about the cost
3134 of hot code, and hot code is often in a loop. If a constant
3135 operand needs to be forced into a register, we will often be
3136 able to hoist the constant load out of the loop, so the load
3137 should not contribute to the cost. */
3138 if (!optimize_size
3139 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3141 *total = 0;
3142 return true;
3145 /* Fall through. */
3147 case CONST:
3148 case SYMBOL_REF:
3149 case LABEL_REF:
3150 case CONST_DOUBLE:
3151 if (force_to_mem_operand (x, VOIDmode))
3153 *total = COSTS_N_INSNS (1);
3154 return true;
3156 cost = mips_const_insns (x);
3157 if (cost > 0)
3159 /* If the constant is likely to be stored in a GPR, SETs of
3160 single-insn constants are as cheap as register sets; we
3161 never want to CSE them.
3163 Don't reduce the cost of storing a floating-point zero in
3164 FPRs. If we have a zero in an FPR for other reasons, we
3165 can get better cfg-cleanup and delayed-branch results by
3166 using it consistently, rather than using $0 sometimes and
3167 an FPR at other times. Also, moves between floating-point
3168 registers are sometimes cheaper than (D)MTC1 $0. */
3169 if (cost == 1
3170 && outer_code == SET
3171 && !(float_mode_p && TARGET_HARD_FLOAT))
3172 cost = 0;
3173 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3174 want to CSE the constant itself. It is usually better to
3175 have N copies of the last operation in the sequence and one
3176 shared copy of the other operations. (Note that this is
3177 not true for MIPS16 code, where the final operation in the
3178 sequence is often an extended instruction.)
3180 Also, if we have a CONST_INT, we don't know whether it is
3181 for a word or doubleword operation, so we cannot rely on
3182 the result of mips_build_integer. */
3183 else if (!TARGET_MIPS16
3184 && (outer_code == SET || mode == VOIDmode))
3185 cost = 1;
3186 *total = COSTS_N_INSNS (cost);
3187 return true;
3189 /* The value will need to be fetched from the constant pool. */
3190 *total = CONSTANT_POOL_COST;
3191 return true;
3193 case MEM:
3194 /* If the address is legitimate, return the number of
3195 instructions it needs. */
3196 addr = XEXP (x, 0);
3197 cost = mips_address_insns (addr, mode, true);
3198 if (cost > 0)
3200 *total = COSTS_N_INSNS (cost + 1);
3201 return true;
3203 /* Check for a scaled indexed address. */
3204 if (mips_lwxs_address_p (addr))
3206 *total = COSTS_N_INSNS (2);
3207 return true;
3209 /* Otherwise use the default handling. */
3210 return false;
3212 case FFS:
3213 *total = COSTS_N_INSNS (6);
3214 return false;
3216 case NOT:
3217 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3218 return false;
3220 case AND:
3221 /* Check for a *clear_upper32 pattern and treat it like a zero
3222 extension. See the pattern's comment for details. */
3223 if (TARGET_64BIT
3224 && mode == DImode
3225 && CONST_INT_P (XEXP (x, 1))
3226 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3228 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3229 + rtx_cost (XEXP (x, 0), 0));
3230 return true;
3232 /* Fall through. */
3234 case IOR:
3235 case XOR:
3236 /* Double-word operations use two single-word operations. */
3237 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3238 return true;
3240 case ASHIFT:
3241 case ASHIFTRT:
3242 case LSHIFTRT:
3243 case ROTATE:
3244 case ROTATERT:
3245 if (CONSTANT_P (XEXP (x, 1)))
3246 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3247 else
3248 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3249 return true;
3251 case ABS:
3252 if (float_mode_p)
3253 *total = mips_cost->fp_add;
3254 else
3255 *total = COSTS_N_INSNS (4);
3256 return false;
3258 case LO_SUM:
3259 /* Low-part immediates need an extended MIPS16 instruction. */
3260 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3261 + rtx_cost (XEXP (x, 0), 0));
3262 return true;
3264 case LT:
3265 case LTU:
3266 case LE:
3267 case LEU:
3268 case GT:
3269 case GTU:
3270 case GE:
3271 case GEU:
3272 case EQ:
3273 case NE:
3274 case UNORDERED:
3275 case LTGT:
3276 /* Branch comparisons have VOIDmode, so use the first operand's
3277 mode instead. */
3278 mode = GET_MODE (XEXP (x, 0));
3279 if (FLOAT_MODE_P (mode))
3281 *total = mips_cost->fp_add;
3282 return false;
3284 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3285 return true;
3287 case MINUS:
3288 if (float_mode_p
3289 && ISA_HAS_NMADD_NMSUB (mode)
3290 && TARGET_FUSED_MADD
3291 && !HONOR_NANS (mode)
3292 && !HONOR_SIGNED_ZEROS (mode))
3294 /* See if we can use NMADD or NMSUB. See mips.md for the
3295 associated patterns. */
3296 rtx op0 = XEXP (x, 0);
3297 rtx op1 = XEXP (x, 1);
3298 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3300 *total = (mips_fp_mult_cost (mode)
3301 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3302 + rtx_cost (XEXP (op0, 1), 0)
3303 + rtx_cost (op1, 0));
3304 return true;
3306 if (GET_CODE (op1) == MULT)
3308 *total = (mips_fp_mult_cost (mode)
3309 + rtx_cost (op0, 0)
3310 + rtx_cost (XEXP (op1, 0), 0)
3311 + rtx_cost (XEXP (op1, 1), 0));
3312 return true;
3315 /* Fall through. */
3317 case PLUS:
3318 if (float_mode_p)
3320 /* If this is part of a MADD or MSUB, treat the PLUS as
3321 being free. */
3322 if (ISA_HAS_FP4
3323 && TARGET_FUSED_MADD
3324 && GET_CODE (XEXP (x, 0)) == MULT)
3325 *total = 0;
3326 else
3327 *total = mips_cost->fp_add;
3328 return false;
3331 /* Double-word operations require three single-word operations and
3332 an SLTU. The MIPS16 version then needs to move the result of
3333 the SLTU from $24 to a MIPS16 register. */
3334 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3335 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3336 return true;
3338 case NEG:
3339 if (float_mode_p
3340 && ISA_HAS_NMADD_NMSUB (mode)
3341 && TARGET_FUSED_MADD
3342 && !HONOR_NANS (mode)
3343 && HONOR_SIGNED_ZEROS (mode))
3345 /* See if we can use NMADD or NMSUB. See mips.md for the
3346 associated patterns. */
3347 rtx op = XEXP (x, 0);
3348 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3349 && GET_CODE (XEXP (op, 0)) == MULT)
3351 *total = (mips_fp_mult_cost (mode)
3352 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3353 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3354 + rtx_cost (XEXP (op, 1), 0));
3355 return true;
3359 if (float_mode_p)
3360 *total = mips_cost->fp_add;
3361 else
3362 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3363 return false;
3365 case MULT:
3366 if (float_mode_p)
3367 *total = mips_fp_mult_cost (mode);
3368 else if (mode == DImode && !TARGET_64BIT)
3369 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3370 where the mulsidi3 always includes an MFHI and an MFLO. */
3371 *total = (optimize_size
3372 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3373 : mips_cost->int_mult_si * 3 + 6);
3374 else if (optimize_size)
3375 *total = (ISA_HAS_MUL3 ? 1 : 2);
3376 else if (mode == DImode)
3377 *total = mips_cost->int_mult_di;
3378 else
3379 *total = mips_cost->int_mult_si;
3380 return false;
3382 case DIV:
3383 /* Check for a reciprocal. */
3384 if (float_mode_p
3385 && ISA_HAS_FP4
3386 && flag_unsafe_math_optimizations
3387 && XEXP (x, 0) == CONST1_RTX (mode))
3389 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3390 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3391 division as being free. */
3392 *total = rtx_cost (XEXP (x, 1), 0);
3393 else
3394 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3395 return true;
3397 /* Fall through. */
3399 case SQRT:
3400 case MOD:
3401 if (float_mode_p)
3403 *total = mips_fp_div_cost (mode);
3404 return false;
3406 /* Fall through. */
3408 case UDIV:
3409 case UMOD:
3410 if (optimize_size)
3412 /* It is our responsibility to make division by a power of 2
3413 as cheap as 2 register additions if we want the division
3414 expanders to be used for such operations; see the setting
3415 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3416 should always produce shorter code than using
3417 expand_sdiv2_pow2. */
3418 if (TARGET_MIPS16
3419 && CONST_INT_P (XEXP (x, 1))
3420 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3422 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3423 return true;
3425 *total = COSTS_N_INSNS (mips_idiv_insns ());
3427 else if (mode == DImode)
3428 *total = mips_cost->int_div_di;
3429 else
3430 *total = mips_cost->int_div_si;
3431 return false;
3433 case SIGN_EXTEND:
3434 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3435 return false;
3437 case ZERO_EXTEND:
3438 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3439 return false;
3441 case FLOAT:
3442 case UNSIGNED_FLOAT:
3443 case FIX:
3444 case FLOAT_EXTEND:
3445 case FLOAT_TRUNCATE:
3446 *total = mips_cost->fp_add;
3447 return false;
3449 default:
3450 return false;
3454 /* Implement TARGET_ADDRESS_COST. */
3456 static int
3457 mips_address_cost (rtx addr)
3459 return mips_address_insns (addr, SImode, false);
3462 /* Return one word of double-word value OP, taking into account the fixed
3463 endianness of certain registers. HIGH_P is true to select the high part,
3464 false to select the low part. */
3467 mips_subword (rtx op, bool high_p)
3469 unsigned int byte, offset;
3470 enum machine_mode mode;
3472 mode = GET_MODE (op);
3473 if (mode == VOIDmode)
3474 mode = TARGET_64BIT ? TImode : DImode;
3476 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3477 byte = UNITS_PER_WORD;
3478 else
3479 byte = 0;
3481 if (FP_REG_RTX_P (op))
3483 /* Paired FPRs are always ordered little-endian. */
3484 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3485 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3488 if (MEM_P (op))
3489 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3491 return simplify_gen_subreg (word_mode, op, mode, byte);
3494 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3496 bool
3497 mips_split_64bit_move_p (rtx dest, rtx src)
3499 if (TARGET_64BIT)
3500 return false;
3502 /* FPR-to-FPR moves can be done in a single instruction, if they're
3503 allowed at all. */
3504 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3505 return false;
3507 /* Check for floating-point loads and stores. */
3508 if (ISA_HAS_LDC1_SDC1)
3510 if (FP_REG_RTX_P (dest) && MEM_P (src))
3511 return false;
3512 if (FP_REG_RTX_P (src) && MEM_P (dest))
3513 return false;
3515 return true;
3518 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3519 this function handles 64-bit moves for which mips_split_64bit_move_p
3520 holds. For 64-bit targets, this function handles 128-bit moves. */
3522 void
3523 mips_split_doubleword_move (rtx dest, rtx src)
3525 rtx low_dest;
3527 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3529 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3530 emit_insn (gen_move_doubleword_fprdi (dest, src));
3531 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3532 emit_insn (gen_move_doubleword_fprdf (dest, src));
3533 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3534 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3535 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3536 emit_insn (gen_move_doubleword_fprtf (dest, src));
3537 else
3538 gcc_unreachable ();
3540 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3542 low_dest = mips_subword (dest, false);
3543 mips_emit_move (low_dest, mips_subword (src, false));
3544 if (TARGET_64BIT)
3545 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3546 else
3547 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3549 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3551 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3552 if (TARGET_64BIT)
3553 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3554 else
3555 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3557 else
3559 /* The operation can be split into two normal moves. Decide in
3560 which order to do them. */
3561 low_dest = mips_subword (dest, false);
3562 if (REG_P (low_dest)
3563 && reg_overlap_mentioned_p (low_dest, src))
3565 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3566 mips_emit_move (low_dest, mips_subword (src, false));
3568 else
3570 mips_emit_move (low_dest, mips_subword (src, false));
3571 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3576 /* Return the appropriate instructions to move SRC into DEST. Assume
3577 that SRC is operand 1 and DEST is operand 0. */
3579 const char *
3580 mips_output_move (rtx dest, rtx src)
3582 enum rtx_code dest_code, src_code;
3583 enum machine_mode mode;
3584 enum mips_symbol_type symbol_type;
3585 bool dbl_p;
3587 dest_code = GET_CODE (dest);
3588 src_code = GET_CODE (src);
3589 mode = GET_MODE (dest);
3590 dbl_p = (GET_MODE_SIZE (mode) == 8);
3592 if (dbl_p && mips_split_64bit_move_p (dest, src))
3593 return "#";
3595 if ((src_code == REG && GP_REG_P (REGNO (src)))
3596 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3598 if (dest_code == REG)
3600 if (GP_REG_P (REGNO (dest)))
3601 return "move\t%0,%z1";
3603 /* Moves to HI are handled by special .md insns. */
3604 if (REGNO (dest) == LO_REGNUM)
3605 return "mtlo\t%z1";
3607 if (DSP_ACC_REG_P (REGNO (dest)))
3609 static char retval[] = "mt__\t%z1,%q0";
3611 retval[2] = reg_names[REGNO (dest)][4];
3612 retval[3] = reg_names[REGNO (dest)][5];
3613 return retval;
3616 if (FP_REG_P (REGNO (dest)))
3617 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3619 if (ALL_COP_REG_P (REGNO (dest)))
3621 static char retval[] = "dmtc_\t%z1,%0";
3623 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3624 return dbl_p ? retval : retval + 1;
3627 if (dest_code == MEM)
3628 switch (GET_MODE_SIZE (mode))
3630 case 1: return "sb\t%z1,%0";
3631 case 2: return "sh\t%z1,%0";
3632 case 4: return "sw\t%z1,%0";
3633 case 8: return "sd\t%z1,%0";
3636 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3638 if (src_code == REG)
3640 /* Moves from HI are handled by special .md insns. */
3641 if (REGNO (src) == LO_REGNUM)
3643 /* When generating VR4120 or VR4130 code, we use MACC and
3644 DMACC instead of MFLO. This avoids both the normal
3645 MIPS III HI/LO hazards and the errata related to
3646 -mfix-vr4130. */
3647 if (ISA_HAS_MACCHI)
3648 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3649 return "mflo\t%0";
3652 if (DSP_ACC_REG_P (REGNO (src)))
3654 static char retval[] = "mf__\t%0,%q1";
3656 retval[2] = reg_names[REGNO (src)][4];
3657 retval[3] = reg_names[REGNO (src)][5];
3658 return retval;
3661 if (FP_REG_P (REGNO (src)))
3662 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3664 if (ALL_COP_REG_P (REGNO (src)))
3666 static char retval[] = "dmfc_\t%0,%1";
3668 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3669 return dbl_p ? retval : retval + 1;
3672 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3673 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3676 if (src_code == MEM)
3677 switch (GET_MODE_SIZE (mode))
3679 case 1: return "lbu\t%0,%1";
3680 case 2: return "lhu\t%0,%1";
3681 case 4: return "lw\t%0,%1";
3682 case 8: return "ld\t%0,%1";
3685 if (src_code == CONST_INT)
3687 /* Don't use the X format for the operand itself, because that
3688 will give out-of-range numbers for 64-bit hosts and 32-bit
3689 targets. */
3690 if (!TARGET_MIPS16)
3691 return "li\t%0,%1\t\t\t# %X1";
3693 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3694 return "li\t%0,%1";
3696 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3697 return "#";
3700 if (src_code == HIGH)
3701 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3703 if (CONST_GP_P (src))
3704 return "move\t%0,%1";
3706 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3707 && mips_lo_relocs[symbol_type] != 0)
3709 /* A signed 16-bit constant formed by applying a relocation
3710 operator to a symbolic address. */
3711 gcc_assert (!mips_split_p[symbol_type]);
3712 return "li\t%0,%R1";
3715 if (symbolic_operand (src, VOIDmode))
3717 gcc_assert (TARGET_MIPS16
3718 ? TARGET_MIPS16_TEXT_LOADS
3719 : !TARGET_EXPLICIT_RELOCS);
3720 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3723 if (src_code == REG && FP_REG_P (REGNO (src)))
3725 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3727 if (GET_MODE (dest) == V2SFmode)
3728 return "mov.ps\t%0,%1";
3729 else
3730 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3733 if (dest_code == MEM)
3734 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3736 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3738 if (src_code == MEM)
3739 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3741 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3743 static char retval[] = "l_c_\t%0,%1";
3745 retval[1] = (dbl_p ? 'd' : 'w');
3746 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3747 return retval;
3749 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3751 static char retval[] = "s_c_\t%1,%0";
3753 retval[1] = (dbl_p ? 'd' : 'w');
3754 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3755 return retval;
3757 gcc_unreachable ();
3760 /* Return true if CMP1 is a suitable second operand for integer ordering
3761 test CODE. See also the *sCC patterns in mips.md. */
3763 static bool
3764 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
3766 switch (code)
3768 case GT:
3769 case GTU:
3770 return reg_or_0_operand (cmp1, VOIDmode);
3772 case GE:
3773 case GEU:
3774 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3776 case LT:
3777 case LTU:
3778 return arith_operand (cmp1, VOIDmode);
3780 case LE:
3781 return sle_operand (cmp1, VOIDmode);
3783 case LEU:
3784 return sleu_operand (cmp1, VOIDmode);
3786 default:
3787 gcc_unreachable ();
3791 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
3792 integer ordering test *CODE, or if an equivalent combination can
3793 be formed by adjusting *CODE and *CMP1. When returning true, update
3794 *CODE and *CMP1 with the chosen code and operand, otherwise leave
3795 them alone. */
3797 static bool
3798 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
3799 enum machine_mode mode)
3801 HOST_WIDE_INT plus_one;
3803 if (mips_int_order_operand_ok_p (*code, *cmp1))
3804 return true;
3806 if (GET_CODE (*cmp1) == CONST_INT)
3807 switch (*code)
3809 case LE:
3810 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3811 if (INTVAL (*cmp1) < plus_one)
3813 *code = LT;
3814 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3815 return true;
3817 break;
3819 case LEU:
3820 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3821 if (plus_one != 0)
3823 *code = LTU;
3824 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3825 return true;
3827 break;
3829 default:
3830 break;
3832 return false;
3835 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
3836 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
3837 is nonnull, it's OK to set TARGET to the inverse of the result and
3838 flip *INVERT_PTR instead. */
3840 static void
3841 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
3842 rtx target, rtx cmp0, rtx cmp1)
3844 enum machine_mode mode;
3846 /* First see if there is a MIPS instruction that can do this operation.
3847 If not, try doing the same for the inverse operation. If that also
3848 fails, force CMP1 into a register and try again. */
3849 mode = GET_MODE (cmp0);
3850 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
3851 mips_emit_binary (code, target, cmp0, cmp1);
3852 else
3854 enum rtx_code inv_code = reverse_condition (code);
3855 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
3857 cmp1 = force_reg (mode, cmp1);
3858 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
3860 else if (invert_ptr == 0)
3862 rtx inv_target;
3864 inv_target = mips_force_binary (GET_MODE (target),
3865 inv_code, cmp0, cmp1);
3866 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3868 else
3870 *invert_ptr = !*invert_ptr;
3871 mips_emit_binary (inv_code, target, cmp0, cmp1);
3876 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3877 The register will have the same mode as CMP0. */
3879 static rtx
3880 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3882 if (cmp1 == const0_rtx)
3883 return cmp0;
3885 if (uns_arith_operand (cmp1, VOIDmode))
3886 return expand_binop (GET_MODE (cmp0), xor_optab,
3887 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3889 return expand_binop (GET_MODE (cmp0), sub_optab,
3890 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3893 /* Convert *CODE into a code that can be used in a floating-point
3894 scc instruction (C.cond.fmt). Return true if the values of
3895 the condition code registers will be inverted, with 0 indicating
3896 that the condition holds. */
3898 static bool
3899 mips_reversed_fp_cond (enum rtx_code *code)
3901 switch (*code)
3903 case NE:
3904 case LTGT:
3905 case ORDERED:
3906 *code = reverse_condition_maybe_unordered (*code);
3907 return true;
3909 default:
3910 return false;
3914 /* Convert a comparison into something that can be used in a branch or
3915 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3916 being compared and *CODE is the code used to compare them.
3918 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3919 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
3920 otherwise any standard branch condition can be used. The standard branch
3921 conditions are:
3923 - EQ or NE between two registers.
3924 - any comparison between a register and zero. */
3926 static void
3927 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3929 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3931 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3933 *op0 = cmp_operands[0];
3934 *op1 = cmp_operands[1];
3936 else if (*code == EQ || *code == NE)
3938 if (need_eq_ne_p)
3940 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3941 *op1 = const0_rtx;
3943 else
3945 *op0 = cmp_operands[0];
3946 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3949 else
3951 /* The comparison needs a separate scc instruction. Store the
3952 result of the scc in *OP0 and compare it against zero. */
3953 bool invert = false;
3954 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3955 mips_emit_int_order_test (*code, &invert, *op0,
3956 cmp_operands[0], cmp_operands[1]);
3957 *code = (invert ? EQ : NE);
3958 *op1 = const0_rtx;
3961 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3963 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3964 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3965 *code = NE;
3966 *op1 = const0_rtx;
3968 else
3970 enum rtx_code cmp_code;
3972 /* Floating-point tests use a separate C.cond.fmt comparison to
3973 set a condition code register. The branch or conditional move
3974 will then compare that register against zero.
3976 Set CMP_CODE to the code of the comparison instruction and
3977 *CODE to the code that the branch or move should use. */
3978 cmp_code = *code;
3979 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
3980 *op0 = (ISA_HAS_8CC
3981 ? gen_reg_rtx (CCmode)
3982 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3983 *op1 = const0_rtx;
3984 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3988 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3989 Store the result in TARGET and return true if successful.
3991 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
3993 bool
3994 mips_expand_scc (enum rtx_code code, rtx target)
3996 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3997 return false;
3999 if (code == EQ || code == NE)
4001 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4002 mips_emit_binary (code, target, zie, const0_rtx);
4004 else
4005 mips_emit_int_order_test (code, 0, target,
4006 cmp_operands[0], cmp_operands[1]);
4007 return true;
4010 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4011 CODE and jump to OPERANDS[0] if the condition holds. */
4013 void
4014 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4016 rtx op0, op1, condition;
4018 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4019 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4020 emit_jump_insn (gen_condjump (condition, operands[0]));
4023 /* Implement:
4025 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4026 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4028 void
4029 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4030 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4032 rtx cmp_result;
4033 bool reversed_p;
4035 reversed_p = mips_reversed_fp_cond (&cond);
4036 cmp_result = gen_reg_rtx (CCV2mode);
4037 emit_insn (gen_scc_ps (cmp_result,
4038 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4039 if (reversed_p)
4040 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4041 cmp_result));
4042 else
4043 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4044 cmp_result));
4047 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4048 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4049 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4051 void
4052 mips_expand_conditional_move (rtx *operands)
4054 enum rtx_code code;
4055 rtx cond, op0, op1;
4057 code = GET_CODE (operands[1]);
4058 mips_emit_compare (&code, &op0, &op1, true);
4059 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1),
4060 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4061 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4062 operands[2], operands[3])));
4065 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4066 then trap if the condition holds. */
4068 void
4069 mips_expand_conditional_trap (enum rtx_code code)
4071 rtx op0, op1;
4072 enum machine_mode mode;
4074 /* MIPS conditional trap instructions don't have GT or LE flavors,
4075 so we must swap the operands and convert to LT and GE respectively. */
4076 switch (code)
4078 case GT:
4079 case LE:
4080 case GTU:
4081 case LEU:
4082 code = swap_condition (code);
4083 op0 = cmp_operands[1];
4084 op1 = cmp_operands[0];
4085 break;
4087 default:
4088 op0 = cmp_operands[0];
4089 op1 = cmp_operands[1];
4090 break;
4093 mode = GET_MODE (cmp_operands[0]);
4094 op0 = force_reg (mode, op0);
4095 if (!arith_operand (op1, mode))
4096 op1 = force_reg (mode, op1);
4098 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4099 gen_rtx_fmt_ee (code, mode, op0, op1),
4100 const0_rtx));
4103 /* Initialize *CUM for a call to a function of type FNTYPE. */
4105 void
4106 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4108 memset (cum, 0, sizeof (*cum));
4109 cum->prototype = (fntype && prototype_p (fntype));
4110 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4113 /* Fill INFO with information about a single argument. CUM is the
4114 cumulative state for earlier arguments. MODE is the mode of this
4115 argument and TYPE is its type (if known). NAMED is true if this
4116 is a named (fixed) argument rather than a variable one. */
4118 static void
4119 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4120 enum machine_mode mode, tree type, int named)
4122 bool doubleword_aligned_p;
4123 unsigned int num_bytes, num_words, max_regs;
4125 /* Work out the size of the argument. */
4126 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4127 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4129 /* Decide whether it should go in a floating-point register, assuming
4130 one is free. Later code checks for availability.
4132 The checks against UNITS_PER_FPVALUE handle the soft-float and
4133 single-float cases. */
4134 switch (mips_abi)
4136 case ABI_EABI:
4137 /* The EABI conventions have traditionally been defined in terms
4138 of TYPE_MODE, regardless of the actual type. */
4139 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4140 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4141 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4142 break;
4144 case ABI_32:
4145 case ABI_O64:
4146 /* Only leading floating-point scalars are passed in
4147 floating-point registers. We also handle vector floats the same
4148 say, which is OK because they are not covered by the standard ABI. */
4149 info->fpr_p = (!cum->gp_reg_found
4150 && cum->arg_number < 2
4151 && (type == 0
4152 || SCALAR_FLOAT_TYPE_P (type)
4153 || VECTOR_FLOAT_TYPE_P (type))
4154 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4155 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4156 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4157 break;
4159 case ABI_N32:
4160 case ABI_64:
4161 /* Scalar, complex and vector floating-point types are passed in
4162 floating-point registers, as long as this is a named rather
4163 than a variable argument. */
4164 info->fpr_p = (named
4165 && (type == 0 || FLOAT_TYPE_P (type))
4166 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4167 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4168 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4169 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4171 /* ??? According to the ABI documentation, the real and imaginary
4172 parts of complex floats should be passed in individual registers.
4173 The real and imaginary parts of stack arguments are supposed
4174 to be contiguous and there should be an extra word of padding
4175 at the end.
4177 This has two problems. First, it makes it impossible to use a
4178 single "void *" va_list type, since register and stack arguments
4179 are passed differently. (At the time of writing, MIPSpro cannot
4180 handle complex float varargs correctly.) Second, it's unclear
4181 what should happen when there is only one register free.
4183 For now, we assume that named complex floats should go into FPRs
4184 if there are two FPRs free, otherwise they should be passed in the
4185 same way as a struct containing two floats. */
4186 if (info->fpr_p
4187 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4188 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4190 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4191 info->fpr_p = false;
4192 else
4193 num_words = 2;
4195 break;
4197 default:
4198 gcc_unreachable ();
4201 /* See whether the argument has doubleword alignment. */
4202 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4204 /* Set REG_OFFSET to the register count we're interested in.
4205 The EABI allocates the floating-point registers separately,
4206 but the other ABIs allocate them like integer registers. */
4207 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4208 ? cum->num_fprs
4209 : cum->num_gprs);
4211 /* Advance to an even register if the argument is doubleword-aligned. */
4212 if (doubleword_aligned_p)
4213 info->reg_offset += info->reg_offset & 1;
4215 /* Work out the offset of a stack argument. */
4216 info->stack_offset = cum->stack_words;
4217 if (doubleword_aligned_p)
4218 info->stack_offset += info->stack_offset & 1;
4220 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4222 /* Partition the argument between registers and stack. */
4223 info->reg_words = MIN (num_words, max_regs);
4224 info->stack_words = num_words - info->reg_words;
4227 /* INFO describes a register argument that has the normal format for the
4228 argument's mode. Return the register it uses, assuming that FPRs are
4229 available if HARD_FLOAT_P. */
4231 static unsigned int
4232 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4234 if (!info->fpr_p || !hard_float_p)
4235 return GP_ARG_FIRST + info->reg_offset;
4236 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4237 /* In o32, the second argument is always passed in $f14
4238 for TARGET_DOUBLE_FLOAT, regardless of whether the
4239 first argument was a word or doubleword. */
4240 return FP_ARG_FIRST + 2;
4241 else
4242 return FP_ARG_FIRST + info->reg_offset;
4245 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4247 static bool
4248 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4250 return !TARGET_OLDABI;
4253 /* Implement FUNCTION_ARG. */
4256 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4257 tree type, int named)
4259 struct mips_arg_info info;
4261 /* We will be called with a mode of VOIDmode after the last argument
4262 has been seen. Whatever we return will be passed to the call expander.
4263 If we need a MIPS16 fp_code, return a REG with the code stored as
4264 the mode. */
4265 if (mode == VOIDmode)
4267 if (TARGET_MIPS16 && cum->fp_code != 0)
4268 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4269 else
4270 return NULL;
4273 mips_get_arg_info (&info, cum, mode, type, named);
4275 /* Return straight away if the whole argument is passed on the stack. */
4276 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4277 return NULL;
4279 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4280 contains a double in its entirety, then that 64-bit chunk is passed
4281 in a floating-point register. */
4282 if (TARGET_NEWABI
4283 && TARGET_HARD_FLOAT
4284 && named
4285 && type != 0
4286 && TREE_CODE (type) == RECORD_TYPE
4287 && TYPE_SIZE_UNIT (type)
4288 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4290 tree field;
4292 /* First check to see if there is any such field. */
4293 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4294 if (TREE_CODE (field) == FIELD_DECL
4295 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4296 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4297 && host_integerp (bit_position (field), 0)
4298 && int_bit_position (field) % BITS_PER_WORD == 0)
4299 break;
4301 if (field != 0)
4303 /* Now handle the special case by returning a PARALLEL
4304 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4305 chunks are passed in registers. */
4306 unsigned int i;
4307 HOST_WIDE_INT bitpos;
4308 rtx ret;
4310 /* assign_parms checks the mode of ENTRY_PARM, so we must
4311 use the actual mode here. */
4312 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4314 bitpos = 0;
4315 field = TYPE_FIELDS (type);
4316 for (i = 0; i < info.reg_words; i++)
4318 rtx reg;
4320 for (; field; field = TREE_CHAIN (field))
4321 if (TREE_CODE (field) == FIELD_DECL
4322 && int_bit_position (field) >= bitpos)
4323 break;
4325 if (field
4326 && int_bit_position (field) == bitpos
4327 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4328 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4329 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4330 else
4331 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4333 XVECEXP (ret, 0, i)
4334 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4335 GEN_INT (bitpos / BITS_PER_UNIT));
4337 bitpos += BITS_PER_WORD;
4339 return ret;
4343 /* Handle the n32/n64 conventions for passing complex floating-point
4344 arguments in FPR pairs. The real part goes in the lower register
4345 and the imaginary part goes in the upper register. */
4346 if (TARGET_NEWABI
4347 && info.fpr_p
4348 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4350 rtx real, imag;
4351 enum machine_mode inner;
4352 unsigned int regno;
4354 inner = GET_MODE_INNER (mode);
4355 regno = FP_ARG_FIRST + info.reg_offset;
4356 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4358 /* Real part in registers, imaginary part on stack. */
4359 gcc_assert (info.stack_words == info.reg_words);
4360 return gen_rtx_REG (inner, regno);
4362 else
4364 gcc_assert (info.stack_words == 0);
4365 real = gen_rtx_EXPR_LIST (VOIDmode,
4366 gen_rtx_REG (inner, regno),
4367 const0_rtx);
4368 imag = gen_rtx_EXPR_LIST (VOIDmode,
4369 gen_rtx_REG (inner,
4370 regno + info.reg_words / 2),
4371 GEN_INT (GET_MODE_SIZE (inner)));
4372 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4376 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4379 /* Implement FUNCTION_ARG_ADVANCE. */
4381 void
4382 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4383 tree type, int named)
4385 struct mips_arg_info info;
4387 mips_get_arg_info (&info, cum, mode, type, named);
4389 if (!info.fpr_p)
4390 cum->gp_reg_found = true;
4392 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4393 an explanation of what this code does. It assumes that we're using
4394 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4395 in FPRs. */
4396 if (cum->arg_number < 2 && info.fpr_p)
4397 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4399 /* Advance the register count. This has the effect of setting
4400 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4401 argument required us to skip the final GPR and pass the whole
4402 argument on the stack. */
4403 if (mips_abi != ABI_EABI || !info.fpr_p)
4404 cum->num_gprs = info.reg_offset + info.reg_words;
4405 else if (info.reg_words > 0)
4406 cum->num_fprs += MAX_FPRS_PER_FMT;
4408 /* Advance the stack word count. */
4409 if (info.stack_words > 0)
4410 cum->stack_words = info.stack_offset + info.stack_words;
4412 cum->arg_number++;
4415 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4417 static int
4418 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4419 enum machine_mode mode, tree type, bool named)
4421 struct mips_arg_info info;
4423 mips_get_arg_info (&info, cum, mode, type, named);
4424 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4427 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4428 PARM_BOUNDARY bits of alignment, but will be given anything up
4429 to STACK_BOUNDARY bits if the type requires it. */
4432 mips_function_arg_boundary (enum machine_mode mode, tree type)
4434 unsigned int alignment;
4436 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4437 if (alignment < PARM_BOUNDARY)
4438 alignment = PARM_BOUNDARY;
4439 if (alignment > STACK_BOUNDARY)
4440 alignment = STACK_BOUNDARY;
4441 return alignment;
4444 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4445 upward rather than downward. In other words, return true if the
4446 first byte of the stack slot has useful data, false if the last
4447 byte does. */
4449 bool
4450 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4452 /* On little-endian targets, the first byte of every stack argument
4453 is passed in the first byte of the stack slot. */
4454 if (!BYTES_BIG_ENDIAN)
4455 return true;
4457 /* Otherwise, integral types are padded downward: the last byte of a
4458 stack argument is passed in the last byte of the stack slot. */
4459 if (type != 0
4460 ? (INTEGRAL_TYPE_P (type)
4461 || POINTER_TYPE_P (type)
4462 || FIXED_POINT_TYPE_P (type))
4463 : (SCALAR_INT_MODE_P (mode)
4464 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4465 return false;
4467 /* Big-endian o64 pads floating-point arguments downward. */
4468 if (mips_abi == ABI_O64)
4469 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4470 return false;
4472 /* Other types are padded upward for o32, o64, n32 and n64. */
4473 if (mips_abi != ABI_EABI)
4474 return true;
4476 /* Arguments smaller than a stack slot are padded downward. */
4477 if (mode != BLKmode)
4478 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4479 else
4480 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
4483 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4484 if the least significant byte of the register has useful data. Return
4485 the opposite if the most significant byte does. */
4487 bool
4488 mips_pad_reg_upward (enum machine_mode mode, tree type)
4490 /* No shifting is required for floating-point arguments. */
4491 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4492 return !BYTES_BIG_ENDIAN;
4494 /* Otherwise, apply the same padding to register arguments as we do
4495 to stack arguments. */
4496 return mips_pad_arg_upward (mode, type);
4499 /* Return nonzero when an argument must be passed by reference. */
4501 static bool
4502 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4503 enum machine_mode mode, const_tree type,
4504 bool named ATTRIBUTE_UNUSED)
4506 if (mips_abi == ABI_EABI)
4508 int size;
4510 /* ??? How should SCmode be handled? */
4511 if (mode == DImode || mode == DFmode
4512 || mode == DQmode || mode == UDQmode
4513 || mode == DAmode || mode == UDAmode)
4514 return 0;
4516 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4517 return size == -1 || size > UNITS_PER_WORD;
4519 else
4521 /* If we have a variable-sized parameter, we have no choice. */
4522 return targetm.calls.must_pass_in_stack (mode, type);
4526 /* Implement TARGET_CALLEE_COPIES. */
4528 static bool
4529 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4530 enum machine_mode mode ATTRIBUTE_UNUSED,
4531 const_tree type ATTRIBUTE_UNUSED, bool named)
4533 return mips_abi == ABI_EABI && named;
4536 /* See whether VALTYPE is a record whose fields should be returned in
4537 floating-point registers. If so, return the number of fields and
4538 list them in FIELDS (which should have two elements). Return 0
4539 otherwise.
4541 For n32 & n64, a structure with one or two fields is returned in
4542 floating-point registers as long as every field has a floating-point
4543 type. */
4545 static int
4546 mips_fpr_return_fields (const_tree valtype, tree *fields)
4548 tree field;
4549 int i;
4551 if (!TARGET_NEWABI)
4552 return 0;
4554 if (TREE_CODE (valtype) != RECORD_TYPE)
4555 return 0;
4557 i = 0;
4558 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4560 if (TREE_CODE (field) != FIELD_DECL)
4561 continue;
4563 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4564 return 0;
4566 if (i == 2)
4567 return 0;
4569 fields[i++] = field;
4571 return i;
4574 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4575 a value in the most significant part of $2/$3 if:
4577 - the target is big-endian;
4579 - the value has a structure or union type (we generalize this to
4580 cover aggregates from other languages too); and
4582 - the structure is not returned in floating-point registers. */
4584 static bool
4585 mips_return_in_msb (const_tree valtype)
4587 tree fields[2];
4589 return (TARGET_NEWABI
4590 && TARGET_BIG_ENDIAN
4591 && AGGREGATE_TYPE_P (valtype)
4592 && mips_fpr_return_fields (valtype, fields) == 0);
4595 /* Return true if the function return value MODE will get returned in a
4596 floating-point register. */
4598 static bool
4599 mips_return_mode_in_fpr_p (enum machine_mode mode)
4601 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4602 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4603 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4604 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4607 /* Return the representation of an FPR return register when the
4608 value being returned in FP_RETURN has mode VALUE_MODE and the
4609 return type itself has mode TYPE_MODE. On NewABI targets,
4610 the two modes may be different for structures like:
4612 struct __attribute__((packed)) foo { float f; }
4614 where we return the SFmode value of "f" in FP_RETURN, but where
4615 the structure itself has mode BLKmode. */
4617 static rtx
4618 mips_return_fpr_single (enum machine_mode type_mode,
4619 enum machine_mode value_mode)
4621 rtx x;
4623 x = gen_rtx_REG (value_mode, FP_RETURN);
4624 if (type_mode != value_mode)
4626 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4627 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4629 return x;
4632 /* Return a composite value in a pair of floating-point registers.
4633 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4634 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4635 complete value.
4637 For n32 & n64, $f0 always holds the first value and $f2 the second.
4638 Otherwise the values are packed together as closely as possible. */
4640 static rtx
4641 mips_return_fpr_pair (enum machine_mode mode,
4642 enum machine_mode mode1, HOST_WIDE_INT offset1,
4643 enum machine_mode mode2, HOST_WIDE_INT offset2)
4645 int inc;
4647 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4648 return gen_rtx_PARALLEL
4649 (mode,
4650 gen_rtvec (2,
4651 gen_rtx_EXPR_LIST (VOIDmode,
4652 gen_rtx_REG (mode1, FP_RETURN),
4653 GEN_INT (offset1)),
4654 gen_rtx_EXPR_LIST (VOIDmode,
4655 gen_rtx_REG (mode2, FP_RETURN + inc),
4656 GEN_INT (offset2))));
4660 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4661 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4662 VALTYPE is null and MODE is the mode of the return value. */
4665 mips_function_value (const_tree valtype, enum machine_mode mode)
4667 if (valtype)
4669 tree fields[2];
4670 int unsigned_p;
4672 mode = TYPE_MODE (valtype);
4673 unsigned_p = TYPE_UNSIGNED (valtype);
4675 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4676 we must promote the mode just as PROMOTE_MODE does. */
4677 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4679 /* Handle structures whose fields are returned in $f0/$f2. */
4680 switch (mips_fpr_return_fields (valtype, fields))
4682 case 1:
4683 return mips_return_fpr_single (mode,
4684 TYPE_MODE (TREE_TYPE (fields[0])));
4686 case 2:
4687 return mips_return_fpr_pair (mode,
4688 TYPE_MODE (TREE_TYPE (fields[0])),
4689 int_byte_position (fields[0]),
4690 TYPE_MODE (TREE_TYPE (fields[1])),
4691 int_byte_position (fields[1]));
4694 /* If a value is passed in the most significant part of a register, see
4695 whether we have to round the mode up to a whole number of words. */
4696 if (mips_return_in_msb (valtype))
4698 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4699 if (size % UNITS_PER_WORD != 0)
4701 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4702 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4706 /* For EABI, the class of return register depends entirely on MODE.
4707 For example, "struct { some_type x; }" and "union { some_type x; }"
4708 are returned in the same way as a bare "some_type" would be.
4709 Other ABIs only use FPRs for scalar, complex or vector types. */
4710 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4711 return gen_rtx_REG (mode, GP_RETURN);
4714 if (!TARGET_MIPS16)
4716 /* Handle long doubles for n32 & n64. */
4717 if (mode == TFmode)
4718 return mips_return_fpr_pair (mode,
4719 DImode, 0,
4720 DImode, GET_MODE_SIZE (mode) / 2);
4722 if (mips_return_mode_in_fpr_p (mode))
4724 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4725 return mips_return_fpr_pair (mode,
4726 GET_MODE_INNER (mode), 0,
4727 GET_MODE_INNER (mode),
4728 GET_MODE_SIZE (mode) / 2);
4729 else
4730 return gen_rtx_REG (mode, FP_RETURN);
4734 return gen_rtx_REG (mode, GP_RETURN);
4737 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4738 all BLKmode objects are returned in memory. Under the n32, n64
4739 and embedded ABIs, small structures are returned in a register.
4740 Objects with varying size must still be returned in memory, of
4741 course. */
4743 static bool
4744 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4746 return (TARGET_OLDABI
4747 ? TYPE_MODE (type) == BLKmode
4748 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
4751 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
4753 static void
4754 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4755 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4756 int no_rtl)
4758 CUMULATIVE_ARGS local_cum;
4759 int gp_saved, fp_saved;
4761 /* The caller has advanced CUM up to, but not beyond, the last named
4762 argument. Advance a local copy of CUM past the last "real" named
4763 argument, to find out how many registers are left over. */
4764 local_cum = *cum;
4765 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
4767 /* Found out how many registers we need to save. */
4768 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4769 fp_saved = (EABI_FLOAT_VARARGS_P
4770 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4771 : 0);
4773 if (!no_rtl)
4775 if (gp_saved > 0)
4777 rtx ptr, mem;
4779 ptr = plus_constant (virtual_incoming_args_rtx,
4780 REG_PARM_STACK_SPACE (cfun->decl)
4781 - gp_saved * UNITS_PER_WORD);
4782 mem = gen_frame_mem (BLKmode, ptr);
4783 set_mem_alias_set (mem, get_varargs_alias_set ());
4785 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4786 mem, gp_saved);
4788 if (fp_saved > 0)
4790 /* We can't use move_block_from_reg, because it will use
4791 the wrong mode. */
4792 enum machine_mode mode;
4793 int off, i;
4795 /* Set OFF to the offset from virtual_incoming_args_rtx of
4796 the first float register. The FP save area lies below
4797 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4798 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
4799 off -= fp_saved * UNITS_PER_FPREG;
4801 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4803 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4804 i += MAX_FPRS_PER_FMT)
4806 rtx ptr, mem;
4808 ptr = plus_constant (virtual_incoming_args_rtx, off);
4809 mem = gen_frame_mem (mode, ptr);
4810 set_mem_alias_set (mem, get_varargs_alias_set ());
4811 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4812 off += UNITS_PER_HWFPVALUE;
4816 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4817 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4818 + fp_saved * UNITS_PER_FPREG);
4821 /* Implement TARGET_BUILTIN_VA_LIST. */
4823 static tree
4824 mips_build_builtin_va_list (void)
4826 if (EABI_FLOAT_VARARGS_P)
4828 /* We keep 3 pointers, and two offsets.
4830 Two pointers are to the overflow area, which starts at the CFA.
4831 One of these is constant, for addressing into the GPR save area
4832 below it. The other is advanced up the stack through the
4833 overflow region.
4835 The third pointer is to the bottom of the GPR save area.
4836 Since the FPR save area is just below it, we can address
4837 FPR slots off this pointer.
4839 We also keep two one-byte offsets, which are to be subtracted
4840 from the constant pointers to yield addresses in the GPR and
4841 FPR save areas. These are downcounted as float or non-float
4842 arguments are used, and when they get to zero, the argument
4843 must be obtained from the overflow region. */
4844 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4845 tree array, index;
4847 record = lang_hooks.types.make_type (RECORD_TYPE);
4849 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4850 ptr_type_node);
4851 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4852 ptr_type_node);
4853 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4854 ptr_type_node);
4855 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4856 unsigned_char_type_node);
4857 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4858 unsigned_char_type_node);
4859 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4860 warn on every user file. */
4861 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4862 array = build_array_type (unsigned_char_type_node,
4863 build_index_type (index));
4864 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4866 DECL_FIELD_CONTEXT (f_ovfl) = record;
4867 DECL_FIELD_CONTEXT (f_gtop) = record;
4868 DECL_FIELD_CONTEXT (f_ftop) = record;
4869 DECL_FIELD_CONTEXT (f_goff) = record;
4870 DECL_FIELD_CONTEXT (f_foff) = record;
4871 DECL_FIELD_CONTEXT (f_res) = record;
4873 TYPE_FIELDS (record) = f_ovfl;
4874 TREE_CHAIN (f_ovfl) = f_gtop;
4875 TREE_CHAIN (f_gtop) = f_ftop;
4876 TREE_CHAIN (f_ftop) = f_goff;
4877 TREE_CHAIN (f_goff) = f_foff;
4878 TREE_CHAIN (f_foff) = f_res;
4880 layout_type (record);
4881 return record;
4883 else if (TARGET_IRIX && TARGET_IRIX6)
4884 /* On IRIX 6, this type is 'char *'. */
4885 return build_pointer_type (char_type_node);
4886 else
4887 /* Otherwise, we use 'void *'. */
4888 return ptr_type_node;
4891 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
4893 static void
4894 mips_va_start (tree valist, rtx nextarg)
4896 if (EABI_FLOAT_VARARGS_P)
4898 const CUMULATIVE_ARGS *cum;
4899 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4900 tree ovfl, gtop, ftop, goff, foff;
4901 tree t;
4902 int gpr_save_area_size;
4903 int fpr_save_area_size;
4904 int fpr_offset;
4906 cum = &crtl->args.info;
4907 gpr_save_area_size
4908 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4909 fpr_save_area_size
4910 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4912 f_ovfl = TYPE_FIELDS (va_list_type_node);
4913 f_gtop = TREE_CHAIN (f_ovfl);
4914 f_ftop = TREE_CHAIN (f_gtop);
4915 f_goff = TREE_CHAIN (f_ftop);
4916 f_foff = TREE_CHAIN (f_goff);
4918 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4919 NULL_TREE);
4920 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4921 NULL_TREE);
4922 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4923 NULL_TREE);
4924 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4925 NULL_TREE);
4926 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4927 NULL_TREE);
4929 /* Emit code to initialize OVFL, which points to the next varargs
4930 stack argument. CUM->STACK_WORDS gives the number of stack
4931 words used by named arguments. */
4932 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4933 if (cum->stack_words > 0)
4934 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4935 size_int (cum->stack_words * UNITS_PER_WORD));
4936 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4937 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4939 /* Emit code to initialize GTOP, the top of the GPR save area. */
4940 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4941 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4942 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4944 /* Emit code to initialize FTOP, the top of the FPR save area.
4945 This address is gpr_save_area_bytes below GTOP, rounded
4946 down to the next fp-aligned boundary. */
4947 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4948 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4949 fpr_offset &= -UNITS_PER_FPVALUE;
4950 if (fpr_offset)
4951 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4952 size_int (-fpr_offset));
4953 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4954 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4956 /* Emit code to initialize GOFF, the offset from GTOP of the
4957 next GPR argument. */
4958 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4959 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
4960 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4962 /* Likewise emit code to initialize FOFF, the offset from FTOP
4963 of the next FPR argument. */
4964 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4965 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
4966 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4968 else
4970 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4971 std_expand_builtin_va_start (valist, nextarg);
4975 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
4977 static tree
4978 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4980 tree addr;
4981 bool indirect_p;
4983 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4984 if (indirect_p)
4985 type = build_pointer_type (type);
4987 if (!EABI_FLOAT_VARARGS_P)
4988 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4989 else
4991 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4992 tree ovfl, top, off, align;
4993 HOST_WIDE_INT size, rsize, osize;
4994 tree t, u;
4996 f_ovfl = TYPE_FIELDS (va_list_type_node);
4997 f_gtop = TREE_CHAIN (f_ovfl);
4998 f_ftop = TREE_CHAIN (f_gtop);
4999 f_goff = TREE_CHAIN (f_ftop);
5000 f_foff = TREE_CHAIN (f_goff);
5002 /* Let:
5004 TOP be the top of the GPR or FPR save area;
5005 OFF be the offset from TOP of the next register;
5006 ADDR_RTX be the address of the argument;
5007 SIZE be the number of bytes in the argument type;
5008 RSIZE be the number of bytes used to store the argument
5009 when it's in the register save area; and
5010 OSIZE be the number of bytes used to store it when it's
5011 in the stack overflow area.
5013 The code we want is:
5015 1: off &= -rsize; // round down
5016 2: if (off != 0)
5017 3: {
5018 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5019 5: off -= rsize;
5020 6: }
5021 7: else
5022 8: {
5023 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5024 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5025 11: ovfl += osize;
5026 14: }
5028 [1] and [9] can sometimes be optimized away. */
5030 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5031 NULL_TREE);
5032 size = int_size_in_bytes (type);
5034 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5035 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5037 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5038 NULL_TREE);
5039 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5040 NULL_TREE);
5042 /* When va_start saves FPR arguments to the stack, each slot
5043 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5044 argument's precision. */
5045 rsize = UNITS_PER_HWFPVALUE;
5047 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5048 (= PARM_BOUNDARY bits). This can be different from RSIZE
5049 in two cases:
5051 (1) On 32-bit targets when TYPE is a structure such as:
5053 struct s { float f; };
5055 Such structures are passed in paired FPRs, so RSIZE
5056 will be 8 bytes. However, the structure only takes
5057 up 4 bytes of memory, so OSIZE will only be 4.
5059 (2) In combinations such as -mgp64 -msingle-float
5060 -fshort-double. Doubles passed in registers will then take
5061 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5062 stack take up UNITS_PER_WORD bytes. */
5063 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5065 else
5067 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5068 NULL_TREE);
5069 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5070 NULL_TREE);
5071 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5072 if (rsize > UNITS_PER_WORD)
5074 /* [1] Emit code for: off &= -rsize. */
5075 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5076 build_int_cst (NULL_TREE, -rsize));
5077 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5078 gimplify_and_add (t, pre_p);
5080 osize = rsize;
5083 /* [2] Emit code to branch if off == 0. */
5084 t = build2 (NE_EXPR, boolean_type_node, off,
5085 build_int_cst (TREE_TYPE (off), 0));
5086 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5088 /* [5] Emit code for: off -= rsize. We do this as a form of
5089 post-decrement not available to C. */
5090 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5091 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5093 /* [4] Emit code for:
5094 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5095 t = fold_convert (sizetype, t);
5096 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5097 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5098 if (BYTES_BIG_ENDIAN && rsize > size)
5100 u = size_int (rsize - size);
5101 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5103 COND_EXPR_THEN (addr) = t;
5105 if (osize > UNITS_PER_WORD)
5107 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5108 u = size_int (osize - 1);
5109 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5110 t = fold_convert (sizetype, t);
5111 u = size_int (-osize);
5112 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5113 t = fold_convert (TREE_TYPE (ovfl), t);
5114 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5116 else
5117 align = NULL;
5119 /* [10, 11] Emit code for:
5120 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5121 ovfl += osize. */
5122 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5123 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5124 if (BYTES_BIG_ENDIAN && osize > size)
5126 u = size_int (osize - size);
5127 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5130 /* String [9] and [10, 11] together. */
5131 if (align)
5132 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5133 COND_EXPR_ELSE (addr) = t;
5135 addr = fold_convert (build_pointer_type (type), addr);
5136 addr = build_va_arg_indirect_ref (addr);
5139 if (indirect_p)
5140 addr = build_va_arg_indirect_ref (addr);
5142 return addr;
5145 /* A chained list of functions for which mips16_build_call_stub has already
5146 generated a stub. NAME is the name of the function and FP_RET_P is true
5147 if the function returns a value in floating-point registers. */
5148 struct mips16_stub {
5149 struct mips16_stub *next;
5150 char *name;
5151 bool fp_ret_p;
5153 static struct mips16_stub *mips16_stubs;
5155 /* Return the two-character string that identifies floating-point
5156 return mode MODE in the name of a MIPS16 function stub. */
5158 static const char *
5159 mips16_call_stub_mode_suffix (enum machine_mode mode)
5161 if (mode == SFmode)
5162 return "sf";
5163 else if (mode == DFmode)
5164 return "df";
5165 else if (mode == SCmode)
5166 return "sc";
5167 else if (mode == DCmode)
5168 return "dc";
5169 else if (mode == V2SFmode)
5170 return "df";
5171 else
5172 gcc_unreachable ();
5175 /* Write instructions to move a 32-bit value between general register
5176 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5177 from GPREG to FPREG and 'f' to move in the opposite direction. */
5179 static void
5180 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5182 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5183 reg_names[gpreg], reg_names[fpreg]);
5186 /* Likewise for 64-bit values. */
5188 static void
5189 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5191 if (TARGET_64BIT)
5192 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5193 reg_names[gpreg], reg_names[fpreg]);
5194 else if (TARGET_FLOAT64)
5196 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5197 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5198 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5199 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5201 else
5203 /* Move the least-significant word. */
5204 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5205 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5206 /* ...then the most significant word. */
5207 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5208 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
5212 /* Write out code to move floating-point arguments into or out of
5213 general registers. FP_CODE is the code describing which arguments
5214 are present (see the comment above the definition of CUMULATIVE_ARGS
5215 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5217 static void
5218 mips_output_args_xfer (int fp_code, char direction)
5220 unsigned int gparg, fparg, f;
5221 CUMULATIVE_ARGS cum;
5223 /* This code only works for o32 and o64. */
5224 gcc_assert (TARGET_OLDABI);
5226 mips_init_cumulative_args (&cum, NULL);
5228 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5230 enum machine_mode mode;
5231 struct mips_arg_info info;
5233 if ((f & 3) == 1)
5234 mode = SFmode;
5235 else if ((f & 3) == 2)
5236 mode = DFmode;
5237 else
5238 gcc_unreachable ();
5240 mips_get_arg_info (&info, &cum, mode, NULL, true);
5241 gparg = mips_arg_regno (&info, false);
5242 fparg = mips_arg_regno (&info, true);
5244 if (mode == SFmode)
5245 mips_output_32bit_xfer (direction, gparg, fparg);
5246 else
5247 mips_output_64bit_xfer (direction, gparg, fparg);
5249 mips_function_arg_advance (&cum, mode, NULL, true);
5253 /* Write a MIPS16 stub for the current function. This stub is used
5254 for functions which take arguments in the floating-point registers.
5255 It is normal-mode code that moves the floating-point arguments
5256 into the general registers and then jumps to the MIPS16 code. */
5258 static void
5259 mips16_build_function_stub (void)
5261 const char *fnname, *separator;
5262 char *secname, *stubname;
5263 tree stubdecl;
5264 unsigned int f;
5266 /* Create the name of the stub, and its unique section. */
5267 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5268 fnname = targetm.strip_name_encoding (fnname);
5269 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5270 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5272 /* Build a decl for the stub. */
5273 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5274 build_function_type (void_type_node, NULL_TREE));
5275 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5276 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5278 /* Output a comment. */
5279 fprintf (asm_out_file, "\t# Stub function for %s (",
5280 current_function_name ());
5281 separator = "";
5282 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5284 fprintf (asm_out_file, "%s%s", separator,
5285 (f & 3) == 1 ? "float" : "double");
5286 separator = ", ";
5288 fprintf (asm_out_file, ")\n");
5290 /* Write the preamble leading up to the function declaration. */
5291 fprintf (asm_out_file, "\t.set\tnomips16\n");
5292 switch_to_section (function_section (stubdecl));
5293 ASM_OUTPUT_ALIGN (asm_out_file,
5294 floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5296 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5297 within a .ent, and we cannot emit another .ent. */
5298 if (!FUNCTION_NAME_ALREADY_DECLARED)
5300 fputs ("\t.ent\t", asm_out_file);
5301 assemble_name (asm_out_file, stubname);
5302 fputs ("\n", asm_out_file);
5305 /* Start the definition proper. */
5306 assemble_name (asm_out_file, stubname);
5307 fputs (":\n", asm_out_file);
5309 /* Load the address of the MIPS16 function into $at. Do this first so
5310 that targets with coprocessor interlocks can use an MFC1 to fill the
5311 delay slot. */
5312 fprintf (asm_out_file, "\t.set\tnoat\n");
5313 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5314 assemble_name (asm_out_file, fnname);
5315 fprintf (asm_out_file, "\n");
5317 /* Move the arguments from floating-point registers to general registers. */
5318 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5320 /* Jump to the MIPS16 function. */
5321 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5322 fprintf (asm_out_file, "\t.set\tat\n");
5324 if (!FUNCTION_NAME_ALREADY_DECLARED)
5326 fputs ("\t.end\t", asm_out_file);
5327 assemble_name (asm_out_file, stubname);
5328 fputs ("\n", asm_out_file);
5331 switch_to_section (function_section (current_function_decl));
5334 /* The current function is a MIPS16 function that returns a value in an FPR.
5335 Copy the return value from its soft-float to its hard-float location.
5336 libgcc2 has special non-MIPS16 helper functions for each case. */
5338 static void
5339 mips16_copy_fpr_return_value (void)
5341 rtx fn, insn, arg, call;
5342 tree id, return_type;
5343 enum machine_mode return_mode;
5345 return_type = DECL_RESULT (current_function_decl);
5346 return_mode = DECL_MODE (return_type);
5348 id = get_identifier (ACONCAT (("__mips16_ret_",
5349 mips16_call_stub_mode_suffix (return_mode),
5350 NULL)));
5351 fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5352 arg = gen_rtx_REG (return_mode, GP_RETURN);
5353 call = gen_call_value_internal (arg, fn, const0_rtx);
5354 insn = mips_emit_call_insn (call, false);
5355 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
5358 /* Consider building a stub for a MIPS16 call to function FN.
5359 RETVAL is the location of the return value, or null if this is
5360 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5361 arguments and FP_CODE is the code built by mips_function_arg;
5362 see the comment above CUMULATIVE_ARGS for details.
5364 If a stub was needed, emit the call and return the call insn itself.
5365 Return null otherwise.
5367 A stub is needed for calls to functions that, in normal mode,
5368 receive arguments in FPRs or return values in FPRs. The stub
5369 copies the arguments from their soft-float positions to their
5370 hard-float positions, calls the real function, then copies the
5371 return value from its hard-float position to its soft-float
5372 position.
5374 We emit a JAL to FN even when FN might need a stub. If FN turns out
5375 to be to a non-MIPS16 function, the linker automatically redirects
5376 the JAL to the stub, otherwise the JAL continues to call FN directly. */
5378 static rtx
5379 mips16_build_call_stub (rtx retval, rtx fn, rtx args_size, int fp_code)
5381 const char *fnname;
5382 bool fp_ret_p;
5383 struct mips16_stub *l;
5384 rtx insn;
5386 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5387 we were invoked with the -msoft-float option. */
5388 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5389 return NULL_RTX;
5391 /* Figure out whether the value might come back in a floating-point
5392 register. */
5393 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5395 /* We don't need to do anything if there were no floating-point
5396 arguments and the value will not be returned in a floating-point
5397 register. */
5398 if (fp_code == 0 && !fp_ret_p)
5399 return NULL_RTX;
5401 /* We don't need to do anything if this is a call to a special
5402 MIPS16 support function. */
5403 if (GET_CODE (fn) == SYMBOL_REF
5404 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5405 return NULL_RTX;
5407 /* This code will only work for o32 and o64 abis. The other ABI's
5408 require more sophisticated support. */
5409 gcc_assert (TARGET_OLDABI);
5411 /* If we're calling via a function pointer, use one of the magic
5412 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5413 Each stub expects the function address to arrive in register $2. */
5414 if (GET_CODE (fn) != SYMBOL_REF)
5416 char buf[30];
5417 tree id;
5418 rtx stub_fn, insn;
5420 /* Create a SYMBOL_REF for the libgcc.a function. */
5421 if (fp_ret_p)
5422 sprintf (buf, "__mips16_call_stub_%s_%d",
5423 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5424 fp_code);
5425 else
5426 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5427 id = get_identifier (buf);
5428 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5430 /* Load the target function into $2. */
5431 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5433 /* Emit the call. */
5434 if (retval == NULL_RTX)
5435 insn = gen_call_internal (stub_fn, args_size);
5436 else
5437 insn = gen_call_value_internal (retval, stub_fn, args_size);
5438 insn = mips_emit_call_insn (insn, false);
5440 /* Tell GCC that this call does indeed use the value of $2. */
5441 CALL_INSN_FUNCTION_USAGE (insn) =
5442 gen_rtx_EXPR_LIST (VOIDmode,
5443 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5444 CALL_INSN_FUNCTION_USAGE (insn));
5446 /* If we are handling a floating-point return value, we need to
5447 save $18 in the function prologue. Putting a note on the
5448 call will mean that df_regs_ever_live_p ($18) will be true if the
5449 call is not eliminated, and we can check that in the prologue
5450 code. */
5451 if (fp_ret_p)
5452 CALL_INSN_FUNCTION_USAGE (insn) =
5453 gen_rtx_EXPR_LIST (VOIDmode,
5454 gen_rtx_USE (VOIDmode,
5455 gen_rtx_REG (word_mode, 18)),
5456 CALL_INSN_FUNCTION_USAGE (insn));
5458 return insn;
5461 /* We know the function we are going to call. If we have already
5462 built a stub, we don't need to do anything further. */
5463 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5464 for (l = mips16_stubs; l != NULL; l = l->next)
5465 if (strcmp (l->name, fnname) == 0)
5466 break;
5468 if (l == NULL)
5470 const char *separator;
5471 char *secname, *stubname;
5472 tree stubid, stubdecl;
5473 unsigned int f;
5475 /* If the function does not return in FPRs, the special stub
5476 section is named
5477 .mips16.call.FNNAME
5479 If the function does return in FPRs, the stub section is named
5480 .mips16.call.fp.FNNAME
5482 Build a decl for the stub. */
5483 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5484 fnname, NULL));
5485 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5486 fnname, NULL));
5487 stubid = get_identifier (stubname);
5488 stubdecl = build_decl (FUNCTION_DECL, stubid,
5489 build_function_type (void_type_node, NULL_TREE));
5490 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5491 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5492 void_type_node);
5494 /* Output a comment. */
5495 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5496 (fp_ret_p
5497 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5498 : ""),
5499 fnname);
5500 separator = "";
5501 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5503 fprintf (asm_out_file, "%s%s", separator,
5504 (f & 3) == 1 ? "float" : "double");
5505 separator = ", ";
5507 fprintf (asm_out_file, ")\n");
5509 /* Write the preamble leading up to the function declaration. */
5510 fprintf (asm_out_file, "\t.set\tnomips16\n");
5511 assemble_start_function (stubdecl, stubname);
5513 if (!FUNCTION_NAME_ALREADY_DECLARED)
5515 fputs ("\t.ent\t", asm_out_file);
5516 assemble_name (asm_out_file, stubname);
5517 fputs ("\n", asm_out_file);
5519 assemble_name (asm_out_file, stubname);
5520 fputs (":\n", asm_out_file);
5523 if (!fp_ret_p)
5525 /* Load the address of the MIPS16 function into $at. Do this
5526 first so that targets with coprocessor interlocks can use
5527 an MFC1 to fill the delay slot. */
5528 fprintf (asm_out_file, "\t.set\tnoat\n");
5529 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5530 fnname);
5533 /* Move the arguments from general registers to floating-point
5534 registers. */
5535 mips_output_args_xfer (fp_code, 't');
5537 if (!fp_ret_p)
5539 /* Jump to the previously-loaded address. */
5540 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5541 fprintf (asm_out_file, "\t.set\tat\n");
5543 else
5545 /* Save the return address in $18 and call the non-MIPS16 function.
5546 The stub's caller knows that $18 might be clobbered, even though
5547 $18 is usually a call-saved register. */
5548 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5549 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5550 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5552 /* Move the result from floating-point registers to
5553 general registers. */
5554 switch (GET_MODE (retval))
5556 case SCmode:
5557 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5558 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5559 /* Fall though. */
5560 case SFmode:
5561 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5562 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5564 /* On 64-bit targets, complex floats are returned in
5565 a single GPR, such that "sd" on a suitably-aligned
5566 target would store the value correctly. */
5567 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5568 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5569 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5570 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5571 reg_names[GP_RETURN],
5572 reg_names[GP_RETURN],
5573 reg_names[GP_RETURN + 1]);
5575 break;
5577 case DCmode:
5578 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5579 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5580 /* Fall though. */
5581 case DFmode:
5582 case V2SFmode:
5583 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5584 break;
5586 default:
5587 gcc_unreachable ();
5589 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
5592 #ifdef ASM_DECLARE_FUNCTION_SIZE
5593 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5594 #endif
5596 if (!FUNCTION_NAME_ALREADY_DECLARED)
5598 fputs ("\t.end\t", asm_out_file);
5599 assemble_name (asm_out_file, stubname);
5600 fputs ("\n", asm_out_file);
5603 /* Record this stub. */
5604 l = XNEW (struct mips16_stub);
5605 l->name = xstrdup (fnname);
5606 l->fp_ret_p = fp_ret_p;
5607 l->next = mips16_stubs;
5608 mips16_stubs = l;
5611 /* If we expect a floating-point return value, but we've built a
5612 stub which does not expect one, then we're in trouble. We can't
5613 use the existing stub, because it won't handle the floating-point
5614 value. We can't build a new stub, because the linker won't know
5615 which stub to use for the various calls in this object file.
5616 Fortunately, this case is illegal, since it means that a function
5617 was declared in two different ways in a single compilation. */
5618 if (fp_ret_p && !l->fp_ret_p)
5619 error ("cannot handle inconsistent calls to %qs", fnname);
5621 if (retval == NULL_RTX)
5622 insn = gen_call_internal_direct (fn, args_size);
5623 else
5624 insn = gen_call_value_internal_direct (retval, fn, args_size);
5625 insn = mips_emit_call_insn (insn, false);
5627 /* If we are calling a stub which handles a floating-point return
5628 value, we need to arrange to save $18 in the prologue. We do this
5629 by marking the function call as using the register. The prologue
5630 will later see that it is used, and emit code to save it. */
5631 if (fp_ret_p)
5632 CALL_INSN_FUNCTION_USAGE (insn) =
5633 gen_rtx_EXPR_LIST (VOIDmode,
5634 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5635 CALL_INSN_FUNCTION_USAGE (insn));
5637 return insn;
5640 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5642 static bool
5643 mips_ok_for_lazy_binding_p (rtx x)
5645 return (TARGET_USE_GOT
5646 && GET_CODE (x) == SYMBOL_REF
5647 && !mips_symbol_binds_local_p (x));
5650 /* Load function address ADDR into register DEST. SIBCALL_P is true
5651 if the address is needed for a sibling call. Return true if we
5652 used an explicit lazy-binding sequence. */
5654 static bool
5655 mips_load_call_address (rtx dest, rtx addr, bool sibcall_p)
5657 /* If we're generating PIC, and this call is to a global function,
5658 try to allow its address to be resolved lazily. This isn't
5659 possible for sibcalls when $gp is call-saved because the value
5660 of $gp on entry to the stub would be our caller's gp, not ours. */
5661 if (TARGET_EXPLICIT_RELOCS
5662 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5663 && mips_ok_for_lazy_binding_p (addr))
5665 rtx high, lo_sum_symbol;
5667 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5668 addr, SYMBOL_GOTOFF_CALL);
5669 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5670 if (Pmode == SImode)
5671 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5672 else
5673 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5674 return true;
5676 else
5678 mips_emit_move (dest, addr);
5679 return false;
5683 /* Expand a "call", "sibcall", "call_value" or "sibcall_value" instruction.
5684 RESULT is where the result will go (null for "call"s and "sibcall"s),
5685 ADDR is the address of the function, ARGS_SIZE is the size of the
5686 arguments and AUX is the value passed to us by mips_function_arg.
5687 SIBCALL_P is true if we are expanding a sibling call, false if we're
5688 expanding a normal call.
5690 Return the call itself. */
5693 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, bool sibcall_p)
5695 rtx orig_addr, pattern, insn;
5696 bool lazy_p;
5698 orig_addr = addr;
5699 lazy_p = false;
5700 if (!call_insn_operand (addr, VOIDmode))
5702 addr = gen_reg_rtx (Pmode);
5703 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5706 insn = mips16_build_call_stub (result, addr, args_size,
5707 aux == 0 ? 0 : (int) GET_MODE (aux));
5708 if (insn)
5710 gcc_assert (!sibcall_p && !lazy_p);
5711 return insn;
5714 if (result == 0)
5715 pattern = (sibcall_p
5716 ? gen_sibcall_internal (addr, args_size)
5717 : gen_call_internal (addr, args_size));
5718 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5720 /* Handle return values created by mips_return_fpr_pair. */
5721 rtx reg1, reg2;
5723 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5724 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5725 pattern =
5726 (sibcall_p
5727 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5728 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5730 else
5732 /* Handle return values created by mips_return_fpr_single. */
5733 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
5734 result = XEXP (XVECEXP (result, 0, 0), 0);
5735 pattern = (sibcall_p
5736 ? gen_sibcall_value_internal (result, addr, args_size)
5737 : gen_call_value_internal (result, addr, args_size));
5740 return mips_emit_call_insn (pattern, lazy_p);
5743 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5745 static bool
5746 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5748 if (!TARGET_SIBCALLS)
5749 return false;
5751 /* We can't do a sibcall if the called function is a MIPS16 function
5752 because there is no direct "jx" instruction equivalent to "jalx" to
5753 switch the ISA mode. We only care about cases where the sibling
5754 and normal calls would both be direct. */
5755 if (mips_use_mips16_mode_p (decl)
5756 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5757 return false;
5759 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
5760 functions could be MIPS16 ones unless an attribute explicitly tells
5761 us otherwise. */
5762 if (TARGET_INTERLINK_MIPS16
5763 && decl
5764 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
5765 && !mips_nomips16_decl_p (decl)
5766 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5767 return false;
5769 /* Otherwise OK. */
5770 return true;
5773 /* Emit code to move general operand SRC into condition-code
5774 register DEST given that SCRATCH is a scratch TFmode FPR.
5775 The sequence is:
5777 FP1 = SRC
5778 FP2 = 0.0f
5779 DEST = FP2 < FP1
5781 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
5783 void
5784 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
5786 rtx fp1, fp2;
5788 /* Change the source to SFmode. */
5789 if (MEM_P (src))
5790 src = adjust_address (src, SFmode, 0);
5791 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5792 src = gen_rtx_REG (SFmode, true_regnum (src));
5794 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5795 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5797 mips_emit_move (copy_rtx (fp1), src);
5798 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5799 emit_insn (gen_slt_sf (dest, fp2, fp1));
5802 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5803 Assume that the areas do not overlap. */
5805 static void
5806 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5808 HOST_WIDE_INT offset, delta;
5809 unsigned HOST_WIDE_INT bits;
5810 int i;
5811 enum machine_mode mode;
5812 rtx *regs;
5814 /* Work out how many bits to move at a time. If both operands have
5815 half-word alignment, it is usually better to move in half words.
5816 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5817 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5818 Otherwise move word-sized chunks. */
5819 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5820 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5821 bits = BITS_PER_WORD / 2;
5822 else
5823 bits = BITS_PER_WORD;
5825 mode = mode_for_size (bits, MODE_INT, 0);
5826 delta = bits / BITS_PER_UNIT;
5828 /* Allocate a buffer for the temporary registers. */
5829 regs = alloca (sizeof (rtx) * length / delta);
5831 /* Load as many BITS-sized chunks as possible. Use a normal load if
5832 the source has enough alignment, otherwise use left/right pairs. */
5833 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5835 regs[i] = gen_reg_rtx (mode);
5836 if (MEM_ALIGN (src) >= bits)
5837 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5838 else
5840 rtx part = adjust_address (src, BLKmode, offset);
5841 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
5842 gcc_unreachable ();
5846 /* Copy the chunks to the destination. */
5847 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5848 if (MEM_ALIGN (dest) >= bits)
5849 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5850 else
5852 rtx part = adjust_address (dest, BLKmode, offset);
5853 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
5854 gcc_unreachable ();
5857 /* Mop up any left-over bytes. */
5858 if (offset < length)
5860 src = adjust_address (src, BLKmode, offset);
5861 dest = adjust_address (dest, BLKmode, offset);
5862 move_by_pieces (dest, src, length - offset,
5863 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
5867 /* Helper function for doing a loop-based block operation on memory
5868 reference MEM. Each iteration of the loop will operate on LENGTH
5869 bytes of MEM.
5871 Create a new base register for use within the loop and point it to
5872 the start of MEM. Create a new memory reference that uses this
5873 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5875 static void
5876 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5877 rtx *loop_reg, rtx *loop_mem)
5879 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5881 /* Although the new mem does not refer to a known location,
5882 it does keep up to LENGTH bytes of alignment. */
5883 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5884 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5887 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
5888 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
5889 the memory regions do not overlap. */
5891 static void
5892 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
5893 HOST_WIDE_INT bytes_per_iter)
5895 rtx label, src_reg, dest_reg, final_src;
5896 HOST_WIDE_INT leftover;
5898 leftover = length % bytes_per_iter;
5899 length -= leftover;
5901 /* Create registers and memory references for use within the loop. */
5902 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
5903 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
5905 /* Calculate the value that SRC_REG should have after the last iteration
5906 of the loop. */
5907 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5908 0, 0, OPTAB_WIDEN);
5910 /* Emit the start of the loop. */
5911 label = gen_label_rtx ();
5912 emit_label (label);
5914 /* Emit the loop body. */
5915 mips_block_move_straight (dest, src, bytes_per_iter);
5917 /* Move on to the next block. */
5918 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
5919 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
5921 /* Emit the loop condition. */
5922 if (Pmode == DImode)
5923 emit_insn (gen_cmpdi (src_reg, final_src));
5924 else
5925 emit_insn (gen_cmpsi (src_reg, final_src));
5926 emit_jump_insn (gen_bne (label));
5928 /* Mop up any left-over bytes. */
5929 if (leftover)
5930 mips_block_move_straight (dest, src, leftover);
5933 /* Expand a movmemsi instruction, which copies LENGTH bytes from
5934 memory reference SRC to memory reference DEST. */
5936 bool
5937 mips_expand_block_move (rtx dest, rtx src, rtx length)
5939 if (GET_CODE (length) == CONST_INT)
5941 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
5943 mips_block_move_straight (dest, src, INTVAL (length));
5944 return true;
5946 else if (optimize)
5948 mips_block_move_loop (dest, src, INTVAL (length),
5949 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
5950 return true;
5953 return false;
5956 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5958 void
5959 mips_expand_synci_loop (rtx begin, rtx end)
5961 rtx inc, label, cmp, cmp_result;
5963 /* Load INC with the cache line size (rdhwr INC,$1). */
5964 inc = gen_reg_rtx (SImode);
5965 emit_insn (gen_rdhwr (inc, const1_rtx));
5967 /* Loop back to here. */
5968 label = gen_label_rtx ();
5969 emit_label (label);
5971 emit_insn (gen_synci (begin));
5973 cmp = mips_force_binary (Pmode, GTU, begin, end);
5975 mips_emit_binary (PLUS, begin, begin, inc);
5977 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
5978 emit_jump_insn (gen_condjump (cmp_result, label));
5981 /* Expand a QI or HI mode atomic memory operation.
5983 GENERATOR contains a pointer to the gen_* function that generates
5984 the SI mode underlying atomic operation using masks that we
5985 calculate.
5987 RESULT is the return register for the operation. Its value is NULL
5988 if unused.
5990 MEM is the location of the atomic access.
5992 OLDVAL is the first operand for the operation.
5994 NEWVAL is the optional second operand for the operation. Its value
5995 is NULL if unused. */
5997 void
5998 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
5999 rtx result, rtx mem, rtx oldval, rtx newval)
6001 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6002 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6003 rtx res = NULL;
6004 enum machine_mode mode;
6006 mode = GET_MODE (mem);
6008 /* Compute the address of the containing SImode value. */
6009 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6010 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6011 force_reg (Pmode, GEN_INT (-4)));
6013 /* Create a memory reference for it. */
6014 memsi = gen_rtx_MEM (SImode, memsi_addr);
6015 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6016 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6018 /* Work out the byte offset of the QImode or HImode value,
6019 counting from the least significant byte. */
6020 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6021 if (TARGET_BIG_ENDIAN)
6022 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6024 /* Multiply by eight to convert the shift value from bytes to bits. */
6025 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6027 /* Make the final shift an SImode value, so that it can be used in
6028 SImode operations. */
6029 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6031 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6032 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6033 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6034 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6036 /* Compute the equivalent exclusive mask. */
6037 inverted_mask = gen_reg_rtx (SImode);
6038 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6039 gen_rtx_NOT (SImode, mask)));
6041 /* Shift the old value into place. */
6042 if (oldval != const0_rtx)
6044 oldval = convert_modes (SImode, mode, oldval, true);
6045 oldval = force_reg (SImode, oldval);
6046 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6049 /* Do the same for the new value. */
6050 if (newval && newval != const0_rtx)
6052 newval = convert_modes (SImode, mode, newval, true);
6053 newval = force_reg (SImode, newval);
6054 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6057 /* Do the SImode atomic access. */
6058 if (result)
6059 res = gen_reg_rtx (SImode);
6060 if (newval)
6061 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6062 else if (result)
6063 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6064 else
6065 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6067 emit_insn (si_op);
6069 if (result)
6071 /* Shift and convert the result. */
6072 mips_emit_binary (AND, res, res, mask);
6073 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6074 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
6078 /* Return true if it is possible to use left/right accesses for a
6079 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6080 returning true, update *OP, *LEFT and *RIGHT as follows:
6082 *OP is a BLKmode reference to the whole field.
6084 *LEFT is a QImode reference to the first byte if big endian or
6085 the last byte if little endian. This address can be used in the
6086 left-side instructions (LWL, SWL, LDL, SDL).
6088 *RIGHT is a QImode reference to the opposite end of the field and
6089 can be used in the patterning right-side instruction. */
6091 static bool
6092 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6093 rtx *left, rtx *right)
6095 rtx first, last;
6097 /* Check that the operand really is a MEM. Not all the extv and
6098 extzv predicates are checked. */
6099 if (!MEM_P (*op))
6100 return false;
6102 /* Check that the size is valid. */
6103 if (width != 32 && (!TARGET_64BIT || width != 64))
6104 return false;
6106 /* We can only access byte-aligned values. Since we are always passed
6107 a reference to the first byte of the field, it is not necessary to
6108 do anything with BITPOS after this check. */
6109 if (bitpos % BITS_PER_UNIT != 0)
6110 return false;
6112 /* Reject aligned bitfields: we want to use a normal load or store
6113 instead of a left/right pair. */
6114 if (MEM_ALIGN (*op) >= width)
6115 return false;
6117 /* Adjust *OP to refer to the whole field. This also has the effect
6118 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6119 *op = adjust_address (*op, BLKmode, 0);
6120 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6122 /* Get references to both ends of the field. We deliberately don't
6123 use the original QImode *OP for FIRST since the new BLKmode one
6124 might have a simpler address. */
6125 first = adjust_address (*op, QImode, 0);
6126 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6128 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6129 correspond to the MSB and RIGHT to the LSB. */
6130 if (TARGET_BIG_ENDIAN)
6131 *left = first, *right = last;
6132 else
6133 *left = last, *right = first;
6135 return true;
6138 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6139 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6140 the operation is the equivalent of:
6142 (set DEST (*_extract SRC WIDTH BITPOS))
6144 Return true on success. */
6146 bool
6147 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6148 HOST_WIDE_INT bitpos)
6150 rtx left, right, temp;
6152 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
6153 be a paradoxical word_mode subreg. This is the only case in which
6154 we allow the destination to be larger than the source. */
6155 if (GET_CODE (dest) == SUBREG
6156 && GET_MODE (dest) == DImode
6157 && GET_MODE (SUBREG_REG (dest)) == SImode)
6158 dest = SUBREG_REG (dest);
6160 /* After the above adjustment, the destination must be the same
6161 width as the source. */
6162 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6163 return false;
6165 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6166 return false;
6168 temp = gen_reg_rtx (GET_MODE (dest));
6169 if (GET_MODE (dest) == DImode)
6171 emit_insn (gen_mov_ldl (temp, src, left));
6172 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6174 else
6176 emit_insn (gen_mov_lwl (temp, src, left));
6177 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6179 return true;
6182 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6183 BITPOS and SRC are the operands passed to the expander; the operation
6184 is the equivalent of:
6186 (set (zero_extract DEST WIDTH BITPOS) SRC)
6188 Return true on success. */
6190 bool
6191 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6192 HOST_WIDE_INT bitpos)
6194 rtx left, right;
6195 enum machine_mode mode;
6197 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6198 return false;
6200 mode = mode_for_size (width, MODE_INT, 0);
6201 src = gen_lowpart (mode, src);
6202 if (mode == DImode)
6204 emit_insn (gen_mov_sdl (dest, src, left));
6205 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6207 else
6209 emit_insn (gen_mov_swl (dest, src, left));
6210 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6212 return true;
6215 /* Return true if X is a MEM with the same size as MODE. */
6217 bool
6218 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6220 rtx size;
6222 if (!MEM_P (x))
6223 return false;
6225 size = MEM_SIZE (x);
6226 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6229 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6230 source of an "ext" instruction or the destination of an "ins"
6231 instruction. OP must be a register operand and the following
6232 conditions must hold:
6234 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6235 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6236 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6238 Also reject lengths equal to a word as they are better handled
6239 by the move patterns. */
6241 bool
6242 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6244 if (!ISA_HAS_EXT_INS
6245 || !register_operand (op, VOIDmode)
6246 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6247 return false;
6249 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6250 return false;
6252 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6253 return false;
6255 return true;
6258 /* Return true if -msplit-addresses is selected and should be honored.
6260 -msplit-addresses is a half-way house between explicit relocations
6261 and the traditional assembler macros. It can split absolute 32-bit
6262 symbolic constants into a high/lo_sum pair but uses macros for other
6263 sorts of access.
6265 Like explicit relocation support for REL targets, it relies
6266 on GNU extensions in the assembler and the linker.
6268 Although this code should work for -O0, it has traditionally
6269 been treated as an optimization. */
6271 static bool
6272 mips_split_addresses_p (void)
6274 return (TARGET_SPLIT_ADDRESSES
6275 && optimize
6276 && !TARGET_MIPS16
6277 && !flag_pic
6278 && !ABI_HAS_64BIT_SYMBOLS);
6281 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6283 static void
6284 mips_init_relocs (void)
6286 memset (mips_split_p, '\0', sizeof (mips_split_p));
6287 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6288 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6290 if (ABI_HAS_64BIT_SYMBOLS)
6292 if (TARGET_EXPLICIT_RELOCS)
6294 mips_split_p[SYMBOL_64_HIGH] = true;
6295 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6296 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6298 mips_split_p[SYMBOL_64_MID] = true;
6299 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6300 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6302 mips_split_p[SYMBOL_64_LOW] = true;
6303 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6304 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6306 mips_split_p[SYMBOL_ABSOLUTE] = true;
6307 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6310 else
6312 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6314 mips_split_p[SYMBOL_ABSOLUTE] = true;
6315 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6316 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6318 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6322 if (TARGET_MIPS16)
6324 /* The high part is provided by a pseudo copy of $gp. */
6325 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6326 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6329 if (TARGET_EXPLICIT_RELOCS)
6331 /* Small data constants are kept whole until after reload,
6332 then lowered by mips_rewrite_small_data. */
6333 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6335 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6336 if (TARGET_NEWABI)
6338 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6339 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6341 else
6343 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6344 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6347 if (TARGET_XGOT)
6349 /* The HIGH and LO_SUM are matched by special .md patterns. */
6350 mips_split_p[SYMBOL_GOT_DISP] = true;
6352 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6353 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6354 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6356 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6357 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6358 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6360 else
6362 if (TARGET_NEWABI)
6363 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6364 else
6365 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6366 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6370 if (TARGET_NEWABI)
6372 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6373 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6374 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6377 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6378 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6380 mips_split_p[SYMBOL_DTPREL] = true;
6381 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6382 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6384 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6386 mips_split_p[SYMBOL_TPREL] = true;
6387 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6388 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6390 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6393 /* If OP is an UNSPEC address, return the address to which it refers,
6394 otherwise return OP itself. */
6396 static rtx
6397 mips_strip_unspec_address (rtx op)
6399 rtx base, offset;
6401 split_const (op, &base, &offset);
6402 if (UNSPEC_ADDRESS_P (base))
6403 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6404 return op;
6407 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6408 in context CONTEXT. RELOCS is the array of relocations to use. */
6410 static void
6411 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6412 const char **relocs)
6414 enum mips_symbol_type symbol_type;
6415 const char *p;
6417 symbol_type = mips_classify_symbolic_expression (op, context);
6418 gcc_assert (relocs[symbol_type]);
6420 fputs (relocs[symbol_type], file);
6421 output_addr_const (file, mips_strip_unspec_address (op));
6422 for (p = relocs[symbol_type]; *p != 0; p++)
6423 if (*p == '(')
6424 fputc (')', file);
6427 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
6428 The punctuation characters are:
6430 '(' Start a nested ".set noreorder" block.
6431 ')' End a nested ".set noreorder" block.
6432 '[' Start a nested ".set noat" block.
6433 ']' End a nested ".set noat" block.
6434 '<' Start a nested ".set nomacro" block.
6435 '>' End a nested ".set nomacro" block.
6436 '*' Behave like %(%< if generating a delayed-branch sequence.
6437 '#' Print a nop if in a ".set noreorder" block.
6438 '/' Like '#', but do nothing within a delayed-branch sequence.
6439 '?' Print "l" if mips_branch_likely is true
6440 '.' Print the name of the register with a hard-wired zero (zero or $0).
6441 '@' Print the name of the assembler temporary register (at or $1).
6442 '^' Print the name of the pic call-through register (t9 or $25).
6443 '+' Print the name of the gp register (usually gp or $28).
6444 '$' Print the name of the stack pointer register (sp or $29).
6445 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6446 '-' Print ".set pop" under the same conditions for '|'.
6448 See also mips_init_print_operand_pucnt. */
6450 static void
6451 mips_print_operand_punctuation (FILE *file, int ch)
6453 switch (ch)
6455 case '(':
6456 if (set_noreorder++ == 0)
6457 fputs (".set\tnoreorder\n\t", file);
6458 break;
6460 case ')':
6461 gcc_assert (set_noreorder > 0);
6462 if (--set_noreorder == 0)
6463 fputs ("\n\t.set\treorder", file);
6464 break;
6466 case '[':
6467 if (set_noat++ == 0)
6468 fputs (".set\tnoat\n\t", file);
6469 break;
6471 case ']':
6472 gcc_assert (set_noat > 0);
6473 if (--set_noat == 0)
6474 fputs ("\n\t.set\tat", file);
6475 break;
6477 case '<':
6478 if (set_nomacro++ == 0)
6479 fputs (".set\tnomacro\n\t", file);
6480 break;
6482 case '>':
6483 gcc_assert (set_nomacro > 0);
6484 if (--set_nomacro == 0)
6485 fputs ("\n\t.set\tmacro", file);
6486 break;
6488 case '*':
6489 if (final_sequence != 0)
6491 mips_print_operand_punctuation (file, '(');
6492 mips_print_operand_punctuation (file, '<');
6494 break;
6496 case '#':
6497 if (set_noreorder != 0)
6498 fputs ("\n\tnop", file);
6499 break;
6501 case '/':
6502 /* Print an extra newline so that the delayed insn is separated
6503 from the following ones. This looks neater and is consistent
6504 with non-nop delayed sequences. */
6505 if (set_noreorder != 0 && final_sequence == 0)
6506 fputs ("\n\tnop\n", file);
6507 break;
6509 case '?':
6510 if (mips_branch_likely)
6511 putc ('l', file);
6512 break;
6514 case '.':
6515 fputs (reg_names[GP_REG_FIRST + 0], file);
6516 break;
6518 case '@':
6519 fputs (reg_names[GP_REG_FIRST + 1], file);
6520 break;
6522 case '^':
6523 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6524 break;
6526 case '+':
6527 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6528 break;
6530 case '$':
6531 fputs (reg_names[STACK_POINTER_REGNUM], file);
6532 break;
6534 case '|':
6535 if (!ISA_HAS_LL_SC)
6536 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6537 break;
6539 case '-':
6540 if (!ISA_HAS_LL_SC)
6541 fputs ("\n\t.set\tpop", file);
6542 break;
6544 default:
6545 gcc_unreachable ();
6546 break;
6550 /* Initialize mips_print_operand_punct. */
6552 static void
6553 mips_init_print_operand_punct (void)
6555 const char *p;
6557 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6558 mips_print_operand_punct[(unsigned char) *p] = true;
6561 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6562 associated with condition CODE. Print the condition part of the
6563 opcode to FILE. */
6565 static void
6566 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6568 switch (code)
6570 case EQ:
6571 case NE:
6572 case GT:
6573 case GE:
6574 case LT:
6575 case LE:
6576 case GTU:
6577 case GEU:
6578 case LTU:
6579 case LEU:
6580 /* Conveniently, the MIPS names for these conditions are the same
6581 as their RTL equivalents. */
6582 fputs (GET_RTX_NAME (code), file);
6583 break;
6585 default:
6586 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6587 break;
6591 /* Likewise floating-point branches. */
6593 static void
6594 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6596 switch (code)
6598 case EQ:
6599 fputs ("c1f", file);
6600 break;
6602 case NE:
6603 fputs ("c1t", file);
6604 break;
6606 default:
6607 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6608 break;
6612 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6614 'X' Print CONST_INT OP in hexadecimal format.
6615 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6616 'd' Print CONST_INT OP in decimal.
6617 'h' Print the high-part relocation associated with OP, after stripping
6618 any outermost HIGH.
6619 'R' Print the low-part relocation associated with OP.
6620 'C' Print the integer branch condition for comparison OP.
6621 'N' Print the inverse of the integer branch condition for comparison OP.
6622 'F' Print the FPU branch condition for comparison OP.
6623 'W' Print the inverse of the FPU branch condition for comparison OP.
6624 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6625 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6626 't' Like 'T', but with the EQ/NE cases reversed
6627 'Y' Print mips_fp_conditions[INTVAL (OP)]
6628 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6629 'q' Print a DSP accumulator register.
6630 'D' Print the second part of a double-word register or memory operand.
6631 'L' Print the low-order register in a double-word register operand.
6632 'M' Print high-order register in a double-word register operand.
6633 'z' Print $0 if OP is zero, otherwise print OP normally. */
6635 void
6636 mips_print_operand (FILE *file, rtx op, int letter)
6638 enum rtx_code code;
6640 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6642 mips_print_operand_punctuation (file, letter);
6643 return;
6646 gcc_assert (op);
6647 code = GET_CODE (op);
6649 switch (letter)
6651 case 'X':
6652 if (GET_CODE (op) == CONST_INT)
6653 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6654 else
6655 output_operand_lossage ("invalid use of '%%%c'", letter);
6656 break;
6658 case 'x':
6659 if (GET_CODE (op) == CONST_INT)
6660 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6661 else
6662 output_operand_lossage ("invalid use of '%%%c'", letter);
6663 break;
6665 case 'd':
6666 if (GET_CODE (op) == CONST_INT)
6667 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6668 else
6669 output_operand_lossage ("invalid use of '%%%c'", letter);
6670 break;
6672 case 'h':
6673 if (code == HIGH)
6674 op = XEXP (op, 0);
6675 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6676 break;
6678 case 'R':
6679 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6680 break;
6682 case 'C':
6683 mips_print_int_branch_condition (file, code, letter);
6684 break;
6686 case 'N':
6687 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6688 break;
6690 case 'F':
6691 mips_print_float_branch_condition (file, code, letter);
6692 break;
6694 case 'W':
6695 mips_print_float_branch_condition (file, reverse_condition (code),
6696 letter);
6697 break;
6699 case 'T':
6700 case 't':
6702 int truth = (code == NE) == (letter == 'T');
6703 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6705 break;
6707 case 'Y':
6708 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6709 fputs (mips_fp_conditions[UINTVAL (op)], file);
6710 else
6711 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6712 letter);
6713 break;
6715 case 'Z':
6716 if (ISA_HAS_8CC)
6718 mips_print_operand (file, op, 0);
6719 fputc (',', file);
6721 break;
6723 case 'q':
6724 if (code == REG && MD_REG_P (REGNO (op)))
6725 fprintf (file, "$ac0");
6726 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6727 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6728 else
6729 output_operand_lossage ("invalid use of '%%%c'", letter);
6730 break;
6732 default:
6733 switch (code)
6735 case REG:
6737 unsigned int regno = REGNO (op);
6738 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6739 || (letter == 'L' && TARGET_BIG_ENDIAN)
6740 || letter == 'D')
6741 regno++;
6742 fprintf (file, "%s", reg_names[regno]);
6744 break;
6746 case MEM:
6747 if (letter == 'D')
6748 output_address (plus_constant (XEXP (op, 0), 4));
6749 else
6750 output_address (XEXP (op, 0));
6751 break;
6753 default:
6754 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6755 fputs (reg_names[GP_REG_FIRST], file);
6756 else if (CONST_GP_P (op))
6757 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6758 else
6759 output_addr_const (file, mips_strip_unspec_address (op));
6760 break;
6765 /* Output address operand X to FILE. */
6767 void
6768 mips_print_operand_address (FILE *file, rtx x)
6770 struct mips_address_info addr;
6772 if (mips_classify_address (&addr, x, word_mode, true))
6773 switch (addr.type)
6775 case ADDRESS_REG:
6776 mips_print_operand (file, addr.offset, 0);
6777 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6778 return;
6780 case ADDRESS_LO_SUM:
6781 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6782 mips_lo_relocs);
6783 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6784 return;
6786 case ADDRESS_CONST_INT:
6787 output_addr_const (file, x);
6788 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
6789 return;
6791 case ADDRESS_SYMBOLIC:
6792 output_addr_const (file, mips_strip_unspec_address (x));
6793 return;
6795 gcc_unreachable ();
6798 /* Implement TARGET_ENCODE_SECTION_INFO. */
6800 static void
6801 mips_encode_section_info (tree decl, rtx rtl, int first)
6803 default_encode_section_info (decl, rtl, first);
6805 if (TREE_CODE (decl) == FUNCTION_DECL)
6807 rtx symbol = XEXP (rtl, 0);
6808 tree type = TREE_TYPE (decl);
6810 /* Encode whether the symbol is short or long. */
6811 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6812 || mips_far_type_p (type))
6813 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6817 /* Implement TARGET_SELECT_RTX_SECTION. */
6819 static section *
6820 mips_select_rtx_section (enum machine_mode mode, rtx x,
6821 unsigned HOST_WIDE_INT align)
6823 /* ??? Consider using mergeable small data sections. */
6824 if (mips_rtx_constant_in_small_data_p (mode))
6825 return get_named_section (NULL, ".sdata", 0);
6827 return default_elf_select_rtx_section (mode, x, align);
6830 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6832 The complication here is that, with the combination TARGET_ABICALLS
6833 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6834 therefore not be included in the read-only part of a DSO. Handle such
6835 cases by selecting a normal data section instead of a read-only one.
6836 The logic apes that in default_function_rodata_section. */
6838 static section *
6839 mips_function_rodata_section (tree decl)
6841 if (!TARGET_ABICALLS || TARGET_GPWORD)
6842 return default_function_rodata_section (decl);
6844 if (decl && DECL_SECTION_NAME (decl))
6846 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6847 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6849 char *rname = ASTRDUP (name);
6850 rname[14] = 'd';
6851 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6853 else if (flag_function_sections
6854 && flag_data_sections
6855 && strncmp (name, ".text.", 6) == 0)
6857 char *rname = ASTRDUP (name);
6858 memcpy (rname + 1, "data", 4);
6859 return get_section (rname, SECTION_WRITE, decl);
6862 return data_section;
6865 /* Implement TARGET_IN_SMALL_DATA_P. */
6867 static bool
6868 mips_in_small_data_p (const_tree decl)
6870 unsigned HOST_WIDE_INT size;
6872 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6873 return false;
6875 /* We don't yet generate small-data references for -mabicalls
6876 or VxWorks RTP code. See the related -G handling in
6877 mips_override_options. */
6878 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6879 return false;
6881 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6883 const char *name;
6885 /* Reject anything that isn't in a known small-data section. */
6886 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6887 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6888 return false;
6890 /* If a symbol is defined externally, the assembler will use the
6891 usual -G rules when deciding how to implement macros. */
6892 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6893 return true;
6895 else if (TARGET_EMBEDDED_DATA)
6897 /* Don't put constants into the small data section: we want them
6898 to be in ROM rather than RAM. */
6899 if (TREE_CODE (decl) != VAR_DECL)
6900 return false;
6902 if (TREE_READONLY (decl)
6903 && !TREE_SIDE_EFFECTS (decl)
6904 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6905 return false;
6908 /* Enforce -mlocal-sdata. */
6909 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6910 return false;
6912 /* Enforce -mextern-sdata. */
6913 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6915 if (DECL_EXTERNAL (decl))
6916 return false;
6917 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6918 return false;
6921 /* We have traditionally not treated zero-sized objects as small data,
6922 so this is now effectively part of the ABI. */
6923 size = int_size_in_bytes (TREE_TYPE (decl));
6924 return size > 0 && size <= mips_small_data_threshold;
6927 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6928 anchors for small data: the GP register acts as an anchor in that
6929 case. We also don't want to use them for PC-relative accesses,
6930 where the PC acts as an anchor. */
6932 static bool
6933 mips_use_anchors_for_symbol_p (const_rtx symbol)
6935 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6937 case SYMBOL_PC_RELATIVE:
6938 case SYMBOL_GP_RELATIVE:
6939 return false;
6941 default:
6942 return default_use_anchors_for_symbol_p (symbol);
6946 /* The MIPS debug format wants all automatic variables and arguments
6947 to be in terms of the virtual frame pointer (stack pointer before
6948 any adjustment in the function), while the MIPS 3.0 linker wants
6949 the frame pointer to be the stack pointer after the initial
6950 adjustment. So, we do the adjustment here. The arg pointer (which
6951 is eliminated) points to the virtual frame pointer, while the frame
6952 pointer (which may be eliminated) points to the stack pointer after
6953 the initial adjustments. */
6955 HOST_WIDE_INT
6956 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6958 rtx offset2 = const0_rtx;
6959 rtx reg = eliminate_constant_term (addr, &offset2);
6961 if (offset == 0)
6962 offset = INTVAL (offset2);
6964 if (reg == stack_pointer_rtx
6965 || reg == frame_pointer_rtx
6966 || reg == hard_frame_pointer_rtx)
6968 offset -= cfun->machine->frame.total_size;
6969 if (reg == hard_frame_pointer_rtx)
6970 offset += cfun->machine->frame.hard_frame_pointer_offset;
6973 /* sdbout_parms does not want this to crash for unrecognized cases. */
6974 #if 0
6975 else if (reg != arg_pointer_rtx)
6976 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6977 addr);
6978 #endif
6980 return offset;
6983 /* Implement ASM_OUTPUT_EXTERNAL. */
6985 void
6986 mips_output_external (FILE *file, tree decl, const char *name)
6988 default_elf_asm_output_external (file, decl, name);
6990 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6991 set in order to avoid putting out names that are never really
6992 used. */
6993 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6995 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6997 /* When using assembler macros, emit .extern directives for
6998 all small-data externs so that the assembler knows how
6999 big they are.
7001 In most cases it would be safe (though pointless) to emit
7002 .externs for other symbols too. One exception is when an
7003 object is within the -G limit but declared by the user to
7004 be in a section other than .sbss or .sdata. */
7005 fputs ("\t.extern\t", file);
7006 assemble_name (file, name);
7007 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7008 int_size_in_bytes (TREE_TYPE (decl)));
7010 else if (TARGET_IRIX
7011 && mips_abi == ABI_32
7012 && TREE_CODE (decl) == FUNCTION_DECL)
7014 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7015 `.global name .text' directive for every used but
7016 undefined function. If we don't, the linker may perform
7017 an optimization (skipping over the insns that set $gp)
7018 when it is unsafe. */
7019 fputs ("\t.globl ", file);
7020 assemble_name (file, name);
7021 fputs (" .text\n", file);
7026 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7028 void
7029 mips_output_filename (FILE *stream, const char *name)
7031 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7032 directives. */
7033 if (write_symbols == DWARF2_DEBUG)
7034 return;
7035 else if (mips_output_filename_first_time)
7037 mips_output_filename_first_time = 0;
7038 num_source_filenames += 1;
7039 current_function_file = name;
7040 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7041 output_quoted_string (stream, name);
7042 putc ('\n', stream);
7044 /* If we are emitting stabs, let dbxout.c handle this (except for
7045 the mips_output_filename_first_time case). */
7046 else if (write_symbols == DBX_DEBUG)
7047 return;
7048 else if (name != current_function_file
7049 && strcmp (name, current_function_file) != 0)
7051 num_source_filenames += 1;
7052 current_function_file = name;
7053 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7054 output_quoted_string (stream, name);
7055 putc ('\n', stream);
7059 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7061 static void ATTRIBUTE_UNUSED
7062 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7064 switch (size)
7066 case 4:
7067 fputs ("\t.dtprelword\t", file);
7068 break;
7070 case 8:
7071 fputs ("\t.dtpreldword\t", file);
7072 break;
7074 default:
7075 gcc_unreachable ();
7077 output_addr_const (file, x);
7078 fputs ("+0x8000", file);
7081 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7083 static rtx
7084 mips_dwarf_register_span (rtx reg)
7086 rtx high, low;
7087 enum machine_mode mode;
7089 /* By default, GCC maps increasing register numbers to increasing
7090 memory locations, but paired FPRs are always little-endian,
7091 regardless of the prevailing endianness. */
7092 mode = GET_MODE (reg);
7093 if (FP_REG_P (REGNO (reg))
7094 && TARGET_BIG_ENDIAN
7095 && MAX_FPRS_PER_FMT > 1
7096 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7098 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7099 high = mips_subword (reg, true);
7100 low = mips_subword (reg, false);
7101 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7104 return NULL_RTX;
7107 /* Implement ASM_OUTPUT_ASCII. */
7109 void
7110 mips_output_ascii (FILE *stream, const char *string, size_t len)
7112 size_t i;
7113 int cur_pos;
7115 cur_pos = 17;
7116 fprintf (stream, "\t.ascii\t\"");
7117 for (i = 0; i < len; i++)
7119 int c;
7121 c = (unsigned char) string[i];
7122 if (ISPRINT (c))
7124 if (c == '\\' || c == '\"')
7126 putc ('\\', stream);
7127 cur_pos++;
7129 putc (c, stream);
7130 cur_pos++;
7132 else
7134 fprintf (stream, "\\%03o", c);
7135 cur_pos += 4;
7138 if (cur_pos > 72 && i+1 < len)
7140 cur_pos = 17;
7141 fprintf (stream, "\"\n\t.ascii\t\"");
7144 fprintf (stream, "\"\n");
7147 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7148 macros, mark the symbol as written so that mips_asm_output_external
7149 won't emit an .extern for it. STREAM is the output file, NAME is the
7150 name of the symbol, INIT_STRING is the string that should be written
7151 before the symbol and FINAL_STRING is the string that should be
7152 written after it. FINAL_STRING is a printf format that consumes the
7153 remaining arguments. */
7155 void
7156 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7157 const char *final_string, ...)
7159 va_list ap;
7161 fputs (init_string, stream);
7162 assemble_name (stream, name);
7163 va_start (ap, final_string);
7164 vfprintf (stream, final_string, ap);
7165 va_end (ap);
7167 if (!TARGET_EXPLICIT_RELOCS)
7169 tree name_tree = get_identifier (name);
7170 TREE_ASM_WRITTEN (name_tree) = 1;
7174 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7175 NAME is the name of the object and ALIGN is the required alignment
7176 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7177 alignment argument. */
7179 void
7180 mips_declare_common_object (FILE *stream, const char *name,
7181 const char *init_string,
7182 unsigned HOST_WIDE_INT size,
7183 unsigned int align, bool takes_alignment_p)
7185 if (!takes_alignment_p)
7187 size += (align / BITS_PER_UNIT) - 1;
7188 size -= size % (align / BITS_PER_UNIT);
7189 mips_declare_object (stream, name, init_string,
7190 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7192 else
7193 mips_declare_object (stream, name, init_string,
7194 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7195 size, align / BITS_PER_UNIT);
7198 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7199 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7201 void
7202 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7203 unsigned HOST_WIDE_INT size,
7204 unsigned int align)
7206 /* If the target wants uninitialized const declarations in
7207 .rdata then don't put them in .comm. */
7208 if (TARGET_EMBEDDED_DATA
7209 && TARGET_UNINIT_CONST_IN_RODATA
7210 && TREE_CODE (decl) == VAR_DECL
7211 && TREE_READONLY (decl)
7212 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7214 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7215 targetm.asm_out.globalize_label (stream, name);
7217 switch_to_section (readonly_data_section);
7218 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7219 mips_declare_object (stream, name, "",
7220 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7221 size);
7223 else
7224 mips_declare_common_object (stream, name, "\n\t.comm\t",
7225 size, align, true);
7228 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7229 extern int size_directive_output;
7231 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7232 definitions except that it uses mips_declare_object to emit the label. */
7234 void
7235 mips_declare_object_name (FILE *stream, const char *name,
7236 tree decl ATTRIBUTE_UNUSED)
7238 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7239 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7240 #endif
7242 size_directive_output = 0;
7243 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7245 HOST_WIDE_INT size;
7247 size_directive_output = 1;
7248 size = int_size_in_bytes (TREE_TYPE (decl));
7249 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7252 mips_declare_object (stream, name, "", ":\n");
7255 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7257 void
7258 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7260 const char *name;
7262 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7263 if (!flag_inhibit_size_directive
7264 && DECL_SIZE (decl) != 0
7265 && !at_end
7266 && top_level
7267 && DECL_INITIAL (decl) == error_mark_node
7268 && !size_directive_output)
7270 HOST_WIDE_INT size;
7272 size_directive_output = 1;
7273 size = int_size_in_bytes (TREE_TYPE (decl));
7274 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7277 #endif
7279 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7280 with the current ABI. */
7282 static const char *
7283 mips_mdebug_abi_name (void)
7285 switch (mips_abi)
7287 case ABI_32:
7288 return "abi32";
7289 case ABI_O64:
7290 return "abiO64";
7291 case ABI_N32:
7292 return "abiN32";
7293 case ABI_64:
7294 return "abiN64";
7295 case ABI_EABI:
7296 return TARGET_64BIT ? "eabi64" : "eabi32";
7297 default:
7298 gcc_unreachable ();
7302 /* Implement TARGET_ASM_FILE_START. */
7304 static void
7305 mips_file_start (void)
7307 default_file_start ();
7309 /* Generate a special section to describe the ABI switches used to
7310 produce the resultant binary. This is unnecessary on IRIX and
7311 causes unwanted warnings from the native linker. */
7312 if (!TARGET_IRIX)
7314 /* Record the ABI itself. Modern versions of binutils encode
7315 this information in the ELF header flags, but GDB needs the
7316 information in order to correctly debug binaries produced by
7317 older binutils. See the function mips_gdbarch_init in
7318 gdb/mips-tdep.c. */
7319 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7320 mips_mdebug_abi_name ());
7322 /* There is no ELF header flag to distinguish long32 forms of the
7323 EABI from long64 forms. Emit a special section to help tools
7324 such as GDB. Do the same for o64, which is sometimes used with
7325 -mlong64. */
7326 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7327 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7328 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7330 #ifdef HAVE_AS_GNU_ATTRIBUTE
7331 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7332 (TARGET_HARD_FLOAT_ABI
7333 ? (TARGET_DOUBLE_FLOAT
7334 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7335 #endif
7338 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7339 if (TARGET_ABICALLS)
7340 fprintf (asm_out_file, "\t.abicalls\n");
7342 if (flag_verbose_asm)
7343 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7344 ASM_COMMENT_START,
7345 mips_small_data_threshold, mips_arch_info->name, mips_isa);
7348 /* Make the last instruction frame-related and note that it performs
7349 the operation described by FRAME_PATTERN. */
7351 static void
7352 mips_set_frame_expr (rtx frame_pattern)
7354 rtx insn;
7356 insn = get_last_insn ();
7357 RTX_FRAME_RELATED_P (insn) = 1;
7358 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7359 frame_pattern,
7360 REG_NOTES (insn));
7363 /* Return a frame-related rtx that stores REG at MEM.
7364 REG must be a single register. */
7366 static rtx
7367 mips_frame_set (rtx mem, rtx reg)
7369 rtx set;
7371 /* If we're saving the return address register and the DWARF return
7372 address column differs from the hard register number, adjust the
7373 note reg to refer to the former. */
7374 if (REGNO (reg) == GP_REG_FIRST + 31
7375 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7376 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7378 set = gen_rtx_SET (VOIDmode, mem, reg);
7379 RTX_FRAME_RELATED_P (set) = 1;
7381 return set;
7384 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7385 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7386 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7387 static const unsigned char mips16e_s2_s8_regs[] = {
7388 30, 23, 22, 21, 20, 19, 18
7390 static const unsigned char mips16e_a0_a3_regs[] = {
7391 4, 5, 6, 7
7394 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7395 ordered from the uppermost in memory to the lowest in memory. */
7396 static const unsigned char mips16e_save_restore_regs[] = {
7397 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7400 /* Return the index of the lowest X in the range [0, SIZE) for which
7401 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7403 static unsigned int
7404 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7405 unsigned int size)
7407 unsigned int i;
7409 for (i = 0; i < size; i++)
7410 if (BITSET_P (mask, regs[i]))
7411 break;
7413 return i;
7416 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7417 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7418 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7419 is true for all indexes (X, SIZE). */
7421 static void
7422 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7423 unsigned int size, unsigned int *num_regs_ptr)
7425 unsigned int i;
7427 i = mips16e_find_first_register (*mask_ptr, regs, size);
7428 for (i++; i < size; i++)
7429 if (!BITSET_P (*mask_ptr, regs[i]))
7431 *num_regs_ptr += 1;
7432 *mask_ptr |= 1 << regs[i];
7436 /* Return a simplified form of X using the register values in REG_VALUES.
7437 REG_VALUES[R] is the last value assigned to hard register R, or null
7438 if R has not been modified.
7440 This function is rather limited, but is good enough for our purposes. */
7442 static rtx
7443 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7445 x = avoid_constant_pool_reference (x);
7447 if (UNARY_P (x))
7449 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7450 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7451 x0, GET_MODE (XEXP (x, 0)));
7454 if (ARITHMETIC_P (x))
7456 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7457 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7458 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7461 if (REG_P (x)
7462 && reg_values[REGNO (x)]
7463 && !rtx_unstable_p (reg_values[REGNO (x)]))
7464 return reg_values[REGNO (x)];
7466 return x;
7469 /* Return true if (set DEST SRC) stores an argument register into its
7470 caller-allocated save slot, storing the number of that argument
7471 register in *REGNO_PTR if so. REG_VALUES is as for
7472 mips16e_collect_propagate_value. */
7474 static bool
7475 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7476 unsigned int *regno_ptr)
7478 unsigned int argno, regno;
7479 HOST_WIDE_INT offset, required_offset;
7480 rtx addr, base;
7482 /* Check that this is a word-mode store. */
7483 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7484 return false;
7486 /* Check that the register being saved is an unmodified argument
7487 register. */
7488 regno = REGNO (src);
7489 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7490 return false;
7491 argno = regno - GP_ARG_FIRST;
7493 /* Check whether the address is an appropriate stack-pointer or
7494 frame-pointer access. */
7495 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7496 mips_split_plus (addr, &base, &offset);
7497 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7498 if (base == hard_frame_pointer_rtx)
7499 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7500 else if (base != stack_pointer_rtx)
7501 return false;
7502 if (offset != required_offset)
7503 return false;
7505 *regno_ptr = regno;
7506 return true;
7509 /* A subroutine of mips_expand_prologue, called only when generating
7510 MIPS16e SAVE instructions. Search the start of the function for any
7511 instructions that save argument registers into their caller-allocated
7512 save slots. Delete such instructions and return a value N such that
7513 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7514 instructions redundant. */
7516 static unsigned int
7517 mips16e_collect_argument_saves (void)
7519 rtx reg_values[FIRST_PSEUDO_REGISTER];
7520 rtx insn, next, set, dest, src;
7521 unsigned int nargs, regno;
7523 push_topmost_sequence ();
7524 nargs = 0;
7525 memset (reg_values, 0, sizeof (reg_values));
7526 for (insn = get_insns (); insn; insn = next)
7528 next = NEXT_INSN (insn);
7529 if (NOTE_P (insn))
7530 continue;
7532 if (!INSN_P (insn))
7533 break;
7535 set = PATTERN (insn);
7536 if (GET_CODE (set) != SET)
7537 break;
7539 dest = SET_DEST (set);
7540 src = SET_SRC (set);
7541 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7543 if (!BITSET_P (cfun->machine->frame.mask, regno))
7545 delete_insn (insn);
7546 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7549 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7550 reg_values[REGNO (dest)]
7551 = mips16e_collect_propagate_value (src, reg_values);
7552 else
7553 break;
7555 pop_topmost_sequence ();
7557 return nargs;
7560 /* Return a move between register REGNO and memory location SP + OFFSET.
7561 Make the move a load if RESTORE_P, otherwise make it a frame-related
7562 store. */
7564 static rtx
7565 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7566 unsigned int regno)
7568 rtx reg, mem;
7570 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7571 reg = gen_rtx_REG (SImode, regno);
7572 return (restore_p
7573 ? gen_rtx_SET (VOIDmode, reg, mem)
7574 : mips_frame_set (mem, reg));
7577 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7578 The instruction must:
7580 - Allocate or deallocate SIZE bytes in total; SIZE is known
7581 to be nonzero.
7583 - Save or restore as many registers in *MASK_PTR as possible.
7584 The instruction saves the first registers at the top of the
7585 allocated area, with the other registers below it.
7587 - Save NARGS argument registers above the allocated area.
7589 (NARGS is always zero if RESTORE_P.)
7591 The SAVE and RESTORE instructions cannot save and restore all general
7592 registers, so there may be some registers left over for the caller to
7593 handle. Destructively modify *MASK_PTR so that it contains the registers
7594 that still need to be saved or restored. The caller can save these
7595 registers in the memory immediately below *OFFSET_PTR, which is a
7596 byte offset from the bottom of the allocated stack area. */
7598 static rtx
7599 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7600 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7601 HOST_WIDE_INT size)
7603 rtx pattern, set;
7604 HOST_WIDE_INT offset, top_offset;
7605 unsigned int i, regno;
7606 int n;
7608 gcc_assert (cfun->machine->frame.num_fp == 0);
7610 /* Calculate the number of elements in the PARALLEL. We need one element
7611 for the stack adjustment, one for each argument register save, and one
7612 for each additional register move. */
7613 n = 1 + nargs;
7614 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7615 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7616 n++;
7618 /* Create the final PARALLEL. */
7619 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7620 n = 0;
7622 /* Add the stack pointer adjustment. */
7623 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7624 plus_constant (stack_pointer_rtx,
7625 restore_p ? size : -size));
7626 RTX_FRAME_RELATED_P (set) = 1;
7627 XVECEXP (pattern, 0, n++) = set;
7629 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7630 top_offset = restore_p ? size : 0;
7632 /* Save the arguments. */
7633 for (i = 0; i < nargs; i++)
7635 offset = top_offset + i * UNITS_PER_WORD;
7636 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7637 XVECEXP (pattern, 0, n++) = set;
7640 /* Then fill in the other register moves. */
7641 offset = top_offset;
7642 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7644 regno = mips16e_save_restore_regs[i];
7645 if (BITSET_P (*mask_ptr, regno))
7647 offset -= UNITS_PER_WORD;
7648 set = mips16e_save_restore_reg (restore_p, offset, regno);
7649 XVECEXP (pattern, 0, n++) = set;
7650 *mask_ptr &= ~(1 << regno);
7654 /* Tell the caller what offset it should use for the remaining registers. */
7655 *offset_ptr = size + (offset - top_offset);
7657 gcc_assert (n == XVECLEN (pattern, 0));
7659 return pattern;
7662 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7663 pointer. Return true if PATTERN matches the kind of instruction
7664 generated by mips16e_build_save_restore. If INFO is nonnull,
7665 initialize it when returning true. */
7667 bool
7668 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7669 struct mips16e_save_restore_info *info)
7671 unsigned int i, nargs, mask, extra;
7672 HOST_WIDE_INT top_offset, save_offset, offset;
7673 rtx set, reg, mem, base;
7674 int n;
7676 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7677 return false;
7679 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7680 top_offset = adjust > 0 ? adjust : 0;
7682 /* Interpret all other members of the PARALLEL. */
7683 save_offset = top_offset - UNITS_PER_WORD;
7684 mask = 0;
7685 nargs = 0;
7686 i = 0;
7687 for (n = 1; n < XVECLEN (pattern, 0); n++)
7689 /* Check that we have a SET. */
7690 set = XVECEXP (pattern, 0, n);
7691 if (GET_CODE (set) != SET)
7692 return false;
7694 /* Check that the SET is a load (if restoring) or a store
7695 (if saving). */
7696 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7697 if (!MEM_P (mem))
7698 return false;
7700 /* Check that the address is the sum of the stack pointer and a
7701 possibly-zero constant offset. */
7702 mips_split_plus (XEXP (mem, 0), &base, &offset);
7703 if (base != stack_pointer_rtx)
7704 return false;
7706 /* Check that SET's other operand is a register. */
7707 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7708 if (!REG_P (reg))
7709 return false;
7711 /* Check for argument saves. */
7712 if (offset == top_offset + nargs * UNITS_PER_WORD
7713 && REGNO (reg) == GP_ARG_FIRST + nargs)
7714 nargs++;
7715 else if (offset == save_offset)
7717 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7718 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7719 return false;
7721 mask |= 1 << REGNO (reg);
7722 save_offset -= UNITS_PER_WORD;
7724 else
7725 return false;
7728 /* Check that the restrictions on register ranges are met. */
7729 extra = 0;
7730 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7731 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7732 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7733 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7734 if (extra != 0)
7735 return false;
7737 /* Make sure that the topmost argument register is not saved twice.
7738 The checks above ensure that the same is then true for the other
7739 argument registers. */
7740 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7741 return false;
7743 /* Pass back information, if requested. */
7744 if (info)
7746 info->nargs = nargs;
7747 info->mask = mask;
7748 info->size = (adjust > 0 ? adjust : -adjust);
7751 return true;
7754 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7755 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7756 the null terminator. */
7758 static char *
7759 mips16e_add_register_range (char *s, unsigned int min_reg,
7760 unsigned int max_reg)
7762 if (min_reg != max_reg)
7763 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7764 else
7765 s += sprintf (s, ",%s", reg_names[min_reg]);
7766 return s;
7769 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7770 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7772 const char *
7773 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7775 static char buffer[300];
7777 struct mips16e_save_restore_info info;
7778 unsigned int i, end;
7779 char *s;
7781 /* Parse the pattern. */
7782 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7783 gcc_unreachable ();
7785 /* Add the mnemonic. */
7786 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7787 s += strlen (s);
7789 /* Save the arguments. */
7790 if (info.nargs > 1)
7791 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7792 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7793 else if (info.nargs == 1)
7794 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7796 /* Emit the amount of stack space to allocate or deallocate. */
7797 s += sprintf (s, "%d", (int) info.size);
7799 /* Save or restore $16. */
7800 if (BITSET_P (info.mask, 16))
7801 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7803 /* Save or restore $17. */
7804 if (BITSET_P (info.mask, 17))
7805 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7807 /* Save or restore registers in the range $s2...$s8, which
7808 mips16e_s2_s8_regs lists in decreasing order. Note that this
7809 is a software register range; the hardware registers are not
7810 numbered consecutively. */
7811 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7812 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7813 if (i < end)
7814 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7815 mips16e_s2_s8_regs[i]);
7817 /* Save or restore registers in the range $a0...$a3. */
7818 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7819 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7820 if (i < end)
7821 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7822 mips16e_a0_a3_regs[end - 1]);
7824 /* Save or restore $31. */
7825 if (BITSET_P (info.mask, 31))
7826 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7828 return buffer;
7831 /* Return true if the current function has an insn that implicitly
7832 refers to $gp. */
7834 static bool
7835 mips_function_has_gp_insn (void)
7837 /* Don't bother rechecking if we found one last time. */
7838 if (!cfun->machine->has_gp_insn_p)
7840 rtx insn;
7842 push_topmost_sequence ();
7843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7844 if (USEFUL_INSN_P (insn)
7845 && (get_attr_got (insn) != GOT_UNSET
7846 || mips_small_data_pattern_p (PATTERN (insn))))
7848 cfun->machine->has_gp_insn_p = true;
7849 break;
7851 pop_topmost_sequence ();
7853 return cfun->machine->has_gp_insn_p;
7856 /* Return the register that should be used as the global pointer
7857 within this function. Return 0 if the function doesn't need
7858 a global pointer. */
7860 static unsigned int
7861 mips_global_pointer (void)
7863 unsigned int regno;
7865 /* $gp is always available unless we're using a GOT. */
7866 if (!TARGET_USE_GOT)
7867 return GLOBAL_POINTER_REGNUM;
7869 /* We must always provide $gp when it is used implicitly. */
7870 if (!TARGET_EXPLICIT_RELOCS)
7871 return GLOBAL_POINTER_REGNUM;
7873 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7874 a valid gp. */
7875 if (crtl->profile)
7876 return GLOBAL_POINTER_REGNUM;
7878 /* If the function has a nonlocal goto, $gp must hold the correct
7879 global pointer for the target function. */
7880 if (crtl->has_nonlocal_goto)
7881 return GLOBAL_POINTER_REGNUM;
7883 /* If the gp is never referenced, there's no need to initialize it.
7884 Note that reload can sometimes introduce constant pool references
7885 into a function that otherwise didn't need them. For example,
7886 suppose we have an instruction like:
7888 (set (reg:DF R1) (float:DF (reg:SI R2)))
7890 If R2 turns out to be constant such as 1, the instruction may have a
7891 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7892 using this constant if R2 doesn't get allocated to a register.
7894 In cases like these, reload will have added the constant to the pool
7895 but no instruction will yet refer to it. */
7896 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7897 && !crtl->uses_const_pool
7898 && !mips_function_has_gp_insn ())
7899 return 0;
7901 /* We need a global pointer, but perhaps we can use a call-clobbered
7902 register instead of $gp. */
7903 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7904 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7905 if (!df_regs_ever_live_p (regno)
7906 && call_really_used_regs[regno]
7907 && !fixed_regs[regno]
7908 && regno != PIC_FUNCTION_ADDR_REGNUM)
7909 return regno;
7911 return GLOBAL_POINTER_REGNUM;
7914 /* Return true if the current function returns its value in a floating-point
7915 register in MIPS16 mode. */
7917 static bool
7918 mips16_cfun_returns_in_fpr_p (void)
7920 tree return_type = DECL_RESULT (current_function_decl);
7921 return (TARGET_MIPS16
7922 && TARGET_HARD_FLOAT_ABI
7923 && !aggregate_value_p (return_type, current_function_decl)
7924 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7927 /* Return true if the current function must save register REGNO. */
7929 static bool
7930 mips_save_reg_p (unsigned int regno)
7932 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7933 if we have not chosen a call-clobbered substitute. */
7934 if (regno == GLOBAL_POINTER_REGNUM)
7935 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7937 /* Check call-saved registers. */
7938 if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
7939 && !call_really_used_regs[regno])
7940 return true;
7942 /* Save both registers in an FPR pair if either one is used. This is
7943 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7944 register to be used without the even register. */
7945 if (FP_REG_P (regno)
7946 && MAX_FPRS_PER_FMT == 2
7947 && df_regs_ever_live_p (regno + 1)
7948 && !call_really_used_regs[regno + 1])
7949 return true;
7951 /* We need to save the old frame pointer before setting up a new one. */
7952 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7953 return true;
7955 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7956 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7957 return true;
7959 /* We need to save the incoming return address if it is ever clobbered
7960 within the function, if __builtin_eh_return is being used to set a
7961 different return address, or if a stub is being used to return a
7962 value in FPRs. */
7963 if (regno == GP_REG_FIRST + 31
7964 && (df_regs_ever_live_p (regno)
7965 || crtl->calls_eh_return
7966 || mips16_cfun_returns_in_fpr_p ()))
7967 return true;
7969 return false;
7972 /* Populate the current function's mips_frame_info structure.
7974 MIPS stack frames look like:
7976 +-------------------------------+
7978 | incoming stack arguments |
7980 +-------------------------------+
7982 | caller-allocated save area |
7983 A | for register arguments |
7985 +-------------------------------+ <-- incoming stack pointer
7987 | callee-allocated save area |
7988 B | for arguments that are |
7989 | split between registers and |
7990 | the stack |
7992 +-------------------------------+ <-- arg_pointer_rtx
7994 C | callee-allocated save area |
7995 | for register varargs |
7997 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
7998 | | + UNITS_PER_HWFPVALUE
7999 | FPR save area |
8001 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8002 | | + UNITS_PER_WORD
8003 | GPR save area |
8005 +-------------------------------+
8006 | | \
8007 | local variables | | var_size
8008 | | /
8009 +-------------------------------+
8010 | | \
8011 | $gp save area | | cprestore_size
8012 | | /
8013 P +-------------------------------+ <-- hard_frame_pointer_rtx for
8014 | | MIPS16 code
8015 | outgoing stack arguments |
8017 +-------------------------------+
8019 | caller-allocated save area |
8020 | for register arguments |
8022 +-------------------------------+ <-- stack_pointer_rtx
8023 frame_pointer_rtx
8024 hard_frame_pointer_rtx for
8025 non-MIPS16 code.
8027 At least two of A, B and C will be empty.
8029 Dynamic stack allocations such as alloca insert data at point P.
8030 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8031 hard_frame_pointer_rtx unchanged. */
8033 static void
8034 mips_compute_frame_info (void)
8036 struct mips_frame_info *frame;
8037 HOST_WIDE_INT offset, size;
8038 unsigned int regno, i;
8040 frame = &cfun->machine->frame;
8041 memset (frame, 0, sizeof (*frame));
8042 size = get_frame_size ();
8044 cfun->machine->global_pointer = mips_global_pointer ();
8046 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8047 area and the $gp save slot. This area isn't needed in leaf functions,
8048 but if the target-independent frame size is nonzero, we're committed
8049 to allocating it anyway. */
8050 if (size == 0 && current_function_is_leaf)
8052 /* The MIPS 3.0 linker does not like functions that dynamically
8053 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8054 looks like we are trying to create a second frame pointer to the
8055 function, so allocate some stack space to make it happy. */
8056 if (cfun->calls_alloca)
8057 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8058 else
8059 frame->args_size = 0;
8060 frame->cprestore_size = 0;
8062 else
8064 frame->args_size = crtl->outgoing_args_size;
8065 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8067 offset = frame->args_size + frame->cprestore_size;
8069 /* Move above the local variables. */
8070 frame->var_size = MIPS_STACK_ALIGN (size);
8071 offset += frame->var_size;
8073 /* Find out which GPRs we need to save. */
8074 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8075 if (mips_save_reg_p (regno))
8077 frame->num_gp++;
8078 frame->mask |= 1 << (regno - GP_REG_FIRST);
8081 /* If this function calls eh_return, we must also save and restore the
8082 EH data registers. */
8083 if (crtl->calls_eh_return)
8084 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8086 frame->num_gp++;
8087 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8090 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8091 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8092 save all later registers too. */
8093 if (GENERATE_MIPS16E_SAVE_RESTORE)
8095 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8096 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8097 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8098 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8101 /* Move above the GPR save area. */
8102 if (frame->num_gp > 0)
8104 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8105 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8108 /* Find out which FPRs we need to save. This loop must iterate over
8109 the same space as its companion in mips_for_each_saved_reg. */
8110 if (TARGET_HARD_FLOAT)
8111 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8112 if (mips_save_reg_p (regno))
8114 frame->num_fp += MAX_FPRS_PER_FMT;
8115 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8118 /* Move above the FPR save area. */
8119 if (frame->num_fp > 0)
8121 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8122 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8125 /* Move above the callee-allocated varargs save area. */
8126 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8127 frame->arg_pointer_offset = offset;
8129 /* Move above the callee-allocated area for pretend stack arguments. */
8130 offset += crtl->args.pretend_args_size;
8131 frame->total_size = offset;
8133 /* Work out the offsets of the save areas from the top of the frame. */
8134 if (frame->gp_sp_offset > 0)
8135 frame->gp_save_offset = frame->gp_sp_offset - offset;
8136 if (frame->fp_sp_offset > 0)
8137 frame->fp_save_offset = frame->fp_sp_offset - offset;
8139 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8140 arguments. This tends to increase the chances of using unextended
8141 instructions for local variables and incoming arguments. */
8142 if (TARGET_MIPS16)
8143 frame->hard_frame_pointer_offset = frame->args_size;
8146 /* Return the style of GP load sequence that is being used for the
8147 current function. */
8149 enum mips_loadgp_style
8150 mips_current_loadgp_style (void)
8152 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8153 return LOADGP_NONE;
8155 if (TARGET_RTP_PIC)
8156 return LOADGP_RTP;
8158 if (TARGET_ABSOLUTE_ABICALLS)
8159 return LOADGP_ABSOLUTE;
8161 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8164 /* Implement FRAME_POINTER_REQUIRED. */
8166 bool
8167 mips_frame_pointer_required (void)
8169 /* If the function contains dynamic stack allocations, we need to
8170 use the frame pointer to access the static parts of the frame. */
8171 if (cfun->calls_alloca)
8172 return true;
8174 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8175 reload may be unable to compute the address of a local variable,
8176 since there is no way to add a large constant to the stack pointer
8177 without using a second temporary register. */
8178 if (TARGET_MIPS16)
8180 mips_compute_frame_info ();
8181 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8182 return true;
8185 return false;
8188 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8189 or argument pointer. TO is either the stack pointer or hard frame
8190 pointer. */
8192 HOST_WIDE_INT
8193 mips_initial_elimination_offset (int from, int to)
8195 HOST_WIDE_INT offset;
8197 mips_compute_frame_info ();
8199 /* Set OFFSET to the offset from the soft frame pointer, which is also
8200 the offset from the end-of-prologue stack pointer. */
8201 switch (from)
8203 case FRAME_POINTER_REGNUM:
8204 offset = 0;
8205 break;
8207 case ARG_POINTER_REGNUM:
8208 offset = cfun->machine->frame.arg_pointer_offset;
8209 break;
8211 default:
8212 gcc_unreachable ();
8215 if (to == HARD_FRAME_POINTER_REGNUM)
8216 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8218 return offset;
8221 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8223 static void
8224 mips_extra_live_on_entry (bitmap regs)
8226 if (TARGET_USE_GOT)
8228 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8229 the global pointer. */
8230 if (!TARGET_ABSOLUTE_ABICALLS)
8231 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8233 /* See the comment above load_call<mode> for details. */
8234 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8238 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8239 previous frame. */
8242 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8244 if (count != 0)
8245 return const0_rtx;
8247 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8250 /* Emit code to change the current function's return address to
8251 ADDRESS. SCRATCH is available as a scratch register, if needed.
8252 ADDRESS and SCRATCH are both word-mode GPRs. */
8254 void
8255 mips_set_return_address (rtx address, rtx scratch)
8257 rtx slot_address;
8259 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8260 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8261 cfun->machine->frame.gp_sp_offset);
8262 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8265 /* Restore $gp from its save slot. Valid only when using o32 or
8266 o64 abicalls. */
8268 void
8269 mips_restore_gp (void)
8271 rtx base, address;
8273 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8275 base = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
8276 address = mips_add_offset (pic_offset_table_rtx, base,
8277 crtl->outgoing_args_size);
8278 mips_emit_move (pic_offset_table_rtx, gen_frame_mem (Pmode, address));
8279 if (!TARGET_EXPLICIT_RELOCS)
8280 emit_insn (gen_blockage ());
8283 /* A function to save or store a register. The first argument is the
8284 register and the second is the stack slot. */
8285 typedef void (*mips_save_restore_fn) (rtx, rtx);
8287 /* Use FN to save or restore register REGNO. MODE is the register's
8288 mode and OFFSET is the offset of its save slot from the current
8289 stack pointer. */
8291 static void
8292 mips_save_restore_reg (enum machine_mode mode, int regno,
8293 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8295 rtx mem;
8297 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8298 fn (gen_rtx_REG (mode, regno), mem);
8301 /* Call FN for each register that is saved by the current function.
8302 SP_OFFSET is the offset of the current stack pointer from the start
8303 of the frame. */
8305 static void
8306 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8308 enum machine_mode fpr_mode;
8309 HOST_WIDE_INT offset;
8310 int regno;
8312 /* Save registers starting from high to low. The debuggers prefer at least
8313 the return register be stored at func+4, and also it allows us not to
8314 need a nop in the epilogue if at least one register is reloaded in
8315 addition to return address. */
8316 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8317 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8318 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8320 mips_save_restore_reg (word_mode, regno, offset, fn);
8321 offset -= UNITS_PER_WORD;
8324 /* This loop must iterate over the same space as its companion in
8325 mips_compute_frame_info. */
8326 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8327 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8328 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8329 regno >= FP_REG_FIRST;
8330 regno -= MAX_FPRS_PER_FMT)
8331 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8333 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8334 offset -= GET_MODE_SIZE (fpr_mode);
8338 /* If we're generating n32 or n64 abicalls, and the current function
8339 does not use $28 as its global pointer, emit a cplocal directive.
8340 Use pic_offset_table_rtx as the argument to the directive. */
8342 static void
8343 mips_output_cplocal (void)
8345 if (!TARGET_EXPLICIT_RELOCS
8346 && cfun->machine->global_pointer > 0
8347 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8348 output_asm_insn (".cplocal %+", 0);
8351 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8353 static void
8354 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8356 const char *fnname;
8358 #ifdef SDB_DEBUGGING_INFO
8359 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8360 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8361 #endif
8363 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8364 floating-point arguments. */
8365 if (TARGET_MIPS16
8366 && TARGET_HARD_FLOAT_ABI
8367 && crtl->args.info.fp_code != 0)
8368 mips16_build_function_stub ();
8370 /* Select the MIPS16 mode for this function. */
8371 if (TARGET_MIPS16)
8372 fprintf (file, "\t.set\tmips16\n");
8373 else
8374 fprintf (file, "\t.set\tnomips16\n");
8376 if (!FUNCTION_NAME_ALREADY_DECLARED)
8378 /* Get the function name the same way that toplev.c does before calling
8379 assemble_start_function. This is needed so that the name used here
8380 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8381 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8383 if (!flag_inhibit_size_directive)
8385 fputs ("\t.ent\t", file);
8386 assemble_name (file, fnname);
8387 fputs ("\n", file);
8390 assemble_name (file, fnname);
8391 fputs (":\n", file);
8394 /* Stop mips_file_end from treating this function as external. */
8395 if (TARGET_IRIX && mips_abi == ABI_32)
8396 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8398 /* Output MIPS-specific frame information. */
8399 if (!flag_inhibit_size_directive)
8401 const struct mips_frame_info *frame;
8403 frame = &cfun->machine->frame;
8405 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8406 fprintf (file,
8407 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8408 "# vars= " HOST_WIDE_INT_PRINT_DEC
8409 ", regs= %d/%d"
8410 ", args= " HOST_WIDE_INT_PRINT_DEC
8411 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8412 reg_names[frame_pointer_needed
8413 ? HARD_FRAME_POINTER_REGNUM
8414 : STACK_POINTER_REGNUM],
8415 (frame_pointer_needed
8416 ? frame->total_size - frame->hard_frame_pointer_offset
8417 : frame->total_size),
8418 reg_names[GP_REG_FIRST + 31],
8419 frame->var_size,
8420 frame->num_gp, frame->num_fp,
8421 frame->args_size,
8422 frame->cprestore_size);
8424 /* .mask MASK, OFFSET. */
8425 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8426 frame->mask, frame->gp_save_offset);
8428 /* .fmask MASK, OFFSET. */
8429 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8430 frame->fmask, frame->fp_save_offset);
8433 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8434 Also emit the ".set noreorder; .set nomacro" sequence for functions
8435 that need it. */
8436 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8438 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
8439 if (!cfun->machine->all_noreorder_p)
8440 output_asm_insn ("%(.cpload\t%^%)", 0);
8441 else
8442 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8444 else if (cfun->machine->all_noreorder_p)
8445 output_asm_insn ("%(%<", 0);
8447 /* Tell the assembler which register we're using as the global
8448 pointer. This is needed for thunks, since they can use either
8449 explicit relocs or assembler macros. */
8450 mips_output_cplocal ();
8453 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
8455 static void
8456 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8457 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8459 /* Reinstate the normal $gp. */
8460 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8461 mips_output_cplocal ();
8463 if (cfun->machine->all_noreorder_p)
8465 /* Avoid using %>%) since it adds excess whitespace. */
8466 output_asm_insn (".set\tmacro", 0);
8467 output_asm_insn (".set\treorder", 0);
8468 set_noreorder = set_nomacro = 0;
8471 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8473 const char *fnname;
8475 /* Get the function name the same way that toplev.c does before calling
8476 assemble_start_function. This is needed so that the name used here
8477 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8478 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8479 fputs ("\t.end\t", file);
8480 assemble_name (file, fnname);
8481 fputs ("\n", file);
8485 /* Save register REG to MEM. Make the instruction frame-related. */
8487 static void
8488 mips_save_reg (rtx reg, rtx mem)
8490 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8492 rtx x1, x2;
8494 if (mips_split_64bit_move_p (mem, reg))
8495 mips_split_doubleword_move (mem, reg);
8496 else
8497 mips_emit_move (mem, reg);
8499 x1 = mips_frame_set (mips_subword (mem, false),
8500 mips_subword (reg, false));
8501 x2 = mips_frame_set (mips_subword (mem, true),
8502 mips_subword (reg, true));
8503 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8505 else
8507 if (TARGET_MIPS16
8508 && REGNO (reg) != GP_REG_FIRST + 31
8509 && !M16_REG_P (REGNO (reg)))
8511 /* Save a non-MIPS16 register by moving it through a temporary.
8512 We don't need to do this for $31 since there's a special
8513 instruction for it. */
8514 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8515 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8517 else
8518 mips_emit_move (mem, reg);
8520 mips_set_frame_expr (mips_frame_set (mem, reg));
8524 /* The __gnu_local_gp symbol. */
8526 static GTY(()) rtx mips_gnu_local_gp;
8528 /* If we're generating n32 or n64 abicalls, emit instructions
8529 to set up the global pointer. */
8531 static void
8532 mips_emit_loadgp (void)
8534 rtx addr, offset, incoming_address, base, index, pic_reg;
8536 pic_reg = pic_offset_table_rtx;
8537 switch (mips_current_loadgp_style ())
8539 case LOADGP_ABSOLUTE:
8540 if (mips_gnu_local_gp == NULL)
8542 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8543 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8545 emit_insn (Pmode == SImode
8546 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
8547 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
8548 break;
8550 case LOADGP_NEWABI:
8551 addr = XEXP (DECL_RTL (current_function_decl), 0);
8552 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8553 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8554 emit_insn (Pmode == SImode
8555 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
8556 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
8557 break;
8559 case LOADGP_RTP:
8560 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8561 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8562 emit_insn (Pmode == SImode
8563 ? gen_loadgp_rtp_si (pic_reg, base, index)
8564 : gen_loadgp_rtp_di (pic_reg, base, index));
8565 break;
8567 default:
8568 return;
8570 /* Emit a blockage if there are implicit uses of the GP register.
8571 This includes profiled functions, because FUNCTION_PROFILE uses
8572 a jal macro. */
8573 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
8574 emit_insn (gen_loadgp_blockage ());
8577 /* Expand the "prologue" pattern. */
8579 void
8580 mips_expand_prologue (void)
8582 const struct mips_frame_info *frame;
8583 HOST_WIDE_INT size;
8584 unsigned int nargs;
8585 rtx insn;
8587 if (cfun->machine->global_pointer > 0)
8588 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8590 frame = &cfun->machine->frame;
8591 size = frame->total_size;
8593 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8594 bytes beforehand; this is enough to cover the register save area
8595 without going out of range. */
8596 if ((frame->mask | frame->fmask) != 0)
8598 HOST_WIDE_INT step1;
8600 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8601 if (GENERATE_MIPS16E_SAVE_RESTORE)
8603 HOST_WIDE_INT offset;
8604 unsigned int mask, regno;
8606 /* Try to merge argument stores into the save instruction. */
8607 nargs = mips16e_collect_argument_saves ();
8609 /* Build the save instruction. */
8610 mask = frame->mask;
8611 insn = mips16e_build_save_restore (false, &mask, &offset,
8612 nargs, step1);
8613 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8614 size -= step1;
8616 /* Check if we need to save other registers. */
8617 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8618 if (BITSET_P (mask, regno - GP_REG_FIRST))
8620 offset -= UNITS_PER_WORD;
8621 mips_save_restore_reg (word_mode, regno,
8622 offset, mips_save_reg);
8625 else
8627 insn = gen_add3_insn (stack_pointer_rtx,
8628 stack_pointer_rtx,
8629 GEN_INT (-step1));
8630 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8631 size -= step1;
8632 mips_for_each_saved_reg (size, mips_save_reg);
8636 /* Allocate the rest of the frame. */
8637 if (size > 0)
8639 if (SMALL_OPERAND (-size))
8640 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8641 stack_pointer_rtx,
8642 GEN_INT (-size)))) = 1;
8643 else
8645 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8646 if (TARGET_MIPS16)
8648 /* There are no instructions to add or subtract registers
8649 from the stack pointer, so use the frame pointer as a
8650 temporary. We should always be using a frame pointer
8651 in this case anyway. */
8652 gcc_assert (frame_pointer_needed);
8653 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8654 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8655 hard_frame_pointer_rtx,
8656 MIPS_PROLOGUE_TEMP (Pmode)));
8657 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8659 else
8660 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8661 stack_pointer_rtx,
8662 MIPS_PROLOGUE_TEMP (Pmode)));
8664 /* Describe the combined effect of the previous instructions. */
8665 mips_set_frame_expr
8666 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8667 plus_constant (stack_pointer_rtx, -size)));
8671 /* Set up the frame pointer, if we're using one. */
8672 if (frame_pointer_needed)
8674 HOST_WIDE_INT offset;
8676 offset = frame->hard_frame_pointer_offset;
8677 if (offset == 0)
8679 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8680 RTX_FRAME_RELATED_P (insn) = 1;
8682 else if (SMALL_OPERAND (offset))
8684 insn = gen_add3_insn (hard_frame_pointer_rtx,
8685 stack_pointer_rtx, GEN_INT (offset));
8686 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8688 else
8690 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8691 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8692 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8693 hard_frame_pointer_rtx,
8694 MIPS_PROLOGUE_TEMP (Pmode)));
8695 mips_set_frame_expr
8696 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8697 plus_constant (stack_pointer_rtx, offset)));
8701 mips_emit_loadgp ();
8703 /* Initialize the $gp save slot. */
8704 if (frame->cprestore_size > 0)
8705 emit_insn (gen_cprestore (GEN_INT (crtl->outgoing_args_size)));
8707 /* If we are profiling, make sure no instructions are scheduled before
8708 the call to mcount. */
8709 if (crtl->profile)
8710 emit_insn (gen_blockage ());
8713 /* Emit instructions to restore register REG from slot MEM. */
8715 static void
8716 mips_restore_reg (rtx reg, rtx mem)
8718 /* There's no MIPS16 instruction to load $31 directly. Load into
8719 $7 instead and adjust the return insn appropriately. */
8720 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8721 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
8723 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8725 /* Can't restore directly; move through a temporary. */
8726 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8727 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8729 else
8730 mips_emit_move (reg, mem);
8733 /* Emit any instructions needed before a return. */
8735 void
8736 mips_expand_before_return (void)
8738 /* When using a call-clobbered gp, we start out with unified call
8739 insns that include instructions to restore the gp. We then split
8740 these unified calls after reload. These split calls explicitly
8741 clobber gp, so there is no need to define
8742 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
8744 For consistency, we should also insert an explicit clobber of $28
8745 before return insns, so that the post-reload optimizers know that
8746 the register is not live on exit. */
8747 if (TARGET_CALL_CLOBBERED_GP)
8748 emit_clobber (pic_offset_table_rtx);
8751 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
8752 says which. */
8754 void
8755 mips_expand_epilogue (bool sibcall_p)
8757 const struct mips_frame_info *frame;
8758 HOST_WIDE_INT step1, step2;
8759 rtx base, target;
8761 if (!sibcall_p && mips_can_use_return_insn ())
8763 emit_jump_insn (gen_return ());
8764 return;
8767 /* In MIPS16 mode, if the return value should go into a floating-point
8768 register, we need to call a helper routine to copy it over. */
8769 if (mips16_cfun_returns_in_fpr_p ())
8770 mips16_copy_fpr_return_value ();
8772 /* Split the frame into two. STEP1 is the amount of stack we should
8773 deallocate before restoring the registers. STEP2 is the amount we
8774 should deallocate afterwards.
8776 Start off by assuming that no registers need to be restored. */
8777 frame = &cfun->machine->frame;
8778 step1 = frame->total_size;
8779 step2 = 0;
8781 /* Work out which register holds the frame address. */
8782 if (!frame_pointer_needed)
8783 base = stack_pointer_rtx;
8784 else
8786 base = hard_frame_pointer_rtx;
8787 step1 -= frame->hard_frame_pointer_offset;
8790 /* If we need to restore registers, deallocate as much stack as
8791 possible in the second step without going out of range. */
8792 if ((frame->mask | frame->fmask) != 0)
8794 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8795 step1 -= step2;
8798 /* Set TARGET to BASE + STEP1. */
8799 target = base;
8800 if (step1 > 0)
8802 rtx adjust;
8804 /* Get an rtx for STEP1 that we can add to BASE. */
8805 adjust = GEN_INT (step1);
8806 if (!SMALL_OPERAND (step1))
8808 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8809 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8812 /* Normal mode code can copy the result straight into $sp. */
8813 if (!TARGET_MIPS16)
8814 target = stack_pointer_rtx;
8816 emit_insn (gen_add3_insn (target, base, adjust));
8819 /* Copy TARGET into the stack pointer. */
8820 if (target != stack_pointer_rtx)
8821 mips_emit_move (stack_pointer_rtx, target);
8823 /* If we're using addressing macros, $gp is implicitly used by all
8824 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8825 from the stack. */
8826 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8827 emit_insn (gen_blockage ());
8829 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
8831 unsigned int regno, mask;
8832 HOST_WIDE_INT offset;
8833 rtx restore;
8835 /* Generate the restore instruction. */
8836 mask = frame->mask;
8837 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8839 /* Restore any other registers manually. */
8840 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8841 if (BITSET_P (mask, regno - GP_REG_FIRST))
8843 offset -= UNITS_PER_WORD;
8844 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
8847 /* Restore the remaining registers and deallocate the final bit
8848 of the frame. */
8849 emit_insn (restore);
8851 else
8853 /* Restore the registers. */
8854 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
8856 /* Deallocate the final bit of the frame. */
8857 if (step2 > 0)
8858 emit_insn (gen_add3_insn (stack_pointer_rtx,
8859 stack_pointer_rtx,
8860 GEN_INT (step2)));
8863 /* Add in the __builtin_eh_return stack adjustment. We need to
8864 use a temporary in MIPS16 code. */
8865 if (crtl->calls_eh_return)
8867 if (TARGET_MIPS16)
8869 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8870 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8871 MIPS_EPILOGUE_TEMP (Pmode),
8872 EH_RETURN_STACKADJ_RTX));
8873 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8875 else
8876 emit_insn (gen_add3_insn (stack_pointer_rtx,
8877 stack_pointer_rtx,
8878 EH_RETURN_STACKADJ_RTX));
8881 if (!sibcall_p)
8883 unsigned int regno;
8885 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8886 path will restore the return address into $7 rather than $31. */
8887 if (TARGET_MIPS16
8888 && !GENERATE_MIPS16E_SAVE_RESTORE
8889 && BITSET_P (frame->mask, 31))
8890 regno = GP_REG_FIRST + 7;
8891 else
8892 regno = GP_REG_FIRST + 31;
8893 mips_expand_before_return ();
8894 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
8898 /* Return nonzero if this function is known to have a null epilogue.
8899 This allows the optimizer to omit jumps to jumps if no stack
8900 was created. */
8902 bool
8903 mips_can_use_return_insn (void)
8905 if (!reload_completed)
8906 return false;
8908 if (crtl->profile)
8909 return false;
8911 /* In MIPS16 mode, a function that returns a floating-point value
8912 needs to arrange to copy the return value into the floating-point
8913 registers. */
8914 if (mips16_cfun_returns_in_fpr_p ())
8915 return false;
8917 return cfun->machine->frame.total_size == 0;
8920 /* Return true if register REGNO can store a value of mode MODE.
8921 The result of this function is cached in mips_hard_regno_mode_ok. */
8923 static bool
8924 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
8926 unsigned int size;
8927 enum mode_class class;
8929 if (mode == CCV2mode)
8930 return (ISA_HAS_8CC
8931 && ST_REG_P (regno)
8932 && (regno - ST_REG_FIRST) % 2 == 0);
8934 if (mode == CCV4mode)
8935 return (ISA_HAS_8CC
8936 && ST_REG_P (regno)
8937 && (regno - ST_REG_FIRST) % 4 == 0);
8939 if (mode == CCmode)
8941 if (!ISA_HAS_8CC)
8942 return regno == FPSW_REGNUM;
8944 return (ST_REG_P (regno)
8945 || GP_REG_P (regno)
8946 || FP_REG_P (regno));
8949 size = GET_MODE_SIZE (mode);
8950 class = GET_MODE_CLASS (mode);
8952 if (GP_REG_P (regno))
8953 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
8955 if (FP_REG_P (regno)
8956 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
8957 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
8959 /* Allow TFmode for CCmode reloads. */
8960 if (mode == TFmode && ISA_HAS_8CC)
8961 return true;
8963 if (class == MODE_FLOAT
8964 || class == MODE_COMPLEX_FLOAT
8965 || class == MODE_VECTOR_FLOAT)
8966 return size <= UNITS_PER_FPVALUE;
8968 /* Allow integer modes that fit into a single register. We need
8969 to put integers into FPRs when using instructions like CVT
8970 and TRUNC. There's no point allowing sizes smaller than a word,
8971 because the FPU has no appropriate load/store instructions. */
8972 if (class == MODE_INT)
8973 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
8976 if (ACC_REG_P (regno)
8977 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
8979 if (MD_REG_P (regno))
8981 /* After a multiplication or division, clobbering HI makes
8982 the value of LO unpredictable, and vice versa. This means
8983 that, for all interesting cases, HI and LO are effectively
8984 a single register.
8986 We model this by requiring that any value that uses HI
8987 also uses LO. */
8988 if (size <= UNITS_PER_WORD * 2)
8989 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
8991 else
8993 /* DSP accumulators do not have the same restrictions as
8994 HI and LO, so we can treat them as normal doubleword
8995 registers. */
8996 if (size <= UNITS_PER_WORD)
8997 return true;
8999 if (size <= UNITS_PER_WORD * 2
9000 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
9001 return true;
9005 if (ALL_COP_REG_P (regno))
9006 return class == MODE_INT && size <= UNITS_PER_WORD;
9008 if (regno == GOT_VERSION_REGNUM)
9009 return mode == SImode;
9011 return false;
9014 /* Implement HARD_REGNO_NREGS. */
9016 unsigned int
9017 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9019 if (ST_REG_P (regno))
9020 /* The size of FP status registers is always 4, because they only hold
9021 CCmode values, and CCmode is always considered to be 4 bytes wide. */
9022 return (GET_MODE_SIZE (mode) + 3) / 4;
9024 if (FP_REG_P (regno))
9025 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9027 /* All other registers are word-sized. */
9028 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9031 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
9032 in mips_hard_regno_nregs. */
9035 mips_class_max_nregs (enum reg_class class, enum machine_mode mode)
9037 int size;
9038 HARD_REG_SET left;
9040 size = 0x8000;
9041 COPY_HARD_REG_SET (left, reg_class_contents[(int) class]);
9042 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
9044 size = MIN (size, 4);
9045 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
9047 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
9049 size = MIN (size, UNITS_PER_FPREG);
9050 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
9052 if (!hard_reg_set_empty_p (left))
9053 size = MIN (size, UNITS_PER_WORD);
9054 return (GET_MODE_SIZE (mode) + size - 1) / size;
9057 /* Implement CANNOT_CHANGE_MODE_CLASS. */
9059 bool
9060 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
9061 enum machine_mode to ATTRIBUTE_UNUSED,
9062 enum reg_class class)
9064 /* There are several problems with changing the modes of values
9065 in floating-point registers:
9067 - When a multi-word value is stored in paired floating-point
9068 registers, the first register always holds the low word.
9069 We therefore can't allow FPRs to change between single-word
9070 and multi-word modes on big-endian targets.
9072 - GCC assumes that each word of a multiword register can be accessed
9073 individually using SUBREGs. This is not true for floating-point
9074 registers if they are bigger than a word.
9076 - Loading a 32-bit value into a 64-bit floating-point register
9077 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9078 We can't allow FPRs to change from SImode to to a wider mode on
9079 64-bit targets.
9081 - If the FPU has already interpreted a value in one format, we must
9082 not ask it to treat the value as having a different format.
9084 We therefore disallow all mode changes involving FPRs. */
9085 return reg_classes_intersect_p (FP_REGS, class);
9088 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
9090 static bool
9091 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
9093 switch (mode)
9095 case SFmode:
9096 return TARGET_HARD_FLOAT;
9098 case DFmode:
9099 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
9101 case V2SFmode:
9102 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
9104 default:
9105 return false;
9109 /* Implement MODES_TIEABLE_P. */
9111 bool
9112 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9114 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
9115 prefer to put one of them in FPRs. */
9116 return (mode1 == mode2
9117 || (!mips_mode_ok_for_mov_fmt_p (mode1)
9118 && !mips_mode_ok_for_mov_fmt_p (mode2)));
9121 /* Implement PREFERRED_RELOAD_CLASS. */
9123 enum reg_class
9124 mips_preferred_reload_class (rtx x, enum reg_class class)
9126 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9127 return LEA_REGS;
9129 if (reg_class_subset_p (FP_REGS, class)
9130 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
9131 return FP_REGS;
9133 if (reg_class_subset_p (GR_REGS, class))
9134 class = GR_REGS;
9136 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9137 class = M16_REGS;
9139 return class;
9142 /* Implement REGISTER_MOVE_COST. */
9145 mips_register_move_cost (enum machine_mode mode,
9146 enum reg_class to, enum reg_class from)
9148 if (TARGET_MIPS16)
9150 /* ??? We cannot move general registers into HI and LO because
9151 MIPS16 has no MTHI and MTLO instructions. Make the cost of
9152 moves in the opposite direction just as high, which stops the
9153 register allocators from using HI and LO for pseudos. */
9154 if (reg_class_subset_p (from, GENERAL_REGS)
9155 && reg_class_subset_p (to, GENERAL_REGS))
9157 if (reg_class_subset_p (from, M16_REGS)
9158 || reg_class_subset_p (to, M16_REGS))
9159 return 2;
9160 /* Two MOVEs. */
9161 return 4;
9164 else if (reg_class_subset_p (from, GENERAL_REGS))
9166 if (reg_class_subset_p (to, GENERAL_REGS))
9167 return 2;
9168 if (reg_class_subset_p (to, FP_REGS))
9169 return 4;
9170 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
9171 return 5;
9172 if (reg_class_subset_p (to, ACC_REGS))
9173 return 6;
9175 else if (reg_class_subset_p (to, GENERAL_REGS))
9177 if (reg_class_subset_p (from, FP_REGS))
9178 return 4;
9179 if (reg_class_subset_p (from, ST_REGS))
9180 /* LUI followed by MOVF. */
9181 return 4;
9182 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
9183 return 5;
9184 if (reg_class_subset_p (from, ACC_REGS))
9185 return 6;
9187 else if (reg_class_subset_p (from, FP_REGS))
9189 if (reg_class_subset_p (to, FP_REGS)
9190 && mips_mode_ok_for_mov_fmt_p (mode))
9191 return 4;
9192 if (reg_class_subset_p (to, ST_REGS))
9193 /* An expensive sequence. */
9194 return 8;
9197 return 12;
9200 /* Return the register class required for a secondary register when
9201 copying between one of the registers in CLASS and value X, which
9202 has mode MODE. X is the source of the move if IN_P, otherwise it
9203 is the destination. Return NO_REGS if no secondary register is
9204 needed. */
9206 enum reg_class
9207 mips_secondary_reload_class (enum reg_class class,
9208 enum machine_mode mode, rtx x, bool in_p)
9210 int regno;
9212 /* If X is a constant that cannot be loaded into $25, it must be loaded
9213 into some other GPR. No other register class allows a direct move. */
9214 if (mips_dangerous_for_la25_p (x))
9215 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
9217 regno = true_regnum (x);
9218 if (TARGET_MIPS16)
9220 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9221 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
9222 return M16_REGS;
9224 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9225 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9226 return M16_REGS;
9228 return NO_REGS;
9231 /* Copying from accumulator registers to anywhere other than a general
9232 register requires a temporary general register. */
9233 if (reg_class_subset_p (class, ACC_REGS))
9234 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9235 if (ACC_REG_P (regno))
9236 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9238 /* We can only copy a value to a condition code register from a
9239 floating-point register, and even then we require a scratch
9240 floating-point register. We can only copy a value out of a
9241 condition-code register into a general register. */
9242 if (reg_class_subset_p (class, ST_REGS))
9244 if (in_p)
9245 return FP_REGS;
9246 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9248 if (ST_REG_P (regno))
9250 if (!in_p)
9251 return FP_REGS;
9252 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9255 if (reg_class_subset_p (class, FP_REGS))
9257 if (MEM_P (x)
9258 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9259 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9260 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9261 return NO_REGS;
9263 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9264 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9265 return NO_REGS;
9267 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9268 /* We can force the constant to memory and use lwc1
9269 and ldc1. As above, we will use pairs of lwc1s if
9270 ldc1 is not supported. */
9271 return NO_REGS;
9273 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9274 /* In this case we can use mov.fmt. */
9275 return NO_REGS;
9277 /* Otherwise, we need to reload through an integer register. */
9278 return GR_REGS;
9280 if (FP_REG_P (regno))
9281 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9283 return NO_REGS;
9286 /* Implement TARGET_MODE_REP_EXTENDED. */
9288 static int
9289 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9291 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9292 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9293 return SIGN_EXTEND;
9295 return UNKNOWN;
9298 /* Implement TARGET_VALID_POINTER_MODE. */
9300 static bool
9301 mips_valid_pointer_mode (enum machine_mode mode)
9303 return mode == SImode || (TARGET_64BIT && mode == DImode);
9306 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
9308 static bool
9309 mips_vector_mode_supported_p (enum machine_mode mode)
9311 switch (mode)
9313 case V2SFmode:
9314 return TARGET_PAIRED_SINGLE_FLOAT;
9316 case V2HImode:
9317 case V4QImode:
9318 case V2HQmode:
9319 case V2UHQmode:
9320 case V2HAmode:
9321 case V2UHAmode:
9322 case V4QQmode:
9323 case V4UQQmode:
9324 return TARGET_DSP;
9326 default:
9327 return false;
9331 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9333 static bool
9334 mips_scalar_mode_supported_p (enum machine_mode mode)
9336 if (ALL_FIXED_POINT_MODE_P (mode)
9337 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9338 return true;
9340 return default_scalar_mode_supported_p (mode);
9343 /* Implement TARGET_INIT_LIBFUNCS. */
9345 #include "config/gofast.h"
9347 static void
9348 mips_init_libfuncs (void)
9350 if (TARGET_FIX_VR4120)
9352 /* Register the special divsi3 and modsi3 functions needed to work
9353 around VR4120 division errata. */
9354 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9355 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9358 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9360 /* Register the MIPS16 -mhard-float stubs. */
9361 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9362 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9363 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9364 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9366 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9367 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9368 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9369 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9370 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9371 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9372 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9374 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9375 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9376 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9378 if (TARGET_DOUBLE_FLOAT)
9380 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9381 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9382 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9383 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9385 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9386 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9387 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9388 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9389 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9390 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9391 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9393 set_conv_libfunc (sext_optab, DFmode, SFmode,
9394 "__mips16_extendsfdf2");
9395 set_conv_libfunc (trunc_optab, SFmode, DFmode,
9396 "__mips16_truncdfsf2");
9397 set_conv_libfunc (sfix_optab, SImode, DFmode,
9398 "__mips16_fix_truncdfsi");
9399 set_conv_libfunc (sfloat_optab, DFmode, SImode,
9400 "__mips16_floatsidf");
9401 set_conv_libfunc (ufloat_optab, DFmode, SImode,
9402 "__mips16_floatunsidf");
9405 else
9406 /* Register the gofast functions if selected using --enable-gofast. */
9407 gofast_maybe_init_libfuncs ();
9410 /* Return the length of INSN. LENGTH is the initial length computed by
9411 attributes in the machine-description file. */
9414 mips_adjust_insn_length (rtx insn, int length)
9416 /* A unconditional jump has an unfilled delay slot if it is not part
9417 of a sequence. A conditional jump normally has a delay slot, but
9418 does not on MIPS16. */
9419 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9420 length += 4;
9422 /* See how many nops might be needed to avoid hardware hazards. */
9423 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9424 switch (get_attr_hazard (insn))
9426 case HAZARD_NONE:
9427 break;
9429 case HAZARD_DELAY:
9430 length += 4;
9431 break;
9433 case HAZARD_HILO:
9434 length += 8;
9435 break;
9438 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
9439 the .md file length attributes are 4-based for both modes.
9440 Adjust the MIPS16 ones here. */
9441 if (TARGET_MIPS16)
9442 length /= 2;
9444 return length;
9447 /* Return an asm sequence to start a noat block and load the address
9448 of a label into $1. */
9450 const char *
9451 mips_output_load_label (void)
9453 if (TARGET_EXPLICIT_RELOCS)
9454 switch (mips_abi)
9456 case ABI_N32:
9457 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9459 case ABI_64:
9460 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9462 default:
9463 if (ISA_HAS_LOAD_DELAY)
9464 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9465 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9467 else
9469 if (Pmode == DImode)
9470 return "%[dla\t%@,%0";
9471 else
9472 return "%[la\t%@,%0";
9476 /* Return the assembly code for INSN, which has the operands given by
9477 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9478 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9479 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9480 version of BRANCH_IF_TRUE. */
9482 const char *
9483 mips_output_conditional_branch (rtx insn, rtx *operands,
9484 const char *branch_if_true,
9485 const char *branch_if_false)
9487 unsigned int length;
9488 rtx taken, not_taken;
9490 length = get_attr_length (insn);
9491 if (length <= 8)
9493 /* Just a simple conditional branch. */
9494 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9495 return branch_if_true;
9498 /* Generate a reversed branch around a direct jump. This fallback does
9499 not use branch-likely instructions. */
9500 mips_branch_likely = false;
9501 not_taken = gen_label_rtx ();
9502 taken = operands[1];
9504 /* Generate the reversed branch to NOT_TAKEN. */
9505 operands[1] = not_taken;
9506 output_asm_insn (branch_if_false, operands);
9508 /* If INSN has a delay slot, we must provide delay slots for both the
9509 branch to NOT_TAKEN and the conditional jump. We must also ensure
9510 that INSN's delay slot is executed in the appropriate cases. */
9511 if (final_sequence)
9513 /* This first delay slot will always be executed, so use INSN's
9514 delay slot if is not annulled. */
9515 if (!INSN_ANNULLED_BRANCH_P (insn))
9517 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9518 asm_out_file, optimize, 1, NULL);
9519 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9521 else
9522 output_asm_insn ("nop", 0);
9523 fprintf (asm_out_file, "\n");
9526 /* Output the unconditional branch to TAKEN. */
9527 if (length <= 16)
9528 output_asm_insn ("j\t%0%/", &taken);
9529 else
9531 output_asm_insn (mips_output_load_label (), &taken);
9532 output_asm_insn ("jr\t%@%]%/", 0);
9535 /* Now deal with its delay slot; see above. */
9536 if (final_sequence)
9538 /* This delay slot will only be executed if the branch is taken.
9539 Use INSN's delay slot if is annulled. */
9540 if (INSN_ANNULLED_BRANCH_P (insn))
9542 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9543 asm_out_file, optimize, 1, NULL);
9544 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9546 else
9547 output_asm_insn ("nop", 0);
9548 fprintf (asm_out_file, "\n");
9551 /* Output NOT_TAKEN. */
9552 targetm.asm_out.internal_label (asm_out_file, "L",
9553 CODE_LABEL_NUMBER (not_taken));
9554 return "";
9557 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9558 if some ordering condition is true. The condition is given by
9559 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9560 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9561 its second is always zero. */
9563 const char *
9564 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9566 const char *branch[2];
9568 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9569 Make BRANCH[0] branch on the inverse condition. */
9570 switch (GET_CODE (operands[0]))
9572 /* These cases are equivalent to comparisons against zero. */
9573 case LEU:
9574 inverted_p = !inverted_p;
9575 /* Fall through. */
9576 case GTU:
9577 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9578 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9579 break;
9581 /* These cases are always true or always false. */
9582 case LTU:
9583 inverted_p = !inverted_p;
9584 /* Fall through. */
9585 case GEU:
9586 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9587 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9588 break;
9590 default:
9591 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9592 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9593 break;
9595 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9598 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
9599 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
9601 When working around R4000 and R4400 errata, we need to make sure that
9602 the division is not immediately followed by a shift[1][2]. We also
9603 need to stop the division from being put into a branch delay slot[3].
9604 The easiest way to avoid both problems is to add a nop after the
9605 division. When a divide-by-zero check is needed, this nop can be
9606 used to fill the branch delay slot.
9608 [1] If a double-word or a variable shift executes immediately
9609 after starting an integer division, the shift may give an
9610 incorrect result. See quotations of errata #16 and #28 from
9611 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9612 in mips.md for details.
9614 [2] A similar bug to [1] exists for all revisions of the
9615 R4000 and the R4400 when run in an MC configuration.
9616 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9618 "19. In this following sequence:
9620 ddiv (or ddivu or div or divu)
9621 dsll32 (or dsrl32, dsra32)
9623 if an MPT stall occurs, while the divide is slipping the cpu
9624 pipeline, then the following double shift would end up with an
9625 incorrect result.
9627 Workaround: The compiler needs to avoid generating any
9628 sequence with divide followed by extended double shift."
9630 This erratum is also present in "MIPS R4400MC Errata, Processor
9631 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9632 & 3.0" as errata #10 and #4, respectively.
9634 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9635 (also valid for MIPS R4000MC processors):
9637 "52. R4000SC: This bug does not apply for the R4000PC.
9639 There are two flavors of this bug:
9641 1) If the instruction just after divide takes an RF exception
9642 (tlb-refill, tlb-invalid) and gets an instruction cache
9643 miss (both primary and secondary) and the line which is
9644 currently in secondary cache at this index had the first
9645 data word, where the bits 5..2 are set, then R4000 would
9646 get a wrong result for the div.
9650 div r8, r9
9651 ------------------- # end-of page. -tlb-refill
9655 div r8, r9
9656 ------------------- # end-of page. -tlb-invalid
9659 2) If the divide is in the taken branch delay slot, where the
9660 target takes RF exception and gets an I-cache miss for the
9661 exception vector or where I-cache miss occurs for the
9662 target address, under the above mentioned scenarios, the
9663 div would get wrong results.
9666 j r2 # to next page mapped or unmapped
9667 div r8,r9 # this bug would be there as long
9668 # as there is an ICache miss and
9669 nop # the "data pattern" is present
9672 beq r0, r0, NextPage # to Next page
9673 div r8,r9
9676 This bug is present for div, divu, ddiv, and ddivu
9677 instructions.
9679 Workaround: For item 1), OS could make sure that the next page
9680 after the divide instruction is also mapped. For item 2), the
9681 compiler could make sure that the divide instruction is not in
9682 the branch delay slot."
9684 These processors have PRId values of 0x00004220 and 0x00004300 for
9685 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9687 const char *
9688 mips_output_division (const char *division, rtx *operands)
9690 const char *s;
9692 s = division;
9693 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9695 output_asm_insn (s, operands);
9696 s = "nop";
9698 if (TARGET_CHECK_ZERO_DIV)
9700 if (TARGET_MIPS16)
9702 output_asm_insn (s, operands);
9703 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9705 else if (GENERATE_DIVIDE_TRAPS)
9707 output_asm_insn (s, operands);
9708 s = "teq\t%2,%.,7";
9710 else
9712 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9713 output_asm_insn (s, operands);
9714 s = "break\t7%)\n1:";
9717 return s;
9720 /* Return true if IN_INSN is a multiply-add or multiply-subtract
9721 instruction and if OUT_INSN assigns to the accumulator operand. */
9723 bool
9724 mips_linked_madd_p (rtx out_insn, rtx in_insn)
9726 rtx x;
9728 x = single_set (in_insn);
9729 if (x == 0)
9730 return false;
9732 x = SET_SRC (x);
9734 if (GET_CODE (x) == PLUS
9735 && GET_CODE (XEXP (x, 0)) == MULT
9736 && reg_set_p (XEXP (x, 1), out_insn))
9737 return true;
9739 if (GET_CODE (x) == MINUS
9740 && GET_CODE (XEXP (x, 1)) == MULT
9741 && reg_set_p (XEXP (x, 0), out_insn))
9742 return true;
9744 return false;
9747 /* True if the dependency between OUT_INSN and IN_INSN is on the store
9748 data rather than the address. We need this because the cprestore
9749 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
9750 which causes the default routine to abort. We just return false
9751 for that case. */
9753 bool
9754 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9756 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9757 return false;
9759 return !store_data_bypass_p (out_insn, in_insn);
9762 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9763 dependencies have no cost, except on the 20Kc where output-dependence
9764 is treated like input-dependence. */
9766 static int
9767 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9768 rtx dep ATTRIBUTE_UNUSED, int cost)
9770 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9771 && TUNE_20KC)
9772 return cost;
9773 if (REG_NOTE_KIND (link) != 0)
9774 return 0;
9775 return cost;
9778 /* Return the number of instructions that can be issued per cycle. */
9780 static int
9781 mips_issue_rate (void)
9783 switch (mips_tune)
9785 case PROCESSOR_74KC:
9786 case PROCESSOR_74KF2_1:
9787 case PROCESSOR_74KF1_1:
9788 case PROCESSOR_74KF3_2:
9789 /* The 74k is not strictly quad-issue cpu, but can be seen as one
9790 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9791 but in reality only a maximum of 3 insns can be issued as
9792 floating-point loads and stores also require a slot in the
9793 AGEN pipe. */
9794 return 4;
9796 case PROCESSOR_20KC:
9797 case PROCESSOR_R4130:
9798 case PROCESSOR_R5400:
9799 case PROCESSOR_R5500:
9800 case PROCESSOR_R7000:
9801 case PROCESSOR_R9000:
9802 return 2;
9804 case PROCESSOR_SB1:
9805 case PROCESSOR_SB1A:
9806 /* This is actually 4, but we get better performance if we claim 3.
9807 This is partly because of unwanted speculative code motion with the
9808 larger number, and partly because in most common cases we can't
9809 reach the theoretical max of 4. */
9810 return 3;
9812 default:
9813 return 1;
9817 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9818 be as wide as the scheduling freedom in the DFA. */
9820 static int
9821 mips_multipass_dfa_lookahead (void)
9823 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9824 if (TUNE_SB1)
9825 return 4;
9827 return 0;
9830 /* Remove the instruction at index LOWER from ready queue READY and
9831 reinsert it in front of the instruction at index HIGHER. LOWER must
9832 be <= HIGHER. */
9834 static void
9835 mips_promote_ready (rtx *ready, int lower, int higher)
9837 rtx new_head;
9838 int i;
9840 new_head = ready[lower];
9841 for (i = lower; i < higher; i++)
9842 ready[i] = ready[i + 1];
9843 ready[i] = new_head;
9846 /* If the priority of the instruction at POS2 in the ready queue READY
9847 is within LIMIT units of that of the instruction at POS1, swap the
9848 instructions if POS2 is not already less than POS1. */
9850 static void
9851 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
9853 if (pos1 < pos2
9854 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
9856 rtx temp;
9858 temp = ready[pos1];
9859 ready[pos1] = ready[pos2];
9860 ready[pos2] = temp;
9864 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9865 that may clobber hi or lo. */
9866 static rtx mips_macc_chains_last_hilo;
9868 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9869 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9871 static void
9872 mips_macc_chains_record (rtx insn)
9874 if (get_attr_may_clobber_hilo (insn))
9875 mips_macc_chains_last_hilo = insn;
9878 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9879 has NREADY elements, looking for a multiply-add or multiply-subtract
9880 instruction that is cumulative with mips_macc_chains_last_hilo.
9881 If there is one, promote it ahead of anything else that might
9882 clobber hi or lo. */
9884 static void
9885 mips_macc_chains_reorder (rtx *ready, int nready)
9887 int i, j;
9889 if (mips_macc_chains_last_hilo != 0)
9890 for (i = nready - 1; i >= 0; i--)
9891 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9893 for (j = nready - 1; j > i; j--)
9894 if (recog_memoized (ready[j]) >= 0
9895 && get_attr_may_clobber_hilo (ready[j]))
9897 mips_promote_ready (ready, i, j);
9898 break;
9900 break;
9904 /* The last instruction to be scheduled. */
9905 static rtx vr4130_last_insn;
9907 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9908 points to an rtx that is initially an instruction. Nullify the rtx
9909 if the instruction uses the value of register X. */
9911 static void
9912 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
9913 void *data)
9915 rtx *insn_ptr;
9917 insn_ptr = (rtx *) data;
9918 if (REG_P (x)
9919 && *insn_ptr != 0
9920 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9921 *insn_ptr = 0;
9924 /* Return true if there is true register dependence between vr4130_last_insn
9925 and INSN. */
9927 static bool
9928 vr4130_true_reg_dependence_p (rtx insn)
9930 note_stores (PATTERN (vr4130_last_insn),
9931 vr4130_true_reg_dependence_p_1, &insn);
9932 return insn == 0;
9935 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9936 the ready queue and that INSN2 is the instruction after it, return
9937 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9938 in which INSN1 and INSN2 can probably issue in parallel, but for
9939 which (INSN2, INSN1) should be less sensitive to instruction
9940 alignment than (INSN1, INSN2). See 4130.md for more details. */
9942 static bool
9943 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9945 sd_iterator_def sd_it;
9946 dep_t dep;
9948 /* Check for the following case:
9950 1) there is some other instruction X with an anti dependence on INSN1;
9951 2) X has a higher priority than INSN2; and
9952 3) X is an arithmetic instruction (and thus has no unit restrictions).
9954 If INSN1 is the last instruction blocking X, it would better to
9955 choose (INSN1, X) over (INSN2, INSN1). */
9956 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
9957 if (DEP_TYPE (dep) == REG_DEP_ANTI
9958 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
9959 && recog_memoized (DEP_CON (dep)) >= 0
9960 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
9961 return false;
9963 if (vr4130_last_insn != 0
9964 && recog_memoized (insn1) >= 0
9965 && recog_memoized (insn2) >= 0)
9967 /* See whether INSN1 and INSN2 use different execution units,
9968 or if they are both ALU-type instructions. If so, they can
9969 probably execute in parallel. */
9970 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9971 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9972 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9974 /* If only one of the instructions has a dependence on
9975 vr4130_last_insn, prefer to schedule the other one first. */
9976 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
9977 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
9978 if (dep1_p != dep2_p)
9979 return dep1_p;
9981 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9982 is not an ALU-type instruction and if INSN1 uses the same
9983 execution unit. (Note that if this condition holds, we already
9984 know that INSN2 uses a different execution unit.) */
9985 if (class1 != VR4130_CLASS_ALU
9986 && recog_memoized (vr4130_last_insn) >= 0
9987 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9988 return true;
9991 return false;
9994 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9995 queue with at least two instructions. Swap the first two if
9996 vr4130_swap_insns_p says that it could be worthwhile. */
9998 static void
9999 vr4130_reorder (rtx *ready, int nready)
10001 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10002 mips_promote_ready (ready, nready - 2, nready - 1);
10005 /* Record whether last 74k AGEN instruction was a load or store. */
10006 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
10008 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10009 resets to TYPE_UNKNOWN state. */
10011 static void
10012 mips_74k_agen_init (rtx insn)
10014 if (!insn || !NONJUMP_INSN_P (insn))
10015 mips_last_74k_agen_insn = TYPE_UNKNOWN;
10016 else
10018 enum attr_type type = get_attr_type (insn);
10019 if (type == TYPE_LOAD || type == TYPE_STORE)
10020 mips_last_74k_agen_insn = type;
10024 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10025 loads to be grouped together, and multiple stores to be grouped
10026 together. Swap things around in the ready queue to make this happen. */
10028 static void
10029 mips_74k_agen_reorder (rtx *ready, int nready)
10031 int i;
10032 int store_pos, load_pos;
10034 store_pos = -1;
10035 load_pos = -1;
10037 for (i = nready - 1; i >= 0; i--)
10039 rtx insn = ready[i];
10040 if (USEFUL_INSN_P (insn))
10041 switch (get_attr_type (insn))
10043 case TYPE_STORE:
10044 if (store_pos == -1)
10045 store_pos = i;
10046 break;
10048 case TYPE_LOAD:
10049 if (load_pos == -1)
10050 load_pos = i;
10051 break;
10053 default:
10054 break;
10058 if (load_pos == -1 || store_pos == -1)
10059 return;
10061 switch (mips_last_74k_agen_insn)
10063 case TYPE_UNKNOWN:
10064 /* Prefer to schedule loads since they have a higher latency. */
10065 case TYPE_LOAD:
10066 /* Swap loads to the front of the queue. */
10067 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
10068 break;
10069 case TYPE_STORE:
10070 /* Swap stores to the front of the queue. */
10071 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
10072 break;
10073 default:
10074 break;
10078 /* Implement TARGET_SCHED_INIT. */
10080 static void
10081 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10082 int max_ready ATTRIBUTE_UNUSED)
10084 mips_macc_chains_last_hilo = 0;
10085 vr4130_last_insn = 0;
10086 mips_74k_agen_init (NULL_RTX);
10089 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10091 static int
10092 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10093 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
10095 if (!reload_completed
10096 && TUNE_MACC_CHAINS
10097 && *nreadyp > 0)
10098 mips_macc_chains_reorder (ready, *nreadyp);
10100 if (reload_completed
10101 && TUNE_MIPS4130
10102 && !TARGET_VR4130_ALIGN
10103 && *nreadyp > 1)
10104 vr4130_reorder (ready, *nreadyp);
10106 if (TUNE_74K)
10107 mips_74k_agen_reorder (ready, *nreadyp);
10109 return mips_issue_rate ();
10112 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10114 static int
10115 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10116 rtx insn, int more)
10118 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
10119 if (USEFUL_INSN_P (insn))
10121 more--;
10122 if (!reload_completed && TUNE_MACC_CHAINS)
10123 mips_macc_chains_record (insn);
10124 vr4130_last_insn = insn;
10125 if (TUNE_74K)
10126 mips_74k_agen_init (insn);
10128 return more;
10131 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10132 return the first operand of the associated PREF or PREFX insn. */
10135 mips_prefetch_cookie (rtx write, rtx locality)
10137 /* store_streamed / load_streamed. */
10138 if (INTVAL (locality) <= 0)
10139 return GEN_INT (INTVAL (write) + 4);
10141 /* store / load. */
10142 if (INTVAL (locality) <= 2)
10143 return write;
10145 /* store_retained / load_retained. */
10146 return GEN_INT (INTVAL (write) + 6);
10149 /* Flags that indicate when a built-in function is available.
10151 BUILTIN_AVAIL_NON_MIPS16
10152 The function is available on the current target, but only
10153 in non-MIPS16 mode. */
10154 #define BUILTIN_AVAIL_NON_MIPS16 1
10156 /* Declare an availability predicate for built-in functions that
10157 require non-MIPS16 mode and also require COND to be true.
10158 NAME is the main part of the predicate's name. */
10159 #define AVAIL_NON_MIPS16(NAME, COND) \
10160 static unsigned int \
10161 mips_builtin_avail_##NAME (void) \
10163 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
10166 /* This structure describes a single built-in function. */
10167 struct mips_builtin_description {
10168 /* The code of the main .md file instruction. See mips_builtin_type
10169 for more information. */
10170 enum insn_code icode;
10172 /* The floating-point comparison code to use with ICODE, if any. */
10173 enum mips_fp_condition cond;
10175 /* The name of the built-in function. */
10176 const char *name;
10178 /* Specifies how the function should be expanded. */
10179 enum mips_builtin_type builtin_type;
10181 /* The function's prototype. */
10182 enum mips_function_type function_type;
10184 /* Whether the function is available. */
10185 unsigned int (*avail) (void);
10188 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
10189 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
10190 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
10191 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
10192 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
10193 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
10194 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
10196 /* Construct a mips_builtin_description from the given arguments.
10198 INSN is the name of the associated instruction pattern, without the
10199 leading CODE_FOR_mips_.
10201 CODE is the floating-point condition code associated with the
10202 function. It can be 'f' if the field is not applicable.
10204 NAME is the name of the function itself, without the leading
10205 "__builtin_mips_".
10207 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
10209 AVAIL is the name of the availability predicate, without the leading
10210 mips_builtin_avail_. */
10211 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
10212 FUNCTION_TYPE, AVAIL) \
10213 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
10214 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
10215 mips_builtin_avail_ ## AVAIL }
10217 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
10218 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
10219 are as for MIPS_BUILTIN. */
10220 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10221 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
10223 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
10224 are subject to mips_builtin_avail_<AVAIL>. */
10225 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
10226 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
10227 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
10228 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
10229 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
10231 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10232 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
10233 while the any and all forms are subject to mips_builtin_avail_mips3d. */
10234 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
10235 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
10236 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
10237 mips3d), \
10238 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
10239 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
10240 mips3d), \
10241 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
10242 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
10243 AVAIL), \
10244 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
10245 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
10246 AVAIL)
10248 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10249 are subject to mips_builtin_avail_mips3d. */
10250 #define CMP_4S_BUILTINS(INSN, COND) \
10251 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
10252 MIPS_BUILTIN_CMP_ANY, \
10253 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
10254 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
10255 MIPS_BUILTIN_CMP_ALL, \
10256 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
10258 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10259 instruction requires mips_builtin_avail_<AVAIL>. */
10260 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
10261 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
10262 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10263 AVAIL), \
10264 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
10265 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10266 AVAIL)
10268 /* Define all the built-in functions related to C.cond.fmt condition COND. */
10269 #define CMP_BUILTINS(COND) \
10270 MOVTF_BUILTINS (c, COND, paired_single), \
10271 MOVTF_BUILTINS (cabs, COND, mips3d), \
10272 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
10273 CMP_PS_BUILTINS (c, COND, paired_single), \
10274 CMP_PS_BUILTINS (cabs, COND, mips3d), \
10275 CMP_4S_BUILTINS (c, COND), \
10276 CMP_4S_BUILTINS (cabs, COND)
10278 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
10279 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
10280 and AVAIL are as for MIPS_BUILTIN. */
10281 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10282 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
10283 FUNCTION_TYPE, AVAIL)
10285 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10286 branch instruction. AVAIL is as for MIPS_BUILTIN. */
10287 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
10288 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
10289 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
10291 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10292 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10293 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10294 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10295 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10296 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10298 static const struct mips_builtin_description mips_builtins[] = {
10299 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10300 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10301 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10302 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10303 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
10304 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
10305 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
10306 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
10308 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
10309 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10310 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10311 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10312 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
10314 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
10315 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
10316 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10317 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10318 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10319 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10321 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
10322 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
10323 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10324 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10325 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10326 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10328 MIPS_FP_CONDITIONS (CMP_BUILTINS),
10330 /* Built-in functions for the SB-1 processor. */
10331 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
10333 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
10334 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10335 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10336 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10337 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10338 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10339 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10340 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10341 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10342 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10343 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10344 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
10345 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
10346 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
10347 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
10348 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
10349 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
10350 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10351 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10352 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10353 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10354 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
10355 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
10356 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10357 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10358 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10359 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10360 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10361 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10362 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10363 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10364 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10365 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10366 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10367 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10368 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10369 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10370 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10371 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
10372 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10373 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10374 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10375 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10376 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10377 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
10378 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
10379 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
10380 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
10381 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10382 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10383 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10384 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10385 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10386 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10387 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10388 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10389 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10390 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10391 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10392 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10393 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
10394 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
10395 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
10396 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10397 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10398 BPOSGE_BUILTIN (32, dsp),
10400 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
10401 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
10402 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10403 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10404 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10405 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10406 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10407 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10408 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10409 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10410 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10411 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10412 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10413 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10414 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10415 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10416 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
10417 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10418 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10419 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10420 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10421 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10422 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
10423 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10424 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10425 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10426 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10427 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10428 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10429 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10430 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10431 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10432 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10433 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10434 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10436 /* Built-in functions for the DSP ASE (32-bit only). */
10437 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10438 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10439 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10440 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10441 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10442 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10443 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10444 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10445 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10446 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10447 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10448 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10449 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10450 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10451 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10452 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10453 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
10454 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10455 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10456 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
10457 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
10459 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
10460 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10461 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10462 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10463 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10464 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10465 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10466 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10467 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
10468 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
10469 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10470 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10471 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10472 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10473 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10474 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32)
10477 /* MODE is a vector mode whose elements have type TYPE. Return the type
10478 of the vector itself. */
10480 static tree
10481 mips_builtin_vector_type (tree type, enum machine_mode mode)
10483 static tree types[(int) MAX_MACHINE_MODE];
10485 if (types[(int) mode] == NULL_TREE)
10486 types[(int) mode] = build_vector_type_for_mode (type, mode);
10487 return types[(int) mode];
10490 /* Source-level argument types. */
10491 #define MIPS_ATYPE_VOID void_type_node
10492 #define MIPS_ATYPE_INT integer_type_node
10493 #define MIPS_ATYPE_POINTER ptr_type_node
10495 /* Standard mode-based argument types. */
10496 #define MIPS_ATYPE_SI intSI_type_node
10497 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10498 #define MIPS_ATYPE_DI intDI_type_node
10499 #define MIPS_ATYPE_SF float_type_node
10500 #define MIPS_ATYPE_DF double_type_node
10502 /* Vector argument types. */
10503 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10504 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10505 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10507 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10508 their associated MIPS_ATYPEs. */
10509 #define MIPS_FTYPE_ATYPES1(A, B) \
10510 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10512 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10513 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10515 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10516 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10518 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10519 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10520 MIPS_ATYPE_##E
10522 /* Return the function type associated with function prototype TYPE. */
10524 static tree
10525 mips_build_function_type (enum mips_function_type type)
10527 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10529 if (types[(int) type] == NULL_TREE)
10530 switch (type)
10532 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10533 case MIPS_FTYPE_NAME##NUM ARGS: \
10534 types[(int) type] \
10535 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10536 NULL_TREE); \
10537 break;
10538 #include "config/mips/mips-ftypes.def"
10539 #undef DEF_MIPS_FTYPE
10540 default:
10541 gcc_unreachable ();
10544 return types[(int) type];
10547 /* Implement TARGET_INIT_BUILTINS. */
10549 static void
10550 mips_init_builtins (void)
10552 const struct mips_builtin_description *d;
10553 unsigned int i;
10555 /* Iterate through all of the bdesc arrays, initializing all of the
10556 builtin functions. */
10557 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
10559 d = &mips_builtins[i];
10560 if (d->avail ())
10561 add_builtin_function (d->name,
10562 mips_build_function_type (d->function_type),
10563 i, BUILT_IN_MD, NULL, NULL);
10567 /* Take argument ARGNO from EXP's argument list and convert it into a
10568 form suitable for input operand OPNO of instruction ICODE. Return the
10569 value. */
10571 static rtx
10572 mips_prepare_builtin_arg (enum insn_code icode,
10573 unsigned int opno, tree exp, unsigned int argno)
10575 rtx value;
10576 enum machine_mode mode;
10578 value = expand_normal (CALL_EXPR_ARG (exp, argno));
10579 mode = insn_data[icode].operand[opno].mode;
10580 if (!insn_data[icode].operand[opno].predicate (value, mode))
10582 value = copy_to_mode_reg (mode, value);
10583 /* Check the predicate again. */
10584 if (!insn_data[icode].operand[opno].predicate (value, mode))
10586 error ("invalid argument to built-in function");
10587 return const0_rtx;
10591 return value;
10594 /* Return an rtx suitable for output operand OP of instruction ICODE.
10595 If TARGET is non-null, try to use it where possible. */
10597 static rtx
10598 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10600 enum machine_mode mode;
10602 mode = insn_data[icode].operand[op].mode;
10603 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10604 target = gen_reg_rtx (mode);
10606 return target;
10609 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
10610 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
10611 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
10612 suggests a good place to put the result. */
10614 static rtx
10615 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10616 bool has_target_p)
10618 rtx ops[MAX_RECOG_OPERANDS];
10619 int opno, argno;
10621 /* Map any target to operand 0. */
10622 opno = 0;
10623 if (has_target_p)
10625 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
10626 opno++;
10629 /* Map the arguments to the other operands. The n_operands value
10630 for an expander includes match_dups and match_scratches as well as
10631 match_operands, so n_operands is only an upper bound on the number
10632 of arguments to the expander function. */
10633 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
10634 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
10635 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
10637 switch (opno)
10639 case 2:
10640 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10641 break;
10643 case 3:
10644 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10645 break;
10647 case 4:
10648 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10649 break;
10651 default:
10652 gcc_unreachable ();
10654 return target;
10657 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
10658 function; TYPE says which. EXP is the CALL_EXPR that calls the
10659 function, ICODE is the instruction that should be used to compare
10660 the first two arguments, and COND is the condition it should test.
10661 TARGET, if nonnull, suggests a good place to put the result. */
10663 static rtx
10664 mips_expand_builtin_movtf (enum mips_builtin_type type,
10665 enum insn_code icode, enum mips_fp_condition cond,
10666 rtx target, tree exp)
10668 rtx cmp_result, op0, op1;
10670 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10671 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10672 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10673 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10675 icode = CODE_FOR_mips_cond_move_tf_ps;
10676 target = mips_prepare_builtin_target (icode, 0, target);
10677 if (type == MIPS_BUILTIN_MOVT)
10679 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10680 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10682 else
10684 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10685 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10687 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10688 return target;
10691 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10692 into TARGET otherwise. Return TARGET. */
10694 static rtx
10695 mips_builtin_branch_and_move (rtx condition, rtx target,
10696 rtx value_if_true, rtx value_if_false)
10698 rtx true_label, done_label;
10700 true_label = gen_label_rtx ();
10701 done_label = gen_label_rtx ();
10703 /* First assume that CONDITION is false. */
10704 mips_emit_move (target, value_if_false);
10706 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10707 emit_jump_insn (gen_condjump (condition, true_label));
10708 emit_jump_insn (gen_jump (done_label));
10709 emit_barrier ();
10711 /* Fix TARGET if CONDITION is true. */
10712 emit_label (true_label);
10713 mips_emit_move (target, value_if_true);
10715 emit_label (done_label);
10716 return target;
10719 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
10720 the CALL_EXPR that calls the function, ICODE is the code of the
10721 comparison instruction, and COND is the condition it should test.
10722 TARGET, if nonnull, suggests a good place to put the boolean result. */
10724 static rtx
10725 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10726 enum insn_code icode, enum mips_fp_condition cond,
10727 rtx target, tree exp)
10729 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
10730 int argno;
10732 if (target == 0 || GET_MODE (target) != SImode)
10733 target = gen_reg_rtx (SImode);
10735 /* The instruction should have a target operand, an operand for each
10736 argument, and an operand for COND. */
10737 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
10739 /* Prepare the operands to the comparison. */
10740 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10741 for (argno = 0; argno < call_expr_nargs (exp); argno++)
10742 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
10744 switch (insn_data[icode].n_operands)
10746 case 4:
10747 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
10748 GEN_INT (cond)));
10749 break;
10751 case 6:
10752 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
10753 args[2], args[3], GEN_INT (cond)));
10754 break;
10756 default:
10757 gcc_unreachable ();
10760 /* If the comparison sets more than one register, we define the result
10761 to be 0 if all registers are false and -1 if all registers are true.
10762 The value of the complete result is indeterminate otherwise. */
10763 switch (builtin_type)
10765 case MIPS_BUILTIN_CMP_ALL:
10766 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10767 return mips_builtin_branch_and_move (condition, target,
10768 const0_rtx, const1_rtx);
10770 case MIPS_BUILTIN_CMP_UPPER:
10771 case MIPS_BUILTIN_CMP_LOWER:
10772 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10773 condition = gen_single_cc (cmp_result, offset);
10774 return mips_builtin_branch_and_move (condition, target,
10775 const1_rtx, const0_rtx);
10777 default:
10778 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10779 return mips_builtin_branch_and_move (condition, target,
10780 const1_rtx, const0_rtx);
10784 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
10785 if nonnull, suggests a good place to put the boolean result. */
10787 static rtx
10788 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10790 rtx condition, cmp_result;
10791 int cmp_value;
10793 if (target == 0 || GET_MODE (target) != SImode)
10794 target = gen_reg_rtx (SImode);
10796 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10798 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10799 cmp_value = 32;
10800 else
10801 gcc_assert (0);
10803 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10804 return mips_builtin_branch_and_move (condition, target,
10805 const1_rtx, const0_rtx);
10808 /* Implement TARGET_EXPAND_BUILTIN. */
10810 static rtx
10811 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10812 enum machine_mode mode ATTRIBUTE_UNUSED,
10813 int ignore ATTRIBUTE_UNUSED)
10815 tree fndecl;
10816 unsigned int fcode, avail;
10817 const struct mips_builtin_description *d;
10819 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10820 fcode = DECL_FUNCTION_CODE (fndecl);
10821 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
10822 d = &mips_builtins[fcode];
10823 avail = d->avail ();
10824 gcc_assert (avail != 0);
10825 if (TARGET_MIPS16)
10827 error ("built-in function %qs not supported for MIPS16",
10828 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
10829 return const0_rtx;
10831 switch (d->builtin_type)
10833 case MIPS_BUILTIN_DIRECT:
10834 return mips_expand_builtin_direct (d->icode, target, exp, true);
10836 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10837 return mips_expand_builtin_direct (d->icode, target, exp, false);
10839 case MIPS_BUILTIN_MOVT:
10840 case MIPS_BUILTIN_MOVF:
10841 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
10842 d->cond, target, exp);
10844 case MIPS_BUILTIN_CMP_ANY:
10845 case MIPS_BUILTIN_CMP_ALL:
10846 case MIPS_BUILTIN_CMP_UPPER:
10847 case MIPS_BUILTIN_CMP_LOWER:
10848 case MIPS_BUILTIN_CMP_SINGLE:
10849 return mips_expand_builtin_compare (d->builtin_type, d->icode,
10850 d->cond, target, exp);
10852 case MIPS_BUILTIN_BPOSGE32:
10853 return mips_expand_builtin_bposge (d->builtin_type, target);
10855 gcc_unreachable ();
10858 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
10859 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10860 struct mips16_constant {
10861 struct mips16_constant *next;
10862 rtx value;
10863 rtx label;
10864 enum machine_mode mode;
10867 /* Information about an incomplete MIPS16 constant pool. FIRST is the
10868 first constant, HIGHEST_ADDRESS is the highest address that the first
10869 byte of the pool can have, and INSN_ADDRESS is the current instruction
10870 address. */
10871 struct mips16_constant_pool {
10872 struct mips16_constant *first;
10873 int highest_address;
10874 int insn_address;
10877 /* Add constant VALUE to POOL and return its label. MODE is the
10878 value's mode (used for CONST_INTs, etc.). */
10880 static rtx
10881 mips16_add_constant (struct mips16_constant_pool *pool,
10882 rtx value, enum machine_mode mode)
10884 struct mips16_constant **p, *c;
10885 bool first_of_size_p;
10887 /* See whether the constant is already in the pool. If so, return the
10888 existing label, otherwise leave P pointing to the place where the
10889 constant should be added.
10891 Keep the pool sorted in increasing order of mode size so that we can
10892 reduce the number of alignments needed. */
10893 first_of_size_p = true;
10894 for (p = &pool->first; *p != 0; p = &(*p)->next)
10896 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10897 return (*p)->label;
10898 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10899 break;
10900 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10901 first_of_size_p = false;
10904 /* In the worst case, the constant needed by the earliest instruction
10905 will end up at the end of the pool. The entire pool must then be
10906 accessible from that instruction.
10908 When adding the first constant, set the pool's highest address to
10909 the address of the first out-of-range byte. Adjust this address
10910 downwards each time a new constant is added. */
10911 if (pool->first == 0)
10912 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
10913 of the instruction with the lowest two bits clear. The base PC
10914 value for LDPC has the lowest three bits clear. Assume the worst
10915 case here; namely that the PC-relative instruction occupies the
10916 last 2 bytes in an aligned word. */
10917 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10918 pool->highest_address -= GET_MODE_SIZE (mode);
10919 if (first_of_size_p)
10920 /* Take into account the worst possible padding due to alignment. */
10921 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10923 /* Create a new entry. */
10924 c = XNEW (struct mips16_constant);
10925 c->value = value;
10926 c->mode = mode;
10927 c->label = gen_label_rtx ();
10928 c->next = *p;
10929 *p = c;
10931 return c->label;
10934 /* Output constant VALUE after instruction INSN and return the last
10935 instruction emitted. MODE is the mode of the constant. */
10937 static rtx
10938 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10940 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
10942 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10943 return emit_insn_after (gen_consttable_int (value, size), insn);
10946 if (SCALAR_FLOAT_MODE_P (mode))
10947 return emit_insn_after (gen_consttable_float (value), insn);
10949 if (VECTOR_MODE_P (mode))
10951 int i;
10953 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10954 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
10955 CONST_VECTOR_ELT (value, i), insn);
10956 return insn;
10959 gcc_unreachable ();
10962 /* Dump out the constants in CONSTANTS after INSN. */
10964 static void
10965 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
10967 struct mips16_constant *c, *next;
10968 int align;
10970 align = 0;
10971 for (c = constants; c != NULL; c = next)
10973 /* If necessary, increase the alignment of PC. */
10974 if (align < GET_MODE_SIZE (c->mode))
10976 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10977 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10979 align = GET_MODE_SIZE (c->mode);
10981 insn = emit_label_after (c->label, insn);
10982 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
10984 next = c->next;
10985 free (c);
10988 emit_barrier_after (insn);
10991 /* Return the length of instruction INSN. */
10993 static int
10994 mips16_insn_length (rtx insn)
10996 if (JUMP_P (insn))
10998 rtx body = PATTERN (insn);
10999 if (GET_CODE (body) == ADDR_VEC)
11000 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
11001 if (GET_CODE (body) == ADDR_DIFF_VEC)
11002 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
11004 return get_attr_length (insn);
11007 /* If *X is a symbolic constant that refers to the constant pool, add
11008 the constant to POOL and rewrite *X to use the constant's label. */
11010 static void
11011 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
11013 rtx base, offset, label;
11015 split_const (*x, &base, &offset);
11016 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
11018 label = mips16_add_constant (pool, get_pool_constant (base),
11019 get_pool_mode (base));
11020 base = gen_rtx_LABEL_REF (Pmode, label);
11021 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
11025 /* This structure is used to communicate with mips16_rewrite_pool_refs.
11026 INSN is the instruction we're rewriting and POOL points to the current
11027 constant pool. */
11028 struct mips16_rewrite_pool_refs_info {
11029 rtx insn;
11030 struct mips16_constant_pool *pool;
11033 /* Rewrite *X so that constant pool references refer to the constant's
11034 label instead. DATA points to a mips16_rewrite_pool_refs_info
11035 structure. */
11037 static int
11038 mips16_rewrite_pool_refs (rtx *x, void *data)
11040 struct mips16_rewrite_pool_refs_info *info = data;
11042 if (force_to_mem_operand (*x, Pmode))
11044 rtx mem = force_const_mem (GET_MODE (*x), *x);
11045 validate_change (info->insn, x, mem, false);
11048 if (MEM_P (*x))
11050 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
11051 return -1;
11054 if (TARGET_MIPS16_TEXT_LOADS)
11055 mips16_rewrite_pool_constant (info->pool, x);
11057 return GET_CODE (*x) == CONST ? -1 : 0;
11060 /* Build MIPS16 constant pools. */
11062 static void
11063 mips16_lay_out_constants (void)
11065 struct mips16_constant_pool pool;
11066 struct mips16_rewrite_pool_refs_info info;
11067 rtx insn, barrier;
11069 if (!TARGET_MIPS16_PCREL_LOADS)
11070 return;
11072 barrier = 0;
11073 memset (&pool, 0, sizeof (pool));
11074 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11076 /* Rewrite constant pool references in INSN. */
11077 if (INSN_P (insn))
11079 info.insn = insn;
11080 info.pool = &pool;
11081 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
11084 pool.insn_address += mips16_insn_length (insn);
11086 if (pool.first != NULL)
11088 /* If there are no natural barriers between the first user of
11089 the pool and the highest acceptable address, we'll need to
11090 create a new instruction to jump around the constant pool.
11091 In the worst case, this instruction will be 4 bytes long.
11093 If it's too late to do this transformation after INSN,
11094 do it immediately before INSN. */
11095 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
11097 rtx label, jump;
11099 label = gen_label_rtx ();
11101 jump = emit_jump_insn_before (gen_jump (label), insn);
11102 JUMP_LABEL (jump) = label;
11103 LABEL_NUSES (label) = 1;
11104 barrier = emit_barrier_after (jump);
11106 emit_label_after (label, barrier);
11107 pool.insn_address += 4;
11110 /* See whether the constant pool is now out of range of the first
11111 user. If so, output the constants after the previous barrier.
11112 Note that any instructions between BARRIER and INSN (inclusive)
11113 will use negative offsets to refer to the pool. */
11114 if (pool.insn_address > pool.highest_address)
11116 mips16_emit_constants (pool.first, barrier);
11117 pool.first = NULL;
11118 barrier = 0;
11120 else if (BARRIER_P (insn))
11121 barrier = insn;
11124 mips16_emit_constants (pool.first, get_last_insn ());
11127 /* A temporary variable used by for_each_rtx callbacks, etc. */
11128 static rtx mips_sim_insn;
11130 /* A structure representing the state of the processor pipeline.
11131 Used by the mips_sim_* family of functions. */
11132 struct mips_sim {
11133 /* The maximum number of instructions that can be issued in a cycle.
11134 (Caches mips_issue_rate.) */
11135 unsigned int issue_rate;
11137 /* The current simulation time. */
11138 unsigned int time;
11140 /* How many more instructions can be issued in the current cycle. */
11141 unsigned int insns_left;
11143 /* LAST_SET[X].INSN is the last instruction to set register X.
11144 LAST_SET[X].TIME is the time at which that instruction was issued.
11145 INSN is null if no instruction has yet set register X. */
11146 struct {
11147 rtx insn;
11148 unsigned int time;
11149 } last_set[FIRST_PSEUDO_REGISTER];
11151 /* The pipeline's current DFA state. */
11152 state_t dfa_state;
11155 /* Reset STATE to the initial simulation state. */
11157 static void
11158 mips_sim_reset (struct mips_sim *state)
11160 state->time = 0;
11161 state->insns_left = state->issue_rate;
11162 memset (&state->last_set, 0, sizeof (state->last_set));
11163 state_reset (state->dfa_state);
11166 /* Initialize STATE before its first use. DFA_STATE points to an
11167 allocated but uninitialized DFA state. */
11169 static void
11170 mips_sim_init (struct mips_sim *state, state_t dfa_state)
11172 state->issue_rate = mips_issue_rate ();
11173 state->dfa_state = dfa_state;
11174 mips_sim_reset (state);
11177 /* Advance STATE by one clock cycle. */
11179 static void
11180 mips_sim_next_cycle (struct mips_sim *state)
11182 state->time++;
11183 state->insns_left = state->issue_rate;
11184 state_transition (state->dfa_state, 0);
11187 /* Advance simulation state STATE until instruction INSN can read
11188 register REG. */
11190 static void
11191 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
11193 unsigned int regno, end_regno;
11195 end_regno = END_REGNO (reg);
11196 for (regno = REGNO (reg); regno < end_regno; regno++)
11197 if (state->last_set[regno].insn != 0)
11199 unsigned int t;
11201 t = (state->last_set[regno].time
11202 + insn_latency (state->last_set[regno].insn, insn));
11203 while (state->time < t)
11204 mips_sim_next_cycle (state);
11208 /* A for_each_rtx callback. If *X is a register, advance simulation state
11209 DATA until mips_sim_insn can read the register's value. */
11211 static int
11212 mips_sim_wait_regs_2 (rtx *x, void *data)
11214 if (REG_P (*x))
11215 mips_sim_wait_reg (data, mips_sim_insn, *x);
11216 return 0;
11219 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11221 static void
11222 mips_sim_wait_regs_1 (rtx *x, void *data)
11224 for_each_rtx (x, mips_sim_wait_regs_2, data);
11227 /* Advance simulation state STATE until all of INSN's register
11228 dependencies are satisfied. */
11230 static void
11231 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11233 mips_sim_insn = insn;
11234 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11237 /* Advance simulation state STATE until the units required by
11238 instruction INSN are available. */
11240 static void
11241 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11243 state_t tmp_state;
11245 tmp_state = alloca (state_size ());
11246 while (state->insns_left == 0
11247 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11248 state_transition (tmp_state, insn) >= 0))
11249 mips_sim_next_cycle (state);
11252 /* Advance simulation state STATE until INSN is ready to issue. */
11254 static void
11255 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11257 mips_sim_wait_regs (state, insn);
11258 mips_sim_wait_units (state, insn);
11261 /* mips_sim_insn has just set X. Update the LAST_SET array
11262 in simulation state DATA. */
11264 static void
11265 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11267 struct mips_sim *state;
11269 state = data;
11270 if (REG_P (x))
11272 unsigned int regno, end_regno;
11274 end_regno = END_REGNO (x);
11275 for (regno = REGNO (x); regno < end_regno; regno++)
11277 state->last_set[regno].insn = mips_sim_insn;
11278 state->last_set[regno].time = state->time;
11283 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11284 can issue immediately (i.e., that mips_sim_wait_insn has already
11285 been called). */
11287 static void
11288 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11290 state_transition (state->dfa_state, insn);
11291 state->insns_left--;
11293 mips_sim_insn = insn;
11294 note_stores (PATTERN (insn), mips_sim_record_set, state);
11297 /* Simulate issuing a NOP in state STATE. */
11299 static void
11300 mips_sim_issue_nop (struct mips_sim *state)
11302 if (state->insns_left == 0)
11303 mips_sim_next_cycle (state);
11304 state->insns_left--;
11307 /* Update simulation state STATE so that it's ready to accept the instruction
11308 after INSN. INSN should be part of the main rtl chain, not a member of a
11309 SEQUENCE. */
11311 static void
11312 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11314 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11315 if (JUMP_P (insn))
11316 mips_sim_issue_nop (state);
11318 switch (GET_CODE (SEQ_BEGIN (insn)))
11320 case CODE_LABEL:
11321 case CALL_INSN:
11322 /* We can't predict the processor state after a call or label. */
11323 mips_sim_reset (state);
11324 break;
11326 case JUMP_INSN:
11327 /* The delay slots of branch likely instructions are only executed
11328 when the branch is taken. Therefore, if the caller has simulated
11329 the delay slot instruction, STATE does not really reflect the state
11330 of the pipeline for the instruction after the delay slot. Also,
11331 branch likely instructions tend to incur a penalty when not taken,
11332 so there will probably be an extra delay between the branch and
11333 the instruction after the delay slot. */
11334 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11335 mips_sim_reset (state);
11336 break;
11338 default:
11339 break;
11343 /* The VR4130 pipeline issues aligned pairs of instructions together,
11344 but it stalls the second instruction if it depends on the first.
11345 In order to cut down the amount of logic required, this dependence
11346 check is not based on a full instruction decode. Instead, any non-SPECIAL
11347 instruction is assumed to modify the register specified by bits 20-16
11348 (which is usually the "rt" field).
11350 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
11351 input, so we can end up with a false dependence between the branch
11352 and its delay slot. If this situation occurs in instruction INSN,
11353 try to avoid it by swapping rs and rt. */
11355 static void
11356 vr4130_avoid_branch_rt_conflict (rtx insn)
11358 rtx first, second;
11360 first = SEQ_BEGIN (insn);
11361 second = SEQ_END (insn);
11362 if (JUMP_P (first)
11363 && NONJUMP_INSN_P (second)
11364 && GET_CODE (PATTERN (first)) == SET
11365 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11366 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11368 /* Check for the right kind of condition. */
11369 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11370 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11371 && REG_P (XEXP (cond, 0))
11372 && REG_P (XEXP (cond, 1))
11373 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11374 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11376 /* SECOND mentions the rt register but not the rs register. */
11377 rtx tmp = XEXP (cond, 0);
11378 XEXP (cond, 0) = XEXP (cond, 1);
11379 XEXP (cond, 1) = tmp;
11384 /* Implement -mvr4130-align. Go through each basic block and simulate the
11385 processor pipeline. If we find that a pair of instructions could execute
11386 in parallel, and the first of those instructions is not 8-byte aligned,
11387 insert a nop to make it aligned. */
11389 static void
11390 vr4130_align_insns (void)
11392 struct mips_sim state;
11393 rtx insn, subinsn, last, last2, next;
11394 bool aligned_p;
11396 dfa_start ();
11398 /* LAST is the last instruction before INSN to have a nonzero length.
11399 LAST2 is the last such instruction before LAST. */
11400 last = 0;
11401 last2 = 0;
11403 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11404 aligned_p = true;
11406 mips_sim_init (&state, alloca (state_size ()));
11407 for (insn = get_insns (); insn != 0; insn = next)
11409 unsigned int length;
11411 next = NEXT_INSN (insn);
11413 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11414 This isn't really related to the alignment pass, but we do it on
11415 the fly to avoid a separate instruction walk. */
11416 vr4130_avoid_branch_rt_conflict (insn);
11418 if (USEFUL_INSN_P (insn))
11419 FOR_EACH_SUBINSN (subinsn, insn)
11421 mips_sim_wait_insn (&state, subinsn);
11423 /* If we want this instruction to issue in parallel with the
11424 previous one, make sure that the previous instruction is
11425 aligned. There are several reasons why this isn't worthwhile
11426 when the second instruction is a call:
11428 - Calls are less likely to be performance critical,
11429 - There's a good chance that the delay slot can execute
11430 in parallel with the call.
11431 - The return address would then be unaligned.
11433 In general, if we're going to insert a nop between instructions
11434 X and Y, it's better to insert it immediately after X. That
11435 way, if the nop makes Y aligned, it will also align any labels
11436 between X and Y. */
11437 if (state.insns_left != state.issue_rate
11438 && !CALL_P (subinsn))
11440 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11442 /* SUBINSN is the first instruction in INSN and INSN is
11443 aligned. We want to align the previous instruction
11444 instead, so insert a nop between LAST2 and LAST.
11446 Note that LAST could be either a single instruction
11447 or a branch with a delay slot. In the latter case,
11448 LAST, like INSN, is already aligned, but the delay
11449 slot must have some extra delay that stops it from
11450 issuing at the same time as the branch. We therefore
11451 insert a nop before the branch in order to align its
11452 delay slot. */
11453 emit_insn_after (gen_nop (), last2);
11454 aligned_p = false;
11456 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11458 /* SUBINSN is the delay slot of INSN, but INSN is
11459 currently unaligned. Insert a nop between
11460 LAST and INSN to align it. */
11461 emit_insn_after (gen_nop (), last);
11462 aligned_p = true;
11465 mips_sim_issue_insn (&state, subinsn);
11467 mips_sim_finish_insn (&state, insn);
11469 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11470 length = get_attr_length (insn);
11471 if (length > 0)
11473 /* If the instruction is an asm statement or multi-instruction
11474 mips.md patern, the length is only an estimate. Insert an
11475 8 byte alignment after it so that the following instructions
11476 can be handled correctly. */
11477 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11478 && (recog_memoized (insn) < 0 || length >= 8))
11480 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11481 next = NEXT_INSN (next);
11482 mips_sim_next_cycle (&state);
11483 aligned_p = true;
11485 else if (length & 4)
11486 aligned_p = !aligned_p;
11487 last2 = last;
11488 last = insn;
11491 /* See whether INSN is an aligned label. */
11492 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11493 aligned_p = true;
11495 dfa_finish ();
11498 /* This structure records that the current function has a LO_SUM
11499 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
11500 the largest offset applied to BASE by all such LO_SUMs. */
11501 struct mips_lo_sum_offset {
11502 rtx base;
11503 HOST_WIDE_INT offset;
11506 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
11508 static hashval_t
11509 mips_hash_base (rtx base)
11511 int do_not_record_p;
11513 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
11516 /* Hash-table callbacks for mips_lo_sum_offsets. */
11518 static hashval_t
11519 mips_lo_sum_offset_hash (const void *entry)
11521 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
11524 static int
11525 mips_lo_sum_offset_eq (const void *entry, const void *value)
11527 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
11528 (const_rtx) value);
11531 /* Look up symbolic constant X in HTAB, which is a hash table of
11532 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
11533 paired with a recorded LO_SUM, otherwise record X in the table. */
11535 static bool
11536 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
11538 rtx base, offset;
11539 void **slot;
11540 struct mips_lo_sum_offset *entry;
11542 /* Split X into a base and offset. */
11543 split_const (x, &base, &offset);
11544 if (UNSPEC_ADDRESS_P (base))
11545 base = UNSPEC_ADDRESS (base);
11547 /* Look up the base in the hash table. */
11548 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
11549 if (slot == NULL)
11550 return false;
11552 entry = (struct mips_lo_sum_offset *) *slot;
11553 if (option == INSERT)
11555 if (entry == NULL)
11557 entry = XNEW (struct mips_lo_sum_offset);
11558 entry->base = base;
11559 entry->offset = INTVAL (offset);
11560 *slot = entry;
11562 else
11564 if (INTVAL (offset) > entry->offset)
11565 entry->offset = INTVAL (offset);
11568 return INTVAL (offset) <= entry->offset;
11571 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
11572 Record every LO_SUM in *LOC. */
11574 static int
11575 mips_record_lo_sum (rtx *loc, void *data)
11577 if (GET_CODE (*loc) == LO_SUM)
11578 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
11579 return 0;
11582 /* Return true if INSN is a SET of an orphaned high-part relocation.
11583 HTAB is a hash table of mips_lo_sum_offsets that describes all the
11584 LO_SUMs in the current function. */
11586 static bool
11587 mips_orphaned_high_part_p (htab_t htab, rtx insn)
11589 enum mips_symbol_type type;
11590 rtx x, set;
11592 set = single_set (insn);
11593 if (set)
11595 /* Check for %his. */
11596 x = SET_SRC (set);
11597 if (GET_CODE (x) == HIGH
11598 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
11599 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
11601 /* Check for local %gots (and %got_pages, which is redundant but OK). */
11602 if (GET_CODE (x) == UNSPEC
11603 && XINT (x, 1) == UNSPEC_LOAD_GOT
11604 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
11605 SYMBOL_CONTEXT_LEA, &type)
11606 && type == SYMBOL_GOTOFF_PAGE)
11607 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
11609 return false;
11612 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
11613 INSN and a previous instruction, avoid it by inserting nops after
11614 instruction AFTER.
11616 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
11617 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
11618 before using the value of that register. *HILO_DELAY counts the
11619 number of instructions since the last hilo hazard (that is,
11620 the number of instructions since the last MFLO or MFHI).
11622 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
11623 for the next instruction.
11625 LO_REG is an rtx for the LO register, used in dependence checking. */
11627 static void
11628 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
11629 rtx *delayed_reg, rtx lo_reg)
11631 rtx pattern, set;
11632 int nops, ninsns;
11634 pattern = PATTERN (insn);
11636 /* Do not put the whole function in .set noreorder if it contains
11637 an asm statement. We don't know whether there will be hazards
11638 between the asm statement and the gcc-generated code. */
11639 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
11640 cfun->machine->all_noreorder_p = false;
11642 /* Ignore zero-length instructions (barriers and the like). */
11643 ninsns = get_attr_length (insn) / 4;
11644 if (ninsns == 0)
11645 return;
11647 /* Work out how many nops are needed. Note that we only care about
11648 registers that are explicitly mentioned in the instruction's pattern.
11649 It doesn't matter that calls use the argument registers or that they
11650 clobber hi and lo. */
11651 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
11652 nops = 2 - *hilo_delay;
11653 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
11654 nops = 1;
11655 else
11656 nops = 0;
11658 /* Insert the nops between this instruction and the previous one.
11659 Each new nop takes us further from the last hilo hazard. */
11660 *hilo_delay += nops;
11661 while (nops-- > 0)
11662 emit_insn_after (gen_hazard_nop (), after);
11664 /* Set up the state for the next instruction. */
11665 *hilo_delay += ninsns;
11666 *delayed_reg = 0;
11667 if (INSN_CODE (insn) >= 0)
11668 switch (get_attr_hazard (insn))
11670 case HAZARD_NONE:
11671 break;
11673 case HAZARD_HILO:
11674 *hilo_delay = 0;
11675 break;
11677 case HAZARD_DELAY:
11678 set = single_set (insn);
11679 gcc_assert (set);
11680 *delayed_reg = SET_DEST (set);
11681 break;
11685 /* Go through the instruction stream and insert nops where necessary.
11686 Also delete any high-part relocations whose partnering low parts
11687 are now all dead. See if the whole function can then be put into
11688 .set noreorder and .set nomacro. */
11690 static void
11691 mips_reorg_process_insns (void)
11693 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
11694 int hilo_delay;
11695 htab_t htab;
11697 /* Force all instructions to be split into their final form. */
11698 split_all_insns_noflow ();
11700 /* Recalculate instruction lengths without taking nops into account. */
11701 cfun->machine->ignore_hazard_length_p = true;
11702 shorten_branches (get_insns ());
11704 cfun->machine->all_noreorder_p = true;
11706 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
11707 if (!TARGET_EXPLICIT_RELOCS)
11708 cfun->machine->all_noreorder_p = false;
11710 /* Profiled functions can't be all noreorder because the profiler
11711 support uses assembler macros. */
11712 if (crtl->profile)
11713 cfun->machine->all_noreorder_p = false;
11715 /* Code compiled with -mfix-vr4120 can't be all noreorder because
11716 we rely on the assembler to work around some errata. */
11717 if (TARGET_FIX_VR4120)
11718 cfun->machine->all_noreorder_p = false;
11720 /* The same is true for -mfix-vr4130 if we might generate MFLO or
11721 MFHI instructions. Note that we avoid using MFLO and MFHI if
11722 the VR4130 MACC and DMACC instructions are available instead;
11723 see the *mfhilo_{si,di}_macc patterns. */
11724 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
11725 cfun->machine->all_noreorder_p = false;
11727 htab = htab_create (37, mips_lo_sum_offset_hash,
11728 mips_lo_sum_offset_eq, free);
11730 /* Make a first pass over the instructions, recording all the LO_SUMs. */
11731 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
11732 FOR_EACH_SUBINSN (subinsn, insn)
11733 if (INSN_P (subinsn))
11734 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
11736 last_insn = 0;
11737 hilo_delay = 2;
11738 delayed_reg = 0;
11739 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
11741 /* Make a second pass over the instructions. Delete orphaned
11742 high-part relocations or turn them into NOPs. Avoid hazards
11743 by inserting NOPs. */
11744 for (insn = get_insns (); insn != 0; insn = next_insn)
11746 next_insn = NEXT_INSN (insn);
11747 if (INSN_P (insn))
11749 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11751 /* If we find an orphaned high-part relocation in a delay
11752 slot, it's easier to turn that instruction into a NOP than
11753 to delete it. The delay slot will be a NOP either way. */
11754 FOR_EACH_SUBINSN (subinsn, insn)
11755 if (INSN_P (subinsn))
11757 if (mips_orphaned_high_part_p (htab, subinsn))
11759 PATTERN (subinsn) = gen_nop ();
11760 INSN_CODE (subinsn) = CODE_FOR_nop;
11762 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
11763 &delayed_reg, lo_reg);
11765 last_insn = insn;
11767 else
11769 /* INSN is a single instruction. Delete it if it's an
11770 orphaned high-part relocation. */
11771 if (mips_orphaned_high_part_p (htab, insn))
11772 delete_insn (insn);
11773 else
11775 mips_avoid_hazard (last_insn, insn, &hilo_delay,
11776 &delayed_reg, lo_reg);
11777 last_insn = insn;
11783 htab_delete (htab);
11786 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
11788 static void
11789 mips_reorg (void)
11791 mips16_lay_out_constants ();
11792 if (mips_base_delayed_branch)
11793 dbr_schedule (get_insns ());
11794 mips_reorg_process_insns ();
11795 if (TARGET_EXPLICIT_RELOCS && TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
11796 vr4130_align_insns ();
11799 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
11800 in order to avoid duplicating too much logic from elsewhere. */
11802 static void
11803 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11804 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11805 tree function)
11807 rtx this, temp1, temp2, insn, fnaddr;
11808 bool use_sibcall_p;
11810 /* Pretend to be a post-reload pass while generating rtl. */
11811 reload_completed = 1;
11813 /* Mark the end of the (empty) prologue. */
11814 emit_note (NOTE_INSN_PROLOGUE_END);
11816 /* Determine if we can use a sibcall to call FUNCTION directly. */
11817 fnaddr = XEXP (DECL_RTL (function), 0);
11818 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
11819 && const_call_insn_operand (fnaddr, Pmode));
11821 /* Determine if we need to load FNADDR from the GOT. */
11822 if (!use_sibcall_p)
11823 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
11825 case SYMBOL_GOT_PAGE_OFST:
11826 case SYMBOL_GOT_DISP:
11827 /* Pick a global pointer. Use a call-clobbered register if
11828 TARGET_CALL_SAVED_GP. */
11829 cfun->machine->global_pointer =
11830 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
11831 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11833 /* Set up the global pointer for n32 or n64 abicalls. */
11834 mips_emit_loadgp ();
11835 break;
11837 default:
11838 break;
11841 /* We need two temporary registers in some cases. */
11842 temp1 = gen_rtx_REG (Pmode, 2);
11843 temp2 = gen_rtx_REG (Pmode, 3);
11845 /* Find out which register contains the "this" pointer. */
11846 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11847 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
11848 else
11849 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
11851 /* Add DELTA to THIS. */
11852 if (delta != 0)
11854 rtx offset = GEN_INT (delta);
11855 if (!SMALL_OPERAND (delta))
11857 mips_emit_move (temp1, offset);
11858 offset = temp1;
11860 emit_insn (gen_add3_insn (this, this, offset));
11863 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
11864 if (vcall_offset != 0)
11866 rtx addr;
11868 /* Set TEMP1 to *THIS. */
11869 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
11871 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
11872 addr = mips_add_offset (temp2, temp1, vcall_offset);
11874 /* Load the offset and add it to THIS. */
11875 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
11876 emit_insn (gen_add3_insn (this, this, temp1));
11879 /* Jump to the target function. Use a sibcall if direct jumps are
11880 allowed, otherwise load the address into a register first. */
11881 if (use_sibcall_p)
11883 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
11884 SIBLING_CALL_P (insn) = 1;
11886 else
11888 /* This is messy. GAS treats "la $25,foo" as part of a call
11889 sequence and may allow a global "foo" to be lazily bound.
11890 The general move patterns therefore reject this combination.
11892 In this context, lazy binding would actually be OK
11893 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
11894 TARGET_CALL_SAVED_GP; see mips_load_call_address.
11895 We must therefore load the address via a temporary
11896 register if mips_dangerous_for_la25_p.
11898 If we jump to the temporary register rather than $25, the assembler
11899 can use the move insn to fill the jump's delay slot. */
11900 if (TARGET_USE_PIC_FN_ADDR_REG
11901 && !mips_dangerous_for_la25_p (fnaddr))
11902 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
11903 mips_load_call_address (temp1, fnaddr, true);
11905 if (TARGET_USE_PIC_FN_ADDR_REG
11906 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
11907 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
11908 emit_jump_insn (gen_indirect_jump (temp1));
11911 /* Run just enough of rest_of_compilation. This sequence was
11912 "borrowed" from alpha.c. */
11913 insn = get_insns ();
11914 insn_locators_alloc ();
11915 split_all_insns_noflow ();
11916 mips16_lay_out_constants ();
11917 shorten_branches (insn);
11918 final_start_function (insn, file, 1);
11919 final (insn, file, 1);
11920 final_end_function ();
11921 free_after_compilation (cfun);
11923 /* Clean up the vars set above. Note that final_end_function resets
11924 the global pointer for us. */
11925 reload_completed = 0;
11928 /* The last argument passed to mips_set_mips16_mode, or negative if the
11929 function hasn't been called yet. */
11930 static GTY(()) int was_mips16_p = -1;
11932 /* Set up the target-dependent global state so that it matches the
11933 current function's ISA mode. */
11935 static void
11936 mips_set_mips16_mode (int mips16_p)
11938 if (mips16_p == was_mips16_p)
11939 return;
11941 /* Restore base settings of various flags. */
11942 target_flags = mips_base_target_flags;
11943 flag_schedule_insns = mips_base_schedule_insns;
11944 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
11945 flag_move_loop_invariants = mips_base_move_loop_invariants;
11946 align_loops = mips_base_align_loops;
11947 align_jumps = mips_base_align_jumps;
11948 align_functions = mips_base_align_functions;
11950 if (mips16_p)
11952 /* Switch to MIPS16 mode. */
11953 target_flags |= MASK_MIPS16;
11955 /* Don't run the scheduler before reload, since it tends to
11956 increase register pressure. */
11957 flag_schedule_insns = 0;
11959 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
11960 the whole function to be in a single section. */
11961 flag_reorder_blocks_and_partition = 0;
11963 /* Don't move loop invariants, because it tends to increase
11964 register pressure. It also introduces an extra move in cases
11965 where the constant is the first operand in a two-operand binary
11966 instruction, or when it forms a register argument to a functon
11967 call. */
11968 flag_move_loop_invariants = 0;
11970 /* Silently disable -mexplicit-relocs since it doesn't apply
11971 to MIPS16 code. Even so, it would overly pedantic to warn
11972 about "-mips16 -mexplicit-relocs", especially given that
11973 we use a %gprel() operator. */
11974 target_flags &= ~MASK_EXPLICIT_RELOCS;
11976 /* Experiments suggest we get the best overall section-anchor
11977 results from using the range of an unextended LW or SW. Code
11978 that makes heavy use of byte or short accesses can do better
11979 with ranges of 0...31 and 0...63 respectively, but most code is
11980 sensitive to the range of LW and SW instead. */
11981 targetm.min_anchor_offset = 0;
11982 targetm.max_anchor_offset = 127;
11984 if (flag_pic || TARGET_ABICALLS)
11985 sorry ("MIPS16 PIC");
11987 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
11988 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
11990 else
11992 /* Switch to normal (non-MIPS16) mode. */
11993 target_flags &= ~MASK_MIPS16;
11995 /* Provide default values for align_* for 64-bit targets. */
11996 if (TARGET_64BIT)
11998 if (align_loops == 0)
11999 align_loops = 8;
12000 if (align_jumps == 0)
12001 align_jumps = 8;
12002 if (align_functions == 0)
12003 align_functions = 8;
12006 targetm.min_anchor_offset = -32768;
12007 targetm.max_anchor_offset = 32767;
12010 /* (Re)initialize MIPS target internals for new ISA. */
12011 mips_init_relocs ();
12013 if (was_mips16_p >= 0)
12014 /* Reinitialize target-dependent state. */
12015 target_reinit ();
12017 was_mips16_p = mips16_p;
12020 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
12021 function should use the MIPS16 ISA and switch modes accordingly. */
12023 static void
12024 mips_set_current_function (tree fndecl)
12026 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
12029 /* Allocate a chunk of memory for per-function machine-dependent data. */
12031 static struct machine_function *
12032 mips_init_machine_status (void)
12034 return ((struct machine_function *)
12035 ggc_alloc_cleared (sizeof (struct machine_function)));
12038 /* Return the processor associated with the given ISA level, or null
12039 if the ISA isn't valid. */
12041 static const struct mips_cpu_info *
12042 mips_cpu_info_from_isa (int isa)
12044 unsigned int i;
12046 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12047 if (mips_cpu_info_table[i].isa == isa)
12048 return mips_cpu_info_table + i;
12050 return NULL;
12053 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
12054 with a final "000" replaced by "k". Ignore case.
12056 Note: this function is shared between GCC and GAS. */
12058 static bool
12059 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
12061 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
12062 given++, canonical++;
12064 return ((*given == 0 && *canonical == 0)
12065 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
12068 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
12069 CPU name. We've traditionally allowed a lot of variation here.
12071 Note: this function is shared between GCC and GAS. */
12073 static bool
12074 mips_matching_cpu_name_p (const char *canonical, const char *given)
12076 /* First see if the name matches exactly, or with a final "000"
12077 turned into "k". */
12078 if (mips_strict_matching_cpu_name_p (canonical, given))
12079 return true;
12081 /* If not, try comparing based on numerical designation alone.
12082 See if GIVEN is an unadorned number, or 'r' followed by a number. */
12083 if (TOLOWER (*given) == 'r')
12084 given++;
12085 if (!ISDIGIT (*given))
12086 return false;
12088 /* Skip over some well-known prefixes in the canonical name,
12089 hoping to find a number there too. */
12090 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
12091 canonical += 2;
12092 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
12093 canonical += 2;
12094 else if (TOLOWER (canonical[0]) == 'r')
12095 canonical += 1;
12097 return mips_strict_matching_cpu_name_p (canonical, given);
12100 /* Return the mips_cpu_info entry for the processor or ISA given
12101 by CPU_STRING. Return null if the string isn't recognized.
12103 A similar function exists in GAS. */
12105 static const struct mips_cpu_info *
12106 mips_parse_cpu (const char *cpu_string)
12108 unsigned int i;
12109 const char *s;
12111 /* In the past, we allowed upper-case CPU names, but it doesn't
12112 work well with the multilib machinery. */
12113 for (s = cpu_string; *s != 0; s++)
12114 if (ISUPPER (*s))
12116 warning (0, "CPU names must be lower case");
12117 break;
12120 /* 'from-abi' selects the most compatible architecture for the given
12121 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
12122 EABIs, we have to decide whether we're using the 32-bit or 64-bit
12123 version. */
12124 if (strcasecmp (cpu_string, "from-abi") == 0)
12125 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
12126 : ABI_NEEDS_64BIT_REGS ? 3
12127 : (TARGET_64BIT ? 3 : 1));
12129 /* 'default' has traditionally been a no-op. Probably not very useful. */
12130 if (strcasecmp (cpu_string, "default") == 0)
12131 return NULL;
12133 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12134 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
12135 return mips_cpu_info_table + i;
12137 return NULL;
12140 /* Set up globals to generate code for the ISA or processor
12141 described by INFO. */
12143 static void
12144 mips_set_architecture (const struct mips_cpu_info *info)
12146 if (info != 0)
12148 mips_arch_info = info;
12149 mips_arch = info->cpu;
12150 mips_isa = info->isa;
12154 /* Likewise for tuning. */
12156 static void
12157 mips_set_tune (const struct mips_cpu_info *info)
12159 if (info != 0)
12161 mips_tune_info = info;
12162 mips_tune = info->cpu;
12166 /* Implement TARGET_HANDLE_OPTION. */
12168 static bool
12169 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
12171 switch (code)
12173 case OPT_mabi_:
12174 if (strcmp (arg, "32") == 0)
12175 mips_abi = ABI_32;
12176 else if (strcmp (arg, "o64") == 0)
12177 mips_abi = ABI_O64;
12178 else if (strcmp (arg, "n32") == 0)
12179 mips_abi = ABI_N32;
12180 else if (strcmp (arg, "64") == 0)
12181 mips_abi = ABI_64;
12182 else if (strcmp (arg, "eabi") == 0)
12183 mips_abi = ABI_EABI;
12184 else
12185 return false;
12186 return true;
12188 case OPT_march_:
12189 case OPT_mtune_:
12190 return mips_parse_cpu (arg) != 0;
12192 case OPT_mips:
12193 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
12194 return mips_isa_option_info != 0;
12196 case OPT_mno_flush_func:
12197 mips_cache_flush_func = NULL;
12198 return true;
12200 case OPT_mcode_readable_:
12201 if (strcmp (arg, "yes") == 0)
12202 mips_code_readable = CODE_READABLE_YES;
12203 else if (strcmp (arg, "pcrel") == 0)
12204 mips_code_readable = CODE_READABLE_PCREL;
12205 else if (strcmp (arg, "no") == 0)
12206 mips_code_readable = CODE_READABLE_NO;
12207 else
12208 return false;
12209 return true;
12211 default:
12212 return true;
12216 /* Implement OVERRIDE_OPTIONS. */
12218 void
12219 mips_override_options (void)
12221 int i, start, regno, mode;
12223 #ifdef SUBTARGET_OVERRIDE_OPTIONS
12224 SUBTARGET_OVERRIDE_OPTIONS;
12225 #endif
12227 /* Set the small data limit. */
12228 mips_small_data_threshold = (g_switch_set
12229 ? g_switch_value
12230 : MIPS_DEFAULT_GVALUE);
12232 /* The following code determines the architecture and register size.
12233 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
12234 The GAS and GCC code should be kept in sync as much as possible. */
12236 if (mips_arch_string != 0)
12237 mips_set_architecture (mips_parse_cpu (mips_arch_string));
12239 if (mips_isa_option_info != 0)
12241 if (mips_arch_info == 0)
12242 mips_set_architecture (mips_isa_option_info);
12243 else if (mips_arch_info->isa != mips_isa_option_info->isa)
12244 error ("%<-%s%> conflicts with the other architecture options, "
12245 "which specify a %s processor",
12246 mips_isa_option_info->name,
12247 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
12250 if (mips_arch_info == 0)
12252 #ifdef MIPS_CPU_STRING_DEFAULT
12253 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
12254 #else
12255 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
12256 #endif
12259 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
12260 error ("%<-march=%s%> is not compatible with the selected ABI",
12261 mips_arch_info->name);
12263 /* Optimize for mips_arch, unless -mtune selects a different processor. */
12264 if (mips_tune_string != 0)
12265 mips_set_tune (mips_parse_cpu (mips_tune_string));
12267 if (mips_tune_info == 0)
12268 mips_set_tune (mips_arch_info);
12270 if ((target_flags_explicit & MASK_64BIT) != 0)
12272 /* The user specified the size of the integer registers. Make sure
12273 it agrees with the ABI and ISA. */
12274 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
12275 error ("%<-mgp64%> used with a 32-bit processor");
12276 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
12277 error ("%<-mgp32%> used with a 64-bit ABI");
12278 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
12279 error ("%<-mgp64%> used with a 32-bit ABI");
12281 else
12283 /* Infer the integer register size from the ABI and processor.
12284 Restrict ourselves to 32-bit registers if that's all the
12285 processor has, or if the ABI cannot handle 64-bit registers. */
12286 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
12287 target_flags &= ~MASK_64BIT;
12288 else
12289 target_flags |= MASK_64BIT;
12292 if ((target_flags_explicit & MASK_FLOAT64) != 0)
12294 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
12295 error ("unsupported combination: %s", "-mfp64 -msingle-float");
12296 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
12297 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
12298 else if (!TARGET_64BIT && TARGET_FLOAT64)
12300 if (!ISA_HAS_MXHC1)
12301 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
12302 " the target supports the mfhc1 and mthc1 instructions");
12303 else if (mips_abi != ABI_32)
12304 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
12305 " the o32 ABI");
12308 else
12310 /* -msingle-float selects 32-bit float registers. Otherwise the
12311 float registers should be the same size as the integer ones. */
12312 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12313 target_flags |= MASK_FLOAT64;
12314 else
12315 target_flags &= ~MASK_FLOAT64;
12318 /* End of code shared with GAS. */
12320 /* If no -mlong* option was given, infer it from the other options. */
12321 if ((target_flags_explicit & MASK_LONG64) == 0)
12323 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12324 target_flags |= MASK_LONG64;
12325 else
12326 target_flags &= ~MASK_LONG64;
12329 if (!TARGET_OLDABI)
12330 flag_pcc_struct_return = 0;
12332 /* Decide which rtx_costs structure to use. */
12333 if (optimize_size)
12334 mips_cost = &mips_rtx_cost_optimize_size;
12335 else
12336 mips_cost = &mips_rtx_cost_data[mips_tune];
12338 /* If the user hasn't specified a branch cost, use the processor's
12339 default. */
12340 if (mips_branch_cost == 0)
12341 mips_branch_cost = mips_cost->branch_cost;
12343 /* If neither -mbranch-likely nor -mno-branch-likely was given
12344 on the command line, set MASK_BRANCHLIKELY based on the target
12345 architecture and tuning flags. Annulled delay slots are a
12346 size win, so we only consider the processor-specific tuning
12347 for !optimize_size. */
12348 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12350 if (ISA_HAS_BRANCHLIKELY
12351 && (optimize_size
12352 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12353 target_flags |= MASK_BRANCHLIKELY;
12354 else
12355 target_flags &= ~MASK_BRANCHLIKELY;
12357 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12358 warning (0, "the %qs architecture does not support branch-likely"
12359 " instructions", mips_arch_info->name);
12361 /* The effect of -mabicalls isn't defined for the EABI. */
12362 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12364 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12365 target_flags &= ~MASK_ABICALLS;
12368 /* MIPS16 cannot generate PIC yet. */
12369 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
12371 sorry ("MIPS16 PIC");
12372 target_flags &= ~MASK_ABICALLS;
12373 flag_pic = flag_pie = flag_shlib = 0;
12376 if (TARGET_ABICALLS)
12377 /* We need to set flag_pic for executables as well as DSOs
12378 because we may reference symbols that are not defined in
12379 the final executable. (MIPS does not use things like
12380 copy relocs, for example.)
12382 Also, there is a body of code that uses __PIC__ to distinguish
12383 between -mabicalls and -mno-abicalls code. */
12384 flag_pic = 1;
12386 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12387 faster code, but at the expense of more nops. Enable it at -O3 and
12388 above. */
12389 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12390 target_flags |= MASK_VR4130_ALIGN;
12392 /* Prefer a call to memcpy over inline code when optimizing for size,
12393 though see MOVE_RATIO in mips.h. */
12394 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12395 target_flags |= MASK_MEMCPY;
12397 /* If we have a nonzero small-data limit, check that the -mgpopt
12398 setting is consistent with the other target flags. */
12399 if (mips_small_data_threshold > 0)
12401 if (!TARGET_GPOPT)
12403 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
12404 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12406 TARGET_LOCAL_SDATA = false;
12407 TARGET_EXTERN_SDATA = false;
12409 else
12411 if (TARGET_VXWORKS_RTP)
12412 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12414 if (TARGET_ABICALLS)
12415 warning (0, "cannot use small-data accesses for %qs",
12416 "-mabicalls");
12420 #ifdef MIPS_TFMODE_FORMAT
12421 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12422 #endif
12424 /* Make sure that the user didn't turn off paired single support when
12425 MIPS-3D support is requested. */
12426 if (TARGET_MIPS3D
12427 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12428 && !TARGET_PAIRED_SINGLE_FLOAT)
12429 error ("%<-mips3d%> requires %<-mpaired-single%>");
12431 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12432 if (TARGET_MIPS3D)
12433 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12435 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12436 and TARGET_HARD_FLOAT_ABI are both true. */
12437 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12438 error ("%qs must be used with %qs",
12439 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
12440 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
12442 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12443 enabled. */
12444 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
12445 warning (0, "the %qs architecture does not support paired-single"
12446 " instructions", mips_arch_info->name);
12448 /* If TARGET_DSPR2, enable MASK_DSP. */
12449 if (TARGET_DSPR2)
12450 target_flags |= MASK_DSP;
12452 mips_init_print_operand_punct ();
12454 /* Set up array to map GCC register number to debug register number.
12455 Ignore the special purpose register numbers. */
12457 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12459 mips_dbx_regno[i] = INVALID_REGNUM;
12460 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12461 mips_dwarf_regno[i] = i;
12462 else
12463 mips_dwarf_regno[i] = INVALID_REGNUM;
12466 start = GP_DBX_FIRST - GP_REG_FIRST;
12467 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12468 mips_dbx_regno[i] = i + start;
12470 start = FP_DBX_FIRST - FP_REG_FIRST;
12471 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12472 mips_dbx_regno[i] = i + start;
12474 /* Accumulator debug registers use big-endian ordering. */
12475 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12476 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12477 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12478 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12479 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12481 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12482 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12485 /* Set up mips_hard_regno_mode_ok. */
12486 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
12487 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12488 mips_hard_regno_mode_ok[(int)mode][regno]
12489 = mips_hard_regno_mode_ok_p (regno, mode);
12491 /* Function to allocate machine-dependent function status. */
12492 init_machine_status = &mips_init_machine_status;
12494 /* Default to working around R4000 errata only if the processor
12495 was selected explicitly. */
12496 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12497 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12498 target_flags |= MASK_FIX_R4000;
12500 /* Default to working around R4400 errata only if the processor
12501 was selected explicitly. */
12502 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12503 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12504 target_flags |= MASK_FIX_R4400;
12506 /* Save base state of options. */
12507 mips_base_mips16 = TARGET_MIPS16;
12508 mips_base_target_flags = target_flags;
12509 mips_base_delayed_branch = flag_delayed_branch;
12510 mips_base_schedule_insns = flag_schedule_insns;
12511 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12512 mips_base_move_loop_invariants = flag_move_loop_invariants;
12513 mips_base_align_loops = align_loops;
12514 mips_base_align_jumps = align_jumps;
12515 mips_base_align_functions = align_functions;
12517 /* Now select the ISA mode. */
12518 mips_set_mips16_mode (mips_base_mips16);
12520 /* We call dbr_schedule from within mips_reorg. */
12521 flag_delayed_branch = 0;
12524 /* Swap the register information for registers I and I + 1, which
12525 currently have the wrong endianness. Note that the registers'
12526 fixedness and call-clobberedness might have been set on the
12527 command line. */
12529 static void
12530 mips_swap_registers (unsigned int i)
12532 int tmpi;
12533 const char *tmps;
12535 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12536 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12538 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12539 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12540 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12541 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12543 #undef SWAP_STRING
12544 #undef SWAP_INT
12547 /* Implement CONDITIONAL_REGISTER_USAGE. */
12549 void
12550 mips_conditional_register_usage (void)
12552 if (!ISA_HAS_DSP)
12554 int regno;
12556 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12557 fixed_regs[regno] = call_used_regs[regno] = 1;
12559 if (!TARGET_HARD_FLOAT)
12561 int regno;
12563 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12564 fixed_regs[regno] = call_used_regs[regno] = 1;
12565 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12566 fixed_regs[regno] = call_used_regs[regno] = 1;
12568 else if (! ISA_HAS_8CC)
12570 int regno;
12572 /* We only have a single condition-code register. We implement
12573 this by fixing all the condition-code registers and generating
12574 RTL that refers directly to ST_REG_FIRST. */
12575 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12576 fixed_regs[regno] = call_used_regs[regno] = 1;
12578 /* In MIPS16 mode, we permit the $t temporary registers to be used
12579 for reload. We prohibit the unused $s registers, since they
12580 are call-saved, and saving them via a MIPS16 register would
12581 probably waste more time than just reloading the value. */
12582 if (TARGET_MIPS16)
12584 fixed_regs[18] = call_used_regs[18] = 1;
12585 fixed_regs[19] = call_used_regs[19] = 1;
12586 fixed_regs[20] = call_used_regs[20] = 1;
12587 fixed_regs[21] = call_used_regs[21] = 1;
12588 fixed_regs[22] = call_used_regs[22] = 1;
12589 fixed_regs[23] = call_used_regs[23] = 1;
12590 fixed_regs[26] = call_used_regs[26] = 1;
12591 fixed_regs[27] = call_used_regs[27] = 1;
12592 fixed_regs[30] = call_used_regs[30] = 1;
12594 /* $f20-$f23 are call-clobbered for n64. */
12595 if (mips_abi == ABI_64)
12597 int regno;
12598 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
12599 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12601 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
12602 for n32. */
12603 if (mips_abi == ABI_N32)
12605 int regno;
12606 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
12607 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12609 /* Make sure that double-register accumulator values are correctly
12610 ordered for the current endianness. */
12611 if (TARGET_LITTLE_ENDIAN)
12613 unsigned int regno;
12615 mips_swap_registers (MD_REG_FIRST);
12616 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
12617 mips_swap_registers (regno);
12621 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
12622 other registers for instructions for which it is possible. This
12623 encourages the compiler to use CMP in cases where an XOR would
12624 require some register shuffling. */
12626 void
12627 mips_order_regs_for_local_alloc (void)
12629 int i;
12631 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12632 reg_alloc_order[i] = i;
12634 if (TARGET_MIPS16)
12636 /* It really doesn't matter where we put register 0, since it is
12637 a fixed register anyhow. */
12638 reg_alloc_order[0] = 24;
12639 reg_alloc_order[24] = 0;
12643 /* Initialize the GCC target structure. */
12644 #undef TARGET_ASM_ALIGNED_HI_OP
12645 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
12646 #undef TARGET_ASM_ALIGNED_SI_OP
12647 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
12648 #undef TARGET_ASM_ALIGNED_DI_OP
12649 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
12651 #undef TARGET_ASM_FUNCTION_PROLOGUE
12652 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
12653 #undef TARGET_ASM_FUNCTION_EPILOGUE
12654 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
12655 #undef TARGET_ASM_SELECT_RTX_SECTION
12656 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
12657 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
12658 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
12660 #undef TARGET_SCHED_INIT
12661 #define TARGET_SCHED_INIT mips_sched_init
12662 #undef TARGET_SCHED_REORDER
12663 #define TARGET_SCHED_REORDER mips_sched_reorder
12664 #undef TARGET_SCHED_REORDER2
12665 #define TARGET_SCHED_REORDER2 mips_sched_reorder
12666 #undef TARGET_SCHED_VARIABLE_ISSUE
12667 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
12668 #undef TARGET_SCHED_ADJUST_COST
12669 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
12670 #undef TARGET_SCHED_ISSUE_RATE
12671 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
12672 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12673 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
12674 mips_multipass_dfa_lookahead
12676 #undef TARGET_DEFAULT_TARGET_FLAGS
12677 #define TARGET_DEFAULT_TARGET_FLAGS \
12678 (TARGET_DEFAULT \
12679 | TARGET_CPU_DEFAULT \
12680 | TARGET_ENDIAN_DEFAULT \
12681 | TARGET_FP_EXCEPTIONS_DEFAULT \
12682 | MASK_CHECK_ZERO_DIV \
12683 | MASK_FUSED_MADD)
12684 #undef TARGET_HANDLE_OPTION
12685 #define TARGET_HANDLE_OPTION mips_handle_option
12687 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12688 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
12690 #undef TARGET_INSERT_ATTRIBUTES
12691 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
12692 #undef TARGET_MERGE_DECL_ATTRIBUTES
12693 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
12694 #undef TARGET_SET_CURRENT_FUNCTION
12695 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
12697 #undef TARGET_VALID_POINTER_MODE
12698 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
12699 #undef TARGET_RTX_COSTS
12700 #define TARGET_RTX_COSTS mips_rtx_costs
12701 #undef TARGET_ADDRESS_COST
12702 #define TARGET_ADDRESS_COST mips_address_cost
12704 #undef TARGET_IN_SMALL_DATA_P
12705 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
12707 #undef TARGET_MACHINE_DEPENDENT_REORG
12708 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
12710 #undef TARGET_ASM_FILE_START
12711 #define TARGET_ASM_FILE_START mips_file_start
12712 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
12713 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
12715 #undef TARGET_INIT_LIBFUNCS
12716 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
12718 #undef TARGET_BUILD_BUILTIN_VA_LIST
12719 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
12720 #undef TARGET_EXPAND_BUILTIN_VA_START
12721 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
12722 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12723 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
12725 #undef TARGET_PROMOTE_FUNCTION_ARGS
12726 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
12727 #undef TARGET_PROMOTE_FUNCTION_RETURN
12728 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
12729 #undef TARGET_PROMOTE_PROTOTYPES
12730 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
12732 #undef TARGET_RETURN_IN_MEMORY
12733 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
12734 #undef TARGET_RETURN_IN_MSB
12735 #define TARGET_RETURN_IN_MSB mips_return_in_msb
12737 #undef TARGET_ASM_OUTPUT_MI_THUNK
12738 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
12739 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12740 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12742 #undef TARGET_SETUP_INCOMING_VARARGS
12743 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
12744 #undef TARGET_STRICT_ARGUMENT_NAMING
12745 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
12746 #undef TARGET_MUST_PASS_IN_STACK
12747 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
12748 #undef TARGET_PASS_BY_REFERENCE
12749 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
12750 #undef TARGET_CALLEE_COPIES
12751 #define TARGET_CALLEE_COPIES mips_callee_copies
12752 #undef TARGET_ARG_PARTIAL_BYTES
12753 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
12755 #undef TARGET_MODE_REP_EXTENDED
12756 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
12758 #undef TARGET_VECTOR_MODE_SUPPORTED_P
12759 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
12761 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12762 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
12764 #undef TARGET_INIT_BUILTINS
12765 #define TARGET_INIT_BUILTINS mips_init_builtins
12766 #undef TARGET_EXPAND_BUILTIN
12767 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
12769 #undef TARGET_HAVE_TLS
12770 #define TARGET_HAVE_TLS HAVE_AS_TLS
12772 #undef TARGET_CANNOT_FORCE_CONST_MEM
12773 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
12775 #undef TARGET_ENCODE_SECTION_INFO
12776 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
12778 #undef TARGET_ATTRIBUTE_TABLE
12779 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
12780 /* All our function attributes are related to how out-of-line copies should
12781 be compiled or called. They don't in themselves prevent inlining. */
12782 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12783 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12785 #undef TARGET_EXTRA_LIVE_ON_ENTRY
12786 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
12788 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
12789 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
12790 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
12791 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
12793 #undef TARGET_COMP_TYPE_ATTRIBUTES
12794 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
12796 #ifdef HAVE_AS_DTPRELWORD
12797 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12798 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
12799 #endif
12800 #undef TARGET_DWARF_REGISTER_SPAN
12801 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
12803 struct gcc_target targetm = TARGET_INITIALIZER;
12805 #include "gt-mips.h"