1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "libfuncs.h"
46 #include "flags.h"
47 #include "reload.h"
48 #include "tm_p.h"
49 #include "ggc.h"
50 #include "gstab.h"
51 #include "hashtab.h"
52 #include "debug.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "gimple.h"
60 #include "bitmap.h"
61 #include "diagnostic.h"
63 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
64 #define UNSPEC_ADDRESS_P(X) \
65 (GET_CODE (X) == UNSPEC \
66 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
67 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
69 /* Extract the symbol or label from UNSPEC wrapper X. */
70 #define UNSPEC_ADDRESS(X) \
71 XVECEXP (X, 0, 0)
73 /* Extract the symbol type from UNSPEC wrapper X. */
74 #define UNSPEC_ADDRESS_TYPE(X) \
75 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
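/* For example (illustrative only), a GOT displacement for "foo" could be
   wrapped as:

       (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GOTOFF_DISP)

   UNSPEC_ADDRESS_P accepts this wrapper, UNSPEC_ADDRESS returns the inner
   (symbol_ref "foo"), and UNSPEC_ADDRESS_TYPE returns SYMBOL_GOTOFF_DISP.  */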
77 /* The maximum distance between the top of the stack frame and the
78 value $sp has when we save and restore registers.
80 The value for normal-mode code must be a SMALL_OPERAND and must
81 preserve the maximum stack alignment. We therefore use a value
82 of 0x7ff0 in this case.
84 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
85 up to 0x7f8 bytes and can usually save or restore all the registers
86 that we need to save or restore. (Note that we can only use these
87 instructions for o32, for which the stack alignment is 8 bytes.)
89 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
90 RESTORE are not available. We can then use unextended instructions
91 to save and restore registers, and to allocate and deallocate the top
92 part of the frame. */
93 #define MIPS_MAX_FIRST_STACK_STEP \
94 (!TARGET_MIPS16 ? 0x7ff0 \
95 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
96 : TARGET_64BIT ? 0x100 : 0x400)
98 /* True if INSN is a mips.md pattern or asm statement. */
99 #define USEFUL_INSN_P(INSN) \
100 (INSN_P (INSN) \
101 && GET_CODE (PATTERN (INSN)) != USE \
102 && GET_CODE (PATTERN (INSN)) != CLOBBER \
103 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
104 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
106 /* If INSN is a delayed branch sequence, return the first instruction
107 in the sequence, otherwise return INSN itself. */
108 #define SEQ_BEGIN(INSN) \
109 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
110 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 : (INSN))
113 /* Likewise for the last instruction in a delayed branch sequence. */
114 #define SEQ_END(INSN) \
115 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
116 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
117 : (INSN))
119 /* Execute the following loop body with SUBINSN set to each instruction
120 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
121 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
122 for ((SUBINSN) = SEQ_BEGIN (INSN); \
123 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
124 (SUBINSN) = NEXT_INSN (SUBINSN))
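/* Illustrative sketch (an addition, not part of the original file; the
   function name is hypothetical): FOR_EACH_SUBINSN visits every instruction
   covered by INSN, including delay-slot instructions inside a SEQUENCE, so a
   plain insn yields one iteration and a filled delayed branch yields one
   iteration per member of the sequence.  */

static unsigned int
mips_count_subinsns_sketch (rtx insn)
{
  rtx subinsn;
  unsigned int count = 0;

  FOR_EACH_SUBINSN (subinsn, insn)
    count++;
  return count;
}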
126 /* True if bit BIT is set in VALUE. */
127 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
129 /* Classifies an address.
131 ADDRESS_REG
132 A natural register + offset address. The register satisfies
133 mips_valid_base_register_p and the offset is a const_arith_operand.
135 ADDRESS_LO_SUM
136 A LO_SUM rtx. The first operand is a valid base register and
137 the second operand is a symbolic address.
139 ADDRESS_CONST_INT
140 A signed 16-bit constant address.
142 ADDRESS_SYMBOLIC
143 A constant symbolic address. */
144 enum mips_address_type {
145 ADDRESS_REG,
146 ADDRESS_LO_SUM,
147 ADDRESS_CONST_INT,
148 ADDRESS_SYMBOLIC
151 /* Macros to create an enumeration identifier for a function prototype. */
152 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
153 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
154 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
155 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
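/* For example (illustrative only), a hypothetical mips-ftypes.def entry
   DEF_MIPS_FTYPE (2, (DI, DI, SI)) expands through MIPS_FTYPE_NAME2 to the
   enumerator MIPS_DI_FTYPE_DI_SI.  */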
157 /* Classifies the prototype of a built-in function. */
158 enum mips_function_type {
159 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
160 #include "config/mips/mips-ftypes.def"
161 #undef DEF_MIPS_FTYPE
162 MIPS_MAX_FTYPE_MAX
165 /* Specifies how a built-in function should be converted into rtl. */
166 enum mips_builtin_type {
167 /* The function corresponds directly to an .md pattern. The return
168 value is mapped to operand 0 and the arguments are mapped to
169 operands 1 and above. */
170 MIPS_BUILTIN_DIRECT,
172 /* The function corresponds directly to an .md pattern. There is no return
173 value and the arguments are mapped to operands 0 and above. */
174 MIPS_BUILTIN_DIRECT_NO_TARGET,
176 /* The function corresponds to a comparison instruction followed by
177 a mips_cond_move_tf_ps pattern. The first two arguments are the
178 values to compare and the second two arguments are the vector
179 operands for the movt.ps or movf.ps instruction (in assembly order). */
180 MIPS_BUILTIN_MOVF,
181 MIPS_BUILTIN_MOVT,
183 /* The function corresponds to a V2SF comparison instruction. Operand 0
184 of this instruction is the result of the comparison, which has mode
185 CCV2 or CCV4. The function arguments are mapped to operands 1 and
186 above. The function's return value is an SImode boolean that is
187 true under the following conditions:
189 MIPS_BUILTIN_CMP_ANY: one of the registers is true
190 MIPS_BUILTIN_CMP_ALL: all of the registers are true
191 MIPS_BUILTIN_CMP_LOWER: the first register is true
192 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
193 MIPS_BUILTIN_CMP_ANY,
194 MIPS_BUILTIN_CMP_ALL,
195 MIPS_BUILTIN_CMP_UPPER,
196 MIPS_BUILTIN_CMP_LOWER,
198 /* As above, but the instruction only sets a single $fcc register. */
199 MIPS_BUILTIN_CMP_SINGLE,
201 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
202 MIPS_BUILTIN_BPOSGE32
205 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
206 #define MIPS_FP_CONDITIONS(MACRO) \
207 MACRO (f), \
208 MACRO (un), \
209 MACRO (eq), \
210 MACRO (ueq), \
211 MACRO (olt), \
212 MACRO (ult), \
213 MACRO (ole), \
214 MACRO (ule), \
215 MACRO (sf), \
216 MACRO (ngle), \
217 MACRO (seq), \
218 MACRO (ngl), \
219 MACRO (lt), \
220 MACRO (nge), \
221 MACRO (le), \
222 MACRO (ngt)
224 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
225 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
226 enum mips_fp_condition {
227 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
230 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
231 #define STRINGIFY(X) #X
232 static const char *const mips_fp_conditions[] = {
233 MIPS_FP_CONDITIONS (STRINGIFY)
236 /* Information about a function's frame layout. */
237 struct mips_frame_info GTY(()) {
238 /* The size of the frame in bytes. */
239 HOST_WIDE_INT total_size;
241 /* The number of bytes allocated to variables. */
242 HOST_WIDE_INT var_size;
244 /* The number of bytes allocated to outgoing function arguments. */
245 HOST_WIDE_INT args_size;
247 /* The number of bytes allocated to the .cprestore slot, or 0 if there
248 is no such slot. */
249 HOST_WIDE_INT cprestore_size;
251 /* Bit X is set if the function saves or restores GPR X. */
252 unsigned int mask;
254 /* Likewise FPR X. */
255 unsigned int fmask;
257 /* The number of GPRs and FPRs saved. */
258 unsigned int num_gp;
259 unsigned int num_fp;
261 /* The offset of the topmost GPR and FPR save slots from the top of
262 the frame, or zero if no such slots are needed. */
263 HOST_WIDE_INT gp_save_offset;
264 HOST_WIDE_INT fp_save_offset;
266 /* Likewise, but giving offsets from the bottom of the frame. */
267 HOST_WIDE_INT gp_sp_offset;
268 HOST_WIDE_INT fp_sp_offset;
270 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
271 HOST_WIDE_INT arg_pointer_offset;
273 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
274 HOST_WIDE_INT hard_frame_pointer_offset;
277 struct machine_function GTY(()) {
278 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
279 rtx mips16_gp_pseudo_rtx;
281 /* The number of extra stack bytes taken up by register varargs.
282 This area is allocated by the callee at the very top of the frame. */
283 int varargs_size;
285 /* The current frame information, calculated by mips_compute_frame_info. */
286 struct mips_frame_info frame;
288 /* The register to use as the function's global pointer. */
289 unsigned int global_pointer;
291 /* True if mips_adjust_insn_length should ignore an instruction's
292 hazard attribute. */
293 bool ignore_hazard_length_p;
295 /* True if the whole function is suitable for .set noreorder and
296 .set nomacro. */
297 bool all_noreorder_p;
299 /* True if the function is known to have an instruction that needs $gp. */
300 bool has_gp_insn_p;
302 /* True if we have emitted an instruction to initialize
303 mips16_gp_pseudo_rtx. */
304 bool initialized_mips16_gp_pseudo_p;
307 /* Information about a single argument. */
308 struct mips_arg_info {
309 /* True if the argument is passed in a floating-point register, or
310 would have been if we hadn't run out of registers. */
311 bool fpr_p;
313 /* The number of words passed in registers, rounded up. */
314 unsigned int reg_words;
316 /* For EABI, the offset of the first register from GP_ARG_FIRST or
317 FP_ARG_FIRST. For other ABIs, the offset of the first register from
318 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
319 comment for details).
321 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
322 on the stack. */
323 unsigned int reg_offset;
325 /* The number of words that must be passed on the stack, rounded up. */
326 unsigned int stack_words;
328 /* The offset from the start of the stack overflow area of the argument's
329 first stack word. Only meaningful when STACK_WORDS is nonzero. */
330 unsigned int stack_offset;
333 /* Information about an address described by mips_address_type.
335 ADDRESS_CONST_INT
336 No fields are used.
338 ADDRESS_REG
339 REG is the base register and OFFSET is the constant offset.
341 ADDRESS_LO_SUM
342 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
343 is the type of symbol it references.
345 ADDRESS_SYMBOLIC
346 SYMBOL_TYPE is the type of symbol that the address references. */
347 struct mips_address_info {
348 enum mips_address_type type;
349 rtx reg;
350 rtx offset;
351 enum mips_symbol_type symbol_type;
354 /* One stage in a constant building sequence. These sequences have
355 the form:
357 A = VALUE[0]
358 A = A CODE[1] VALUE[1]
359 A = A CODE[2] VALUE[2]
362 where A is an accumulator, each CODE[i] is a binary rtl operation
363 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
364 struct mips_integer_op {
365 enum rtx_code code;
366 unsigned HOST_WIDE_INT value;
369 /* The largest number of operations needed to load an integer constant.
370 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
371 When the lowest bit is clear, we can try, but reject a sequence with
372 an extra SLL at the end. */
373 #define MIPS_MAX_INTEGER_OPS 7
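/* Illustrative example (an addition, not part of the original file):
   loading the 64-bit constant 0x0001000300050007 takes the full
   worst-case shape described above:

        lui     $reg,0x1        A = VALUE[0]
        ori     $reg,$reg,0x3   A = A IOR 0x3
        dsll    $reg,$reg,16    A = A ASHIFT 16
        ori     $reg,$reg,0x5   A = A IOR 0x5
        dsll    $reg,$reg,16    A = A ASHIFT 16
        ori     $reg,$reg,0x7   A = A IOR 0x7

   which is six operations, one below MIPS_MAX_INTEGER_OPS.  */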
375 /* Information about a MIPS16e SAVE or RESTORE instruction. */
376 struct mips16e_save_restore_info {
377 /* The number of argument registers saved by a SAVE instruction.
378 0 for RESTORE instructions. */
379 unsigned int nargs;
381 /* Bit X is set if the instruction saves or restores GPR X. */
382 unsigned int mask;
384 /* The total number of bytes to allocate. */
385 HOST_WIDE_INT size;
388 /* Global variables for machine-dependent things. */
390 /* The -G setting, or the configuration's default small-data limit if
391 no -G option is given. */
392 static unsigned int mips_small_data_threshold;
394 /* The number of file directives written by mips_output_filename. */
395 int num_source_filenames;
397 /* The name that appeared in the last .file directive written by
398 mips_output_filename, or "" if mips_output_filename hasn't
399 written anything yet. */
400 const char *current_function_file = "";
402 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
403 int sdb_label_count;
405 /* Arrays that map GCC register numbers to debugger register numbers. */
406 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
407 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
409 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
410 int set_noreorder;
411 int set_nomacro;
412 static int set_noat;
414 /* True if we're writing out a branch-likely instruction rather than a
415 normal branch. */
416 static bool mips_branch_likely;
418 /* The operands passed to the last cmpMM expander. */
419 rtx cmp_operands[2];
421 /* The current instruction-set architecture. */
422 enum processor_type mips_arch;
423 const struct mips_cpu_info *mips_arch_info;
425 /* The processor that we should tune the code for. */
426 enum processor_type mips_tune;
427 const struct mips_cpu_info *mips_tune_info;
429 /* The ISA level associated with mips_arch. */
430 int mips_isa;
432 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
433 static const struct mips_cpu_info *mips_isa_option_info;
435 /* Which ABI to use. */
436 int mips_abi = MIPS_ABI_DEFAULT;
438 /* Which cost information to use. */
439 const struct mips_rtx_cost_data *mips_cost;
441 /* The ambient target flags, excluding MASK_MIPS16. */
442 static int mips_base_target_flags;
444 /* True if MIPS16 is the default mode. */
445 bool mips_base_mips16;
447 /* The ambient values of other global variables. */
448 static int mips_base_delayed_branch; /* flag_delayed_branch */
449 static int mips_base_schedule_insns; /* flag_schedule_insns */
450 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
451 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
452 static int mips_base_align_loops; /* align_loops */
453 static int mips_base_align_jumps; /* align_jumps */
454 static int mips_base_align_functions; /* align_functions */
456 /* The -mcode-readable setting. */
457 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
459 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
460 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
462 /* Index C is true if character C is a valid PRINT_OPERAND punctuation
463 character. */
464 bool mips_print_operand_punct[256];
466 static GTY (()) int mips_output_filename_first_time = 1;
468 /* mips_split_p[X] is true if symbols of type X can be split by
469 mips_split_symbol. */
470 bool mips_split_p[NUM_SYMBOL_TYPES];
472 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
473 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
474 if they are matched by a special .md file pattern. */
475 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
477 /* Likewise for HIGHs. */
478 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
480 /* Index R is the smallest register class that contains register R. */
481 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
482 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
483 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
484 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
485 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
486 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
487 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
488 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
489 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
490 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
491 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
492 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
493 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
494 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
495 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
496 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
497 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
498 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
499 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
500 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
501 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
502 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
503 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
504 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
505 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
506 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
507 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
508 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
509 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
510 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
511 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
512 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
513 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
514 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
515 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
516 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
517 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
518 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
519 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
520 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
521 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
522 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
523 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
524 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
525 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
526 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
527 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
528 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
531 /* The value of TARGET_ATTRIBUTE_TABLE. */
532 const struct attribute_spec mips_attribute_table[] = {
533 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
534 { "long_call", 0, 0, false, true, true, NULL },
535 { "far", 0, 0, false, true, true, NULL },
536 { "near", 0, 0, false, true, true, NULL },
537 /* We would really like to treat "mips16" and "nomips16" as type
538 attributes, but GCC doesn't provide the hooks we need to support
539 the right conversion rules. As declaration attributes, they affect
540 code generation but don't carry other semantics. */
541 { "mips16", 0, 0, true, false, false, NULL },
542 { "nomips16", 0, 0, true, false, false, NULL },
543 { NULL, 0, 0, false, false, false, NULL }
546 /* A table describing all the processors GCC knows about. Names are
547 matched in the order listed. The first mention of an ISA level is
548 taken as the canonical name for that ISA.
550 To ease comparison, please keep this table in the same order
551 as GAS's mips_cpu_info_table. Please also make sure that
552 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
553 options correctly. */
554 static const struct mips_cpu_info mips_cpu_info_table[] = {
555 /* Entries for generic ISAs. */
556 { "mips1", PROCESSOR_R3000, 1, 0 },
557 { "mips2", PROCESSOR_R6000, 2, 0 },
558 { "mips3", PROCESSOR_R4000, 3, 0 },
559 { "mips4", PROCESSOR_R8000, 4, 0 },
560 /* Prefer not to use branch-likely instructions for generic MIPS32rX
561 and MIPS64rX code. The instructions were officially deprecated
562 in revisions 2 and earlier, but revision 3 is likely to downgrade
563 that to a recommendation to avoid the instructions in code that
564 isn't tuned to a specific processor. */
565 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
566 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
567 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
568 /* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
569 { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },
571 /* MIPS I processors. */
572 { "r3000", PROCESSOR_R3000, 1, 0 },
573 { "r2000", PROCESSOR_R3000, 1, 0 },
574 { "r3900", PROCESSOR_R3900, 1, 0 },
576 /* MIPS II processors. */
577 { "r6000", PROCESSOR_R6000, 2, 0 },
579 /* MIPS III processors. */
580 { "r4000", PROCESSOR_R4000, 3, 0 },
581 { "vr4100", PROCESSOR_R4100, 3, 0 },
582 { "vr4111", PROCESSOR_R4111, 3, 0 },
583 { "vr4120", PROCESSOR_R4120, 3, 0 },
584 { "vr4130", PROCESSOR_R4130, 3, 0 },
585 { "vr4300", PROCESSOR_R4300, 3, 0 },
586 { "r4400", PROCESSOR_R4000, 3, 0 },
587 { "r4600", PROCESSOR_R4600, 3, 0 },
588 { "orion", PROCESSOR_R4600, 3, 0 },
589 { "r4650", PROCESSOR_R4650, 3, 0 },
590 /* ST Loongson 2E/2F processors. */
591 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
592 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
594 /* MIPS IV processors. */
595 { "r8000", PROCESSOR_R8000, 4, 0 },
596 { "vr5000", PROCESSOR_R5000, 4, 0 },
597 { "vr5400", PROCESSOR_R5400, 4, 0 },
598 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
599 { "rm7000", PROCESSOR_R7000, 4, 0 },
600 { "rm9000", PROCESSOR_R9000, 4, 0 },
602 /* MIPS32 processors. */
603 { "4kc", PROCESSOR_4KC, 32, 0 },
604 { "4km", PROCESSOR_4KC, 32, 0 },
605 { "4kp", PROCESSOR_4KP, 32, 0 },
606 { "4ksc", PROCESSOR_4KC, 32, 0 },
608 /* MIPS32 Release 2 processors. */
609 { "m4k", PROCESSOR_M4K, 33, 0 },
610 { "4kec", PROCESSOR_4KC, 33, 0 },
611 { "4kem", PROCESSOR_4KC, 33, 0 },
612 { "4kep", PROCESSOR_4KP, 33, 0 },
613 { "4ksd", PROCESSOR_4KC, 33, 0 },
615 { "24kc", PROCESSOR_24KC, 33, 0 },
616 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
617 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
618 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
619 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
620 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
622 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
623 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
624 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
625 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
626 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
627 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
629 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
630 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
631 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
632 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
633 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
634 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
636 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
637 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
638 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
639 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
640 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
641 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
642 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
644 /* MIPS64 processors. */
645 { "5kc", PROCESSOR_5KC, 64, 0 },
646 { "5kf", PROCESSOR_5KF, 64, 0 },
647 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
648 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
649 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
650 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
651 { "xlr", PROCESSOR_XLR, 64, 0 }
654 /* Default costs. If these are used for a processor we should look
655 up the actual costs. */
656 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
657 COSTS_N_INSNS (7), /* fp_mult_sf */ \
658 COSTS_N_INSNS (8), /* fp_mult_df */ \
659 COSTS_N_INSNS (23), /* fp_div_sf */ \
660 COSTS_N_INSNS (36), /* fp_div_df */ \
661 COSTS_N_INSNS (10), /* int_mult_si */ \
662 COSTS_N_INSNS (10), /* int_mult_di */ \
663 COSTS_N_INSNS (69), /* int_div_si */ \
664 COSTS_N_INSNS (69), /* int_div_di */ \
665 2, /* branch_cost */ \
666 4 /* memory_latency */
668 /* Floating-point costs for processors without an FPU. Just assume that
669 all floating-point libcalls are very expensive. */
670 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
671 COSTS_N_INSNS (256), /* fp_mult_sf */ \
672 COSTS_N_INSNS (256), /* fp_mult_df */ \
673 COSTS_N_INSNS (256), /* fp_div_sf */ \
674 COSTS_N_INSNS (256) /* fp_div_df */
676 /* Costs to use when optimizing for size. */
677 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
678 COSTS_N_INSNS (1), /* fp_add */
679 COSTS_N_INSNS (1), /* fp_mult_sf */
680 COSTS_N_INSNS (1), /* fp_mult_df */
681 COSTS_N_INSNS (1), /* fp_div_sf */
682 COSTS_N_INSNS (1), /* fp_div_df */
683 COSTS_N_INSNS (1), /* int_mult_si */
684 COSTS_N_INSNS (1), /* int_mult_di */
685 COSTS_N_INSNS (1), /* int_div_si */
686 COSTS_N_INSNS (1), /* int_div_di */
687 2, /* branch_cost */
688 4 /* memory_latency */
691 /* Costs to use when optimizing for speed, indexed by processor. */
692 static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
693 { /* R3000 */
694 COSTS_N_INSNS (2), /* fp_add */
695 COSTS_N_INSNS (4), /* fp_mult_sf */
696 COSTS_N_INSNS (5), /* fp_mult_df */
697 COSTS_N_INSNS (12), /* fp_div_sf */
698 COSTS_N_INSNS (19), /* fp_div_df */
699 COSTS_N_INSNS (12), /* int_mult_si */
700 COSTS_N_INSNS (12), /* int_mult_di */
701 COSTS_N_INSNS (35), /* int_div_si */
702 COSTS_N_INSNS (35), /* int_div_di */
703 1, /* branch_cost */
704 4 /* memory_latency */
706 { /* 4KC */
707 SOFT_FP_COSTS,
708 COSTS_N_INSNS (6), /* int_mult_si */
709 COSTS_N_INSNS (6), /* int_mult_di */
710 COSTS_N_INSNS (36), /* int_div_si */
711 COSTS_N_INSNS (36), /* int_div_di */
712 1, /* branch_cost */
713 4 /* memory_latency */
715 { /* 4KP */
716 SOFT_FP_COSTS,
717 COSTS_N_INSNS (36), /* int_mult_si */
718 COSTS_N_INSNS (36), /* int_mult_di */
719 COSTS_N_INSNS (37), /* int_div_si */
720 COSTS_N_INSNS (37), /* int_div_di */
721 1, /* branch_cost */
722 4 /* memory_latency */
724 { /* 5KC */
725 SOFT_FP_COSTS,
726 COSTS_N_INSNS (4), /* int_mult_si */
727 COSTS_N_INSNS (11), /* int_mult_di */
728 COSTS_N_INSNS (36), /* int_div_si */
729 COSTS_N_INSNS (68), /* int_div_di */
730 1, /* branch_cost */
731 4 /* memory_latency */
733 { /* 5KF */
734 COSTS_N_INSNS (4), /* fp_add */
735 COSTS_N_INSNS (4), /* fp_mult_sf */
736 COSTS_N_INSNS (5), /* fp_mult_df */
737 COSTS_N_INSNS (17), /* fp_div_sf */
738 COSTS_N_INSNS (32), /* fp_div_df */
739 COSTS_N_INSNS (4), /* int_mult_si */
740 COSTS_N_INSNS (11), /* int_mult_di */
741 COSTS_N_INSNS (36), /* int_div_si */
742 COSTS_N_INSNS (68), /* int_div_di */
743 1, /* branch_cost */
744 4 /* memory_latency */
746 { /* 20KC */
747 COSTS_N_INSNS (4), /* fp_add */
748 COSTS_N_INSNS (4), /* fp_mult_sf */
749 COSTS_N_INSNS (5), /* fp_mult_df */
750 COSTS_N_INSNS (17), /* fp_div_sf */
751 COSTS_N_INSNS (32), /* fp_div_df */
752 COSTS_N_INSNS (4), /* int_mult_si */
753 COSTS_N_INSNS (7), /* int_mult_di */
754 COSTS_N_INSNS (42), /* int_div_si */
755 COSTS_N_INSNS (72), /* int_div_di */
756 1, /* branch_cost */
757 4 /* memory_latency */
759 { /* 24KC */
760 SOFT_FP_COSTS,
761 COSTS_N_INSNS (5), /* int_mult_si */
762 COSTS_N_INSNS (5), /* int_mult_di */
763 COSTS_N_INSNS (41), /* int_div_si */
764 COSTS_N_INSNS (41), /* int_div_di */
765 1, /* branch_cost */
766 4 /* memory_latency */
768 { /* 24KF2_1 */
769 COSTS_N_INSNS (8), /* fp_add */
770 COSTS_N_INSNS (8), /* fp_mult_sf */
771 COSTS_N_INSNS (10), /* fp_mult_df */
772 COSTS_N_INSNS (34), /* fp_div_sf */
773 COSTS_N_INSNS (64), /* fp_div_df */
774 COSTS_N_INSNS (5), /* int_mult_si */
775 COSTS_N_INSNS (5), /* int_mult_di */
776 COSTS_N_INSNS (41), /* int_div_si */
777 COSTS_N_INSNS (41), /* int_div_di */
778 1, /* branch_cost */
779 4 /* memory_latency */
781 { /* 24KF1_1 */
782 COSTS_N_INSNS (4), /* fp_add */
783 COSTS_N_INSNS (4), /* fp_mult_sf */
784 COSTS_N_INSNS (5), /* fp_mult_df */
785 COSTS_N_INSNS (17), /* fp_div_sf */
786 COSTS_N_INSNS (32), /* fp_div_df */
787 COSTS_N_INSNS (5), /* int_mult_si */
788 COSTS_N_INSNS (5), /* int_mult_di */
789 COSTS_N_INSNS (41), /* int_div_si */
790 COSTS_N_INSNS (41), /* int_div_di */
791 1, /* branch_cost */
792 4 /* memory_latency */
794 { /* 74KC */
795 SOFT_FP_COSTS,
796 COSTS_N_INSNS (5), /* int_mult_si */
797 COSTS_N_INSNS (5), /* int_mult_di */
798 COSTS_N_INSNS (41), /* int_div_si */
799 COSTS_N_INSNS (41), /* int_div_di */
800 1, /* branch_cost */
801 4 /* memory_latency */
803 { /* 74KF2_1 */
804 COSTS_N_INSNS (8), /* fp_add */
805 COSTS_N_INSNS (8), /* fp_mult_sf */
806 COSTS_N_INSNS (10), /* fp_mult_df */
807 COSTS_N_INSNS (34), /* fp_div_sf */
808 COSTS_N_INSNS (64), /* fp_div_df */
809 COSTS_N_INSNS (5), /* int_mult_si */
810 COSTS_N_INSNS (5), /* int_mult_di */
811 COSTS_N_INSNS (41), /* int_div_si */
812 COSTS_N_INSNS (41), /* int_div_di */
813 1, /* branch_cost */
814 4 /* memory_latency */
816 { /* 74KF1_1 */
817 COSTS_N_INSNS (4), /* fp_add */
818 COSTS_N_INSNS (4), /* fp_mult_sf */
819 COSTS_N_INSNS (5), /* fp_mult_df */
820 COSTS_N_INSNS (17), /* fp_div_sf */
821 COSTS_N_INSNS (32), /* fp_div_df */
822 COSTS_N_INSNS (5), /* int_mult_si */
823 COSTS_N_INSNS (5), /* int_mult_di */
824 COSTS_N_INSNS (41), /* int_div_si */
825 COSTS_N_INSNS (41), /* int_div_di */
826 1, /* branch_cost */
827 4 /* memory_latency */
829 { /* 74KF3_2 */
830 COSTS_N_INSNS (6), /* fp_add */
831 COSTS_N_INSNS (6), /* fp_mult_sf */
832 COSTS_N_INSNS (7), /* fp_mult_df */
833 COSTS_N_INSNS (25), /* fp_div_sf */
834 COSTS_N_INSNS (48), /* fp_div_df */
835 COSTS_N_INSNS (5), /* int_mult_si */
836 COSTS_N_INSNS (5), /* int_mult_di */
837 COSTS_N_INSNS (41), /* int_div_si */
838 COSTS_N_INSNS (41), /* int_div_di */
839 1, /* branch_cost */
840 4 /* memory_latency */
842 { /* Loongson-2E */
843 DEFAULT_COSTS
845 { /* Loongson-2F */
846 DEFAULT_COSTS
848 { /* M4k */
849 DEFAULT_COSTS
851 { /* R3900 */
852 COSTS_N_INSNS (2), /* fp_add */
853 COSTS_N_INSNS (4), /* fp_mult_sf */
854 COSTS_N_INSNS (5), /* fp_mult_df */
855 COSTS_N_INSNS (12), /* fp_div_sf */
856 COSTS_N_INSNS (19), /* fp_div_df */
857 COSTS_N_INSNS (2), /* int_mult_si */
858 COSTS_N_INSNS (2), /* int_mult_di */
859 COSTS_N_INSNS (35), /* int_div_si */
860 COSTS_N_INSNS (35), /* int_div_di */
861 1, /* branch_cost */
862 4 /* memory_latency */
864 { /* R6000 */
865 COSTS_N_INSNS (3), /* fp_add */
866 COSTS_N_INSNS (5), /* fp_mult_sf */
867 COSTS_N_INSNS (6), /* fp_mult_df */
868 COSTS_N_INSNS (15), /* fp_div_sf */
869 COSTS_N_INSNS (16), /* fp_div_df */
870 COSTS_N_INSNS (17), /* int_mult_si */
871 COSTS_N_INSNS (17), /* int_mult_di */
872 COSTS_N_INSNS (38), /* int_div_si */
873 COSTS_N_INSNS (38), /* int_div_di */
874 2, /* branch_cost */
875 6 /* memory_latency */
877 { /* R4000 */
878 COSTS_N_INSNS (6), /* fp_add */
879 COSTS_N_INSNS (7), /* fp_mult_sf */
880 COSTS_N_INSNS (8), /* fp_mult_df */
881 COSTS_N_INSNS (23), /* fp_div_sf */
882 COSTS_N_INSNS (36), /* fp_div_df */
883 COSTS_N_INSNS (10), /* int_mult_si */
884 COSTS_N_INSNS (10), /* int_mult_di */
885 COSTS_N_INSNS (69), /* int_div_si */
886 COSTS_N_INSNS (69), /* int_div_di */
887 2, /* branch_cost */
888 6 /* memory_latency */
890 { /* R4100 */
891 DEFAULT_COSTS
893 { /* R4111 */
894 DEFAULT_COSTS
896 { /* R4120 */
897 DEFAULT_COSTS
899 { /* R4130 */
900 /* The only costs that appear to be updated here are those for
901 integer multiplication. */
902 SOFT_FP_COSTS,
903 COSTS_N_INSNS (4), /* int_mult_si */
904 COSTS_N_INSNS (6), /* int_mult_di */
905 COSTS_N_INSNS (69), /* int_div_si */
906 COSTS_N_INSNS (69), /* int_div_di */
907 1, /* branch_cost */
908 4 /* memory_latency */
910 { /* R4300 */
911 DEFAULT_COSTS
913 { /* R4600 */
914 DEFAULT_COSTS
916 { /* R4650 */
917 DEFAULT_COSTS
919 { /* R5000 */
920 COSTS_N_INSNS (6), /* fp_add */
921 COSTS_N_INSNS (4), /* fp_mult_sf */
922 COSTS_N_INSNS (5), /* fp_mult_df */
923 COSTS_N_INSNS (23), /* fp_div_sf */
924 COSTS_N_INSNS (36), /* fp_div_df */
925 COSTS_N_INSNS (5), /* int_mult_si */
926 COSTS_N_INSNS (5), /* int_mult_di */
927 COSTS_N_INSNS (36), /* int_div_si */
928 COSTS_N_INSNS (36), /* int_div_di */
929 1, /* branch_cost */
930 4 /* memory_latency */
932 { /* R5400 */
933 COSTS_N_INSNS (6), /* fp_add */
934 COSTS_N_INSNS (5), /* fp_mult_sf */
935 COSTS_N_INSNS (6), /* fp_mult_df */
936 COSTS_N_INSNS (30), /* fp_div_sf */
937 COSTS_N_INSNS (59), /* fp_div_df */
938 COSTS_N_INSNS (3), /* int_mult_si */
939 COSTS_N_INSNS (4), /* int_mult_di */
940 COSTS_N_INSNS (42), /* int_div_si */
941 COSTS_N_INSNS (74), /* int_div_di */
942 1, /* branch_cost */
943 4 /* memory_latency */
945 { /* R5500 */
946 COSTS_N_INSNS (6), /* fp_add */
947 COSTS_N_INSNS (5), /* fp_mult_sf */
948 COSTS_N_INSNS (6), /* fp_mult_df */
949 COSTS_N_INSNS (30), /* fp_div_sf */
950 COSTS_N_INSNS (59), /* fp_div_df */
951 COSTS_N_INSNS (5), /* int_mult_si */
952 COSTS_N_INSNS (9), /* int_mult_di */
953 COSTS_N_INSNS (42), /* int_div_si */
954 COSTS_N_INSNS (74), /* int_div_di */
955 1, /* branch_cost */
956 4 /* memory_latency */
958 { /* R7000 */
959 /* The only costs that are changed here are those for
960 integer multiplication. */
961 COSTS_N_INSNS (6), /* fp_add */
962 COSTS_N_INSNS (7), /* fp_mult_sf */
963 COSTS_N_INSNS (8), /* fp_mult_df */
964 COSTS_N_INSNS (23), /* fp_div_sf */
965 COSTS_N_INSNS (36), /* fp_div_df */
966 COSTS_N_INSNS (5), /* int_mult_si */
967 COSTS_N_INSNS (9), /* int_mult_di */
968 COSTS_N_INSNS (69), /* int_div_si */
969 COSTS_N_INSNS (69), /* int_div_di */
970 1, /* branch_cost */
971 4 /* memory_latency */
973 { /* R8000 */
974 DEFAULT_COSTS
976 { /* R9000 */
977 /* The only costs that are changed here are those for
978 integer multiplication. */
979 COSTS_N_INSNS (6), /* fp_add */
980 COSTS_N_INSNS (7), /* fp_mult_sf */
981 COSTS_N_INSNS (8), /* fp_mult_df */
982 COSTS_N_INSNS (23), /* fp_div_sf */
983 COSTS_N_INSNS (36), /* fp_div_df */
984 COSTS_N_INSNS (3), /* int_mult_si */
985 COSTS_N_INSNS (8), /* int_mult_di */
986 COSTS_N_INSNS (69), /* int_div_si */
987 COSTS_N_INSNS (69), /* int_div_di */
988 1, /* branch_cost */
989 4 /* memory_latency */
991 { /* SB1 */
992 /* These costs are the same as the SB-1A below. */
993 COSTS_N_INSNS (4), /* fp_add */
994 COSTS_N_INSNS (4), /* fp_mult_sf */
995 COSTS_N_INSNS (4), /* fp_mult_df */
996 COSTS_N_INSNS (24), /* fp_div_sf */
997 COSTS_N_INSNS (32), /* fp_div_df */
998 COSTS_N_INSNS (3), /* int_mult_si */
999 COSTS_N_INSNS (4), /* int_mult_di */
1000 COSTS_N_INSNS (36), /* int_div_si */
1001 COSTS_N_INSNS (68), /* int_div_di */
1002 1, /* branch_cost */
1003 4 /* memory_latency */
1005 { /* SB1-A */
1006 /* These costs are the same as the SB-1 above. */
1007 COSTS_N_INSNS (4), /* fp_add */
1008 COSTS_N_INSNS (4), /* fp_mult_sf */
1009 COSTS_N_INSNS (4), /* fp_mult_df */
1010 COSTS_N_INSNS (24), /* fp_div_sf */
1011 COSTS_N_INSNS (32), /* fp_div_df */
1012 COSTS_N_INSNS (3), /* int_mult_si */
1013 COSTS_N_INSNS (4), /* int_mult_di */
1014 COSTS_N_INSNS (36), /* int_div_si */
1015 COSTS_N_INSNS (68), /* int_div_di */
1016 1, /* branch_cost */
1017 4 /* memory_latency */
1019 { /* SR71000 */
1020 DEFAULT_COSTS
1022 { /* XLR */
1023 /* Need to replace the first five with the costs of calling the appropriate
1024 libgcc routine. */
1025 COSTS_N_INSNS (256), /* fp_add */
1026 COSTS_N_INSNS (256), /* fp_mult_sf */
1027 COSTS_N_INSNS (256), /* fp_mult_df */
1028 COSTS_N_INSNS (256), /* fp_div_sf */
1029 COSTS_N_INSNS (256), /* fp_div_df */
1030 COSTS_N_INSNS (8), /* int_mult_si */
1031 COSTS_N_INSNS (8), /* int_mult_di */
1032 COSTS_N_INSNS (72), /* int_div_si */
1033 COSTS_N_INSNS (72), /* int_div_di */
1034 1, /* branch_cost */
1035 4 /* memory_latency */
1039 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1040 for -mflip-mips16. It maps decl names onto a boolean mode setting. */
1041 struct mflip_mips16_entry GTY (()) {
1042 const char *name;
1043 bool mips16_p;
1045 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1047 /* Hash table callbacks for mflip_mips16_htab. */
1049 static hashval_t
1050 mflip_mips16_htab_hash (const void *entry)
1052 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1055 static int
1056 mflip_mips16_htab_eq (const void *entry, const void *name)
1058 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1059 (const char *) name) == 0;
1062 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1063 mode, false if it should next add an attribute for the opposite mode. */
1064 static GTY(()) bool mips16_flipper;
1066 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1067 for -mflip-mips16. Return true if it should use "mips16" and false if
1068 it should use "nomips16". */
1070 static bool
1071 mflip_mips16_use_mips16_p (tree decl)
1073 struct mflip_mips16_entry *entry;
1074 const char *name;
1075 hashval_t hash;
1076 void **slot;
1078 /* Use the opposite of the command-line setting for anonymous decls. */
1079 if (!DECL_NAME (decl))
1080 return !mips_base_mips16;
1082 if (!mflip_mips16_htab)
1083 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1084 mflip_mips16_htab_eq, NULL);
1086 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1087 hash = htab_hash_string (name);
1088 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1089 entry = (struct mflip_mips16_entry *) *slot;
1090 if (!entry)
1092 mips16_flipper = !mips16_flipper;
1093 entry = GGC_NEW (struct mflip_mips16_entry);
1094 entry->name = name;
1095 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1096 *slot = entry;
1098 return entry->mips16_p;
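/* Note: mips16_flipper is toggled each time a new name is entered in the
   hash table, so successive new function names alternate between the MIPS16
   and non-MIPS16 settings, while duplicate declarations of the same name
   always reuse the entry cached here.  */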
1101 /* Predicates to test for presence of "near" and "far"/"long_call"
1102 attributes on the given TYPE. */
1104 static bool
1105 mips_near_type_p (const_tree type)
1107 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1110 static bool
1111 mips_far_type_p (const_tree type)
1113 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1114 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1117 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1119 static bool
1120 mips_mips16_decl_p (const_tree decl)
1122 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1125 static bool
1126 mips_nomips16_decl_p (const_tree decl)
1128 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1131 /* Return true if function DECL is a MIPS16 function. Return the ambient
1132 setting if DECL is null. */
1134 static bool
1135 mips_use_mips16_mode_p (tree decl)
1137 if (decl)
1139 /* Nested functions must use the same frame pointer as their
1140 parent and must therefore use the same ISA mode. */
1141 tree parent = decl_function_context (decl);
1142 if (parent)
1143 decl = parent;
1144 if (mips_mips16_decl_p (decl))
1145 return true;
1146 if (mips_nomips16_decl_p (decl))
1147 return false;
1149 return mips_base_mips16;
1152 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1154 static int
1155 mips_comp_type_attributes (const_tree type1, const_tree type2)
1157 /* Disallow mixed near/far attributes. */
1158 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1159 return 0;
1160 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1161 return 0;
1162 return 1;
1165 /* Implement TARGET_INSERT_ATTRIBUTES. */
1167 static void
1168 mips_insert_attributes (tree decl, tree *attributes)
1170 const char *name;
1171 bool mips16_p, nomips16_p;
1173 /* Check for "mips16" and "nomips16" attributes. */
1174 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1175 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1176 if (TREE_CODE (decl) != FUNCTION_DECL)
1178 if (mips16_p)
1179 error ("%qs attribute only applies to functions", "mips16");
1180 if (nomips16_p)
1181 error ("%qs attribute only applies to functions", "nomips16");
1183 else
1185 mips16_p |= mips_mips16_decl_p (decl);
1186 nomips16_p |= mips_nomips16_decl_p (decl);
1187 if (mips16_p || nomips16_p)
1189 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1190 if (mips16_p && nomips16_p)
1191 error ("%qs cannot have both %<mips16%> and "
1192 "%<nomips16%> attributes",
1193 IDENTIFIER_POINTER (DECL_NAME (decl)));
1195 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1197 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1198 "mips16" attribute, arbitrarily pick one. We must pick the same
1199 setting for duplicate declarations of a function. */
1200 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1201 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1206 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1208 static tree
1209 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1211 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1212 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1213 error ("%qs redeclared with conflicting %qs attributes",
1214 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1215 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1216 error ("%qs redeclared with conflicting %qs attributes",
1217 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1219 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1220 DECL_ATTRIBUTES (newdecl));
1223 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1224 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1226 static void
1227 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1229 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1231 *base_ptr = XEXP (x, 0);
1232 *offset_ptr = INTVAL (XEXP (x, 1));
1234 else
1236 *base_ptr = x;
1237 *offset_ptr = 0;
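/* For example, given X = (plus (reg $4) (const_int 16)), the function above
   returns (reg $4) in *BASE_PTR and 16 in *OFFSET_PTR; for any other X, such
   as a bare register, it returns X itself and an offset of 0.  */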
1241 static unsigned int mips_build_integer (struct mips_integer_op *,
1242 unsigned HOST_WIDE_INT);
1244 /* A subroutine of mips_build_integer, with the same interface.
1245 Assume that the final action in the sequence should be a left shift. */
1247 static unsigned int
1248 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1250 unsigned int i, shift;
1252 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1253 since signed numbers are easier to load than unsigned ones. */
1254 shift = 0;
1255 while ((value & 1) == 0)
1256 value /= 2, shift++;
1258 i = mips_build_integer (codes, value);
1259 codes[i].code = ASHIFT;
1260 codes[i].value = shift;
1261 return i + 1;
1264 /* As for mips_build_shift, but assume that the final action will be
1265 an IOR or PLUS operation. */
1267 static unsigned int
1268 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1270 unsigned HOST_WIDE_INT high;
1271 unsigned int i;
1273 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1274 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1276 /* The constant is too complex to load with a simple LUI/ORI pair,
1277 so we want to give the recursive call as many trailing zeros as
1278 possible. In this case, we know bit 16 is set and that the
1279 low 16 bits form a negative number. If we subtract that number
1280 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1281 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1282 codes[i].code = PLUS;
1283 codes[i].value = CONST_LOW_PART (value);
1285 else
1287 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1288 bits gives a value with at least 17 trailing zeros. */
1289 i = mips_build_integer (codes, high);
1290 codes[i].code = IOR;
1291 codes[i].value = value & 0xffff;
1293 return i + 1;
1296 /* Fill CODES with a sequence of rtl operations to load VALUE.
1297 Return the number of operations needed. */
1299 static unsigned int
1300 mips_build_integer (struct mips_integer_op *codes,
1301 unsigned HOST_WIDE_INT value)
1303 if (SMALL_OPERAND (value)
1304 || SMALL_OPERAND_UNSIGNED (value)
1305 || LUI_OPERAND (value))
1307 /* The value can be loaded with a single instruction. */
1308 codes[0].code = UNKNOWN;
1309 codes[0].value = value;
1310 return 1;
1312 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1314 /* Either the constant is a simple LUI/ORI combination or its
1315 lowest bit is set. We don't want to shift in this case. */
1316 return mips_build_lower (codes, value);
1318 else if ((value & 0xffff) == 0)
1320 /* The constant will need at least three actions. The lowest
1321 16 bits are clear, so the final action will be a shift. */
1322 return mips_build_shift (codes, value);
1324 else
1326 /* The final action could be a shift, add or inclusive OR.
1327 Rather than use a complex condition to select the best
1328 approach, try both mips_build_shift and mips_build_lower
1329 and pick the one that gives the shortest sequence.
1330 Note that this case is only used once per constant. */
1331 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1332 unsigned int cost, alt_cost;
1334 cost = mips_build_shift (codes, value);
1335 alt_cost = mips_build_lower (alt_codes, value);
1336 if (alt_cost < cost)
1338 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1339 cost = alt_cost;
1341 return cost;
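/* Illustrative sketch (an addition, not in the original file; the function
   name is hypothetical): one way a caller could expand the sequence built by
   mips_build_integer into SETs of register DEST.  The real expansion code
   elsewhere in this file may differ in detail, for example in how it handles
   temporaries.  */

static void
mips_move_integer_sketch (rtx dest, unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum machine_mode mode = GET_MODE (dest);
  unsigned int i, num_ops;
  rtx x;

  num_ops = mips_build_integer (codes, value);

  /* Invariant: X is a valid source for a SET of DEST.  */
  x = GEN_INT (codes[0].value);
  for (i = 1; i < num_ops; i++)
    {
      /* Commit the running value to DEST, then build the next binary
         operation (IOR, PLUS or ASHIFT) on top of it.  */
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
      x = gen_rtx_fmt_ee (codes[i].code, mode, dest,
                          GEN_INT (codes[i].value));
    }
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}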
1345 /* Return true if X is a thread-local symbol. */
1347 static bool
1348 mips_tls_symbol_p (rtx x)
1350 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1353 /* Return true if SYMBOL_REF X is associated with a global symbol
1354 (in the STB_GLOBAL sense). */
1356 static bool
1357 mips_global_symbol_p (const_rtx x)
1359 const_tree decl = SYMBOL_REF_DECL (x);
1361 if (!decl)
1362 return !SYMBOL_REF_LOCAL_P (x);
1364 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1365 or weak symbols. Relocations in the object file will be against
1366 the target symbol, so it's that symbol's binding that matters here. */
1367 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1370 /* Return true if SYMBOL_REF X binds locally. */
1372 static bool
1373 mips_symbol_binds_local_p (const_rtx x)
1375 return (SYMBOL_REF_DECL (x)
1376 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1377 : SYMBOL_REF_LOCAL_P (x));
1380 /* Return true if rtx constants of mode MODE should be put into a small
1381 data section. */
1383 static bool
1384 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1386 return (!TARGET_EMBEDDED_DATA
1387 && TARGET_LOCAL_SDATA
1388 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1391 /* Return true if X should not be moved directly into register $25.
1392 We need this because many versions of GAS will treat "la $25,foo" as
1393 part of a call sequence and so allow a global "foo" to be lazily bound. */
1395 bool
1396 mips_dangerous_for_la25_p (rtx x)
1398 return (!TARGET_EXPLICIT_RELOCS
1399 && TARGET_USE_GOT
1400 && GET_CODE (x) == SYMBOL_REF
1401 && mips_global_symbol_p (x));
1404 /* Return the method that should be used to access SYMBOL_REF or
1405 LABEL_REF X in context CONTEXT. */
1407 static enum mips_symbol_type
1408 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1410 if (TARGET_RTP_PIC)
1411 return SYMBOL_GOT_DISP;
1413 if (GET_CODE (x) == LABEL_REF)
1415 /* LABEL_REFs are used for jump tables as well as text labels.
1416 Only return SYMBOL_PC_RELATIVE if we know the label is in
1417 the text section. */
1418 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1419 return SYMBOL_PC_RELATIVE;
1421 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1422 return SYMBOL_GOT_PAGE_OFST;
1424 return SYMBOL_ABSOLUTE;
1427 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1429 if (SYMBOL_REF_TLS_MODEL (x))
1430 return SYMBOL_TLS;
1432 if (CONSTANT_POOL_ADDRESS_P (x))
1434 if (TARGET_MIPS16_TEXT_LOADS)
1435 return SYMBOL_PC_RELATIVE;
1437 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1438 return SYMBOL_PC_RELATIVE;
1440 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1441 return SYMBOL_GP_RELATIVE;
1444 /* Do not use small-data accesses for weak symbols; they may end up
1445 being zero. */
1446 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1447 return SYMBOL_GP_RELATIVE;
1449 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1450 is in effect. */
1451 if (TARGET_ABICALLS
1452 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1454 /* There are three cases to consider:
1456 - o32 PIC (either with or without explicit relocs)
1457 - n32/n64 PIC without explicit relocs
1458 - n32/n64 PIC with explicit relocs
1460 In the first case, both local and global accesses will use an
1461 R_MIPS_GOT16 relocation. We must correctly predict which of
1462 the two semantics (local or global) the assembler and linker
1463 will apply. The choice depends on the symbol's binding rather
1464 than its visibility.
1466 In the second case, the assembler will not use R_MIPS_GOT16
1467 relocations, but it chooses between local and global accesses
1468 in the same way as for o32 PIC.
1470 In the third case we have more freedom since both forms of
1471 access will work for any kind of symbol. However, there seems
1472 little point in doing things differently. */
1473 if (mips_global_symbol_p (x))
1474 return SYMBOL_GOT_DISP;
1476 return SYMBOL_GOT_PAGE_OFST;
1479 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1480 return SYMBOL_FORCE_TO_MEM;
1482 return SYMBOL_ABSOLUTE;
1485 /* Classify the base of symbolic expression X, given that X appears in
1486 context CONTEXT. */
1488 static enum mips_symbol_type
1489 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1491 rtx offset;
1493 split_const (x, &x, &offset);
1494 if (UNSPEC_ADDRESS_P (x))
1495 return UNSPEC_ADDRESS_TYPE (x);
1497 return mips_classify_symbol (x, context);
1500 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1501 is the alignment in bytes of SYMBOL_REF X. */
1503 static bool
1504 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1506 HOST_WIDE_INT align;
1508 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1509 return IN_RANGE (offset, 0, align - 1);
1512 /* Return true if X is a symbolic constant that can be used in context
1513 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1515 bool
1516 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1517 enum mips_symbol_type *symbol_type)
1519 rtx offset;
1521 split_const (x, &x, &offset);
1522 if (UNSPEC_ADDRESS_P (x))
1524 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1525 x = UNSPEC_ADDRESS (x);
1527 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1529 *symbol_type = mips_classify_symbol (x, context);
1530 if (*symbol_type == SYMBOL_TLS)
1531 return false;
1533 else
1534 return false;
1536 if (offset == const0_rtx)
1537 return true;
1539 /* Check whether a nonzero offset is valid for the underlying
1540 relocations. */
1541 switch (*symbol_type)
1543 case SYMBOL_ABSOLUTE:
1544 case SYMBOL_FORCE_TO_MEM:
1545 case SYMBOL_32_HIGH:
1546 case SYMBOL_64_HIGH:
1547 case SYMBOL_64_MID:
1548 case SYMBOL_64_LOW:
1549 /* If the target has 64-bit pointers and the object file only
1550 supports 32-bit symbols, the values of those symbols will be
1551 sign-extended. In this case we can't allow an arbitrary offset
1552 in case the 32-bit value X + OFFSET has a different sign from X. */
1553 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1554 return offset_within_block_p (x, INTVAL (offset));
1556 /* In other cases the relocations can handle any offset. */
1557 return true;
1559 case SYMBOL_PC_RELATIVE:
1560 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1561 In this case, we no longer have access to the underlying constant,
1562 but the original symbol-based access was known to be valid. */
1563 if (GET_CODE (x) == LABEL_REF)
1564 return true;
1566 /* Fall through. */
1568 case SYMBOL_GP_RELATIVE:
1569 /* Make sure that the offset refers to something within the
1570 same object block. This should guarantee that the final
1571 PC- or GP-relative offset is within the 16-bit limit. */
1572 return offset_within_block_p (x, INTVAL (offset));
1574 case SYMBOL_GOT_PAGE_OFST:
1575 case SYMBOL_GOTOFF_PAGE:
1576 /* If the symbol is global, the GOT entry will contain the symbol's
1577 address, and we will apply a 16-bit offset after loading it.
1578 If the symbol is local, the linker should provide enough local
1579 GOT entries for a 16-bit offset, but larger offsets may lead
1580 to GOT overflow. */
1581 return SMALL_INT (offset);
1583 case SYMBOL_TPREL:
1584 case SYMBOL_DTPREL:
1585 /* There is no carry between the HI and LO REL relocations, so the
1586 offset is only valid if we know it won't lead to such a carry. */
1587 return mips_offset_within_alignment_p (x, INTVAL (offset));
1589 case SYMBOL_GOT_DISP:
1590 case SYMBOL_GOTOFF_DISP:
1591 case SYMBOL_GOTOFF_CALL:
1592 case SYMBOL_GOTOFF_LOADGP:
1593 case SYMBOL_TLSGD:
1594 case SYMBOL_TLSLDM:
1595 case SYMBOL_GOTTPREL:
1596 case SYMBOL_TLS:
1597 case SYMBOL_HALF:
1598 return false;
1600 gcc_unreachable ();
1603 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1604 single instruction. We rely on the fact that, in the worst case,
1605 all the instructions involved in a MIPS16 address calculation are
1606 extended ones. */
1608 static int
1609 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1611 switch (type)
1613 case SYMBOL_ABSOLUTE:
1614 /* When using 64-bit symbols, we need 5 preparatory instructions,
1615 such as:
1617 lui $at,%highest(symbol)
1618 daddiu $at,$at,%higher(symbol)
1619 dsll $at,$at,16
1620 daddiu $at,$at,%hi(symbol)
1621 dsll $at,$at,16
1623 The final address is then $at + %lo(symbol). With 32-bit
1624 symbols we just need a preparatory LUI for normal mode and
1625 a preparatory LI and SLL for MIPS16. */
1626 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1628 case SYMBOL_GP_RELATIVE:
1629 /* Treat GP-relative accesses as taking a single instruction on
1630 MIPS16 too; the copy of $gp can often be shared. */
1631 return 1;
1633 case SYMBOL_PC_RELATIVE:
1634 /* PC-relative constants can only be used with ADDIUPC,
1635 DADDIUPC, LWPC and LDPC. */
1636 if (mode == MAX_MACHINE_MODE
1637 || GET_MODE_SIZE (mode) == 4
1638 || GET_MODE_SIZE (mode) == 8)
1639 return 1;
1641 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1642 return 0;
1644 case SYMBOL_FORCE_TO_MEM:
1645 /* LEAs will be converted into constant-pool references by
1646 mips_reorg. */
1647 if (mode == MAX_MACHINE_MODE)
1648 return 1;
1650 /* The constant must be loaded and then dereferenced. */
1651 return 0;
1653 case SYMBOL_GOT_DISP:
1654 /* The constant will have to be loaded from the GOT before it
1655 is used in an address. */
1656 if (mode != MAX_MACHINE_MODE)
1657 return 0;
1659 /* Fall through. */
1661 case SYMBOL_GOT_PAGE_OFST:
1662 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1663 local/global classification is accurate. The worst cases are:
1665 (1) For local symbols when generating o32 or o64 code. The assembler
1666 will use:
1668 lw $at,%got(symbol)
1671 ...and the final address will be $at + %lo(symbol).
1673 (2) For global symbols when -mxgot. The assembler will use:
1675 lui $at,%got_hi(symbol)
1676 (d)addu $at,$at,$gp
1678 ...and the final address will be $at + %got_lo(symbol). */
1679 return 3;
1681 case SYMBOL_GOTOFF_PAGE:
1682 case SYMBOL_GOTOFF_DISP:
1683 case SYMBOL_GOTOFF_CALL:
1684 case SYMBOL_GOTOFF_LOADGP:
1685 case SYMBOL_32_HIGH:
1686 case SYMBOL_64_HIGH:
1687 case SYMBOL_64_MID:
1688 case SYMBOL_64_LOW:
1689 case SYMBOL_TLSGD:
1690 case SYMBOL_TLSLDM:
1691 case SYMBOL_DTPREL:
1692 case SYMBOL_GOTTPREL:
1693 case SYMBOL_TPREL:
1694 case SYMBOL_HALF:
1695 /* A 16-bit constant formed by a single relocation, or a 32-bit
1696 constant formed from a high 16-bit relocation and a low 16-bit
1697 relocation. Use mips_split_p to determine which. 32-bit
1698 constants need an "lui; addiu" sequence for normal mode and
1699 an "li; sll; addiu" sequence for MIPS16 mode. */
1700 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1702 case SYMBOL_TLS:
1703 /* We don't treat a bare TLS symbol as a constant. */
1704 return 0;
1706 gcc_unreachable ();
1709 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1710 to load symbols of type TYPE into a register. Return 0 if the given
1711 type of symbol cannot be used as an immediate operand.
1713 Otherwise, return the number of instructions needed to load or store
1714 values of mode MODE to or from addresses of type TYPE. Return 0 if
1715 the given type of symbol is not valid in addresses.
1717 In both cases, treat extended MIPS16 instructions as two instructions. */
1719 static int
1720 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1722 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
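/* Illustrative counts implied by the two functions above (a sketch,
   not exhaustive): with 32-bit symbols, a normal-mode SYMBOL_ABSOLUTE
   load is counted as 2 by mips_symbol_insns_1 (LUI plus the %lo
   operation) and the MIPS16 version as 3 (LI, SLL, then the %lo
   operation).  Because all three MIPS16 instructions may be extended,
   mips_symbol_insns reports 6 for that case.  A SYMBOL_GP_RELATIVE
   access is counted as 1, or 2 in MIPS16 mode.  */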
1725 /* A for_each_rtx callback. Stop the search if *X references a
1726 thread-local symbol. */
1728 static int
1729 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1731 return mips_tls_symbol_p (*x);
1734 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1736 static bool
1737 mips_cannot_force_const_mem (rtx x)
1739 rtx base, offset;
1741 if (!TARGET_MIPS16)
1743 /* As an optimization, reject constants that mips_legitimize_move
1744 can expand inline.
1746 Suppose we have a multi-instruction sequence that loads constant C
1747 into register R. If R does not get allocated a hard register, and
1748 R is used in an operand that allows both registers and memory
1749 references, reload will consider forcing C into memory and using
1750 one of the instruction's memory alternatives. Returning false
1751 here will force it to use an input reload instead. */
1752 if (GET_CODE (x) == CONST_INT)
1753 return true;
1755 split_const (x, &base, &offset);
1756 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1757 return true;
1760 /* TLS symbols must be computed by mips_legitimize_move. */
1761 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1762 return true;
1764 return false;
1767 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1768 constants when we're using a per-function constant pool. */
1770 static bool
1771 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1772 const_rtx x ATTRIBUTE_UNUSED)
1774 return !TARGET_MIPS16_PCREL_LOADS;
1777 /* Return true if register REGNO is a valid base register for mode MODE.
1778 STRICT_P is true if REG_OK_STRICT is in effect. */
1781 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1782 bool strict_p)
1784 if (!HARD_REGISTER_NUM_P (regno))
1786 if (!strict_p)
1787 return true;
1788 regno = reg_renumber[regno];
1791 /* These fake registers will be eliminated to either the stack or
1792 hard frame pointer, both of which are usually valid base registers.
1793 Reload deals with the cases where the eliminated form isn't valid. */
1794 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1795 return true;
1797 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1798 values, nothing smaller. There are two problems here:
1800 (a) Instantiating virtual registers can introduce new uses of the
1801 stack pointer. If these virtual registers are valid addresses,
1802 the stack pointer should be too.
1804 (b) Most uses of the stack pointer are not made explicit until
1805 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1806 We don't know until that stage whether we'll be eliminating to the
1807 stack pointer (which needs the restriction) or the hard frame
1808 pointer (which doesn't).
1810 All in all, it seems more consistent to only enforce this restriction
1811 during and after reload. */
1812 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1813 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1815 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
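/* For illustration: with TARGET_MIPS16 and STRICT_P, the checks above
   accept $sp as a base register only for 4- and 8-byte accesses, so
   (mem:SI (reg $sp)) is OK while (mem:QI (reg $sp)) is not.  Before
   reload (STRICT_P false), both are accepted, for the reasons given
   in the comment above.  */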
1818 /* Return true if X is a valid base register for mode MODE.
1819 STRICT_P is true if REG_OK_STRICT is in effect. */
1821 static bool
1822 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1824 if (!strict_p && GET_CODE (x) == SUBREG)
1825 x = SUBREG_REG (x);
1827 return (REG_P (x)
1828 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1831 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1832 can address a value of mode MODE. */
1834 static bool
1835 mips_valid_offset_p (rtx x, enum machine_mode mode)
1837 /* Check that X is a signed 16-bit number. */
1838 if (!const_arith_operand (x, Pmode))
1839 return false;
1841 /* We may need to split multiword moves, so make sure that every word
1842 is accessible. */
1843 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1844 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1845 return false;
1847 return true;
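/* A worked example, assuming a 32-bit target (UNITS_PER_WORD == 4):
   for a DImode access, offset 0x7ff8 is accepted because both 0x7ff8
   and 0x7ff8 + 4 fit in a signed 16-bit field, whereas offset 0x7ffc
   is rejected because the second word would need offset 0x8000,
   which is not a SMALL_OPERAND.  */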
1850 /* Return true if a LO_SUM can address a value of mode MODE when the
1851 LO_SUM symbol has type SYMBOL_TYPE. */
1853 static bool
1854 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
1856 /* Check that symbols of type SYMBOL_TYPE can be used to access values
1857 of mode MODE. */
1858 if (mips_symbol_insns (symbol_type, mode) == 0)
1859 return false;
1861 /* Check that there is a known low-part relocation. */
1862 if (mips_lo_relocs[symbol_type] == NULL)
1863 return false;
1865 /* We may need to split multiword moves, so make sure that each word
1866 can be accessed without inducing a carry. This is mainly needed
1867 for o64, which has historically only guaranteed 64-bit alignment
1868 for 128-bit types. */
1869 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1870 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1871 return false;
1873 return true;
1876 /* Return true if X is a valid address for machine mode MODE. If it is,
1877 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
1878 effect. */
1880 static bool
1881 mips_classify_address (struct mips_address_info *info, rtx x,
1882 enum machine_mode mode, bool strict_p)
1884 switch (GET_CODE (x))
1886 case REG:
1887 case SUBREG:
1888 info->type = ADDRESS_REG;
1889 info->reg = x;
1890 info->offset = const0_rtx;
1891 return mips_valid_base_register_p (info->reg, mode, strict_p);
1893 case PLUS:
1894 info->type = ADDRESS_REG;
1895 info->reg = XEXP (x, 0);
1896 info->offset = XEXP (x, 1);
1897 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1898 && mips_valid_offset_p (info->offset, mode));
1900 case LO_SUM:
1901 info->type = ADDRESS_LO_SUM;
1902 info->reg = XEXP (x, 0);
1903 info->offset = XEXP (x, 1);
1904 /* We have to trust the creator of the LO_SUM to do something vaguely
1905 sane. Target-independent code that creates a LO_SUM should also
1906 create and verify the matching HIGH. Target-independent code that
1907 adds an offset to a LO_SUM must prove that the offset will not
1908 induce a carry. Failure to do either of these things would be
1909 a bug, and we are not required to check for it here. The MIPS
1910 backend itself should only create LO_SUMs for valid symbolic
1911 constants, with the high part being either a HIGH or a copy
1912 of _gp. */
1913 info->symbol_type
1914 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1915 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1916 && mips_valid_lo_sum_p (info->symbol_type, mode));
1918 case CONST_INT:
1919 /* Small-integer addresses don't occur very often, but they
1920 are legitimate if $0 is a valid base register. */
1921 info->type = ADDRESS_CONST_INT;
1922 return !TARGET_MIPS16 && SMALL_INT (x);
1924 case CONST:
1925 case LABEL_REF:
1926 case SYMBOL_REF:
1927 info->type = ADDRESS_SYMBOLIC;
1928 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1929 &info->symbol_type)
1930 && mips_symbol_insns (info->symbol_type, mode) > 0
1931 && !mips_split_p[info->symbol_type]);
1933 default:
1934 return false;
1938 /* Return true if X is a legitimate address for a memory operand of mode
1939 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
1941 bool
1942 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1944 struct mips_address_info addr;
1946 return mips_classify_address (&addr, x, mode, strict_p);
1949 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1951 bool
1952 mips_stack_address_p (rtx x, enum machine_mode mode)
1954 struct mips_address_info addr;
1956 return (mips_classify_address (&addr, x, mode, false)
1957 && addr.type == ADDRESS_REG
1958 && addr.reg == stack_pointer_rtx);
1961 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
1962 address instruction. Note that such addresses are not considered
1963 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
1964 is so restricted. */
1966 static bool
1967 mips_lwxs_address_p (rtx addr)
1969 if (ISA_HAS_LWXS
1970 && GET_CODE (addr) == PLUS
1971 && REG_P (XEXP (addr, 1)))
1973 rtx offset = XEXP (addr, 0);
1974 if (GET_CODE (offset) == MULT
1975 && REG_P (XEXP (offset, 0))
1976 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1977 && INTVAL (XEXP (offset, 1)) == 4)
1978 return true;
1980 return false;
1983 /* Return true if a value at OFFSET bytes from base register BASE can be
1984 accessed using an unextended MIPS16 instruction. MODE is the mode of
1985 the value.
1987 Usually the offset in an unextended instruction is a 5-bit field.
1988 The offset is unsigned and shifted left once for LH and SH, twice
1989 for LW and SW, and so on. Exceptions are LWSP and SWSP, which have
1990 an 8-bit immediate field that's shifted left twice. */
1992 static bool
1993 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
1994 unsigned HOST_WIDE_INT offset)
1996 if (offset % GET_MODE_SIZE (mode) == 0)
1998 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1999 return offset < 256U * GET_MODE_SIZE (mode);
2000 return offset < 32U * GET_MODE_SIZE (mode);
2002 return false;
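/* Illustrative ranges implied by the checks above: with a non-$sp
   base, unextended LW/SW offsets are the multiples of 4 in [0, 124]
   and unextended LH/SH offsets are the multiples of 2 in [0, 62];
   with $sp as the base, unextended word accesses (LWSP/SWSP) allow
   the multiples of 4 in [0, 1020].  */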
2005 /* Return the number of instructions needed to load or store a value
2006 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2007 Assume that multiword moves may need to be split into word moves
2008 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2009 enough.
2011 For MIPS16 code, count extended instructions as two instructions. */
2014 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2016 struct mips_address_info addr;
2017 int factor;
2019 /* BLKmode is used for single unaligned loads and stores and should
2020 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2021 meaningless, so we have to single it out as a special case one way
2022 or the other.) */
2023 if (mode != BLKmode && might_split_p)
2024 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2025 else
2026 factor = 1;
2028 if (mips_classify_address (&addr, x, mode, false))
2029 switch (addr.type)
2031 case ADDRESS_REG:
2032 if (TARGET_MIPS16
2033 && !mips16_unextended_reference_p (mode, addr.reg,
2034 UINTVAL (addr.offset)))
2035 return factor * 2;
2036 return factor;
2038 case ADDRESS_LO_SUM:
2039 return TARGET_MIPS16 ? factor * 2 : factor;
2041 case ADDRESS_CONST_INT:
2042 return factor;
2044 case ADDRESS_SYMBOLIC:
2045 return factor * mips_symbol_insns (addr.symbol_type, mode);
2047 return 0;
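/* For example (a sketch, not exhaustive): a normal-mode SImode access
   to (plus $sp 16) needs 1 instruction; a DImode access to the same
   address with MIGHT_SPLIT_P needs 2, because FACTOR is 2; and a
   MIPS16 reference that needs an extended instruction doubles the
   corresponding count.  */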
2050 /* Return the number of instructions needed to load constant X.
2051 Return 0 if X isn't a valid constant. */
2054 mips_const_insns (rtx x)
2056 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2057 enum mips_symbol_type symbol_type;
2058 rtx offset;
2060 switch (GET_CODE (x))
2062 case HIGH:
2063 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2064 &symbol_type)
2065 || !mips_split_p[symbol_type])
2066 return 0;
2068 /* This is simply an LUI for normal mode. It is an extended
2069 LI followed by an extended SLL for MIPS16. */
2070 return TARGET_MIPS16 ? 4 : 1;
2072 case CONST_INT:
2073 if (TARGET_MIPS16)
2074 /* Unsigned 8-bit constants can be loaded using an unextended
2075 LI instruction. Unsigned 16-bit constants can be loaded
2076 using an extended LI. Negative constants must be loaded
2077 using LI and then negated. */
2078 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2079 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2080 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2081 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2082 : 0);
2084 return mips_build_integer (codes, INTVAL (x));
2086 case CONST_DOUBLE:
2087 case CONST_VECTOR:
2088 /* Allow zeros for normal mode, where we can use $0. */
2089 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2091 case CONST:
2092 if (CONST_GP_P (x))
2093 return 1;
2095 /* See if we can refer to X directly. */
2096 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2097 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2099 /* Otherwise try splitting the constant into a base and offset.
2100 16-bit offsets can be added using an extra ADDIU. Larger offsets
2101 must be calculated separately and then added to the base. */
2102 split_const (x, &x, &offset);
2103 if (offset != 0)
2105 int n = mips_const_insns (x);
2106 if (n != 0)
2108 if (SMALL_INT (offset))
2109 return n + 1;
2110 else
2111 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2114 return 0;
2116 case SYMBOL_REF:
2117 case LABEL_REF:
2118 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2119 MAX_MACHINE_MODE);
2121 default:
2122 return 0;
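/* Some illustrative MIPS16 CONST_INT counts from the code above:
   7 costs 1 (unextended LI), 1000 costs 2 (extended LI), -7 costs 2
   (LI then negate) and -1000 costs 3.  A constant such as 0x12345
   fails all four tests, so the function returns 0 and the value will
   normally come from the constant pool instead.  */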
2126 /* X is a doubleword constant that can be handled by splitting it into
2127 two words and loading each word separately. Return the number of
2128 instructions required to do this. */
2131 mips_split_const_insns (rtx x)
2133 unsigned int low, high;
2135 low = mips_const_insns (mips_subword (x, false));
2136 high = mips_const_insns (mips_subword (x, true));
2137 gcc_assert (low > 0 && high > 0);
2138 return low + high;
2141 /* Return the number of instructions needed to implement INSN,
2142 given that it loads from or stores to MEM. Count extended
2143 MIPS16 instructions as two instructions. */
2146 mips_load_store_insns (rtx mem, rtx insn)
2148 enum machine_mode mode;
2149 bool might_split_p;
2150 rtx set;
2152 gcc_assert (MEM_P (mem));
2153 mode = GET_MODE (mem);
2155 /* Try to prove that INSN does not need to be split. */
2156 might_split_p = true;
2157 if (GET_MODE_BITSIZE (mode) == 64)
2159 set = single_set (insn);
2160 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2161 might_split_p = false;
2164 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2167 /* Return the number of instructions needed for an integer division. */
2170 mips_idiv_insns (void)
2172 int count;
2174 count = 1;
2175 if (TARGET_CHECK_ZERO_DIV)
2177 if (GENERATE_DIVIDE_TRAPS)
2178 count++;
2179 else
2180 count += 2;
2183 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2184 count++;
2185 return count;
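/* Illustrative totals from the count above: a plain division is 1
   instruction; the zero-divisor check adds 1 when divide traps are
   available or 2 for a branch-and-break sequence; the -mfix-r4000
   and -mfix-r4400 workarounds add one more.  The result therefore
   ranges from 1 to 4.  */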
2188 /* Emit a move from SRC to DEST. Assume that the move expanders can
2189 handle all moves if !can_create_pseudo_p (). The distinction is
2190 important because, unlike emit_move_insn, the move expanders know
2191 how to force Pmode objects into the constant pool even when the
2192 constant pool address is not itself legitimate. */
2195 mips_emit_move (rtx dest, rtx src)
2197 return (can_create_pseudo_p ()
2198 ? emit_move_insn (dest, src)
2199 : emit_move_insn_1 (dest, src));
2202 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2204 static void
2205 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2207 emit_insn (gen_rtx_SET (VOIDmode, target,
2208 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2211 /* Compute (CODE OP0 OP1) and store the result in a new register
2212 of mode MODE. Return that new register. */
2214 static rtx
2215 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2217 rtx reg;
2219 reg = gen_reg_rtx (mode);
2220 mips_emit_binary (code, reg, op0, op1);
2221 return reg;
2224 /* Copy VALUE to a register and return that register. If new pseudos
2225 are allowed, copy it into a new register, otherwise use DEST. */
2227 static rtx
2228 mips_force_temporary (rtx dest, rtx value)
2230 if (can_create_pseudo_p ())
2231 return force_reg (Pmode, value);
2232 else
2234 mips_emit_move (dest, value);
2235 return dest;
2239 /* Emit a call sequence with call pattern PATTERN and return the call
2240 instruction itself (which is not necessarily the last instruction
2241 emitted). LAZY_P is true if the call address is lazily-bound. */
2243 static rtx
2244 mips_emit_call_insn (rtx pattern, bool lazy_p)
2246 rtx insn;
2248 insn = emit_call_insn (pattern);
2250 /* Lazy-binding stubs require $gp to be valid on entry. */
2251 if (lazy_p)
2252 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2254 if (TARGET_USE_GOT)
2256 /* See the comment above load_call<mode> for details. */
2257 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2258 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2259 emit_insn (gen_update_got_version ());
2261 return insn;
2264 /* Return an instruction that copies $gp into register REG. We want
2265 GCC to treat the register's value as constant, so that its value
2266 can be rematerialized on demand. */
2268 static rtx
2269 gen_load_const_gp (rtx reg)
2271 return (Pmode == SImode
2272 ? gen_load_const_gp_si (reg)
2273 : gen_load_const_gp_di (reg));
2276 /* Return a pseudo register that contains the value of $gp throughout
2277 the current function. Such registers are needed by MIPS16 functions,
2278 for which $gp itself is not a valid base register or addition operand. */
2280 static rtx
2281 mips16_gp_pseudo_reg (void)
2283 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2284 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2286 /* Don't emit an instruction to initialize the pseudo register if
2287 we are being called from the tree optimizers' cost-calculation
2288 routines. */
2289 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2290 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2292 rtx insn, scan, after;
2294 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2296 push_topmost_sequence ();
2297 /* We need to emit the initialization after the FUNCTION_BEG
2298 note, so that it will be integrated. */
2299 after = get_insns ();
2300 for (scan = after; scan != NULL_RTX; scan = NEXT_INSN (scan))
2301 if (NOTE_P (scan) && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2303 after = scan;
2304 break;
2306 insn = emit_insn_after (insn, after);
2307 pop_topmost_sequence ();
2309 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2312 return cfun->machine->mips16_gp_pseudo_rtx;
2315 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2316 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2317 constant in that context and can be split into a high part and a LO_SUM.
2318 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2319 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2321 TEMP is as for mips_force_temporary and is used to load the high
2322 part into a register. */
2324 bool
2325 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2327 enum mips_symbol_context context;
2328 enum mips_symbol_type symbol_type;
2329 rtx high;
2331 context = (mode == MAX_MACHINE_MODE
2332 ? SYMBOL_CONTEXT_LEA
2333 : SYMBOL_CONTEXT_MEM);
2334 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2335 || mips_symbol_insns (symbol_type, mode) == 0
2336 || !mips_split_p[symbol_type])
2337 return false;
2339 if (lo_sum_out)
2341 if (symbol_type == SYMBOL_GP_RELATIVE)
2343 if (!can_create_pseudo_p ())
2345 emit_insn (gen_load_const_gp (temp));
2346 high = temp;
2348 else
2349 high = mips16_gp_pseudo_reg ();
2351 else
2353 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2354 high = mips_force_temporary (temp, high);
2356 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2358 return true;
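/* A sketch of the split performed above, with illustrative register
   choices: for a SYMBOL_ABSOLUTE access to "sym", the emitted high
   part and the returned LO_SUM correspond roughly to:

	lui	$at,%hi(sym)		# high part, loaded into TEMP
	lw	$4,%lo(sym)($at)	# MEM whose address is the LO_SUM

   For SYMBOL_GP_RELATIVE, the high part is instead a copy of $gp,
   as handled specially above.  */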
2361 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2362 then add CONST_INT OFFSET to the result. */
2364 static rtx
2365 mips_unspec_address_offset (rtx base, rtx offset,
2366 enum mips_symbol_type symbol_type)
2368 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2369 UNSPEC_ADDRESS_FIRST + symbol_type);
2370 if (offset != const0_rtx)
2371 base = gen_rtx_PLUS (Pmode, base, offset);
2372 return gen_rtx_CONST (Pmode, base);
2375 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2376 type SYMBOL_TYPE. */
2379 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2381 rtx base, offset;
2383 split_const (address, &base, &offset);
2384 return mips_unspec_address_offset (base, offset, symbol_type);
2387 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2388 high part to BASE and return the result. Just return BASE otherwise.
2389 TEMP is as for mips_force_temporary.
2391 The returned expression can be used as the first operand to a LO_SUM. */
2393 static rtx
2394 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2395 enum mips_symbol_type symbol_type)
2397 if (mips_split_p[symbol_type])
2399 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2400 addr = mips_force_temporary (temp, addr);
2401 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2403 return base;
2406 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2407 mips_force_temporary; it is only needed when OFFSET is not a
2408 SMALL_OPERAND. */
2410 static rtx
2411 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2413 if (!SMALL_OPERAND (offset))
2415 rtx high;
2417 if (TARGET_MIPS16)
2419 /* Load the full offset into a register so that we can use
2420 an unextended instruction for the address itself. */
2421 high = GEN_INT (offset);
2422 offset = 0;
2424 else
2426 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2427 high = GEN_INT (CONST_HIGH_PART (offset));
2428 offset = CONST_LOW_PART (offset);
2430 high = mips_force_temporary (temp, high);
2431 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2433 return plus_constant (reg, offset);
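/* A worked example of the non-MIPS16 path above, assuming the usual
   definitions of CONST_HIGH_PART and CONST_LOW_PART: for OFFSET ==
   0x8000, the high part is 0x10000 and the low part is -0x8000, so
   0x10000 is added to REG first and the final address uses the
   in-range offset -0x8000.  The rounding in the high part keeps the
   low part within the signed 16-bit range.  */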
2436 /* The __tls_get_addr symbol. */
2437 static GTY(()) rtx mips_tls_symbol;
2439 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2440 the TLS symbol we are referencing and TYPE is the symbol type to use
2441 (either global dynamic or local dynamic). V0 is an RTX for the
2442 return value location. */
2444 static rtx
2445 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2447 rtx insn, loc, a0;
2449 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2451 if (!mips_tls_symbol)
2452 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2454 loc = mips_unspec_address (sym, type);
2456 start_sequence ();
2458 emit_insn (gen_rtx_SET (Pmode, a0,
2459 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2460 insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false);
2461 RTL_CONST_CALL_P (insn) = 1;
2462 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2463 insn = get_insns ();
2465 end_sequence ();
2467 return insn;
2470 /* Return a pseudo register that contains the current thread pointer. */
2472 static rtx
2473 mips_get_tp (void)
2475 rtx tp;
2477 tp = gen_reg_rtx (Pmode);
2478 if (Pmode == DImode)
2479 emit_insn (gen_tls_get_tp_di (tp));
2480 else
2481 emit_insn (gen_tls_get_tp_si (tp));
2482 return tp;
2485 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2486 its address. The return value will be both a valid address and a valid
2487 SET_SRC (either a REG or a LO_SUM). */
2489 static rtx
2490 mips_legitimize_tls_address (rtx loc)
2492 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2493 enum tls_model model;
2495 if (TARGET_MIPS16)
2497 sorry ("MIPS16 TLS");
2498 return gen_reg_rtx (Pmode);
2501 model = SYMBOL_REF_TLS_MODEL (loc);
2502 /* Only TARGET_ABICALLS code can have more than one module; other
2503 code must be static and should not use a GOT. All TLS models
2504 reduce to local exec in this situation. */
2505 if (!TARGET_ABICALLS)
2506 model = TLS_MODEL_LOCAL_EXEC;
2508 switch (model)
2510 case TLS_MODEL_GLOBAL_DYNAMIC:
2511 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2512 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2513 dest = gen_reg_rtx (Pmode);
2514 emit_libcall_block (insn, dest, v0, loc);
2515 break;
2517 case TLS_MODEL_LOCAL_DYNAMIC:
2518 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2519 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2520 tmp1 = gen_reg_rtx (Pmode);
2522 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2523 share the LDM result with other LD model accesses. */
2524 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2525 UNSPEC_TLS_LDM);
2526 emit_libcall_block (insn, tmp1, v0, eqv);
2528 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2529 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2530 mips_unspec_address (loc, SYMBOL_DTPREL));
2531 break;
2533 case TLS_MODEL_INITIAL_EXEC:
2534 tp = mips_get_tp ();
2535 tmp1 = gen_reg_rtx (Pmode);
2536 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2537 if (Pmode == DImode)
2538 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2539 else
2540 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2541 dest = gen_reg_rtx (Pmode);
2542 emit_insn (gen_add3_insn (dest, tmp1, tp));
2543 break;
2545 case TLS_MODEL_LOCAL_EXEC:
2546 tp = mips_get_tp ();
2547 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2548 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2549 mips_unspec_address (loc, SYMBOL_TPREL));
2550 break;
2552 default:
2553 gcc_unreachable ();
2555 return dest;
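/* For illustration, the local-exec case above typically expands to
   something along these lines (register numbers are only examples):

	rdhwr	$3,$29			# thread pointer, via mips_get_tp
	lui	$2,%tprel_hi(x)
	addu	$2,$2,$3
	lw	$2,%tprel_lo(x)($2)	# the LO_SUM supplies %tprel_lo

   The other models differ mainly in how the base address is obtained:
   a call to __tls_get_addr or a GOT load of the TP-relative offset.  */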
2558 /* If X is not a valid address for mode MODE, force it into a register. */
2560 static rtx
2561 mips_force_address (rtx x, enum machine_mode mode)
2563 if (!mips_legitimate_address_p (mode, x, false))
2564 x = force_reg (Pmode, x);
2565 return x;
2568 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2569 be legitimized in a way that the generic machinery might not expect,
2570 put the new address in *XLOC and return true. MODE is the mode of
2571 the memory being accessed. */
2573 bool
2574 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2576 rtx base, addr;
2577 HOST_WIDE_INT offset;
2579 if (mips_tls_symbol_p (*xloc))
2581 *xloc = mips_legitimize_tls_address (*xloc);
2582 return true;
2585 /* See if the address can split into a high part and a LO_SUM. */
2586 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2588 *xloc = mips_force_address (addr, mode);
2589 return true;
2592 /* Handle BASE + OFFSET using mips_add_offset. */
2593 mips_split_plus (*xloc, &base, &offset);
2594 if (offset != 0)
2596 if (!mips_valid_base_register_p (base, mode, false))
2597 base = copy_to_mode_reg (Pmode, base);
2598 addr = mips_add_offset (NULL, base, offset);
2599 *xloc = mips_force_address (addr, mode);
2600 return true;
2602 return false;
2605 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2607 void
2608 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2610 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2611 enum machine_mode mode;
2612 unsigned int i, num_ops;
2613 rtx x;
2615 mode = GET_MODE (dest);
2616 num_ops = mips_build_integer (codes, value);
2618 /* Apply each binary operation to X. Invariant: X is a legitimate
2619 source operand for a SET pattern. */
2620 x = GEN_INT (codes[0].value);
2621 for (i = 1; i < num_ops; i++)
2623 if (!can_create_pseudo_p ())
2625 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2626 x = temp;
2628 else
2629 x = force_reg (mode, x);
2630 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2633 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2636 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2637 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2638 move_operand. */
2640 static void
2641 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2643 rtx base, offset;
2645 /* Split moves of big integers into smaller pieces. */
2646 if (splittable_const_int_operand (src, mode))
2648 mips_move_integer (dest, dest, INTVAL (src));
2649 return;
2652 /* Split moves of symbolic constants into high/low pairs. */
2653 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2655 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2656 return;
2659 /* Generate the appropriate access sequences for TLS symbols. */
2660 if (mips_tls_symbol_p (src))
2662 mips_emit_move (dest, mips_legitimize_tls_address (src));
2663 return;
2666 /* If we have (const (plus symbol offset)), and that expression cannot
2667 be forced into memory, load the symbol first and add in the offset.
2668 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2669 forced into memory, as it usually produces better code. */
2670 split_const (src, &base, &offset);
2671 if (offset != const0_rtx
2672 && (targetm.cannot_force_const_mem (src)
2673 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2675 base = mips_force_temporary (dest, base);
2676 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2677 return;
2680 src = force_const_mem (mode, src);
2682 /* When using explicit relocs, constant pool references are sometimes
2683 not legitimate addresses. */
2684 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2685 mips_emit_move (dest, src);
2688 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2689 sequence that is valid. */
2691 bool
2692 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2694 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2696 mips_emit_move (dest, force_reg (mode, src));
2697 return true;
2700 /* We need to deal with constants that would be legitimate
2701 immediate_operands but aren't legitimate move_operands. */
2702 if (CONSTANT_P (src) && !move_operand (src, mode))
2704 mips_legitimize_const_move (mode, dest, src);
2705 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2706 return true;
2708 return false;
2711 /* Return true if value X in context CONTEXT is a small-data address
2712 that can be rewritten as a LO_SUM. */
2714 static bool
2715 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2717 enum mips_symbol_type symbol_type;
2719 return (TARGET_EXPLICIT_RELOCS
2720 && mips_symbolic_constant_p (x, context, &symbol_type)
2721 && symbol_type == SYMBOL_GP_RELATIVE);
2724 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2725 containing MEM, or null if none. */
2727 static int
2728 mips_small_data_pattern_1 (rtx *loc, void *data)
2730 enum mips_symbol_context context;
2732 if (GET_CODE (*loc) == LO_SUM)
2733 return -1;
2735 if (MEM_P (*loc))
2737 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2738 return 1;
2739 return -1;
2742 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2743 return mips_rewrite_small_data_p (*loc, context);
2746 /* Return true if OP refers to small data symbols directly, not through
2747 a LO_SUM. */
2749 bool
2750 mips_small_data_pattern_p (rtx op)
2752 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2755 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2756 DATA is the containing MEM, or null if none. */
2758 static int
2759 mips_rewrite_small_data_1 (rtx *loc, void *data)
2761 enum mips_symbol_context context;
2763 if (MEM_P (*loc))
2765 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2766 return -1;
2769 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2770 if (mips_rewrite_small_data_p (*loc, context))
2771 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2773 if (GET_CODE (*loc) == LO_SUM)
2774 return -1;
2776 return 0;
2779 /* Rewrite instruction pattern PATTERN so that it refers to small data
2780 using explicit relocations. */
2783 mips_rewrite_small_data (rtx pattern)
2785 pattern = copy_insn (pattern);
2786 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
2787 return pattern;
2790 /* We need a lot of little routines to check the range of MIPS16 immediate
2791 operands. */
2793 static int
2794 m16_check_op (rtx op, int low, int high, int mask)
2796 return (GET_CODE (op) == CONST_INT
2797 && IN_RANGE (INTVAL (op), low, high)
2798 && (INTVAL (op) & mask) == 0);
2802 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2804 return m16_check_op (op, 0x1, 0x8, 0);
2808 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2810 return m16_check_op (op, -0x8, 0x7, 0);
2814 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2816 return m16_check_op (op, -0x7, 0x8, 0);
2820 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2822 return m16_check_op (op, -0x10, 0xf, 0);
2826 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2828 return m16_check_op (op, -0xf, 0x10, 0);
2832 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2834 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
2838 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2840 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
2844 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2846 return m16_check_op (op, -0x80, 0x7f, 0);
2850 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2852 return m16_check_op (op, -0x7f, 0x80, 0);
2856 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2858 return m16_check_op (op, 0x0, 0xff, 0);
2862 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2864 return m16_check_op (op, -0xff, 0x0, 0);
2868 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2870 return m16_check_op (op, -0x1, 0xfe, 0);
2874 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2876 return m16_check_op (op, 0x0, 0xff << 2, 3);
2880 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2882 return m16_check_op (op, -0xff << 2, 0x0, 3);
2886 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2888 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
2892 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2894 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
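/* Illustrative ranges accepted by the predicates above: m16_uimm8_1
   accepts [0, 0xff]; m16_uimm8_4 accepts the multiples of 4 in
   [0, 1020]; m16_simm8_8 accepts the multiples of 8 in [-1024, 1016].
   Each "n" variant accepts the negation of the corresponding range.  */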
2897 /* The cost of loading values from the constant pool. It should be
2898 larger than the cost of any constant we want to synthesize inline. */
2899 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2901 /* Return the cost of X when used as an operand to the MIPS16 instruction
2902 that implements CODE. Return -1 if there is no such instruction, or if
2903 X is not a valid immediate operand for it. */
2905 static int
2906 mips16_constant_cost (int code, HOST_WIDE_INT x)
2908 switch (code)
2910 case ASHIFT:
2911 case ASHIFTRT:
2912 case LSHIFTRT:
2913 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2914 other shifts are extended. The shift patterns truncate the shift
2915 count to the right size, so there are no out-of-range values. */
2916 if (IN_RANGE (x, 1, 8))
2917 return 0;
2918 return COSTS_N_INSNS (1);
2920 case PLUS:
2921 if (IN_RANGE (x, -128, 127))
2922 return 0;
2923 if (SMALL_OPERAND (x))
2924 return COSTS_N_INSNS (1);
2925 return -1;
2927 case LEU:
2928 /* Like LE, but reject the always-true case. */
2929 if (x == -1)
2930 return -1;
2931 case LE:
2932 /* We add 1 to the immediate and use SLT. */
2933 x += 1;
2934 case XOR:
2935 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2936 case LT:
2937 case LTU:
2938 if (IN_RANGE (x, 0, 255))
2939 return 0;
2940 if (SMALL_OPERAND_UNSIGNED (x))
2941 return COSTS_N_INSNS (1);
2942 return -1;
2944 case EQ:
2945 case NE:
2946 /* Equality comparisons with 0 are cheap. */
2947 if (x == 0)
2948 return 0;
2949 return -1;
2951 default:
2952 return -1;
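/* An example of the LE/LEU handling above: for (le r 100), X becomes
   101, which is in [0, 255], so the cost is 0 (the constant fits an
   unextended comparison); for (le r 300), X becomes 301, which is
   only SMALL_OPERAND_UNSIGNED, so the cost is COSTS_N_INSNS (1);
   and (leu r -1) is rejected because it is always true.  */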
2956 /* Return true if there is a non-MIPS16 instruction that implements CODE
2957 and if that instruction accepts X as an immediate operand. */
2959 static int
2960 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2962 switch (code)
2964 case ASHIFT:
2965 case ASHIFTRT:
2966 case LSHIFTRT:
2967 /* All shift counts are truncated to a valid constant. */
2968 return true;
2970 case ROTATE:
2971 case ROTATERT:
2972 /* Likewise rotates, if the target supports rotates at all. */
2973 return ISA_HAS_ROR;
2975 case AND:
2976 case IOR:
2977 case XOR:
2978 /* These instructions take 16-bit unsigned immediates. */
2979 return SMALL_OPERAND_UNSIGNED (x);
2981 case PLUS:
2982 case LT:
2983 case LTU:
2984 /* These instructions take 16-bit signed immediates. */
2985 return SMALL_OPERAND (x);
2987 case EQ:
2988 case NE:
2989 case GT:
2990 case GTU:
2991 /* The "immediate" forms of these instructions are really
2992 implemented as comparisons with register 0. */
2993 return x == 0;
2995 case GE:
2996 case GEU:
2997 /* Likewise, meaning that the only valid immediate operand is 1. */
2998 return x == 1;
3000 case LE:
3001 /* We add 1 to the immediate and use SLT. */
3002 return SMALL_OPERAND (x + 1);
3004 case LEU:
3005 /* Likewise SLTU, but reject the always-true case. */
3006 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3008 case SIGN_EXTRACT:
3009 case ZERO_EXTRACT:
3010 /* The bit position and size are immediate operands. */
3011 return ISA_HAS_EXT_INS;
3013 default:
3014 /* By default assume that $0 can be used for 0. */
3015 return x == 0;
3019 /* Return the cost of binary operation X, given that the instruction
3020 sequence for a word-sized or smaller operation has cost SINGLE_COST
3021 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3023 static int
3024 mips_binary_cost (rtx x, int single_cost, int double_cost)
3026 int cost;
3028 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3029 cost = double_cost;
3030 else
3031 cost = single_cost;
3032 return (cost
3033 + rtx_cost (XEXP (x, 0), 0)
3034 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3037 /* Return the cost of floating-point multiplications of mode MODE. */
3039 static int
3040 mips_fp_mult_cost (enum machine_mode mode)
3042 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3045 /* Return the cost of floating-point divisions of mode MODE. */
3047 static int
3048 mips_fp_div_cost (enum machine_mode mode)
3050 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3053 /* Return the cost of sign-extending OP to mode MODE, not including the
3054 cost of OP itself. */
3056 static int
3057 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3059 if (MEM_P (op))
3060 /* Extended loads are as cheap as unextended ones. */
3061 return 0;
3063 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3064 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3065 return 0;
3067 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3068 /* We can use SEB or SEH. */
3069 return COSTS_N_INSNS (1);
3071 /* We need to use a shift left and a shift right. */
3072 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3075 /* Return the cost of zero-extending OP to mode MODE, not including the
3076 cost of OP itself. */
3078 static int
3079 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3081 if (MEM_P (op))
3082 /* Extended loads are as cheap as unextended ones. */
3083 return 0;
3085 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3086 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3087 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3089 if (GENERATE_MIPS16E)
3090 /* We can use ZEB or ZEH. */
3091 return COSTS_N_INSNS (1);
3093 if (TARGET_MIPS16)
3094 /* We need to load 0xff or 0xffff into a register and use AND. */
3095 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3097 /* We can use ANDI. */
3098 return COSTS_N_INSNS (1);
3101 /* Implement TARGET_RTX_COSTS. */
3103 static bool
3104 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3106 enum machine_mode mode = GET_MODE (x);
3107 bool float_mode_p = FLOAT_MODE_P (mode);
3108 int cost;
3109 rtx addr;
3111 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3112 appear in the instruction stream, and the cost of a comparison is
3113 really the cost of the branch or scc condition. At the time of
3114 writing, GCC only uses an explicit outer COMPARE code when optabs
3115 is testing whether a constant is expensive enough to force into a
3116 register. We want optabs to pass such constants through the MIPS
3117 expanders instead, so make all constants very cheap here. */
3118 if (outer_code == COMPARE)
3120 gcc_assert (CONSTANT_P (x));
3121 *total = 0;
3122 return true;
3125 switch (code)
3127 case CONST_INT:
3128 /* Treat *clear_upper32-style ANDs as having zero cost in the
3129 second operand. The cost is entirely in the first operand.
3131 ??? This is needed because we would otherwise try to CSE
3132 the constant operand. Although that's the right thing for
3133 instructions that continue to be a register operation throughout
3134 compilation, it is disastrous for instructions that could
3135 later be converted into a memory operation. */
3136 if (TARGET_64BIT
3137 && outer_code == AND
3138 && UINTVAL (x) == 0xffffffff)
3140 *total = 0;
3141 return true;
3144 if (TARGET_MIPS16)
3146 cost = mips16_constant_cost (outer_code, INTVAL (x));
3147 if (cost >= 0)
3149 *total = cost;
3150 return true;
3153 else
3155 /* When not optimizing for size, we care more about the cost
3156 of hot code, and hot code is often in a loop. If a constant
3157 operand needs to be forced into a register, we will often be
3158 able to hoist the constant load out of the loop, so the load
3159 should not contribute to the cost. */
3160 if (!optimize_size
3161 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3163 *total = 0;
3164 return true;
3167 /* Fall through. */
3169 case CONST:
3170 case SYMBOL_REF:
3171 case LABEL_REF:
3172 case CONST_DOUBLE:
3173 if (force_to_mem_operand (x, VOIDmode))
3175 *total = COSTS_N_INSNS (1);
3176 return true;
3178 cost = mips_const_insns (x);
3179 if (cost > 0)
3181 /* If the constant is likely to be stored in a GPR, SETs of
3182 single-insn constants are as cheap as register sets; we
3183 never want to CSE them.
3185 Don't reduce the cost of storing a floating-point zero in
3186 FPRs. If we have a zero in an FPR for other reasons, we
3187 can get better cfg-cleanup and delayed-branch results by
3188 using it consistently, rather than using $0 sometimes and
3189 an FPR at other times. Also, moves between floating-point
3190 registers are sometimes cheaper than (D)MTC1 $0. */
3191 if (cost == 1
3192 && outer_code == SET
3193 && !(float_mode_p && TARGET_HARD_FLOAT))
3194 cost = 0;
3195 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3196 want to CSE the constant itself. It is usually better to
3197 have N copies of the last operation in the sequence and one
3198 shared copy of the other operations. (Note that this is
3199 not true for MIPS16 code, where the final operation in the
3200 sequence is often an extended instruction.)
3202 Also, if we have a CONST_INT, we don't know whether it is
3203 for a word or doubleword operation, so we cannot rely on
3204 the result of mips_build_integer. */
3205 else if (!TARGET_MIPS16
3206 && (outer_code == SET || mode == VOIDmode))
3207 cost = 1;
3208 *total = COSTS_N_INSNS (cost);
3209 return true;
3211 /* The value will need to be fetched from the constant pool. */
3212 *total = CONSTANT_POOL_COST;
3213 return true;
3215 case MEM:
3216 /* If the address is legitimate, return the number of
3217 instructions it needs. */
3218 addr = XEXP (x, 0);
3219 cost = mips_address_insns (addr, mode, true);
3220 if (cost > 0)
3222 *total = COSTS_N_INSNS (cost + 1);
3223 return true;
3225 /* Check for a scaled indexed address. */
3226 if (mips_lwxs_address_p (addr))
3228 *total = COSTS_N_INSNS (2);
3229 return true;
3231 /* Otherwise use the default handling. */
3232 return false;
3234 case FFS:
3235 *total = COSTS_N_INSNS (6);
3236 return false;
3238 case NOT:
3239 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3240 return false;
3242 case AND:
3243 /* Check for a *clear_upper32 pattern and treat it like a zero
3244 extension. See the pattern's comment for details. */
3245 if (TARGET_64BIT
3246 && mode == DImode
3247 && CONST_INT_P (XEXP (x, 1))
3248 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3250 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3251 + rtx_cost (XEXP (x, 0), 0));
3252 return true;
3254 /* Fall through. */
3256 case IOR:
3257 case XOR:
3258 /* Double-word operations use two single-word operations. */
3259 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3260 return true;
3262 case ASHIFT:
3263 case ASHIFTRT:
3264 case LSHIFTRT:
3265 case ROTATE:
3266 case ROTATERT:
3267 if (CONSTANT_P (XEXP (x, 1)))
3268 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3269 else
3270 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3271 return true;
3273 case ABS:
3274 if (float_mode_p)
3275 *total = mips_cost->fp_add;
3276 else
3277 *total = COSTS_N_INSNS (4);
3278 return false;
3280 case LO_SUM:
3281 /* Low-part immediates need an extended MIPS16 instruction. */
3282 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3283 + rtx_cost (XEXP (x, 0), 0));
3284 return true;
3286 case LT:
3287 case LTU:
3288 case LE:
3289 case LEU:
3290 case GT:
3291 case GTU:
3292 case GE:
3293 case GEU:
3294 case EQ:
3295 case NE:
3296 case UNORDERED:
3297 case LTGT:
3298 /* Branch comparisons have VOIDmode, so use the first operand's
3299 mode instead. */
3300 mode = GET_MODE (XEXP (x, 0));
3301 if (FLOAT_MODE_P (mode))
3303 *total = mips_cost->fp_add;
3304 return false;
3306 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3307 return true;
3309 case MINUS:
3310 if (float_mode_p
3311 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3312 && TARGET_FUSED_MADD
3313 && !HONOR_NANS (mode)
3314 && !HONOR_SIGNED_ZEROS (mode))
3316 /* See if we can use NMADD or NMSUB. See mips.md for the
3317 associated patterns. */
3318 rtx op0 = XEXP (x, 0);
3319 rtx op1 = XEXP (x, 1);
3320 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3322 *total = (mips_fp_mult_cost (mode)
3323 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3324 + rtx_cost (XEXP (op0, 1), 0)
3325 + rtx_cost (op1, 0));
3326 return true;
3328 if (GET_CODE (op1) == MULT)
3330 *total = (mips_fp_mult_cost (mode)
3331 + rtx_cost (op0, 0)
3332 + rtx_cost (XEXP (op1, 0), 0)
3333 + rtx_cost (XEXP (op1, 1), 0));
3334 return true;
3337 /* Fall through. */
3339 case PLUS:
3340 if (float_mode_p)
3342 /* If this is part of a MADD or MSUB, treat the PLUS as
3343 being free. */
3344 if (ISA_HAS_FP4
3345 && TARGET_FUSED_MADD
3346 && GET_CODE (XEXP (x, 0)) == MULT)
3347 *total = 0;
3348 else
3349 *total = mips_cost->fp_add;
3350 return false;
3353 /* Double-word operations require three single-word operations and
3354 an SLTU. The MIPS16 version then needs to move the result of
3355 the SLTU from $24 to a MIPS16 register. */
3356 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3357 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3358 return true;
3360 case NEG:
3361 if (float_mode_p
3362 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3363 && TARGET_FUSED_MADD
3364 && !HONOR_NANS (mode)
3365 && HONOR_SIGNED_ZEROS (mode))
3367 /* See if we can use NMADD or NMSUB. See mips.md for the
3368 associated patterns. */
3369 rtx op = XEXP (x, 0);
3370 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3371 && GET_CODE (XEXP (op, 0)) == MULT)
3373 *total = (mips_fp_mult_cost (mode)
3374 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3375 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3376 + rtx_cost (XEXP (op, 1), 0));
3377 return true;
3381 if (float_mode_p)
3382 *total = mips_cost->fp_add;
3383 else
3384 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3385 return false;
3387 case MULT:
3388 if (float_mode_p)
3389 *total = mips_fp_mult_cost (mode);
3390 else if (mode == DImode && !TARGET_64BIT)
3391 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3392 where the mulsidi3 always includes an MFHI and an MFLO. */
3393 *total = (optimize_size
3394 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3395 : mips_cost->int_mult_si * 3 + 6);
3396 else if (optimize_size)
3397 *total = (ISA_HAS_MUL3 ? 1 : 2);
3398 else if (mode == DImode)
3399 *total = mips_cost->int_mult_di;
3400 else
3401 *total = mips_cost->int_mult_si;
3402 return false;
3404 case DIV:
3405 /* Check for a reciprocal. */
3406 if (float_mode_p
3407 && ISA_HAS_FP4
3408 && flag_unsafe_math_optimizations
3409 && XEXP (x, 0) == CONST1_RTX (mode))
3411 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3412 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3413 division as being free. */
3414 *total = rtx_cost (XEXP (x, 1), 0);
3415 else
3416 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3417 return true;
3419 /* Fall through. */
3421 case SQRT:
3422 case MOD:
3423 if (float_mode_p)
3425 *total = mips_fp_div_cost (mode);
3426 return false;
3428 /* Fall through. */
3430 case UDIV:
3431 case UMOD:
3432 if (optimize_size)
3434 /* It is our responsibility to make division by a power of 2
3435 as cheap as 2 register additions if we want the division
3436 expanders to be used for such operations; see the setting
3437 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3438 should always produce shorter code than using
3439 expand_sdiv2_pow2. */
3440 if (TARGET_MIPS16
3441 && CONST_INT_P (XEXP (x, 1))
3442 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3444 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3445 return true;
3447 *total = COSTS_N_INSNS (mips_idiv_insns ());
3449 else if (mode == DImode)
3450 *total = mips_cost->int_div_di;
3451 else
3452 *total = mips_cost->int_div_si;
3453 return false;
3455 case SIGN_EXTEND:
3456 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3457 return false;
3459 case ZERO_EXTEND:
3460 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3461 return false;
3463 case FLOAT:
3464 case UNSIGNED_FLOAT:
3465 case FIX:
3466 case FLOAT_EXTEND:
3467 case FLOAT_TRUNCATE:
3468 *total = mips_cost->fp_add;
3469 return false;
3471 default:
3472 return false;
3476 /* Implement TARGET_ADDRESS_COST. */
3478 static int
3479 mips_address_cost (rtx addr)
3481 return mips_address_insns (addr, SImode, false);
3484 /* Return one word of double-word value OP, taking into account the fixed
3485 endianness of certain registers. HIGH_P is true to select the high part,
3486 false to select the low part. */
3489 mips_subword (rtx op, bool high_p)
3491 unsigned int byte, offset;
3492 enum machine_mode mode;
3494 mode = GET_MODE (op);
3495 if (mode == VOIDmode)
3496 mode = TARGET_64BIT ? TImode : DImode;
3498 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3499 byte = UNITS_PER_WORD;
3500 else
3501 byte = 0;
3503 if (FP_REG_RTX_P (op))
3505 /* Paired FPRs are always ordered little-endian. */
3506 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3507 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3510 if (MEM_P (op))
3511 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3513 return simplify_gen_subreg (word_mode, op, mode, byte);
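/* An endianness example for the function above, assuming a big-endian
   32-bit target: for a DImode value in GPRs $4/$5, the high word
   (HIGH_P) is $4, at byte offset 0, and the low word is $5.  For a
   64-bit value held in a pair of 32-bit FPRs, the low word is always
   in the lower-numbered FPR, regardless of endianness, as the comment
   in the FP_REG_RTX_P case says.  */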
3516 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3518 bool
3519 mips_split_64bit_move_p (rtx dest, rtx src)
3521 if (TARGET_64BIT)
3522 return false;
3524 /* FPR-to-FPR moves can be done in a single instruction, if they're
3525 allowed at all. */
3526 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3527 return false;
3529 /* Check for floating-point loads and stores. */
3530 if (ISA_HAS_LDC1_SDC1)
3532 if (FP_REG_RTX_P (dest) && MEM_P (src))
3533 return false;
3534 if (FP_REG_RTX_P (src) && MEM_P (dest))
3535 return false;
3537 return true;
3540 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3541 this function handles 64-bit moves for which mips_split_64bit_move_p
3542 holds. For 64-bit targets, this function handles 128-bit moves. */
3544 void
3545 mips_split_doubleword_move (rtx dest, rtx src)
3547 rtx low_dest;
3549 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3551 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3552 emit_insn (gen_move_doubleword_fprdi (dest, src));
3553 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3554 emit_insn (gen_move_doubleword_fprdf (dest, src));
3555 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3556 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3557 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
3558 emit_insn (gen_move_doubleword_fprv2si (dest, src));
3559 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
3560 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
3561 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
3562 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
3563 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3564 emit_insn (gen_move_doubleword_fprtf (dest, src));
3565 else
3566 gcc_unreachable ();
3568 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3570 low_dest = mips_subword (dest, false);
3571 mips_emit_move (low_dest, mips_subword (src, false));
3572 if (TARGET_64BIT)
3573 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3574 else
3575 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3577 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3579 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3580 if (TARGET_64BIT)
3581 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3582 else
3583 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3585 else
3587 /* The operation can be split into two normal moves. Decide in
3588 which order to do them. */
3589 low_dest = mips_subword (dest, false);
3590 if (REG_P (low_dest)
3591 && reg_overlap_mentioned_p (low_dest, src))
3593 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3594 mips_emit_move (low_dest, mips_subword (src, false));
3596 else
3598 mips_emit_move (low_dest, mips_subword (src, false));
3599 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3604 /* Return the appropriate instructions to move SRC into DEST. Assume
3605 that SRC is operand 1 and DEST is operand 0. */
3607 const char *
3608 mips_output_move (rtx dest, rtx src)
3610 enum rtx_code dest_code, src_code;
3611 enum machine_mode mode;
3612 enum mips_symbol_type symbol_type;
3613 bool dbl_p;
3615 dest_code = GET_CODE (dest);
3616 src_code = GET_CODE (src);
3617 mode = GET_MODE (dest);
3618 dbl_p = (GET_MODE_SIZE (mode) == 8);
3620 if (dbl_p && mips_split_64bit_move_p (dest, src))
3621 return "#";
3623 if ((src_code == REG && GP_REG_P (REGNO (src)))
3624 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3626 if (dest_code == REG)
3628 if (GP_REG_P (REGNO (dest)))
3629 return "move\t%0,%z1";
3631 /* Moves to HI are handled by special .md insns. */
3632 if (REGNO (dest) == LO_REGNUM)
3633 return "mtlo\t%z1";
3635 if (DSP_ACC_REG_P (REGNO (dest)))
3637 static char retval[] = "mt__\t%z1,%q0";
3639 retval[2] = reg_names[REGNO (dest)][4];
3640 retval[3] = reg_names[REGNO (dest)][5];
3641 return retval;
3644 if (FP_REG_P (REGNO (dest)))
3645 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3647 if (ALL_COP_REG_P (REGNO (dest)))
3649 static char retval[] = "dmtc_\t%z1,%0";
3651 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3652 return dbl_p ? retval : retval + 1;
3655 if (dest_code == MEM)
3656 switch (GET_MODE_SIZE (mode))
3658 case 1: return "sb\t%z1,%0";
3659 case 2: return "sh\t%z1,%0";
3660 case 4: return "sw\t%z1,%0";
3661 case 8: return "sd\t%z1,%0";
3664 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3666 if (src_code == REG)
3668 /* Moves from HI are handled by special .md insns. */
3669 if (REGNO (src) == LO_REGNUM)
3671 /* When generating VR4120 or VR4130 code, we use MACC and
3672 DMACC instead of MFLO. This avoids both the normal
3673 MIPS III HI/LO hazards and the errata related to
3674 -mfix-vr4130. */
3675 if (ISA_HAS_MACCHI)
3676 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3677 return "mflo\t%0";
3680 if (DSP_ACC_REG_P (REGNO (src)))
3682 static char retval[] = "mf__\t%0,%q1";
3684 retval[2] = reg_names[REGNO (src)][4];
3685 retval[3] = reg_names[REGNO (src)][5];
3686 return retval;
3689 if (FP_REG_P (REGNO (src)))
3690 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3692 if (ALL_COP_REG_P (REGNO (src)))
3694 static char retval[] = "dmfc_\t%0,%1";
3696 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3697 return dbl_p ? retval : retval + 1;
3700 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3701 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3704 if (src_code == MEM)
3705 switch (GET_MODE_SIZE (mode))
3707 case 1: return "lbu\t%0,%1";
3708 case 2: return "lhu\t%0,%1";
3709 case 4: return "lw\t%0,%1";
3710 case 8: return "ld\t%0,%1";
3713 if (src_code == CONST_INT)
3715 /* Don't use the X format for the operand itself, because that
3716 will give out-of-range numbers for 64-bit hosts and 32-bit
3717 targets. */
3718 if (!TARGET_MIPS16)
3719 return "li\t%0,%1\t\t\t# %X1";
3721 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3722 return "li\t%0,%1";
3724 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3725 return "#";
3728 if (src_code == HIGH)
3729 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3731 if (CONST_GP_P (src))
3732 return "move\t%0,%1";
3734 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3735 && mips_lo_relocs[symbol_type] != 0)
3737 /* A signed 16-bit constant formed by applying a relocation
3738 operator to a symbolic address. */
3739 gcc_assert (!mips_split_p[symbol_type]);
3740 return "li\t%0,%R1";
3743 if (symbolic_operand (src, VOIDmode))
3745 gcc_assert (TARGET_MIPS16
3746 ? TARGET_MIPS16_TEXT_LOADS
3747 : !TARGET_EXPLICIT_RELOCS);
3748 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3751 if (src_code == REG && FP_REG_P (REGNO (src)))
3753 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3755 if (GET_MODE (dest) == V2SFmode)
3756 return "mov.ps\t%0,%1";
3757 else
3758 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3761 if (dest_code == MEM)
3762 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3764 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3766 if (src_code == MEM)
3767 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3769 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3771 static char retval[] = "l_c_\t%0,%1";
3773 retval[1] = (dbl_p ? 'd' : 'w');
3774 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3775 return retval;
3777 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3779 static char retval[] = "s_c_\t%1,%0";
3781 retval[1] = (dbl_p ? 'd' : 'w');
3782 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3783 return retval;
3785 gcc_unreachable ();
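/* Editor's note (illustrative, not part of the original source): a few of
   the template choices made above, for a 32-bit target:

     GPR  <- 4-byte MEM            "lw\t%0,%1"
     MEM  <- GPR or zero (4 bytes) "sw\t%z1,%0"
     FPR  <- GPR or zero (SFmode)  "mtc1\t%z1,%0"
     FPR  <- MEM (SFmode)          "lwc1\t%0,%1"
     FPR  <- FPR (DFmode)          "mov.d\t%0,%1"

   Any 64-bit move for which mips_split_64bit_move_p holds is deferred with
   "#" so that it can be split into word-sized moves later.  */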
3788 /* Return true if CMP1 is a suitable second operand for integer ordering
3789 test CODE. See also the *sCC patterns in mips.md. */
3791 static bool
3792 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
3794 switch (code)
3796 case GT:
3797 case GTU:
3798 return reg_or_0_operand (cmp1, VOIDmode);
3800 case GE:
3801 case GEU:
3802 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3804 case LT:
3805 case LTU:
3806 return arith_operand (cmp1, VOIDmode);
3808 case LE:
3809 return sle_operand (cmp1, VOIDmode);
3811 case LEU:
3812 return sleu_operand (cmp1, VOIDmode);
3814 default:
3815 gcc_unreachable ();
3819 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
3820 integer ordering test *CODE, or if an equivalent combination can
3821 be formed by adjusting *CODE and *CMP1. When returning true, update
3822 *CODE and *CMP1 with the chosen code and operand, otherwise leave
3823 them alone. */
3825 static bool
3826 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
3827 enum machine_mode mode)
3829 HOST_WIDE_INT plus_one;
3831 if (mips_int_order_operand_ok_p (*code, *cmp1))
3832 return true;
3834 if (GET_CODE (*cmp1) == CONST_INT)
3835 switch (*code)
3837 case LE:
3838 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3839 if (INTVAL (*cmp1) < plus_one)
3841 *code = LT;
3842 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3843 return true;
3845 break;
3847 case LEU:
3848 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3849 if (plus_one != 0)
3851 *code = LTU;
3852 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3853 return true;
3855 break;
3857 default:
3858 break;
3860 return false;
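/* Editor's note (illustrative): with SImode operands the CONST_INT cases
   above rewrite, for example,

     x <= 5          into  x < 6        (LE  -> LT)
     x <=u 0xfffe    into  x <u 0xffff  (LEU -> LTU)

   but leave "x <= 0x7fffffff" and "x <=u 0xffffffff" alone, because adding
   one would wrap and the guards above fail; mips_emit_int_order_test then
   falls back to the inverted test or to forcing the operand into a
   register.  */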
3863 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
3864 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
3865 is nonnull, it's OK to set TARGET to the inverse of the result and
3866 flip *INVERT_PTR instead. */
3868 static void
3869 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
3870 rtx target, rtx cmp0, rtx cmp1)
3872 enum machine_mode mode;
3874 /* First see if there is a MIPS instruction that can do this operation.
3875 If not, try doing the same for the inverse operation. If that also
3876 fails, force CMP1 into a register and try again. */
3877 mode = GET_MODE (cmp0);
3878 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
3879 mips_emit_binary (code, target, cmp0, cmp1);
3880 else
3882 enum rtx_code inv_code = reverse_condition (code);
3883 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
3885 cmp1 = force_reg (mode, cmp1);
3886 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
3888 else if (invert_ptr == 0)
3890 rtx inv_target;
3892 inv_target = mips_force_binary (GET_MODE (target),
3893 inv_code, cmp0, cmp1);
3894 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3896 else
3898 *invert_ptr = !*invert_ptr;
3899 mips_emit_binary (inv_code, target, cmp0, cmp1);
3904 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3905 The register will have the same mode as CMP0. */
3907 static rtx
3908 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3910 if (cmp1 == const0_rtx)
3911 return cmp0;
3913 if (uns_arith_operand (cmp1, VOIDmode))
3914 return expand_binop (GET_MODE (cmp0), xor_optab,
3915 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3917 return expand_binop (GET_MODE (cmp0), sub_optab,
3918 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
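/* Editor's note (illustrative): "x == 0" needs no instruction at all, since
   X itself is returned; "x == 0x1234" becomes an XOR (a single XORI, the
   constant fitting XORI's unsigned 16-bit immediate field); operands that
   do not satisfy uns_arith_operand, such as a small negative constant,
   take the subtraction form instead, which is equally zero iff the two
   values are equal.  */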
3921 /* Convert *CODE into a code that can be used in a floating-point
3922 scc instruction (C.cond.fmt). Return true if the values of
3923 the condition code registers will be inverted, with 0 indicating
3924 that the condition holds. */
3926 static bool
3927 mips_reversed_fp_cond (enum rtx_code *code)
3929 switch (*code)
3931 case NE:
3932 case LTGT:
3933 case ORDERED:
3934 *code = reverse_condition_maybe_unordered (*code);
3935 return true;
3937 default:
3938 return false;
3942 /* Convert a comparison into something that can be used in a branch or
3943 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3944 being compared and *CODE is the code used to compare them.
3946 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3947 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
3948 otherwise any standard branch condition can be used. The standard branch
3949 conditions are:
3951 - EQ or NE between two registers.
3952 - any comparison between a register and zero. */
3954 static void
3955 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3957 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3959 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3961 *op0 = cmp_operands[0];
3962 *op1 = cmp_operands[1];
3964 else if (*code == EQ || *code == NE)
3966 if (need_eq_ne_p)
3968 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3969 *op1 = const0_rtx;
3971 else
3973 *op0 = cmp_operands[0];
3974 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3977 else
3979 /* The comparison needs a separate scc instruction. Store the
3980 result of the scc in *OP0 and compare it against zero. */
3981 bool invert = false;
3982 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3983 mips_emit_int_order_test (*code, &invert, *op0,
3984 cmp_operands[0], cmp_operands[1]);
3985 *code = (invert ? EQ : NE);
3986 *op1 = const0_rtx;
3989 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3991 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3992 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3993 *code = NE;
3994 *op1 = const0_rtx;
3996 else
3998 enum rtx_code cmp_code;
4000 /* Floating-point tests use a separate C.cond.fmt comparison to
4001 set a condition code register. The branch or conditional move
4002 will then compare that register against zero.
4004 Set CMP_CODE to the code of the comparison instruction and
4005 *CODE to the code that the branch or move should use. */
4006 cmp_code = *code;
4007 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4008 *op0 = (ISA_HAS_8CC
4009 ? gen_reg_rtx (CCmode)
4010 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4011 *op1 = const0_rtx;
4012 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
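/* Editor's note (illustrative): for a double-precision "a < b" the code
   above emits a C.LT.D that sets a condition-code register, and the branch
   or conditional move then tests that register with NE against zero.
   "a != b" has no direct C.cond.fmt encoding, so mips_reversed_fp_cond
   flips it: we emit C.EQ.D and the branch tests the register with EQ
   instead.  */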
4016 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4017 Store the result in TARGET and return true if successful.
4019 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
4021 bool
4022 mips_expand_scc (enum rtx_code code, rtx target)
4024 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4025 return false;
4027 if (code == EQ || code == NE)
4029 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4030 mips_emit_binary (code, target, zie, const0_rtx);
4032 else
4033 mips_emit_int_order_test (code, 0, target,
4034 cmp_operands[0], cmp_operands[1]);
4035 return true;
4038 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4039 CODE and jump to OPERANDS[0] if the condition holds. */
4041 void
4042 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4044 rtx op0, op1, condition;
4046 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4047 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4048 emit_jump_insn (gen_condjump (condition, operands[0]));
4051 /* Implement:
4053 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4054 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4056 void
4057 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4058 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4060 rtx cmp_result;
4061 bool reversed_p;
4063 reversed_p = mips_reversed_fp_cond (&cond);
4064 cmp_result = gen_reg_rtx (CCV2mode);
4065 emit_insn (gen_scc_ps (cmp_result,
4066 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4067 if (reversed_p)
4068 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4069 cmp_result));
4070 else
4071 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4072 cmp_result));
4075 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4076 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4077 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4079 void
4080 mips_expand_conditional_move (rtx *operands)
4082 enum rtx_code code;
4083 rtx cond, op0, op1;
4085 code = GET_CODE (operands[1]);
4086 mips_emit_compare (&code, &op0, &op1, true);
4087 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1),
4088 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4089 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4090 operands[2], operands[3])));
4093 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4094 then trap if the condition holds. */
4096 void
4097 mips_expand_conditional_trap (enum rtx_code code)
4099 rtx op0, op1;
4100 enum machine_mode mode;
4102 /* MIPS conditional trap instructions don't have GT or LE flavors,
4103 so we must swap the operands and convert to LT and GE respectively. */
4104 switch (code)
4106 case GT:
4107 case LE:
4108 case GTU:
4109 case LEU:
4110 code = swap_condition (code);
4111 op0 = cmp_operands[1];
4112 op1 = cmp_operands[0];
4113 break;
4115 default:
4116 op0 = cmp_operands[0];
4117 op1 = cmp_operands[1];
4118 break;
4121 mode = GET_MODE (cmp_operands[0]);
4122 op0 = force_reg (mode, op0);
4123 if (!arith_operand (op1, mode))
4124 op1 = force_reg (mode, op1);
4126 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4127 gen_rtx_fmt_ee (code, mode, op0, op1),
4128 const0_rtx));
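/* Editor's note (illustrative): the MIPS trap instructions cover EQ, NE,
   LT, GE and their unsigned forms (TEQ, TNE, TLT, TGE, TLTU, TGEU) but not
   GT or LE, so "trap if a > b" is rewritten above as "trap if b < a" by
   swapping the operands along with the condition.  The immediate forms
   (e.g. TLTI) remain usable when the second operand is a small constant,
   which is why OP1 is only forced into a register when it fails
   arith_operand.  */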
4131 /* Initialize *CUM for a call to a function of type FNTYPE. */
4133 void
4134 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4136 memset (cum, 0, sizeof (*cum));
4137 cum->prototype = (fntype && prototype_p (fntype));
4138 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4141 /* Fill INFO with information about a single argument. CUM is the
4142 cumulative state for earlier arguments. MODE is the mode of this
4143 argument and TYPE is its type (if known). NAMED is true if this
4144 is a named (fixed) argument rather than a variable one. */
4146 static void
4147 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4148 enum machine_mode mode, tree type, int named)
4150 bool doubleword_aligned_p;
4151 unsigned int num_bytes, num_words, max_regs;
4153 /* Work out the size of the argument. */
4154 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4155 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4157 /* Decide whether it should go in a floating-point register, assuming
4158 one is free. Later code checks for availability.
4160 The checks against UNITS_PER_FPVALUE handle the soft-float and
4161 single-float cases. */
4162 switch (mips_abi)
4164 case ABI_EABI:
4165 /* The EABI conventions have traditionally been defined in terms
4166 of TYPE_MODE, regardless of the actual type. */
4167 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4168 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4169 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4170 break;
4172 case ABI_32:
4173 case ABI_O64:
4174 /* Only leading floating-point scalars are passed in
4175 floating-point registers. We also handle vector floats the same
4176 way, which is OK because they are not covered by the standard ABI.
4177 info->fpr_p = (!cum->gp_reg_found
4178 && cum->arg_number < 2
4179 && (type == 0
4180 || SCALAR_FLOAT_TYPE_P (type)
4181 || VECTOR_FLOAT_TYPE_P (type))
4182 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4183 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4184 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4185 break;
4187 case ABI_N32:
4188 case ABI_64:
4189 /* Scalar, complex and vector floating-point types are passed in
4190 floating-point registers, as long as this is a named rather
4191 than a variable argument. */
4192 info->fpr_p = (named
4193 && (type == 0 || FLOAT_TYPE_P (type))
4194 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4195 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4196 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4197 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4199 /* ??? According to the ABI documentation, the real and imaginary
4200 parts of complex floats should be passed in individual registers.
4201 The real and imaginary parts of stack arguments are supposed
4202 to be contiguous and there should be an extra word of padding
4203 at the end.
4205 This has two problems. First, it makes it impossible to use a
4206 single "void *" va_list type, since register and stack arguments
4207 are passed differently. (At the time of writing, MIPSpro cannot
4208 handle complex float varargs correctly.) Second, it's unclear
4209 what should happen when there is only one register free.
4211 For now, we assume that named complex floats should go into FPRs
4212 if there are two FPRs free, otherwise they should be passed in the
4213 same way as a struct containing two floats. */
4214 if (info->fpr_p
4215 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4216 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4218 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4219 info->fpr_p = false;
4220 else
4221 num_words = 2;
4223 break;
4225 default:
4226 gcc_unreachable ();
4229 /* See whether the argument has doubleword alignment. */
4230 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4232 /* Set REG_OFFSET to the register count we're interested in.
4233 The EABI allocates the floating-point registers separately,
4234 but the other ABIs allocate them like integer registers. */
4235 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4236 ? cum->num_fprs
4237 : cum->num_gprs);
4239 /* Advance to an even register if the argument is doubleword-aligned. */
4240 if (doubleword_aligned_p)
4241 info->reg_offset += info->reg_offset & 1;
4243 /* Work out the offset of a stack argument. */
4244 info->stack_offset = cum->stack_words;
4245 if (doubleword_aligned_p)
4246 info->stack_offset += info->stack_offset & 1;
4248 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4250 /* Partition the argument between registers and stack. */
4251 info->reg_words = MIN (num_words, max_regs);
4252 info->stack_words = num_words - info->reg_words;
4255 /* INFO describes a register argument that has the normal format for the
4256 argument's mode. Return the register it uses, assuming that FPRs are
4257 available if HARD_FLOAT_P. */
4259 static unsigned int
4260 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4262 if (!info->fpr_p || !hard_float_p)
4263 return GP_ARG_FIRST + info->reg_offset;
4264 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4265 /* In o32, the second argument is always passed in $f14
4266 for TARGET_DOUBLE_FLOAT, regardless of whether the
4267 first argument was a word or doubleword. */
4268 return FP_ARG_FIRST + 2;
4269 else
4270 return FP_ARG_FIRST + info->reg_offset;
4273 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4275 static bool
4276 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4278 return !TARGET_OLDABI;
4281 /* Implement FUNCTION_ARG. */
4284 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4285 tree type, int named)
4287 struct mips_arg_info info;
4289 /* We will be called with a mode of VOIDmode after the last argument
4290 has been seen. Whatever we return will be passed to the call expander.
4291 If we need a MIPS16 fp_code, return a REG with the code stored as
4292 the mode. */
4293 if (mode == VOIDmode)
4295 if (TARGET_MIPS16 && cum->fp_code != 0)
4296 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4297 else
4298 return NULL;
4301 mips_get_arg_info (&info, cum, mode, type, named);
4303 /* Return straight away if the whole argument is passed on the stack. */
4304 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4305 return NULL;
4307 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4308 contains a double in its entirety, then that 64-bit chunk is passed
4309 in a floating-point register. */
4310 if (TARGET_NEWABI
4311 && TARGET_HARD_FLOAT
4312 && named
4313 && type != 0
4314 && TREE_CODE (type) == RECORD_TYPE
4315 && TYPE_SIZE_UNIT (type)
4316 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4318 tree field;
4320 /* First check to see if there is any such field. */
4321 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4322 if (TREE_CODE (field) == FIELD_DECL
4323 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4324 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4325 && host_integerp (bit_position (field), 0)
4326 && int_bit_position (field) % BITS_PER_WORD == 0)
4327 break;
4329 if (field != 0)
4331 /* Now handle the special case by returning a PARALLEL
4332 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4333 chunks are passed in registers. */
4334 unsigned int i;
4335 HOST_WIDE_INT bitpos;
4336 rtx ret;
4338 /* assign_parms checks the mode of ENTRY_PARM, so we must
4339 use the actual mode here. */
4340 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4342 bitpos = 0;
4343 field = TYPE_FIELDS (type);
4344 for (i = 0; i < info.reg_words; i++)
4346 rtx reg;
4348 for (; field; field = TREE_CHAIN (field))
4349 if (TREE_CODE (field) == FIELD_DECL
4350 && int_bit_position (field) >= bitpos)
4351 break;
4353 if (field
4354 && int_bit_position (field) == bitpos
4355 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4356 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4357 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4358 else
4359 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4361 XVECEXP (ret, 0, i)
4362 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4363 GEN_INT (bitpos / BITS_PER_UNIT));
4365 bitpos += BITS_PER_WORD;
4367 return ret;
4371 /* Handle the n32/n64 conventions for passing complex floating-point
4372 arguments in FPR pairs. The real part goes in the lower register
4373 and the imaginary part goes in the upper register. */
4374 if (TARGET_NEWABI
4375 && info.fpr_p
4376 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4378 rtx real, imag;
4379 enum machine_mode inner;
4380 unsigned int regno;
4382 inner = GET_MODE_INNER (mode);
4383 regno = FP_ARG_FIRST + info.reg_offset;
4384 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4386 /* Real part in registers, imaginary part on stack. */
4387 gcc_assert (info.stack_words == info.reg_words);
4388 return gen_rtx_REG (inner, regno);
4390 else
4392 gcc_assert (info.stack_words == 0);
4393 real = gen_rtx_EXPR_LIST (VOIDmode,
4394 gen_rtx_REG (inner, regno),
4395 const0_rtx);
4396 imag = gen_rtx_EXPR_LIST (VOIDmode,
4397 gen_rtx_REG (inner,
4398 regno + info.reg_words / 2),
4399 GEN_INT (GET_MODE_SIZE (inner)));
4400 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4404 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
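/* Editor's note (illustrative): under n64, passing

     struct s { double d; long l; };

   as the first named argument goes through the PARALLEL path above: chunk 0
   (the double, word-aligned and exactly BITS_PER_WORD bits) is described as
   a DFmode reference to $f12 at byte offset 0, while chunk 1 (the long)
   uses the integer register for its slot, $5, at byte offset 8.  A
   structure whose doubles are not aligned on 64-bit boundaries skips the
   PARALLEL and falls through to the ordinary gen_rtx_REG case at the end
   of the function.  */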
4407 /* Implement FUNCTION_ARG_ADVANCE. */
4409 void
4410 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4411 tree type, int named)
4413 struct mips_arg_info info;
4415 mips_get_arg_info (&info, cum, mode, type, named);
4417 if (!info.fpr_p)
4418 cum->gp_reg_found = true;
4420 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4421 an explanation of what this code does. It assumes that we're using
4422 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4423 in FPRs. */
4424 if (cum->arg_number < 2 && info.fpr_p)
4425 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4427 /* Advance the register count. This has the effect of setting
4428 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4429 argument required us to skip the final GPR and pass the whole
4430 argument on the stack. */
4431 if (mips_abi != ABI_EABI || !info.fpr_p)
4432 cum->num_gprs = info.reg_offset + info.reg_words;
4433 else if (info.reg_words > 0)
4434 cum->num_fprs += MAX_FPRS_PER_FMT;
4436 /* Advance the stack word count. */
4437 if (info.stack_words > 0)
4438 cum->stack_words = info.stack_offset + info.stack_words;
4440 cum->arg_number++;
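/* Editor's note (illustrative): FP_CODE packs two bits per leading FP
   argument, 1 for SFmode and 2 for DFmode, with argument 0 in the low
   bits.  A function such as f (float, double) therefore ends up with
   fp_code = 1 | (2 << 2) = 9; mips_output_args_xfer later walks the same
   encoding two bits at a time ("f & 3") to emit the MTC1/MFC1-style
   transfers used by the MIPS16 stubs.  */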
4443 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4445 static int
4446 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4447 enum machine_mode mode, tree type, bool named)
4449 struct mips_arg_info info;
4451 mips_get_arg_info (&info, cum, mode, type, named);
4452 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4455 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4456 PARM_BOUNDARY bits of alignment, but will be given anything up
4457 to STACK_BOUNDARY bits if the type requires it. */
4460 mips_function_arg_boundary (enum machine_mode mode, tree type)
4462 unsigned int alignment;
4464 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4465 if (alignment < PARM_BOUNDARY)
4466 alignment = PARM_BOUNDARY;
4467 if (alignment > STACK_BOUNDARY)
4468 alignment = STACK_BOUNDARY;
4469 return alignment;
4472 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4473 upward rather than downward. In other words, return true if the
4474 first byte of the stack slot has useful data, false if the last
4475 byte does. */
4477 bool
4478 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4480 /* On little-endian targets, the first byte of every stack argument
4481 is passed in the first byte of the stack slot. */
4482 if (!BYTES_BIG_ENDIAN)
4483 return true;
4485 /* Otherwise, integral types are padded downward: the last byte of a
4486 stack argument is passed in the last byte of the stack slot. */
4487 if (type != 0
4488 ? (INTEGRAL_TYPE_P (type)
4489 || POINTER_TYPE_P (type)
4490 || FIXED_POINT_TYPE_P (type))
4491 : (SCALAR_INT_MODE_P (mode)
4492 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4493 return false;
4495 /* Big-endian o64 pads floating-point arguments downward. */
4496 if (mips_abi == ABI_O64)
4497 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4498 return false;
4500 /* Other types are padded upward for o32, o64, n32 and n64. */
4501 if (mips_abi != ABI_EABI)
4502 return true;
4504 /* Arguments smaller than a stack slot are padded downward. */
4505 if (mode != BLKmode)
4506 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4507 else
4508 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
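/* Editor's note (illustrative): on a big-endian o32 target a plain "char"
   argument is padded downward, so its value occupies the last
   (highest-addressed) byte of its 4-byte stack slot, whereas a small
   structure is padded upward and starts at the first byte of the slot.
   On little-endian targets everything is padded upward, which is why the
   function returns true immediately in that case.  */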
4511 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4512 if the least significant byte of the register has useful data. Return
4513 the opposite if the most significant byte does. */
4515 bool
4516 mips_pad_reg_upward (enum machine_mode mode, tree type)
4518 /* No shifting is required for floating-point arguments. */
4519 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4520 return !BYTES_BIG_ENDIAN;
4522 /* Otherwise, apply the same padding to register arguments as we do
4523 to stack arguments. */
4524 return mips_pad_arg_upward (mode, type);
4527 /* Return nonzero when an argument must be passed by reference. */
4529 static bool
4530 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4531 enum machine_mode mode, const_tree type,
4532 bool named ATTRIBUTE_UNUSED)
4534 if (mips_abi == ABI_EABI)
4536 int size;
4538 /* ??? How should SCmode be handled? */
4539 if (mode == DImode || mode == DFmode
4540 || mode == DQmode || mode == UDQmode
4541 || mode == DAmode || mode == UDAmode)
4542 return 0;
4544 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4545 return size == -1 || size > UNITS_PER_WORD;
4547 else
4549 /* If we have a variable-sized parameter, we have no choice. */
4550 return targetm.calls.must_pass_in_stack (mode, type);
4554 /* Implement TARGET_CALLEE_COPIES. */
4556 static bool
4557 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4558 enum machine_mode mode ATTRIBUTE_UNUSED,
4559 const_tree type ATTRIBUTE_UNUSED, bool named)
4561 return mips_abi == ABI_EABI && named;
4564 /* See whether VALTYPE is a record whose fields should be returned in
4565 floating-point registers. If so, return the number of fields and
4566 list them in FIELDS (which should have two elements). Return 0
4567 otherwise.
4569 For n32 & n64, a structure with one or two fields is returned in
4570 floating-point registers as long as every field has a floating-point
4571 type. */
4573 static int
4574 mips_fpr_return_fields (const_tree valtype, tree *fields)
4576 tree field;
4577 int i;
4579 if (!TARGET_NEWABI)
4580 return 0;
4582 if (TREE_CODE (valtype) != RECORD_TYPE)
4583 return 0;
4585 i = 0;
4586 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4588 if (TREE_CODE (field) != FIELD_DECL)
4589 continue;
4591 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4592 return 0;
4594 if (i == 2)
4595 return 0;
4597 fields[i++] = field;
4599 return i;
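/* Editor's note (illustrative): under n32/n64,

     struct complexf { float re, im; };   // two FP fields: $f0 and $f2
     struct wrapper  { double d; };       // one FP field:  $f0

   both satisfy the test above, whereas struct { float f; int i; } and any
   record with three or more fields make it return 0 and are handled as
   ordinary aggregates.  */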
4602 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4603 a value in the most significant part of $2/$3 if:
4605 - the target is big-endian;
4607 - the value has a structure or union type (we generalize this to
4608 cover aggregates from other languages too); and
4610 - the structure is not returned in floating-point registers. */
4612 static bool
4613 mips_return_in_msb (const_tree valtype)
4615 tree fields[2];
4617 return (TARGET_NEWABI
4618 && TARGET_BIG_ENDIAN
4619 && AGGREGATE_TYPE_P (valtype)
4620 && mips_fpr_return_fields (valtype, fields) == 0);
4623 /* Return true if the function return value MODE will get returned in a
4624 floating-point register. */
4626 static bool
4627 mips_return_mode_in_fpr_p (enum machine_mode mode)
4629 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4630 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4631 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4632 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4635 /* Return the representation of an FPR return register when the
4636 value being returned in FP_RETURN has mode VALUE_MODE and the
4637 return type itself has mode TYPE_MODE. On NewABI targets,
4638 the two modes may be different for structures like:
4640 struct __attribute__((packed)) foo { float f; }
4642 where we return the SFmode value of "f" in FP_RETURN, but where
4643 the structure itself has mode BLKmode. */
4645 static rtx
4646 mips_return_fpr_single (enum machine_mode type_mode,
4647 enum machine_mode value_mode)
4649 rtx x;
4651 x = gen_rtx_REG (value_mode, FP_RETURN);
4652 if (type_mode != value_mode)
4654 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4655 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4657 return x;
4660 /* Return a composite value in a pair of floating-point registers.
4661 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4662 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4663 complete value.
4665 For n32 & n64, $f0 always holds the first value and $f2 the second.
4666 Otherwise the values are packed together as closely as possible. */
4668 static rtx
4669 mips_return_fpr_pair (enum machine_mode mode,
4670 enum machine_mode mode1, HOST_WIDE_INT offset1,
4671 enum machine_mode mode2, HOST_WIDE_INT offset2)
4673 int inc;
4675 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4676 return gen_rtx_PARALLEL
4677 (mode,
4678 gen_rtvec (2,
4679 gen_rtx_EXPR_LIST (VOIDmode,
4680 gen_rtx_REG (mode1, FP_RETURN),
4681 GEN_INT (offset1)),
4682 gen_rtx_EXPR_LIST (VOIDmode,
4683 gen_rtx_REG (mode2, FP_RETURN + inc),
4684 GEN_INT (offset2))));
4688 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4689 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4690 VALTYPE is null and MODE is the mode of the return value. */
4693 mips_function_value (const_tree valtype, enum machine_mode mode)
4695 if (valtype)
4697 tree fields[2];
4698 int unsigned_p;
4700 mode = TYPE_MODE (valtype);
4701 unsigned_p = TYPE_UNSIGNED (valtype);
4703 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4704 we must promote the mode just as PROMOTE_MODE does. */
4705 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4707 /* Handle structures whose fields are returned in $f0/$f2. */
4708 switch (mips_fpr_return_fields (valtype, fields))
4710 case 1:
4711 return mips_return_fpr_single (mode,
4712 TYPE_MODE (TREE_TYPE (fields[0])));
4714 case 2:
4715 return mips_return_fpr_pair (mode,
4716 TYPE_MODE (TREE_TYPE (fields[0])),
4717 int_byte_position (fields[0]),
4718 TYPE_MODE (TREE_TYPE (fields[1])),
4719 int_byte_position (fields[1]));
4722 /* If a value is passed in the most significant part of a register, see
4723 whether we have to round the mode up to a whole number of words. */
4724 if (mips_return_in_msb (valtype))
4726 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4727 if (size % UNITS_PER_WORD != 0)
4729 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4730 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4734 /* For EABI, the class of return register depends entirely on MODE.
4735 For example, "struct { some_type x; }" and "union { some_type x; }"
4736 are returned in the same way as a bare "some_type" would be.
4737 Other ABIs only use FPRs for scalar, complex or vector types. */
4738 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4739 return gen_rtx_REG (mode, GP_RETURN);
4742 if (!TARGET_MIPS16)
4744 /* Handle long doubles for n32 & n64. */
4745 if (mode == TFmode)
4746 return mips_return_fpr_pair (mode,
4747 DImode, 0,
4748 DImode, GET_MODE_SIZE (mode) / 2);
4750 if (mips_return_mode_in_fpr_p (mode))
4752 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4753 return mips_return_fpr_pair (mode,
4754 GET_MODE_INNER (mode), 0,
4755 GET_MODE_INNER (mode),
4756 GET_MODE_SIZE (mode) / 2);
4757 else
4758 return gen_rtx_REG (mode, FP_RETURN);
4762 return gen_rtx_REG (mode, GP_RETURN);
4765 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4766 all BLKmode objects are returned in memory. Under the n32, n64
4767 and embedded ABIs, small structures are returned in a register.
4768 Objects with varying size must still be returned in memory, of
4769 course. */
4771 static bool
4772 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4774 return (TARGET_OLDABI
4775 ? TYPE_MODE (type) == BLKmode
4776 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
4779 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
4781 static void
4782 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4783 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4784 int no_rtl)
4786 CUMULATIVE_ARGS local_cum;
4787 int gp_saved, fp_saved;
4789 /* The caller has advanced CUM up to, but not beyond, the last named
4790 argument. Advance a local copy of CUM past the last "real" named
4791 argument, to find out how many registers are left over. */
4792 local_cum = *cum;
4793 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
4795 /* Find out how many registers we need to save. */
4796 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4797 fp_saved = (EABI_FLOAT_VARARGS_P
4798 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4799 : 0);
4801 if (!no_rtl)
4803 if (gp_saved > 0)
4805 rtx ptr, mem;
4807 ptr = plus_constant (virtual_incoming_args_rtx,
4808 REG_PARM_STACK_SPACE (cfun->decl)
4809 - gp_saved * UNITS_PER_WORD);
4810 mem = gen_frame_mem (BLKmode, ptr);
4811 set_mem_alias_set (mem, get_varargs_alias_set ());
4813 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4814 mem, gp_saved);
4816 if (fp_saved > 0)
4818 /* We can't use move_block_from_reg, because it will use
4819 the wrong mode. */
4820 enum machine_mode mode;
4821 int off, i;
4823 /* Set OFF to the offset from virtual_incoming_args_rtx of
4824 the first float register. The FP save area lies below
4825 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4826 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
4827 off -= fp_saved * UNITS_PER_FPREG;
4829 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4831 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4832 i += MAX_FPRS_PER_FMT)
4834 rtx ptr, mem;
4836 ptr = plus_constant (virtual_incoming_args_rtx, off);
4837 mem = gen_frame_mem (mode, ptr);
4838 set_mem_alias_set (mem, get_varargs_alias_set ());
4839 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4840 off += UNITS_PER_HWFPVALUE;
4844 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4845 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4846 + fp_saved * UNITS_PER_FPREG);
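/* Editor's note (illustrative): for EABI float varargs the save areas set
   up above sit immediately below the incoming argument area, roughly:

       higher addresses
       | overflow (stack) arguments |  <- virtual_incoming_args_rtx
       | GPR save area              |  gp_saved * UNITS_PER_WORD bytes
       | FPR save area              |  fp_saved * UNITS_PER_FPREG bytes,
       lower addresses                  aligned to UNITS_PER_FPVALUE

   mips_va_start below records the same boundaries in the va_list fields
   (GTOP and FTOP) together with downward-counting byte offsets.  */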
4849 /* Implement TARGET_BUILTIN_VA_LIST. */
4851 static tree
4852 mips_build_builtin_va_list (void)
4854 if (EABI_FLOAT_VARARGS_P)
4856 /* We keep three pointers and two offsets.
4858 Two pointers are to the overflow area, which starts at the CFA.
4859 One of these is constant, for addressing into the GPR save area
4860 below it. The other is advanced up the stack through the
4861 overflow region.
4863 The third pointer is to the bottom of the GPR save area.
4864 Since the FPR save area is just below it, we can address
4865 FPR slots off this pointer.
4867 We also keep two one-byte offsets, which are to be subtracted
4868 from the constant pointers to yield addresses in the GPR and
4869 FPR save areas. These are downcounted as float or non-float
4870 arguments are used, and when they get to zero, the argument
4871 must be obtained from the overflow region. */
4872 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4873 tree array, index;
4875 record = lang_hooks.types.make_type (RECORD_TYPE);
4877 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4878 ptr_type_node);
4879 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4880 ptr_type_node);
4881 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4882 ptr_type_node);
4883 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4884 unsigned_char_type_node);
4885 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4886 unsigned_char_type_node);
4887 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4888 warn on every user file. */
4889 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4890 array = build_array_type (unsigned_char_type_node,
4891 build_index_type (index));
4892 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4894 DECL_FIELD_CONTEXT (f_ovfl) = record;
4895 DECL_FIELD_CONTEXT (f_gtop) = record;
4896 DECL_FIELD_CONTEXT (f_ftop) = record;
4897 DECL_FIELD_CONTEXT (f_goff) = record;
4898 DECL_FIELD_CONTEXT (f_foff) = record;
4899 DECL_FIELD_CONTEXT (f_res) = record;
4901 TYPE_FIELDS (record) = f_ovfl;
4902 TREE_CHAIN (f_ovfl) = f_gtop;
4903 TREE_CHAIN (f_gtop) = f_ftop;
4904 TREE_CHAIN (f_ftop) = f_goff;
4905 TREE_CHAIN (f_goff) = f_foff;
4906 TREE_CHAIN (f_foff) = f_res;
4908 layout_type (record);
4909 return record;
4911 else if (TARGET_IRIX && TARGET_IRIX6)
4912 /* On IRIX 6, this type is 'char *'. */
4913 return build_pointer_type (char_type_node);
4914 else
4915 /* Otherwise, we use 'void *'. */
4916 return ptr_type_node;
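/* Editor's note (illustrative): at the C level the EABI va_list built above
   is equivalent to something like

     struct __mips_eabi_va_list {
       void *__overflow_argptr;     // next stack (overflow) argument
       void *__gpr_top;             // top of the GPR save area
       void *__fpr_top;             // top of the FPR save area
       unsigned char __gpr_offset;  // bytes of GPR save area still unused
       unsigned char __fpr_offset;  // bytes of FPR save area still unused
       unsigned char __reserved[GET_MODE_SIZE (ptr_mode) - 2];  // padding
     };

   (the tag name is invented for illustration; only the field names, types
   and layout come from the code above).  */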
4919 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
4921 static void
4922 mips_va_start (tree valist, rtx nextarg)
4924 if (EABI_FLOAT_VARARGS_P)
4926 const CUMULATIVE_ARGS *cum;
4927 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4928 tree ovfl, gtop, ftop, goff, foff;
4929 tree t;
4930 int gpr_save_area_size;
4931 int fpr_save_area_size;
4932 int fpr_offset;
4934 cum = &crtl->args.info;
4935 gpr_save_area_size
4936 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4937 fpr_save_area_size
4938 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4940 f_ovfl = TYPE_FIELDS (va_list_type_node);
4941 f_gtop = TREE_CHAIN (f_ovfl);
4942 f_ftop = TREE_CHAIN (f_gtop);
4943 f_goff = TREE_CHAIN (f_ftop);
4944 f_foff = TREE_CHAIN (f_goff);
4946 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4947 NULL_TREE);
4948 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4949 NULL_TREE);
4950 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4951 NULL_TREE);
4952 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4953 NULL_TREE);
4954 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4955 NULL_TREE);
4957 /* Emit code to initialize OVFL, which points to the next varargs
4958 stack argument. CUM->STACK_WORDS gives the number of stack
4959 words used by named arguments. */
4960 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4961 if (cum->stack_words > 0)
4962 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4963 size_int (cum->stack_words * UNITS_PER_WORD));
4964 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4965 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4967 /* Emit code to initialize GTOP, the top of the GPR save area. */
4968 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4969 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4970 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4972 /* Emit code to initialize FTOP, the top of the FPR save area.
4973 This address is GPR_SAVE_AREA_SIZE bytes below GTOP, rounded
4974 down to the next fp-aligned boundary. */
4975 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4976 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4977 fpr_offset &= -UNITS_PER_FPVALUE;
4978 if (fpr_offset)
4979 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4980 size_int (-fpr_offset));
4981 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4982 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4984 /* Emit code to initialize GOFF, the offset from GTOP of the
4985 next GPR argument. */
4986 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4987 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
4988 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4990 /* Likewise emit code to initialize FOFF, the offset from FTOP
4991 of the next FPR argument. */
4992 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4993 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
4994 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4996 else
4998 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4999 std_expand_builtin_va_start (valist, nextarg);
5003 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5005 static tree
5006 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5007 gimple_seq *post_p)
5009 tree addr;
5010 bool indirect_p;
5012 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5013 if (indirect_p)
5014 type = build_pointer_type (type);
5016 if (!EABI_FLOAT_VARARGS_P)
5017 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5018 else
5020 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5021 tree ovfl, top, off, align;
5022 HOST_WIDE_INT size, rsize, osize;
5023 tree t, u;
5025 f_ovfl = TYPE_FIELDS (va_list_type_node);
5026 f_gtop = TREE_CHAIN (f_ovfl);
5027 f_ftop = TREE_CHAIN (f_gtop);
5028 f_goff = TREE_CHAIN (f_ftop);
5029 f_foff = TREE_CHAIN (f_goff);
5031 /* Let:
5033 TOP be the top of the GPR or FPR save area;
5034 OFF be the offset from TOP of the next register;
5035 ADDR_RTX be the address of the argument;
5036 SIZE be the number of bytes in the argument type;
5037 RSIZE be the number of bytes used to store the argument
5038 when it's in the register save area; and
5039 OSIZE be the number of bytes used to store it when it's
5040 in the stack overflow area.
5042 The code we want is:
5044 1: off &= -rsize; // round down
5045 2: if (off != 0)
5046 3: {
5047 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5048 5: off -= rsize;
5049 6: }
5050 7: else
5051 8: {
5052 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5053 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5054 11: ovfl += osize;
5055 14: }
5057 [1] and [9] can sometimes be optimized away. */
5059 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5060 NULL_TREE);
5061 size = int_size_in_bytes (type);
5063 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5064 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5066 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5067 NULL_TREE);
5068 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5069 NULL_TREE);
5071 /* When va_start saves FPR arguments to the stack, each slot
5072 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5073 argument's precision. */
5074 rsize = UNITS_PER_HWFPVALUE;
5076 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5077 (= PARM_BOUNDARY bits). This can be different from RSIZE
5078 in two cases:
5080 (1) On 32-bit targets when TYPE is a structure such as:
5082 struct s { float f; };
5084 Such structures are passed in paired FPRs, so RSIZE
5085 will be 8 bytes. However, the structure only takes
5086 up 4 bytes of memory, so OSIZE will only be 4.
5088 (2) In combinations such as -mgp64 -msingle-float
5089 -fshort-double. Doubles passed in registers will then take
5090 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5091 stack take up UNITS_PER_WORD bytes. */
5092 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5094 else
5096 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5097 NULL_TREE);
5098 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5099 NULL_TREE);
5100 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5101 if (rsize > UNITS_PER_WORD)
5103 /* [1] Emit code for: off &= -rsize. */
5104 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5105 build_int_cst (NULL_TREE, -rsize));
5106 gimplify_assign (off, t, pre_p);
5108 osize = rsize;
5111 /* [2] Emit code to branch if off == 0. */
5112 t = build2 (NE_EXPR, boolean_type_node, off,
5113 build_int_cst (TREE_TYPE (off), 0));
5114 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5116 /* [5] Emit code for: off -= rsize. We do this as a form of
5117 post-decrement not available to C. */
5118 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5119 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5121 /* [4] Emit code for:
5122 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5123 t = fold_convert (sizetype, t);
5124 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5125 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5126 if (BYTES_BIG_ENDIAN && rsize > size)
5128 u = size_int (rsize - size);
5129 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5131 COND_EXPR_THEN (addr) = t;
5133 if (osize > UNITS_PER_WORD)
5135 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5136 u = size_int (osize - 1);
5137 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5138 t = fold_convert (sizetype, t);
5139 u = size_int (-osize);
5140 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5141 t = fold_convert (TREE_TYPE (ovfl), t);
5142 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5144 else
5145 align = NULL;
5147 /* [10, 11] Emit code for:
5148 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5149 ovfl += osize. */
5150 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5151 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5152 if (BYTES_BIG_ENDIAN && osize > size)
5154 u = size_int (osize - size);
5155 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5158 /* String [9] and [10, 11] together. */
5159 if (align)
5160 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5161 COND_EXPR_ELSE (addr) = t;
5163 addr = fold_convert (build_pointer_type (type), addr);
5164 addr = build_va_arg_indirect_ref (addr);
5167 if (indirect_p)
5168 addr = build_va_arg_indirect_ref (addr);
5170 return addr;
5173 /* A chained list of functions for which mips16_build_call_stub has already
5174 generated a stub. NAME is the name of the function and FP_RET_P is true
5175 if the function returns a value in floating-point registers. */
5176 struct mips16_stub {
5177 struct mips16_stub *next;
5178 char *name;
5179 bool fp_ret_p;
5181 static struct mips16_stub *mips16_stubs;
5183 /* Return the two-character string that identifies floating-point
5184 return mode MODE in the name of a MIPS16 function stub. */
5186 static const char *
5187 mips16_call_stub_mode_suffix (enum machine_mode mode)
5189 if (mode == SFmode)
5190 return "sf";
5191 else if (mode == DFmode)
5192 return "df";
5193 else if (mode == SCmode)
5194 return "sc";
5195 else if (mode == DCmode)
5196 return "dc";
5197 else if (mode == V2SFmode)
5198 return "df";
5199 else
5200 gcc_unreachable ();
5203 /* Write instructions to move a 32-bit value between general register
5204 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5205 from GPREG to FPREG and 'f' to move in the opposite direction. */
5207 static void
5208 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5210 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5211 reg_names[gpreg], reg_names[fpreg]);
5214 /* Likewise for 64-bit values. */
5216 static void
5217 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5219 if (TARGET_64BIT)
5220 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5221 reg_names[gpreg], reg_names[fpreg]);
5222 else if (TARGET_FLOAT64)
5224 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5225 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5226 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5227 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5229 else
5231 /* Move the least-significant word. */
5232 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5233 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5234 /* ...then the most significant word. */
5235 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5236 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
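/* Editor's note (illustrative): on a big-endian o32 target without 64-bit
   FPRs, transferring the double held in $4/$5 into $f12/$f13 with
   DIRECTION 't' emits

       mtc1    $5,$f12         # least-significant word
       mtc1    $4,$f13         # most-significant word

   since a big-endian GPR pair keeps the high word in the even register,
   while the FPR pair keeps the low word in the even register.  */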
5240 /* Write out code to move floating-point arguments into or out of
5241 general registers. FP_CODE is the code describing which arguments
5242 are present (see the comment above the definition of CUMULATIVE_ARGS
5243 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5245 static void
5246 mips_output_args_xfer (int fp_code, char direction)
5248 unsigned int gparg, fparg, f;
5249 CUMULATIVE_ARGS cum;
5251 /* This code only works for o32 and o64. */
5252 gcc_assert (TARGET_OLDABI);
5254 mips_init_cumulative_args (&cum, NULL);
5256 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5258 enum machine_mode mode;
5259 struct mips_arg_info info;
5261 if ((f & 3) == 1)
5262 mode = SFmode;
5263 else if ((f & 3) == 2)
5264 mode = DFmode;
5265 else
5266 gcc_unreachable ();
5268 mips_get_arg_info (&info, &cum, mode, NULL, true);
5269 gparg = mips_arg_regno (&info, false);
5270 fparg = mips_arg_regno (&info, true);
5272 if (mode == SFmode)
5273 mips_output_32bit_xfer (direction, gparg, fparg);
5274 else
5275 mips_output_64bit_xfer (direction, gparg, fparg);
5277 mips_function_arg_advance (&cum, mode, NULL, true);
5281 /* Write a MIPS16 stub for the current function. This stub is used
5282 for functions which take arguments in the floating-point registers.
5283 It is normal-mode code that moves the floating-point arguments
5284 into the general registers and then jumps to the MIPS16 code. */
5286 static void
5287 mips16_build_function_stub (void)
5289 const char *fnname, *separator;
5290 char *secname, *stubname;
5291 tree stubdecl;
5292 unsigned int f;
5294 /* Create the name of the stub, and its unique section. */
5295 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5296 fnname = targetm.strip_name_encoding (fnname);
5297 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5298 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5300 /* Build a decl for the stub. */
5301 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5302 build_function_type (void_type_node, NULL_TREE));
5303 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5304 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5306 /* Output a comment. */
5307 fprintf (asm_out_file, "\t# Stub function for %s (",
5308 current_function_name ());
5309 separator = "";
5310 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5312 fprintf (asm_out_file, "%s%s", separator,
5313 (f & 3) == 1 ? "float" : "double");
5314 separator = ", ";
5316 fprintf (asm_out_file, ")\n");
5318 /* Write the preamble leading up to the function declaration. */
5319 fprintf (asm_out_file, "\t.set\tnomips16\n");
5320 switch_to_section (function_section (stubdecl));
5321 ASM_OUTPUT_ALIGN (asm_out_file,
5322 floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5324 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5325 within a .ent, and we cannot emit another .ent. */
5326 if (!FUNCTION_NAME_ALREADY_DECLARED)
5328 fputs ("\t.ent\t", asm_out_file);
5329 assemble_name (asm_out_file, stubname);
5330 fputs ("\n", asm_out_file);
5333 /* Start the definition proper. */
5334 assemble_name (asm_out_file, stubname);
5335 fputs (":\n", asm_out_file);
5337 /* Load the address of the MIPS16 function into $at. Do this first so
5338 that targets with coprocessor interlocks can use an MFC1 to fill the
5339 delay slot. */
5340 fprintf (asm_out_file, "\t.set\tnoat\n");
5341 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5342 assemble_name (asm_out_file, fnname);
5343 fprintf (asm_out_file, "\n");
5345 /* Move the arguments from floating-point registers to general registers. */
5346 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5348 /* Jump to the MIPS16 function. */
5349 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5350 fprintf (asm_out_file, "\t.set\tat\n");
5352 if (!FUNCTION_NAME_ALREADY_DECLARED)
5354 fputs ("\t.end\t", asm_out_file);
5355 assemble_name (asm_out_file, stubname);
5356 fputs ("\n", asm_out_file);
5359 switch_to_section (function_section (current_function_decl));
5362 /* The current function is a MIPS16 function that returns a value in an FPR.
5363 Copy the return value from its soft-float to its hard-float location.
5364 libgcc2 has special non-MIPS16 helper functions for each case. */
5366 static void
5367 mips16_copy_fpr_return_value (void)
5369 rtx fn, insn, arg, call;
5370 tree id, return_type;
5371 enum machine_mode return_mode;
5373 return_type = DECL_RESULT (current_function_decl);
5374 return_mode = DECL_MODE (return_type);
5376 id = get_identifier (ACONCAT (("__mips16_ret_",
5377 mips16_call_stub_mode_suffix (return_mode),
5378 NULL)));
5379 fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5380 arg = gen_rtx_REG (return_mode, GP_RETURN);
5381 call = gen_call_value_internal (arg, fn, const0_rtx);
5382 insn = mips_emit_call_insn (call, false);
5383 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
5386 /* Consider building a stub for a MIPS16 call to function FN.
5387 RETVAL is the location of the return value, or null if this is
5388 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5389 arguments and FP_CODE is the code built by mips_function_arg;
5390 see the comment above CUMULATIVE_ARGS for details.
5392 If a stub was needed, emit the call and return the call insn itself.
5393 Return null otherwise.
5395 A stub is needed for calls to functions that, in normal mode,
5396 receive arguments in FPRs or return values in FPRs. The stub
5397 copies the arguments from their soft-float positions to their
5398 hard-float positions, calls the real function, then copies the
5399 return value from its hard-float position to its soft-float
5400 position.
5402 We emit a JAL to FN even when FN might need a stub. If FN turns out
5403 to be a non-MIPS16 function, the linker automatically redirects
5404 the JAL to the stub, otherwise the JAL continues to call FN directly. */
5406 static rtx
5407 mips16_build_call_stub (rtx retval, rtx fn, rtx args_size, int fp_code)
5409 const char *fnname;
5410 bool fp_ret_p;
5411 struct mips16_stub *l;
5412 rtx insn;
5414 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5415 we were invoked with the -msoft-float option. */
5416 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5417 return NULL_RTX;
5419 /* Figure out whether the value might come back in a floating-point
5420 register. */
5421 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5423 /* We don't need to do anything if there were no floating-point
5424 arguments and the value will not be returned in a floating-point
5425 register. */
5426 if (fp_code == 0 && !fp_ret_p)
5427 return NULL_RTX;
5429 /* We don't need to do anything if this is a call to a special
5430 MIPS16 support function. */
5431 if (GET_CODE (fn) == SYMBOL_REF
5432 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5433 return NULL_RTX;
5435 /* This code will only work for the o32 and o64 ABIs. The other ABIs
5436 require more sophisticated support. */
5437 gcc_assert (TARGET_OLDABI);
5439 /* If we're calling via a function pointer, use one of the magic
5440 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5441 Each stub expects the function address to arrive in register $2. */
5442 if (GET_CODE (fn) != SYMBOL_REF)
5444 char buf[30];
5445 tree id;
5446 rtx stub_fn, insn;
5448 /* Create a SYMBOL_REF for the libgcc.a function. */
5449 if (fp_ret_p)
5450 sprintf (buf, "__mips16_call_stub_%s_%d",
5451 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5452 fp_code);
5453 else
5454 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5455 id = get_identifier (buf);
5456 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5458 /* Load the target function into $2. */
5459 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5461 /* Emit the call. */
5462 if (retval == NULL_RTX)
5463 insn = gen_call_internal (stub_fn, args_size);
5464 else
5465 insn = gen_call_value_internal (retval, stub_fn, args_size);
5466 insn = mips_emit_call_insn (insn, false);
5468 /* Tell GCC that this call does indeed use the value of $2. */
5469 CALL_INSN_FUNCTION_USAGE (insn) =
5470 gen_rtx_EXPR_LIST (VOIDmode,
5471 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5472 CALL_INSN_FUNCTION_USAGE (insn));
5474 /* If we are handling a floating-point return value, we need to
5475 save $18 in the function prologue. Putting a note on the
5476 call will mean that df_regs_ever_live_p ($18) will be true if the
5477 call is not eliminated, and we can check that in the prologue
5478 code. */
5479 if (fp_ret_p)
5480 CALL_INSN_FUNCTION_USAGE (insn) =
5481 gen_rtx_EXPR_LIST (VOIDmode,
5482 gen_rtx_USE (VOIDmode,
5483 gen_rtx_REG (word_mode, 18)),
5484 CALL_INSN_FUNCTION_USAGE (insn));
5486 return insn;
5489 /* We know the function we are going to call. If we have already
5490 built a stub, we don't need to do anything further. */
5491 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5492 for (l = mips16_stubs; l != NULL; l = l->next)
5493 if (strcmp (l->name, fnname) == 0)
5494 break;
5496 if (l == NULL)
5498 const char *separator;
5499 char *secname, *stubname;
5500 tree stubid, stubdecl;
5501 unsigned int f;
5503 /* If the function does not return in FPRs, the special stub
5504 section is named
5505 .mips16.call.FNNAME
5507 If the function does return in FPRs, the stub section is named
5508 .mips16.call.fp.FNNAME
5510 Build a decl for the stub. */
5511 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5512 fnname, NULL));
5513 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5514 fnname, NULL));
5515 stubid = get_identifier (stubname);
5516 stubdecl = build_decl (FUNCTION_DECL, stubid,
5517 build_function_type (void_type_node, NULL_TREE));
5518 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5519 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5520 void_type_node);
5522 /* Output a comment. */
5523 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5524 (fp_ret_p
5525 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5526 : ""),
5527 fnname);
5528 separator = "";
5529 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5531 fprintf (asm_out_file, "%s%s", separator,
5532 (f & 3) == 1 ? "float" : "double");
5533 separator = ", ";
5535 fprintf (asm_out_file, ")\n");
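/* A worked example of the loop above: FP_CODE is decoded two bits at a
   time, low-order pair first, with 1 meaning "float" and any other
   nonzero pair meaning "double".  Assuming the first argument occupies
   the low-order pair, a (float, double) argument list is encoded as
   1 | (2 << 2) == 9 and the comment printed here ends "(float, double)".  */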
5537 /* Write the preamble leading up to the function declaration. */
5538 fprintf (asm_out_file, "\t.set\tnomips16\n");
5539 assemble_start_function (stubdecl, stubname);
5541 if (!FUNCTION_NAME_ALREADY_DECLARED)
5543 fputs ("\t.ent\t", asm_out_file);
5544 assemble_name (asm_out_file, stubname);
5545 fputs ("\n", asm_out_file);
5547 assemble_name (asm_out_file, stubname);
5548 fputs (":\n", asm_out_file);
5551 if (!fp_ret_p)
5553 /* Load the address of the MIPS16 function into $at. Do this
5554 first so that targets with coprocessor interlocks can use
5555 an MFC1 to fill the delay slot. */
5556 fprintf (asm_out_file, "\t.set\tnoat\n");
5557 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5558 fnname);
5561 /* Move the arguments from general registers to floating-point
5562 registers. */
5563 mips_output_args_xfer (fp_code, 't');
5565 if (!fp_ret_p)
5567 /* Jump to the previously-loaded address. */
5568 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5569 fprintf (asm_out_file, "\t.set\tat\n");
5571 else
5573 /* Save the return address in $18 and call the non-MIPS16 function.
5574 The stub's caller knows that $18 might be clobbered, even though
5575 $18 is usually a call-saved register. */
5576 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5577 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5578 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5580 /* Move the result from floating-point registers to
5581 general registers. */
5582 switch (GET_MODE (retval))
5584 case SCmode:
5585 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5586 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5587 /* Fall through. */
5588 case SFmode:
5589 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5590 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5592 /* On 64-bit targets, complex floats are returned in
5593 a single GPR, such that "sd" on a suitably-aligned
5594 target would store the value correctly. */
5595 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5596 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5597 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5598 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5599 reg_names[GP_RETURN],
5600 reg_names[GP_RETURN],
5601 reg_names[GP_RETURN + 1]);
5603 break;
5605 case DCmode:
5606 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5607 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5608 /* Fall through. */
5609 case DFmode:
5610 case V2SFmode:
5611 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5612 break;
5614 default:
5615 gcc_unreachable ();
5617 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
5620 #ifdef ASM_DECLARE_FUNCTION_SIZE
5621 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5622 #endif
5624 if (!FUNCTION_NAME_ALREADY_DECLARED)
5626 fputs ("\t.end\t", asm_out_file);
5627 assemble_name (asm_out_file, stubname);
5628 fputs ("\n", asm_out_file);
5631 /* Record this stub. */
5632 l = XNEW (struct mips16_stub);
5633 l->name = xstrdup (fnname);
5634 l->fp_ret_p = fp_ret_p;
5635 l->next = mips16_stubs;
5636 mips16_stubs = l;
5639 /* If we expect a floating-point return value, but we've built a
5640 stub which does not expect one, then we're in trouble. We can't
5641 use the existing stub, because it won't handle the floating-point
5642 value. We can't build a new stub, because the linker won't know
5643 which stub to use for the various calls in this object file.
5644 Fortunately, this case is illegal, since it means that a function
5645 was declared in two different ways in a single compilation. */
5646 if (fp_ret_p && !l->fp_ret_p)
5647 error ("cannot handle inconsistent calls to %qs", fnname);
5649 if (retval == NULL_RTX)
5650 insn = gen_call_internal_direct (fn, args_size);
5651 else
5652 insn = gen_call_value_internal_direct (retval, fn, args_size);
5653 insn = mips_emit_call_insn (insn, false);
5655 /* If we are calling a stub which handles a floating-point return
5656 value, we need to arrange to save $18 in the prologue. We do this
5657 by marking the function call as using the register. The prologue
5658 will later see that it is used, and emit code to save it. */
5659 if (fp_ret_p)
5660 CALL_INSN_FUNCTION_USAGE (insn) =
5661 gen_rtx_EXPR_LIST (VOIDmode,
5662 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5663 CALL_INSN_FUNCTION_USAGE (insn));
5665 return insn;
5668 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5670 static bool
5671 mips_ok_for_lazy_binding_p (rtx x)
5673 return (TARGET_USE_GOT
5674 && GET_CODE (x) == SYMBOL_REF
5675 && !mips_symbol_binds_local_p (x));
5678 /* Load function address ADDR into register DEST. SIBCALL_P is true
5679 if the address is needed for a sibling call. Return true if we
5680 used an explicit lazy-binding sequence. */
5682 static bool
5683 mips_load_call_address (rtx dest, rtx addr, bool sibcall_p)
5685 /* If we're generating PIC, and this call is to a global function,
5686 try to allow its address to be resolved lazily. This isn't
5687 possible for sibcalls when $gp is call-saved because the value
5688 of $gp on entry to the stub would be our caller's gp, not ours. */
5689 if (TARGET_EXPLICIT_RELOCS
5690 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5691 && mips_ok_for_lazy_binding_p (addr))
5693 rtx high, lo_sum_symbol;
5695 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5696 addr, SYMBOL_GOTOFF_CALL);
5697 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5698 if (Pmode == SImode)
5699 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5700 else
5701 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5702 return true;
5704 else
5706 mips_emit_move (dest, addr);
5707 return false;
5711 /* Expand a "call", "sibcall", "call_value" or "sibcall_value" instruction.
5712 RESULT is where the result will go (null for "call"s and "sibcall"s),
5713 ADDR is the address of the function, ARGS_SIZE is the size of the
5714 arguments and AUX is the value passed to us by mips_function_arg.
5715 SIBCALL_P is true if we are expanding a sibling call, false if we're
5716 expanding a normal call.
5718 Return the call itself. */
5721 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, bool sibcall_p)
5723 rtx orig_addr, pattern, insn;
5724 bool lazy_p;
5726 orig_addr = addr;
5727 lazy_p = false;
5728 if (!call_insn_operand (addr, VOIDmode))
5730 addr = gen_reg_rtx (Pmode);
5731 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5734 insn = mips16_build_call_stub (result, addr, args_size,
5735 aux == 0 ? 0 : (int) GET_MODE (aux));
5736 if (insn)
5738 gcc_assert (!sibcall_p && !lazy_p);
5739 return insn;
5742 if (result == 0)
5743 pattern = (sibcall_p
5744 ? gen_sibcall_internal (addr, args_size)
5745 : gen_call_internal (addr, args_size));
5746 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5748 /* Handle return values created by mips_return_fpr_pair. */
5749 rtx reg1, reg2;
5751 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5752 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5753 pattern =
5754 (sibcall_p
5755 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5756 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5758 else
5760 /* Handle return values created by mips_return_fpr_single. */
5761 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
5762 result = XEXP (XVECEXP (result, 0, 0), 0);
5763 pattern = (sibcall_p
5764 ? gen_sibcall_value_internal (result, addr, args_size)
5765 : gen_call_value_internal (result, addr, args_size));
5768 return mips_emit_call_insn (pattern, lazy_p);
5771 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5773 static bool
5774 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5776 if (!TARGET_SIBCALLS)
5777 return false;
5779 /* We can't do a sibcall if the called function is a MIPS16 function
5780 because there is no direct "jx" instruction equivalent to "jalx" to
5781 switch the ISA mode. We only care about cases where the sibling
5782 and normal calls would both be direct. */
5783 if (mips_use_mips16_mode_p (decl)
5784 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5785 return false;
5787 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
5788 functions could be MIPS16 ones unless an attribute explicitly tells
5789 us otherwise. */
5790 if (TARGET_INTERLINK_MIPS16
5791 && decl
5792 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
5793 && !mips_nomips16_decl_p (decl)
5794 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5795 return false;
5797 /* Otherwise OK. */
5798 return true;
5801 /* Emit code to move general operand SRC into condition-code
5802 register DEST given that SCRATCH is a scratch TFmode FPR.
5803 The sequence is:
5805 FP1 = SRC
5806 FP2 = 0.0f
5807 DEST = FP2 < FP1
5809 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
5811 void
5812 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
5814 rtx fp1, fp2;
5816 /* Change the source to SFmode. */
5817 if (MEM_P (src))
5818 src = adjust_address (src, SFmode, 0);
5819 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5820 src = gen_rtx_REG (SFmode, true_regnum (src));
5822 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5823 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5825 mips_emit_move (copy_rtx (fp1), src);
5826 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5827 emit_insn (gen_slt_sf (dest, fp2, fp1));
5830 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5831 Assume that the areas do not overlap. */
5833 static void
5834 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5836 HOST_WIDE_INT offset, delta;
5837 unsigned HOST_WIDE_INT bits;
5838 int i;
5839 enum machine_mode mode;
5840 rtx *regs;
5842 /* Work out how many bits to move at a time. If both operands have
5843 half-word alignment, it is usually better to move in half words.
5844 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5845 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5846 Otherwise move word-sized chunks. */
5847 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5848 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5849 bits = BITS_PER_WORD / 2;
5850 else
5851 bits = BITS_PER_WORD;
5853 mode = mode_for_size (bits, MODE_INT, 0);
5854 delta = bits / BITS_PER_UNIT;
5856 /* Allocate a buffer for the temporary registers. */
5857 regs = XALLOCAVEC (rtx, length / delta);
5859 /* Load as many BITS-sized chunks as possible. Use a normal load if
5860 the source has enough alignment, otherwise use left/right pairs. */
5861 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5863 regs[i] = gen_reg_rtx (mode);
5864 if (MEM_ALIGN (src) >= bits)
5865 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5866 else
5868 rtx part = adjust_address (src, BLKmode, offset);
5869 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
5870 gcc_unreachable ();
5874 /* Copy the chunks to the destination. */
5875 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5876 if (MEM_ALIGN (dest) >= bits)
5877 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5878 else
5880 rtx part = adjust_address (dest, BLKmode, offset);
5881 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
5882 gcc_unreachable ();
5885 /* Mop up any left-over bytes. */
5886 if (offset < length)
5888 src = adjust_address (src, BLKmode, offset);
5889 dest = adjust_address (dest, BLKmode, offset);
5890 move_by_pieces (dest, src, length - offset,
5891 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
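/* A worked example: copying 11 bytes between word-aligned buffers on a
   32-bit target gives BITS == 32 and DELTA == 4, so the loops above emit
   word-sized load/store pairs for offsets 0 and 4 and leave the remaining
   3 bytes to move_by_pieces.  If either buffer only has half-word
   alignment, BITS drops to 16 and the copy uses half-word accesses
   instead.  */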
5895 /* Helper function for doing a loop-based block operation on memory
5896 reference MEM. Each iteration of the loop will operate on LENGTH
5897 bytes of MEM.
5899 Create a new base register for use within the loop and point it to
5900 the start of MEM. Create a new memory reference that uses this
5901 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5903 static void
5904 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5905 rtx *loop_reg, rtx *loop_mem)
5907 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5909 /* Although the new mem does not refer to a known location,
5910 it does keep up to LENGTH bytes of alignment. */
5911 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5912 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5915 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
5916 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
5917 the memory regions do not overlap. */
5919 static void
5920 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
5921 HOST_WIDE_INT bytes_per_iter)
5923 rtx label, src_reg, dest_reg, final_src;
5924 HOST_WIDE_INT leftover;
5926 leftover = length % bytes_per_iter;
5927 length -= leftover;
5929 /* Create registers and memory references for use within the loop. */
5930 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
5931 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
5933 /* Calculate the value that SRC_REG should have after the last iteration
5934 of the loop. */
5935 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5936 0, 0, OPTAB_WIDEN);
5938 /* Emit the start of the loop. */
5939 label = gen_label_rtx ();
5940 emit_label (label);
5942 /* Emit the loop body. */
5943 mips_block_move_straight (dest, src, bytes_per_iter);
5945 /* Move on to the next block. */
5946 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
5947 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
5949 /* Emit the loop condition. */
5950 if (Pmode == DImode)
5951 emit_insn (gen_cmpdi (src_reg, final_src));
5952 else
5953 emit_insn (gen_cmpsi (src_reg, final_src));
5954 emit_jump_insn (gen_bne (label));
5956 /* Mop up any left-over bytes. */
5957 if (leftover)
5958 mips_block_move_straight (dest, src, leftover);
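/* A worked example: with LENGTH == 100 and a hypothetical BYTES_PER_ITER
   of 16, LEFTOVER is 100 % 16 == 4, so the loop above copies 96 bytes in
   six iterations (advancing SRC_REG and DEST_REG by 16 each time) and the
   final mips_block_move_straight call mops up the last 4 bytes.  */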
5961 /* Expand a movmemsi instruction, which copies LENGTH bytes from
5962 memory reference SRC to memory reference DEST. */
5964 bool
5965 mips_expand_block_move (rtx dest, rtx src, rtx length)
5967 if (GET_CODE (length) == CONST_INT)
5969 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
5971 mips_block_move_straight (dest, src, INTVAL (length));
5972 return true;
5974 else if (optimize)
5976 mips_block_move_loop (dest, src, INTVAL (length),
5977 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
5978 return true;
5981 return false;
5984 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5986 void
5987 mips_expand_synci_loop (rtx begin, rtx end)
5989 rtx inc, label, cmp, cmp_result;
5991 /* Load INC with the cache line size (rdhwr INC,$1). */
5992 inc = gen_reg_rtx (SImode);
5993 emit_insn (gen_rdhwr (inc, const1_rtx));
5995 /* Loop back to here. */
5996 label = gen_label_rtx ();
5997 emit_label (label);
5999 emit_insn (gen_synci (begin));
6001 cmp = mips_force_binary (Pmode, GTU, begin, end);
6003 mips_emit_binary (PLUS, begin, begin, inc);
6005 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6006 emit_jump_insn (gen_condjump (cmp_result, label));
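/* The loop emitted above behaves roughly like the following C sketch,
   where INC holds the cache line size read by RDHWR and the comparison
   is unsigned:

	do
	  {
	    synci (begin);
	    done = (begin > end);
	    begin += inc;
	  }
	while (!done);

   The comparison uses the value of BEGIN before it is advanced, so the
   loop only stops once BEGIN has stepped past END.  */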
6009 /* Expand a QI or HI mode atomic memory operation.
6011 GENERATOR contains a pointer to the gen_* function that generates
6012 the SI mode underlying atomic operation using masks that we
6013 calculate.
6015 RESULT is the return register for the operation. Its value is NULL
6016 if unused.
6018 MEM is the location of the atomic access.
6020 OLDVAL is the first operand for the operation.
6022 NEWVAL is the optional second operand for the operation. Its value
6023 is NULL if unused. */
6025 void
6026 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6027 rtx result, rtx mem, rtx oldval, rtx newval)
6029 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6030 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6031 rtx res = NULL;
6032 enum machine_mode mode;
6034 mode = GET_MODE (mem);
6036 /* Compute the address of the containing SImode value. */
6037 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6038 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6039 force_reg (Pmode, GEN_INT (-4)));
6041 /* Create a memory reference for it. */
6042 memsi = gen_rtx_MEM (SImode, memsi_addr);
6043 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6044 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6046 /* Work out the byte offset of the QImode or HImode value,
6047 counting from the least significant byte. */
6048 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6049 if (TARGET_BIG_ENDIAN)
6050 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6052 /* Multiply by eight to convert the shift value from bytes to bits. */
6053 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6055 /* Make the final shift an SImode value, so that it can be used in
6056 SImode operations. */
6057 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6059 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6060 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6061 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6062 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6064 /* Compute the equivalent exclusive mask. */
6065 inverted_mask = gen_reg_rtx (SImode);
6066 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6067 gen_rtx_NOT (SImode, mask)));
6069 /* Shift the old value into place. */
6070 if (oldval != const0_rtx)
6072 oldval = convert_modes (SImode, mode, oldval, true);
6073 oldval = force_reg (SImode, oldval);
6074 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6077 /* Do the same for the new value. */
6078 if (newval && newval != const0_rtx)
6080 newval = convert_modes (SImode, mode, newval, true);
6081 newval = force_reg (SImode, newval);
6082 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6085 /* Do the SImode atomic access. */
6086 if (result)
6087 res = gen_reg_rtx (SImode);
6088 if (newval)
6089 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6090 else if (result)
6091 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6092 else
6093 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6095 emit_insn (si_op);
6097 if (result)
6099 /* Shift and convert the result. */
6100 mips_emit_binary (AND, res, res, mask);
6101 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6102 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
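/* A worked example of the address arithmetic above: for a QImode access
   at address 0x1003 on a little-endian target, MEMSI_ADDR is
   0x1003 & -4 == 0x1000, SHIFT is (0x1003 & 3) * 8 == 24 bits, MASK is
   0xff << 24 and INVERTED_MASK is ~(0xff << 24).  On a big-endian target
   the byte index is first XORed with 3 (or 2 for HImode), so the same
   address gives SHIFT == 0.  OLDVAL and NEWVAL are shifted left by the
   same amount so that they line up with the byte within the containing
   SImode word.  */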
6106 /* Return true if it is possible to use left/right accesses for a
6107 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6108 returning true, update *OP, *LEFT and *RIGHT as follows:
6110 *OP is a BLKmode reference to the whole field.
6112 *LEFT is a QImode reference to the first byte if big endian or
6113 the last byte if little endian. This address can be used in the
6114 left-side instructions (LWL, SWL, LDL, SDL).
6116 *RIGHT is a QImode reference to the opposite end of the field and
6117 can be used in the matching right-side instructions (LWR, SWR, LDR, SDR). */
6119 static bool
6120 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6121 rtx *left, rtx *right)
6123 rtx first, last;
6125 /* Check that the operand really is a MEM. Not all the extv and
6126 extzv predicates are checked. */
6127 if (!MEM_P (*op))
6128 return false;
6130 /* Check that the size is valid. */
6131 if (width != 32 && (!TARGET_64BIT || width != 64))
6132 return false;
6134 /* We can only access byte-aligned values. Since we are always passed
6135 a reference to the first byte of the field, it is not necessary to
6136 do anything with BITPOS after this check. */
6137 if (bitpos % BITS_PER_UNIT != 0)
6138 return false;
6140 /* Reject aligned bitfields: we want to use a normal load or store
6141 instead of a left/right pair. */
6142 if (MEM_ALIGN (*op) >= width)
6143 return false;
6145 /* Adjust *OP to refer to the whole field. This also has the effect
6146 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6147 *op = adjust_address (*op, BLKmode, 0);
6148 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6150 /* Get references to both ends of the field. We deliberately don't
6151 use the original QImode *OP for FIRST since the new BLKmode one
6152 might have a simpler address. */
6153 first = adjust_address (*op, QImode, 0);
6154 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6156 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6157 correspond to the MSB and RIGHT to the LSB. */
6158 if (TARGET_BIG_ENDIAN)
6159 *left = first, *right = last;
6160 else
6161 *left = last, *right = first;
6163 return true;
6166 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6167 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6168 the operation is the equivalent of:
6170 (set DEST (*_extract SRC WIDTH BITPOS))
6172 Return true on success. */
6174 bool
6175 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6176 HOST_WIDE_INT bitpos)
6178 rtx left, right, temp;
6180 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
6181 be a paradoxical word_mode subreg. This is the only case in which
6182 we allow the destination to be larger than the source. */
6183 if (GET_CODE (dest) == SUBREG
6184 && GET_MODE (dest) == DImode
6185 && GET_MODE (SUBREG_REG (dest)) == SImode)
6186 dest = SUBREG_REG (dest);
6188 /* After the above adjustment, the destination must be the same
6189 width as the source. */
6190 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6191 return false;
6193 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6194 return false;
6196 temp = gen_reg_rtx (GET_MODE (dest));
6197 if (GET_MODE (dest) == DImode)
6199 emit_insn (gen_mov_ldl (temp, src, left));
6200 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6202 else
6204 emit_insn (gen_mov_lwl (temp, src, left));
6205 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6207 return true;
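/* For illustration: on a big-endian target, the two patterns used above
   for a 32-bit load from an unaligned address held in $t0 assemble to the
   classic pair

	lwl	$v0,0($t0)
	lwr	$v0,3($t0)

   with the 0 and 3 offsets swapped on a little-endian target.  LWL uses
   the *LEFT reference computed by mips_get_unaligned_mem and LWR the
   *RIGHT one; the register names here are only examples.  */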
6210 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6211 BITPOS and SRC are the operands passed to the expander; the operation
6212 is the equivalent of:
6214 (set (zero_extract DEST WIDTH BITPOS) SRC)
6216 Return true on success. */
6218 bool
6219 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6220 HOST_WIDE_INT bitpos)
6222 rtx left, right;
6223 enum machine_mode mode;
6225 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6226 return false;
6228 mode = mode_for_size (width, MODE_INT, 0);
6229 src = gen_lowpart (mode, src);
6230 if (mode == DImode)
6232 emit_insn (gen_mov_sdl (dest, src, left));
6233 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6235 else
6237 emit_insn (gen_mov_swl (dest, src, left));
6238 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6240 return true;
6243 /* Return true if X is a MEM with the same size as MODE. */
6245 bool
6246 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6248 rtx size;
6250 if (!MEM_P (x))
6251 return false;
6253 size = MEM_SIZE (x);
6254 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6257 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6258 source of an "ext" instruction or the destination of an "ins"
6259 instruction. OP must be a register operand and the following
6260 conditions must hold:
6262 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6263 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6264 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6266 Also reject lengths equal to a word as they are better handled
6267 by the move patterns. */
6269 bool
6270 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6272 if (!ISA_HAS_EXT_INS
6273 || !register_operand (op, VOIDmode)
6274 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6275 return false;
6277 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6278 return false;
6280 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6281 return false;
6283 return true;
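/* A worked example: for an SImode register operand, WIDTH == 8 and
   BITPOS == 16 pass all three checks above and can use EXT/INS directly,
   whereas WIDTH == 32 fails the IN_RANGE test because a full-word extract
   or insert is better expressed as an ordinary move.  */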
6286 /* Return true if -msplit-addresses is selected and should be honored.
6288 -msplit-addresses is a half-way house between explicit relocations
6289 and the traditional assembler macros. It can split absolute 32-bit
6290 symbolic constants into a high/lo_sum pair but uses macros for other
6291 sorts of access.
6293 Like explicit relocation support for REL targets, it relies
6294 on GNU extensions in the assembler and the linker.
6296 Although this code should work for -O0, it has traditionally
6297 been treated as an optimization. */
6299 static bool
6300 mips_split_addresses_p (void)
6302 return (TARGET_SPLIT_ADDRESSES
6303 && optimize
6304 && !TARGET_MIPS16
6305 && !flag_pic
6306 && !ABI_HAS_64BIT_SYMBOLS);
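/* For illustration: when this function returns true, an absolute
   reference to SYM is split into a HIGH/LO_SUM pair, which assembles to
   something like

	lui	$2,%hi(sym)
	addiu	$2,$2,%lo(sym)

   rather than relying on the assembler's "la" macro.  The register choice
   is illustrative; the "%hi(" and "%lo(" strings are installed by
   mips_init_relocs below.  */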
6309 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6311 static void
6312 mips_init_relocs (void)
6314 memset (mips_split_p, '\0', sizeof (mips_split_p));
6315 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6316 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6318 if (ABI_HAS_64BIT_SYMBOLS)
6320 if (TARGET_EXPLICIT_RELOCS)
6322 mips_split_p[SYMBOL_64_HIGH] = true;
6323 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6324 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6326 mips_split_p[SYMBOL_64_MID] = true;
6327 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6328 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6330 mips_split_p[SYMBOL_64_LOW] = true;
6331 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6332 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6334 mips_split_p[SYMBOL_ABSOLUTE] = true;
6335 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6338 else
6340 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6342 mips_split_p[SYMBOL_ABSOLUTE] = true;
6343 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6344 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6346 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6350 if (TARGET_MIPS16)
6352 /* The high part is provided by a pseudo copy of $gp. */
6353 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6354 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6357 if (TARGET_EXPLICIT_RELOCS)
6359 /* Small data constants are kept whole until after reload,
6360 then lowered by mips_rewrite_small_data. */
6361 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6363 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6364 if (TARGET_NEWABI)
6366 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6367 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6369 else
6371 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6372 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6375 if (TARGET_XGOT)
6377 /* The HIGH and LO_SUM are matched by special .md patterns. */
6378 mips_split_p[SYMBOL_GOT_DISP] = true;
6380 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6381 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6382 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6384 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6385 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6386 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6388 else
6390 if (TARGET_NEWABI)
6391 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6392 else
6393 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6394 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6398 if (TARGET_NEWABI)
6400 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6401 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6402 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6405 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6406 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6408 mips_split_p[SYMBOL_DTPREL] = true;
6409 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6410 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6412 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6414 mips_split_p[SYMBOL_TPREL] = true;
6415 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6416 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6418 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6421 /* If OP is an UNSPEC address, return the address to which it refers,
6422 otherwise return OP itself. */
6424 static rtx
6425 mips_strip_unspec_address (rtx op)
6427 rtx base, offset;
6429 split_const (op, &base, &offset);
6430 if (UNSPEC_ADDRESS_P (base))
6431 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6432 return op;
6435 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6436 in context CONTEXT. RELOCS is the array of relocations to use. */
6438 static void
6439 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6440 const char **relocs)
6442 enum mips_symbol_type symbol_type;
6443 const char *p;
6445 symbol_type = mips_classify_symbolic_expression (op, context);
6446 gcc_assert (relocs[symbol_type]);
6448 fputs (relocs[symbol_type], file);
6449 output_addr_const (file, mips_strip_unspec_address (op));
6450 for (p = relocs[symbol_type]; *p != 0; p++)
6451 if (*p == '(')
6452 fputc (')', file);
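/* A worked example: for a SYMBOL_GOTOFF_LOADGP operand on a newabi target,
   the high-part reloc string installed above is "%hi(%neg(%gp_rel(", so
   this function prints

	%hi(%neg(%gp_rel(sym)))

   appending one closing parenthesis for each opening one in the reloc
   prefix.  */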
6455 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6456 The punctuation characters are:
6458 '(' Start a nested ".set noreorder" block.
6459 ')' End a nested ".set noreorder" block.
6460 '[' Start a nested ".set noat" block.
6461 ']' End a nested ".set noat" block.
6462 '<' Start a nested ".set nomacro" block.
6463 '>' End a nested ".set nomacro" block.
6464 '*' Behave like %(%< if generating a delayed-branch sequence.
6465 '#' Print a nop if in a ".set noreorder" block.
6466 '/' Like '#', but do nothing within a delayed-branch sequence.
6467 '?' Print "l" if mips_branch_likely is true.
6468 '.' Print the name of the register with a hard-wired zero (zero or $0).
6469 '@' Print the name of the assembler temporary register (at or $1).
6470 '^' Print the name of the pic call-through register (t9 or $25).
6471 '+' Print the name of the gp register (usually gp or $28).
6472 '$' Print the name of the stack pointer register (sp or $29).
6473 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6474 '-' Print ".set pop" under the same conditions for '|'.
6476 See also mips_init_print_operand_punct. */
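/* For illustration: an output template that wraps an instruction sequence
   in "%(" and "%)" brackets it with ".set noreorder" and ".set reorder",
   and a "%#" inside such a block prints an explicit nop, as the switch
   statement below shows.  Likewise "%?" appends "l" to a branch mnemonic
   (for example turning "beq" into "beql") when mips_branch_likely is
   set.  */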
6478 static void
6479 mips_print_operand_punctuation (FILE *file, int ch)
6481 switch (ch)
6483 case '(':
6484 if (set_noreorder++ == 0)
6485 fputs (".set\tnoreorder\n\t", file);
6486 break;
6488 case ')':
6489 gcc_assert (set_noreorder > 0);
6490 if (--set_noreorder == 0)
6491 fputs ("\n\t.set\treorder", file);
6492 break;
6494 case '[':
6495 if (set_noat++ == 0)
6496 fputs (".set\tnoat\n\t", file);
6497 break;
6499 case ']':
6500 gcc_assert (set_noat > 0);
6501 if (--set_noat == 0)
6502 fputs ("\n\t.set\tat", file);
6503 break;
6505 case '<':
6506 if (set_nomacro++ == 0)
6507 fputs (".set\tnomacro\n\t", file);
6508 break;
6510 case '>':
6511 gcc_assert (set_nomacro > 0);
6512 if (--set_nomacro == 0)
6513 fputs ("\n\t.set\tmacro", file);
6514 break;
6516 case '*':
6517 if (final_sequence != 0)
6519 mips_print_operand_punctuation (file, '(');
6520 mips_print_operand_punctuation (file, '<');
6522 break;
6524 case '#':
6525 if (set_noreorder != 0)
6526 fputs ("\n\tnop", file);
6527 break;
6529 case '/':
6530 /* Print an extra newline so that the delayed insn is separated
6531 from the following ones. This looks neater and is consistent
6532 with non-nop delayed sequences. */
6533 if (set_noreorder != 0 && final_sequence == 0)
6534 fputs ("\n\tnop\n", file);
6535 break;
6537 case '?':
6538 if (mips_branch_likely)
6539 putc ('l', file);
6540 break;
6542 case '.':
6543 fputs (reg_names[GP_REG_FIRST + 0], file);
6544 break;
6546 case '@':
6547 fputs (reg_names[GP_REG_FIRST + 1], file);
6548 break;
6550 case '^':
6551 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6552 break;
6554 case '+':
6555 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6556 break;
6558 case '$':
6559 fputs (reg_names[STACK_POINTER_REGNUM], file);
6560 break;
6562 case '|':
6563 if (!ISA_HAS_LL_SC)
6564 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6565 break;
6567 case '-':
6568 if (!ISA_HAS_LL_SC)
6569 fputs ("\n\t.set\tpop", file);
6570 break;
6572 default:
6573 gcc_unreachable ();
6574 break;
6578 /* Initialize mips_print_operand_punct. */
6580 static void
6581 mips_init_print_operand_punct (void)
6583 const char *p;
6585 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6586 mips_print_operand_punct[(unsigned char) *p] = true;
6589 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6590 associated with condition CODE. Print the condition part of the
6591 opcode to FILE. */
6593 static void
6594 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6596 switch (code)
6598 case EQ:
6599 case NE:
6600 case GT:
6601 case GE:
6602 case LT:
6603 case LE:
6604 case GTU:
6605 case GEU:
6606 case LTU:
6607 case LEU:
6608 /* Conveniently, the MIPS names for these conditions are the same
6609 as their RTL equivalents. */
6610 fputs (GET_RTX_NAME (code), file);
6611 break;
6613 default:
6614 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6615 break;
6619 /* Likewise floating-point branches. */
6621 static void
6622 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6624 switch (code)
6626 case EQ:
6627 fputs ("c1f", file);
6628 break;
6630 case NE:
6631 fputs ("c1t", file);
6632 break;
6634 default:
6635 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6636 break;
6640 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6642 'X' Print CONST_INT OP in hexadecimal format.
6643 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6644 'd' Print CONST_INT OP in decimal.
6645 'h' Print the high-part relocation associated with OP, after stripping
6646 any outermost HIGH.
6647 'R' Print the low-part relocation associated with OP.
6648 'C' Print the integer branch condition for comparison OP.
6649 'N' Print the inverse of the integer branch condition for comparison OP.
6650 'F' Print the FPU branch condition for comparison OP.
6651 'W' Print the inverse of the FPU branch condition for comparison OP.
6652 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6653 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6654 't' Like 'T', but with the EQ/NE cases reversed.
6655 'Y' Print mips_fp_conditions[INTVAL (OP)].
6656 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6657 'q' Print a DSP accumulator register.
6658 'D' Print the second part of a double-word register or memory operand.
6659 'L' Print the low-order register in a double-word register operand.
6660 'M' Print high-order register in a double-word register operand.
6661 'z' Print $0 if OP is zero, otherwise print OP normally. */
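/* Worked examples: with OP a 64-bit value held in the register pair
   $4/$5 on a 32-bit little-endian target, "%L0" prints the low-order
   register $4, "%M0" prints the high-order register $5 and "%D0" always
   prints the second register of the pair; for a memory operand, "%D0"
   prints the address plus 4.  For a CONST_INT such as 0x1234abcd, "%X0"
   prints the whole value in hexadecimal while "%x0" prints only 0xabcd.
   The register names are illustrative.  */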
6663 void
6664 mips_print_operand (FILE *file, rtx op, int letter)
6666 enum rtx_code code;
6668 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6670 mips_print_operand_punctuation (file, letter);
6671 return;
6674 gcc_assert (op);
6675 code = GET_CODE (op);
6677 switch (letter)
6679 case 'X':
6680 if (GET_CODE (op) == CONST_INT)
6681 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6682 else
6683 output_operand_lossage ("invalid use of '%%%c'", letter);
6684 break;
6686 case 'x':
6687 if (GET_CODE (op) == CONST_INT)
6688 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6689 else
6690 output_operand_lossage ("invalid use of '%%%c'", letter);
6691 break;
6693 case 'd':
6694 if (GET_CODE (op) == CONST_INT)
6695 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6696 else
6697 output_operand_lossage ("invalid use of '%%%c'", letter);
6698 break;
6700 case 'h':
6701 if (code == HIGH)
6702 op = XEXP (op, 0);
6703 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6704 break;
6706 case 'R':
6707 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6708 break;
6710 case 'C':
6711 mips_print_int_branch_condition (file, code, letter);
6712 break;
6714 case 'N':
6715 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6716 break;
6718 case 'F':
6719 mips_print_float_branch_condition (file, code, letter);
6720 break;
6722 case 'W':
6723 mips_print_float_branch_condition (file, reverse_condition (code),
6724 letter);
6725 break;
6727 case 'T':
6728 case 't':
6730 int truth = (code == NE) == (letter == 'T');
6731 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6733 break;
6735 case 'Y':
6736 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6737 fputs (mips_fp_conditions[UINTVAL (op)], file);
6738 else
6739 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6740 letter);
6741 break;
6743 case 'Z':
6744 if (ISA_HAS_8CC)
6746 mips_print_operand (file, op, 0);
6747 fputc (',', file);
6749 break;
6751 case 'q':
6752 if (code == REG && MD_REG_P (REGNO (op)))
6753 fprintf (file, "$ac0");
6754 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6755 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6756 else
6757 output_operand_lossage ("invalid use of '%%%c'", letter);
6758 break;
6760 default:
6761 switch (code)
6763 case REG:
6765 unsigned int regno = REGNO (op);
6766 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6767 || (letter == 'L' && TARGET_BIG_ENDIAN)
6768 || letter == 'D')
6769 regno++;
6770 fprintf (file, "%s", reg_names[regno]);
6772 break;
6774 case MEM:
6775 if (letter == 'D')
6776 output_address (plus_constant (XEXP (op, 0), 4));
6777 else
6778 output_address (XEXP (op, 0));
6779 break;
6781 default:
6782 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6783 fputs (reg_names[GP_REG_FIRST], file);
6784 else if (CONST_GP_P (op))
6785 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6786 else
6787 output_addr_const (file, mips_strip_unspec_address (op));
6788 break;
6793 /* Output address operand X to FILE. */
6795 void
6796 mips_print_operand_address (FILE *file, rtx x)
6798 struct mips_address_info addr;
6800 if (mips_classify_address (&addr, x, word_mode, true))
6801 switch (addr.type)
6803 case ADDRESS_REG:
6804 mips_print_operand (file, addr.offset, 0);
6805 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6806 return;
6808 case ADDRESS_LO_SUM:
6809 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6810 mips_lo_relocs);
6811 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6812 return;
6814 case ADDRESS_CONST_INT:
6815 output_addr_const (file, x);
6816 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
6817 return;
6819 case ADDRESS_SYMBOLIC:
6820 output_addr_const (file, mips_strip_unspec_address (x));
6821 return;
6823 gcc_unreachable ();
6826 /* Implement TARGET_ENCODE_SECTION_INFO. */
6828 static void
6829 mips_encode_section_info (tree decl, rtx rtl, int first)
6831 default_encode_section_info (decl, rtl, first);
6833 if (TREE_CODE (decl) == FUNCTION_DECL)
6835 rtx symbol = XEXP (rtl, 0);
6836 tree type = TREE_TYPE (decl);
6838 /* Encode whether the symbol is short or long. */
6839 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6840 || mips_far_type_p (type))
6841 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6845 /* Implement TARGET_SELECT_RTX_SECTION. */
6847 static section *
6848 mips_select_rtx_section (enum machine_mode mode, rtx x,
6849 unsigned HOST_WIDE_INT align)
6851 /* ??? Consider using mergeable small data sections. */
6852 if (mips_rtx_constant_in_small_data_p (mode))
6853 return get_named_section (NULL, ".sdata", 0);
6855 return default_elf_select_rtx_section (mode, x, align);
6858 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6860 The complication here is that, with the combination TARGET_ABICALLS
6861 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6862 therefore not be included in the read-only part of a DSO. Handle such
6863 cases by selecting a normal data section instead of a read-only one.
6864 The logic apes that in default_function_rodata_section. */
6866 static section *
6867 mips_function_rodata_section (tree decl)
6869 if (!TARGET_ABICALLS || TARGET_GPWORD)
6870 return default_function_rodata_section (decl);
6872 if (decl && DECL_SECTION_NAME (decl))
6874 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6875 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6877 char *rname = ASTRDUP (name);
6878 rname[14] = 'd';
6879 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6881 else if (flag_function_sections
6882 && flag_data_sections
6883 && strncmp (name, ".text.", 6) == 0)
6885 char *rname = ASTRDUP (name);
6886 memcpy (rname + 1, "data", 4);
6887 return get_section (rname, SECTION_WRITE, decl);
6890 return data_section;
6893 /* Implement TARGET_IN_SMALL_DATA_P. */
6895 static bool
6896 mips_in_small_data_p (const_tree decl)
6898 unsigned HOST_WIDE_INT size;
6900 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6901 return false;
6903 /* We don't yet generate small-data references for -mabicalls
6904 or VxWorks RTP code. See the related -G handling in
6905 mips_override_options. */
6906 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6907 return false;
6909 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6911 const char *name;
6913 /* Reject anything that isn't in a known small-data section. */
6914 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6915 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6916 return false;
6918 /* If a symbol is defined externally, the assembler will use the
6919 usual -G rules when deciding how to implement macros. */
6920 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6921 return true;
6923 else if (TARGET_EMBEDDED_DATA)
6925 /* Don't put constants into the small data section: we want them
6926 to be in ROM rather than RAM. */
6927 if (TREE_CODE (decl) != VAR_DECL)
6928 return false;
6930 if (TREE_READONLY (decl)
6931 && !TREE_SIDE_EFFECTS (decl)
6932 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6933 return false;
6936 /* Enforce -mlocal-sdata. */
6937 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6938 return false;
6940 /* Enforce -mextern-sdata. */
6941 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6943 if (DECL_EXTERNAL (decl))
6944 return false;
6945 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6946 return false;
6949 /* We have traditionally not treated zero-sized objects as small data,
6950 so this is now effectively part of the ABI. */
6951 size = int_size_in_bytes (TREE_TYPE (decl));
6952 return size > 0 && size <= mips_small_data_threshold;
6955 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6956 anchors for small data: the GP register acts as an anchor in that
6957 case. We also don't want to use them for PC-relative accesses,
6958 where the PC acts as an anchor. */
6960 static bool
6961 mips_use_anchors_for_symbol_p (const_rtx symbol)
6963 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6965 case SYMBOL_PC_RELATIVE:
6966 case SYMBOL_GP_RELATIVE:
6967 return false;
6969 default:
6970 return default_use_anchors_for_symbol_p (symbol);
6974 /* The MIPS debug format wants all automatic variables and arguments
6975 to be in terms of the virtual frame pointer (stack pointer before
6976 any adjustment in the function), while the MIPS 3.0 linker wants
6977 the frame pointer to be the stack pointer after the initial
6978 adjustment. So, we do the adjustment here. The arg pointer (which
6979 is eliminated) points to the virtual frame pointer, while the frame
6980 pointer (which may be eliminated) points to the stack pointer after
6981 the initial adjustments. */
6983 HOST_WIDE_INT
6984 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6986 rtx offset2 = const0_rtx;
6987 rtx reg = eliminate_constant_term (addr, &offset2);
6989 if (offset == 0)
6990 offset = INTVAL (offset2);
6992 if (reg == stack_pointer_rtx
6993 || reg == frame_pointer_rtx
6994 || reg == hard_frame_pointer_rtx)
6996 offset -= cfun->machine->frame.total_size;
6997 if (reg == hard_frame_pointer_rtx)
6998 offset += cfun->machine->frame.hard_frame_pointer_offset;
7001 /* sdbout_parms does not want this to crash for unrecognized cases. */
7002 #if 0
7003 else if (reg != arg_pointer_rtx)
7004 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7005 addr);
7006 #endif
7008 return offset;
7011 /* Implement ASM_OUTPUT_EXTERNAL. */
7013 void
7014 mips_output_external (FILE *file, tree decl, const char *name)
7016 default_elf_asm_output_external (file, decl, name);
7018 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7019 set in order to avoid putting out names that are never really
7020 used. */
7021 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7023 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7025 /* When using assembler macros, emit .extern directives for
7026 all small-data externs so that the assembler knows how
7027 big they are.
7029 In most cases it would be safe (though pointless) to emit
7030 .externs for other symbols too. One exception is when an
7031 object is within the -G limit but declared by the user to
7032 be in a section other than .sbss or .sdata. */
7033 fputs ("\t.extern\t", file);
7034 assemble_name (file, name);
7035 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7036 int_size_in_bytes (TREE_TYPE (decl)));
7038 else if (TARGET_IRIX
7039 && mips_abi == ABI_32
7040 && TREE_CODE (decl) == FUNCTION_DECL)
7042 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7043 `.global name .text' directive for every used but
7044 undefined function. If we don't, the linker may perform
7045 an optimization (skipping over the insns that set $gp)
7046 when it is unsafe. */
7047 fputs ("\t.globl ", file);
7048 assemble_name (file, name);
7049 fputs (" .text\n", file);
7054 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7056 void
7057 mips_output_filename (FILE *stream, const char *name)
7059 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7060 directives. */
7061 if (write_symbols == DWARF2_DEBUG)
7062 return;
7063 else if (mips_output_filename_first_time)
7065 mips_output_filename_first_time = 0;
7066 num_source_filenames += 1;
7067 current_function_file = name;
7068 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7069 output_quoted_string (stream, name);
7070 putc ('\n', stream);
7072 /* If we are emitting stabs, let dbxout.c handle this (except for
7073 the mips_output_filename_first_time case). */
7074 else if (write_symbols == DBX_DEBUG)
7075 return;
7076 else if (name != current_function_file
7077 && strcmp (name, current_function_file) != 0)
7079 num_source_filenames += 1;
7080 current_function_file = name;
7081 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7082 output_quoted_string (stream, name);
7083 putc ('\n', stream);
7087 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7089 static void ATTRIBUTE_UNUSED
7090 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7092 switch (size)
7094 case 4:
7095 fputs ("\t.dtprelword\t", file);
7096 break;
7098 case 8:
7099 fputs ("\t.dtpreldword\t", file);
7100 break;
7102 default:
7103 gcc_unreachable ();
7105 output_addr_const (file, x);
7106 fputs ("+0x8000", file);
7109 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7111 static rtx
7112 mips_dwarf_register_span (rtx reg)
7114 rtx high, low;
7115 enum machine_mode mode;
7117 /* By default, GCC maps increasing register numbers to increasing
7118 memory locations, but paired FPRs are always little-endian,
7119 regardless of the prevailing endianness. */
7120 mode = GET_MODE (reg);
7121 if (FP_REG_P (REGNO (reg))
7122 && TARGET_BIG_ENDIAN
7123 && MAX_FPRS_PER_FMT > 1
7124 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7126 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7127 high = mips_subword (reg, true);
7128 low = mips_subword (reg, false);
7129 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7132 return NULL_RTX;
7135 /* Implement ASM_OUTPUT_ASCII. */
7137 void
7138 mips_output_ascii (FILE *stream, const char *string, size_t len)
7140 size_t i;
7141 int cur_pos;
7143 cur_pos = 17;
7144 fprintf (stream, "\t.ascii\t\"");
7145 for (i = 0; i < len; i++)
7147 int c;
7149 c = (unsigned char) string[i];
7150 if (ISPRINT (c))
7152 if (c == '\\' || c == '\"')
7154 putc ('\\', stream);
7155 cur_pos++;
7157 putc (c, stream);
7158 cur_pos++;
7160 else
7162 fprintf (stream, "\\%03o", c);
7163 cur_pos += 4;
7166 if (cur_pos > 72 && i+1 < len)
7168 cur_pos = 17;
7169 fprintf (stream, "\"\n\t.ascii\t\"");
7172 fprintf (stream, "\"\n");
7175 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7176 macros, mark the symbol as written so that mips_asm_output_external
7177 won't emit an .extern for it. STREAM is the output file, NAME is the
7178 name of the symbol, INIT_STRING is the string that should be written
7179 before the symbol and FINAL_STRING is the string that should be
7180 written after it. FINAL_STRING is a printf format that consumes the
7181 remaining arguments. */
7183 void
7184 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7185 const char *final_string, ...)
7187 va_list ap;
7189 fputs (init_string, stream);
7190 assemble_name (stream, name);
7191 va_start (ap, final_string);
7192 vfprintf (stream, final_string, ap);
7193 va_end (ap);
7195 if (!TARGET_EXPLICIT_RELOCS)
7197 tree name_tree = get_identifier (name);
7198 TREE_ASM_WRITTEN (name_tree) = 1;
7202 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7203 NAME is the name of the object and ALIGN is the required alignment
7204 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7205 alignment argument. */
7207 void
7208 mips_declare_common_object (FILE *stream, const char *name,
7209 const char *init_string,
7210 unsigned HOST_WIDE_INT size,
7211 unsigned int align, bool takes_alignment_p)
7213 if (!takes_alignment_p)
7215 size += (align / BITS_PER_UNIT) - 1;
7216 size -= size % (align / BITS_PER_UNIT);
7217 mips_declare_object (stream, name, init_string,
7218 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7220 else
7221 mips_declare_object (stream, name, init_string,
7222 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7223 size, align / BITS_PER_UNIT);
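/* A worked example: when the directive does not take an alignment
   argument, the code above instead pads SIZE up to a multiple of the
   alignment.  For SIZE == 10 and ALIGN == 64 bits (8 bytes) it computes
   10 + 7 == 17 and then 17 - (17 % 8) == 16, so the object is declared
   with a size of 16 bytes.  */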
7226 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7227 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7229 void
7230 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7231 unsigned HOST_WIDE_INT size,
7232 unsigned int align)
7234 /* If the target wants uninitialized const declarations in
7235 .rdata then don't put them in .comm. */
7236 if (TARGET_EMBEDDED_DATA
7237 && TARGET_UNINIT_CONST_IN_RODATA
7238 && TREE_CODE (decl) == VAR_DECL
7239 && TREE_READONLY (decl)
7240 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7242 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7243 targetm.asm_out.globalize_label (stream, name);
7245 switch_to_section (readonly_data_section);
7246 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7247 mips_declare_object (stream, name, "",
7248 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7249 size);
7251 else
7252 mips_declare_common_object (stream, name, "\n\t.comm\t",
7253 size, align, true);
7256 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7257 extern int size_directive_output;
7259 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7260 definitions except that it uses mips_declare_object to emit the label. */
7262 void
7263 mips_declare_object_name (FILE *stream, const char *name,
7264 tree decl ATTRIBUTE_UNUSED)
7266 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7267 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7268 #endif
7270 size_directive_output = 0;
7271 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7273 HOST_WIDE_INT size;
7275 size_directive_output = 1;
7276 size = int_size_in_bytes (TREE_TYPE (decl));
7277 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7280 mips_declare_object (stream, name, "", ":\n");
7283 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7285 void
7286 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7288 const char *name;
7290 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7291 if (!flag_inhibit_size_directive
7292 && DECL_SIZE (decl) != 0
7293 && !at_end
7294 && top_level
7295 && DECL_INITIAL (decl) == error_mark_node
7296 && !size_directive_output)
7298 HOST_WIDE_INT size;
7300 size_directive_output = 1;
7301 size = int_size_in_bytes (TREE_TYPE (decl));
7302 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7305 #endif
7307 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7308 with the current ABI. */
7310 static const char *
7311 mips_mdebug_abi_name (void)
7313 switch (mips_abi)
7315 case ABI_32:
7316 return "abi32";
7317 case ABI_O64:
7318 return "abiO64";
7319 case ABI_N32:
7320 return "abiN32";
7321 case ABI_64:
7322 return "abiN64";
7323 case ABI_EABI:
7324 return TARGET_64BIT ? "eabi64" : "eabi32";
7325 default:
7326 gcc_unreachable ();
7330 /* Implement TARGET_ASM_FILE_START. */
7332 static void
7333 mips_file_start (void)
7335 default_file_start ();
7337 /* Generate a special section to describe the ABI switches used to
7338 produce the resultant binary. This is unnecessary on IRIX and
7339 causes unwanted warnings from the native linker. */
7340 if (!TARGET_IRIX)
7342 /* Record the ABI itself. Modern versions of binutils encode
7343 this information in the ELF header flags, but GDB needs the
7344 information in order to correctly debug binaries produced by
7345 older binutils. See the function mips_gdbarch_init in
7346 gdb/mips-tdep.c. */
7347 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7348 mips_mdebug_abi_name ());
7350 /* There is no ELF header flag to distinguish long32 forms of the
7351 EABI from long64 forms. Emit a special section to help tools
7352 such as GDB. Do the same for o64, which is sometimes used with
7353 -mlong64. */
7354 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7355 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7356 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7358 #ifdef HAVE_AS_GNU_ATTRIBUTE
7359 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7360 (TARGET_HARD_FLOAT_ABI
7361 ? (TARGET_DOUBLE_FLOAT
7362 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7363 #endif
7366 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7367 if (TARGET_ABICALLS)
7368 fprintf (asm_out_file, "\t.abicalls\n");
7370 if (flag_verbose_asm)
7371 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7372 ASM_COMMENT_START,
7373 mips_small_data_threshold, mips_arch_info->name, mips_isa);
7376 /* Make the last instruction frame-related and note that it performs
7377 the operation described by FRAME_PATTERN. */
7379 static void
7380 mips_set_frame_expr (rtx frame_pattern)
7382 rtx insn;
7384 insn = get_last_insn ();
7385 RTX_FRAME_RELATED_P (insn) = 1;
7386 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7387 frame_pattern,
7388 REG_NOTES (insn));
7391 /* Return a frame-related rtx that stores REG at MEM.
7392 REG must be a single register. */
7394 static rtx
7395 mips_frame_set (rtx mem, rtx reg)
7397 rtx set;
7399 /* If we're saving the return address register and the DWARF return
7400 address column differs from the hard register number, adjust the
7401 note reg to refer to the former. */
7402 if (REGNO (reg) == GP_REG_FIRST + 31
7403 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7404 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7406 set = gen_rtx_SET (VOIDmode, mem, reg);
7407 RTX_FRAME_RELATED_P (set) = 1;
7409 return set;
7412 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7413 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7414 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7415 static const unsigned char mips16e_s2_s8_regs[] = {
7416 30, 23, 22, 21, 20, 19, 18
7418 static const unsigned char mips16e_a0_a3_regs[] = {
7419 4, 5, 6, 7
7422 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7423 ordered from the uppermost in memory to the lowest in memory. */
7424 static const unsigned char mips16e_save_restore_regs[] = {
7425 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
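/* Worked example (added for clarity): mips16e_s2_s8_regs is ordered
   {30, 23, 22, 21, 20, 19, 18}, so a function that wants SAVE to handle
   $20 ($s4, index 4) must also let it handle $19 and $18 (indexes 5 and 6);
   the instruction can only encode a suffix of this list.  */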
7428 /* Return the index of the lowest X in the range [0, SIZE) for which
7429 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7431 static unsigned int
7432 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7433 unsigned int size)
7435 unsigned int i;
7437 for (i = 0; i < size; i++)
7438 if (BITSET_P (mask, regs[i]))
7439 break;
7441 return i;
7444 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7445 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7446 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7447 is true for all indexes in (X, SIZE). */
7449 static void
7450 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7451 unsigned int size, unsigned int *num_regs_ptr)
7453 unsigned int i;
7455 i = mips16e_find_first_register (*mask_ptr, regs, size);
7456 for (i++; i < size; i++)
7457 if (!BITSET_P (*mask_ptr, regs[i]))
7459 *num_regs_ptr += 1;
7460 *mask_ptr |= 1 << regs[i];
7464 /* Return a simplified form of X using the register values in REG_VALUES.
7465 REG_VALUES[R] is the last value assigned to hard register R, or null
7466 if R has not been modified.
7468 This function is rather limited, but is good enough for our purposes. */
7470 static rtx
7471 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7473 x = avoid_constant_pool_reference (x);
7475 if (UNARY_P (x))
7477 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7478 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7479 x0, GET_MODE (XEXP (x, 0)));
7482 if (ARITHMETIC_P (x))
7484 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7485 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7486 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7489 if (REG_P (x)
7490 && reg_values[REGNO (x)]
7491 && !rtx_unstable_p (reg_values[REGNO (x)]))
7492 return reg_values[REGNO (x)];
7494 return x;
7497 /* Return true if (set DEST SRC) stores an argument register into its
7498 caller-allocated save slot, storing the number of that argument
7499 register in *REGNO_PTR if so. REG_VALUES is as for
7500 mips16e_collect_propagate_value. */
7502 static bool
7503 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7504 unsigned int *regno_ptr)
7506 unsigned int argno, regno;
7507 HOST_WIDE_INT offset, required_offset;
7508 rtx addr, base;
7510 /* Check that this is a word-mode store. */
7511 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7512 return false;
7514 /* Check that the register being saved is an unmodified argument
7515 register. */
7516 regno = REGNO (src);
7517 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7518 return false;
7519 argno = regno - GP_ARG_FIRST;
7521 /* Check whether the address is an appropriate stack-pointer or
7522 frame-pointer access. */
7523 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7524 mips_split_plus (addr, &base, &offset);
7525 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7526 if (base == hard_frame_pointer_rtx)
7527 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7528 else if (base != stack_pointer_rtx)
7529 return false;
7530 if (offset != required_offset)
7531 return false;
7533 *regno_ptr = regno;
7534 return true;
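/* Illustrative example (added; the frame size is an assumption): for an
   o32 function whose frame.total_size is 32, a store such as

	(set (mem:SI (plus:SI (reg:SI $sp) (const_int 36)))
	     (reg:SI $5))

   is recognized above as the save of argument register $5 ($a1, argno 1)
   into its caller-allocated slot at total_size + 1 * UNITS_PER_WORD.  */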
7537 /* A subroutine of mips_expand_prologue, called only when generating
7538 MIPS16e SAVE instructions. Search the start of the function for any
7539 instructions that save argument registers into their caller-allocated
7540 save slots. Delete such instructions and return a value N such that
7541 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7542 instructions redundant. */
7544 static unsigned int
7545 mips16e_collect_argument_saves (void)
7547 rtx reg_values[FIRST_PSEUDO_REGISTER];
7548 rtx insn, next, set, dest, src;
7549 unsigned int nargs, regno;
7551 push_topmost_sequence ();
7552 nargs = 0;
7553 memset (reg_values, 0, sizeof (reg_values));
7554 for (insn = get_insns (); insn; insn = next)
7556 next = NEXT_INSN (insn);
7557 if (NOTE_P (insn))
7558 continue;
7560 if (!INSN_P (insn))
7561 break;
7563 set = PATTERN (insn);
7564 if (GET_CODE (set) != SET)
7565 break;
7567 dest = SET_DEST (set);
7568 src = SET_SRC (set);
7569 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7571 if (!BITSET_P (cfun->machine->frame.mask, regno))
7573 delete_insn (insn);
7574 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7577 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7578 reg_values[REGNO (dest)]
7579 = mips16e_collect_propagate_value (src, reg_values);
7580 else
7581 break;
7583 pop_topmost_sequence ();
7585 return nargs;
7588 /* Return a move between register REGNO and memory location SP + OFFSET.
7589 Make the move a load if RESTORE_P, otherwise make it a frame-related
7590 store. */
7592 static rtx
7593 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7594 unsigned int regno)
7596 rtx reg, mem;
7598 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7599 reg = gen_rtx_REG (SImode, regno);
7600 return (restore_p
7601 ? gen_rtx_SET (VOIDmode, reg, mem)
7602 : mips_frame_set (mem, reg));
7605 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7606 The instruction must:
7608 - Allocate or deallocate SIZE bytes in total; SIZE is known
7609 to be nonzero.
7611 - Save or restore as many registers in *MASK_PTR as possible.
7612 The instruction saves the first registers at the top of the
7613 allocated area, with the other registers below it.
7615 - Save NARGS argument registers above the allocated area.
7617 (NARGS is always zero if RESTORE_P.)
7619 The SAVE and RESTORE instructions cannot save and restore all general
7620 registers, so there may be some registers left over for the caller to
7621 handle. Destructively modify *MASK_PTR so that it contains the registers
7622 that still need to be saved or restored. The caller can save these
7623 registers in the memory immediately below *OFFSET_PTR, which is a
7624 byte offset from the bottom of the allocated stack area. */
7626 static rtx
7627 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7628 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7629 HOST_WIDE_INT size)
7631 rtx pattern, set;
7632 HOST_WIDE_INT offset, top_offset;
7633 unsigned int i, regno;
7634 int n;
7636 gcc_assert (cfun->machine->frame.num_fp == 0);
7638 /* Calculate the number of elements in the PARALLEL. We need one element
7639 for the stack adjustment, one for each argument register save, and one
7640 for each additional register move. */
7641 n = 1 + nargs;
7642 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7643 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7644 n++;
7646 /* Create the final PARALLEL. */
7647 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7648 n = 0;
7650 /* Add the stack pointer adjustment. */
7651 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7652 plus_constant (stack_pointer_rtx,
7653 restore_p ? size : -size));
7654 RTX_FRAME_RELATED_P (set) = 1;
7655 XVECEXP (pattern, 0, n++) = set;
7657 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7658 top_offset = restore_p ? size : 0;
7660 /* Save the arguments. */
7661 for (i = 0; i < nargs; i++)
7663 offset = top_offset + i * UNITS_PER_WORD;
7664 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7665 XVECEXP (pattern, 0, n++) = set;
7668 /* Then fill in the other register moves. */
7669 offset = top_offset;
7670 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7672 regno = mips16e_save_restore_regs[i];
7673 if (BITSET_P (*mask_ptr, regno))
7675 offset -= UNITS_PER_WORD;
7676 set = mips16e_save_restore_reg (restore_p, offset, regno);
7677 XVECEXP (pattern, 0, n++) = set;
7678 *mask_ptr &= ~(1 << regno);
7682 /* Tell the caller what offset it should use for the remaining registers. */
7683 *offset_ptr = size + (offset - top_offset);
7685 gcc_assert (n == XVECLEN (pattern, 0));
7687 return pattern;
7690 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7691 pointer. Return true if PATTERN matches the kind of instruction
7692 generated by mips16e_build_save_restore. If INFO is nonnull,
7693 initialize it when returning true. */
7695 bool
7696 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7697 struct mips16e_save_restore_info *info)
7699 unsigned int i, nargs, mask, extra;
7700 HOST_WIDE_INT top_offset, save_offset, offset;
7701 rtx set, reg, mem, base;
7702 int n;
7704 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7705 return false;
7707 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7708 top_offset = adjust > 0 ? adjust : 0;
7710 /* Interpret all other members of the PARALLEL. */
7711 save_offset = top_offset - UNITS_PER_WORD;
7712 mask = 0;
7713 nargs = 0;
7714 i = 0;
7715 for (n = 1; n < XVECLEN (pattern, 0); n++)
7717 /* Check that we have a SET. */
7718 set = XVECEXP (pattern, 0, n);
7719 if (GET_CODE (set) != SET)
7720 return false;
7722 /* Check that the SET is a load (if restoring) or a store
7723 (if saving). */
7724 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7725 if (!MEM_P (mem))
7726 return false;
7728 /* Check that the address is the sum of the stack pointer and a
7729 possibly-zero constant offset. */
7730 mips_split_plus (XEXP (mem, 0), &base, &offset);
7731 if (base != stack_pointer_rtx)
7732 return false;
7734 /* Check that SET's other operand is a register. */
7735 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7736 if (!REG_P (reg))
7737 return false;
7739 /* Check for argument saves. */
7740 if (offset == top_offset + nargs * UNITS_PER_WORD
7741 && REGNO (reg) == GP_ARG_FIRST + nargs)
7742 nargs++;
7743 else if (offset == save_offset)
7745 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7746 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7747 return false;
7749 mask |= 1 << REGNO (reg);
7750 save_offset -= UNITS_PER_WORD;
7752 else
7753 return false;
7756 /* Check that the restrictions on register ranges are met. */
7757 extra = 0;
7758 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7759 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7760 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7761 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7762 if (extra != 0)
7763 return false;
7765 /* Make sure that the topmost argument register is not saved twice.
7766 The checks above ensure that the same is then true for the other
7767 argument registers. */
7768 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7769 return false;
7771 /* Pass back information, if requested. */
7772 if (info)
7774 info->nargs = nargs;
7775 info->mask = mask;
7776 info->size = (adjust > 0 ? adjust : -adjust);
7779 return true;
7782 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7783 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7784 the null terminator. */
7786 static char *
7787 mips16e_add_register_range (char *s, unsigned int min_reg,
7788 unsigned int max_reg)
7790 if (min_reg != max_reg)
7791 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7792 else
7793 s += sprintf (s, ",%s", reg_names[min_reg]);
7794 return s;
7797 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7798 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7800 const char *
7801 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7803 static char buffer[300];
7805 struct mips16e_save_restore_info info;
7806 unsigned int i, end;
7807 char *s;
7809 /* Parse the pattern. */
7810 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7811 gcc_unreachable ();
7813 /* Add the mnemonic. */
7814 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7815 s += strlen (s);
7817 /* Save the arguments. */
7818 if (info.nargs > 1)
7819 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7820 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7821 else if (info.nargs == 1)
7822 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7824 /* Emit the amount of stack space to allocate or deallocate. */
7825 s += sprintf (s, "%d", (int) info.size);
7827 /* Save or restore $16. */
7828 if (BITSET_P (info.mask, 16))
7829 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7831 /* Save or restore $17. */
7832 if (BITSET_P (info.mask, 17))
7833 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7835 /* Save or restore registers in the range $s2...$s8, which
7836 mips16e_s2_s8_regs lists in decreasing order. Note that this
7837 is a software register range; the hardware registers are not
7838 numbered consecutively. */
7839 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7840 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7841 if (i < end)
7842 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7843 mips16e_s2_s8_regs[i]);
7845 /* Save or restore registers in the range $a0...$a3. */
7846 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7847 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7848 if (i < end)
7849 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7850 mips16e_a0_a3_regs[end - 1]);
7852 /* Save or restore $31. */
7853 if (BITSET_P (info.mask, 31))
7854 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7856 return buffer;
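/* Illustrative output (added; the figures are assumptions): for a SAVE
   pattern with nargs == 2, a 32-byte adjustment and a mask covering
   $16, $17, $18-$20 and $31, the code above would produce something like

	save	$4-$5,32,$16,$17,$18-$20,$31

   with the $18-$20 range coming from the mips16e_s2_s8_regs walk and the
   names taken from reg_names[].  */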
7859 /* Return true if the current function has an insn that implicitly
7860 refers to $gp. */
7862 static bool
7863 mips_function_has_gp_insn (void)
7865 /* Don't bother rechecking if we found one last time. */
7866 if (!cfun->machine->has_gp_insn_p)
7868 rtx insn;
7870 push_topmost_sequence ();
7871 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7872 if (USEFUL_INSN_P (insn)
7873 && (get_attr_got (insn) != GOT_UNSET
7874 || mips_small_data_pattern_p (PATTERN (insn))))
7876 cfun->machine->has_gp_insn_p = true;
7877 break;
7879 pop_topmost_sequence ();
7881 return cfun->machine->has_gp_insn_p;
7884 /* Return the register that should be used as the global pointer
7885 within this function. Return 0 if the function doesn't need
7886 a global pointer. */
7888 static unsigned int
7889 mips_global_pointer (void)
7891 unsigned int regno;
7893 /* $gp is always available unless we're using a GOT. */
7894 if (!TARGET_USE_GOT)
7895 return GLOBAL_POINTER_REGNUM;
7897 /* We must always provide $gp when it is used implicitly. */
7898 if (!TARGET_EXPLICIT_RELOCS)
7899 return GLOBAL_POINTER_REGNUM;
7901 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7902 a valid gp. */
7903 if (crtl->profile)
7904 return GLOBAL_POINTER_REGNUM;
7906 /* If the function has a nonlocal goto, $gp must hold the correct
7907 global pointer for the target function. */
7908 if (crtl->has_nonlocal_goto)
7909 return GLOBAL_POINTER_REGNUM;
7911 /* If the gp is never referenced, there's no need to initialize it.
7912 Note that reload can sometimes introduce constant pool references
7913 into a function that otherwise didn't need them. For example,
7914 suppose we have an instruction like:
7916 (set (reg:DF R1) (float:DF (reg:SI R2)))
7918 If R2 turns out to be constant such as 1, the instruction may have a
7919 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7920 using this constant if R2 doesn't get allocated to a register.
7922 In cases like these, reload will have added the constant to the pool
7923 but no instruction will yet refer to it. */
7924 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7925 && !crtl->uses_const_pool
7926 && !mips_function_has_gp_insn ())
7927 return 0;
7929 /* We need a global pointer, but perhaps we can use a call-clobbered
7930 register instead of $gp. */
7931 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7932 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7933 if (!df_regs_ever_live_p (regno)
7934 && call_really_used_regs[regno]
7935 && !fixed_regs[regno]
7936 && regno != PIC_FUNCTION_ADDR_REGNUM)
7937 return regno;
7939 return GLOBAL_POINTER_REGNUM;
7942 /* Return true if the current function returns its value in a floating-point
7943 register in MIPS16 mode. */
7945 static bool
7946 mips16_cfun_returns_in_fpr_p (void)
7948 tree return_type = DECL_RESULT (current_function_decl);
7949 return (TARGET_MIPS16
7950 && TARGET_HARD_FLOAT_ABI
7951 && !aggregate_value_p (return_type, current_function_decl)
7952 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7955 /* Return true if the current function must save register REGNO. */
7957 static bool
7958 mips_save_reg_p (unsigned int regno)
7960 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7961 if we have not chosen a call-clobbered substitute. */
7962 if (regno == GLOBAL_POINTER_REGNUM)
7963 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7965 /* Check call-saved registers. */
7966 if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
7967 && !call_really_used_regs[regno])
7968 return true;
7970 /* Save both registers in an FPR pair if either one is used. This is
7971 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7972 register to be used without the even register. */
7973 if (FP_REG_P (regno)
7974 && MAX_FPRS_PER_FMT == 2
7975 && df_regs_ever_live_p (regno + 1)
7976 && !call_really_used_regs[regno + 1])
7977 return true;
7979 /* We need to save the old frame pointer before setting up a new one. */
7980 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7981 return true;
7983 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7984 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7985 return true;
7987 /* We need to save the incoming return address if it is ever clobbered
7988 within the function, if __builtin_eh_return is being used to set a
7989 different return address, or if a stub is being used to return a
7990 value in FPRs. */
7991 if (regno == GP_REG_FIRST + 31
7992 && (df_regs_ever_live_p (regno)
7993 || crtl->calls_eh_return
7994 || mips16_cfun_returns_in_fpr_p ()))
7995 return true;
7997 return false;
8000 /* Populate the current function's mips_frame_info structure.
8002    MIPS stack frames look like:
8004      +-------------------------------+
8005      |                               |
8006      |  incoming stack arguments     |
8007      |                               |
8008      +-------------------------------+
8009      |                               |
8010      |  caller-allocated save area   |
8011    A |  for register arguments       |
8012      |                               |
8013      +-------------------------------+ <-- incoming stack pointer
8014      |                               |
8015      |  callee-allocated save area   |
8016    B |  for arguments that are       |
8017      |  split between registers and  |
8018      |  the stack                    |
8019      |                               |
8020      +-------------------------------+ <-- arg_pointer_rtx
8021      |                               |
8022    C |  callee-allocated save area   |
8023      |  for register varargs         |
8024      |                               |
8025      +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
8026      |                               |       + UNITS_PER_HWFPVALUE
8027      |  FPR save area                |
8028      |                               |
8029      +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8030      |                               |       + UNITS_PER_WORD
8031      |  GPR save area                |
8032      |                               |
8033      +-------------------------------+
8034      |                               | \
8035      |  local variables              |  | var_size
8036      |                               | /
8037      +-------------------------------+
8038      |                               | \
8039      |  $gp save area                |  | cprestore_size
8040      |                               | /
8041    P +-------------------------------+ <-- hard_frame_pointer_rtx for
8042      |                               |       MIPS16 code
8043      |  outgoing stack arguments     |
8044      |                               |
8045      +-------------------------------+
8046      |                               |
8047      |  caller-allocated save area   |
8048      |  for register arguments       |
8049      |                               |
8050      +-------------------------------+ <-- stack_pointer_rtx
8051                                            frame_pointer_rtx
8052                                            hard_frame_pointer_rtx for
8053                                              non-MIPS16 code.
8055    At least two of A, B and C will be empty.
8057    Dynamic stack allocations such as alloca insert data at point P.
8058    They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8059    hard_frame_pointer_rtx unchanged. */
8061 static void
8062 mips_compute_frame_info (void)
8064 struct mips_frame_info *frame;
8065 HOST_WIDE_INT offset, size;
8066 unsigned int regno, i;
8068 frame = &cfun->machine->frame;
8069 memset (frame, 0, sizeof (*frame));
8070 size = get_frame_size ();
8072 cfun->machine->global_pointer = mips_global_pointer ();
8074 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8075 area and the $gp save slot. This area isn't needed in leaf functions,
8076 but if the target-independent frame size is nonzero, we're committed
8077 to allocating it anyway. */
8078 if (size == 0 && current_function_is_leaf)
8080 /* The MIPS 3.0 linker does not like functions that dynamically
8081 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8082 looks like we are trying to create a second frame pointer to the
8083 function, so allocate some stack space to make it happy. */
8084 if (cfun->calls_alloca)
8085 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8086 else
8087 frame->args_size = 0;
8088 frame->cprestore_size = 0;
8090 else
8092 frame->args_size = crtl->outgoing_args_size;
8093 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8095 offset = frame->args_size + frame->cprestore_size;
8097 /* Move above the local variables. */
8098 frame->var_size = MIPS_STACK_ALIGN (size);
8099 offset += frame->var_size;
8101 /* Find out which GPRs we need to save. */
8102 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8103 if (mips_save_reg_p (regno))
8105 frame->num_gp++;
8106 frame->mask |= 1 << (regno - GP_REG_FIRST);
8109 /* If this function calls eh_return, we must also save and restore the
8110 EH data registers. */
8111 if (crtl->calls_eh_return)
8112 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8114 frame->num_gp++;
8115 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8118 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8119 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8120 save all later registers too. */
8121 if (GENERATE_MIPS16E_SAVE_RESTORE)
8123 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8124 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8125 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8126 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8129 /* Move above the GPR save area. */
8130 if (frame->num_gp > 0)
8132 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8133 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8136 /* Find out which FPRs we need to save. This loop must iterate over
8137 the same space as its companion in mips_for_each_saved_reg. */
8138 if (TARGET_HARD_FLOAT)
8139 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8140 if (mips_save_reg_p (regno))
8142 frame->num_fp += MAX_FPRS_PER_FMT;
8143 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8146 /* Move above the FPR save area. */
8147 if (frame->num_fp > 0)
8149 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8150 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8153 /* Move above the callee-allocated varargs save area. */
8154 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8155 frame->arg_pointer_offset = offset;
8157 /* Move above the callee-allocated area for pretend stack arguments. */
8158 offset += crtl->args.pretend_args_size;
8159 frame->total_size = offset;
8161 /* Work out the offsets of the save areas from the top of the frame. */
8162 if (frame->gp_sp_offset > 0)
8163 frame->gp_save_offset = frame->gp_sp_offset - offset;
8164 if (frame->fp_sp_offset > 0)
8165 frame->fp_save_offset = frame->fp_sp_offset - offset;
8167 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8168 arguments. This tends to increase the chances of using unextended
8169 instructions for local variables and incoming arguments. */
8170 if (TARGET_MIPS16)
8171 frame->hard_frame_pointer_offset = frame->args_size;
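/* Worked example (added; the register choice is an assumption): for an
   o32 leaf function with no locals and no alloca that must save $16 and
   $17, the code above computes args_size = cprestore_size = var_size = 0
   and num_gp = 2, so the GPR save area is MIPS_STACK_ALIGN (8) = 8 bytes,
   gp_sp_offset = 4, total_size = 8 and gp_save_offset = -4.  */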
8174 /* Return the style of GP load sequence that is being used for the
8175 current function. */
8177 enum mips_loadgp_style
8178 mips_current_loadgp_style (void)
8180 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8181 return LOADGP_NONE;
8183 if (TARGET_RTP_PIC)
8184 return LOADGP_RTP;
8186 if (TARGET_ABSOLUTE_ABICALLS)
8187 return LOADGP_ABSOLUTE;
8189 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8192 /* Implement FRAME_POINTER_REQUIRED. */
8194 bool
8195 mips_frame_pointer_required (void)
8197 /* If the function contains dynamic stack allocations, we need to
8198 use the frame pointer to access the static parts of the frame. */
8199 if (cfun->calls_alloca)
8200 return true;
8202 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8203 reload may be unable to compute the address of a local variable,
8204 since there is no way to add a large constant to the stack pointer
8205 without using a second temporary register. */
8206 if (TARGET_MIPS16)
8208 mips_compute_frame_info ();
8209 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8210 return true;
8213 return false;
8216 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8217 or argument pointer. TO is either the stack pointer or hard frame
8218 pointer. */
8220 HOST_WIDE_INT
8221 mips_initial_elimination_offset (int from, int to)
8223 HOST_WIDE_INT offset;
8225 mips_compute_frame_info ();
8227 /* Set OFFSET to the offset from the soft frame pointer, which is also
8228 the offset from the end-of-prologue stack pointer. */
8229 switch (from)
8231 case FRAME_POINTER_REGNUM:
8232 offset = 0;
8233 break;
8235 case ARG_POINTER_REGNUM:
8236 offset = cfun->machine->frame.arg_pointer_offset;
8237 break;
8239 default:
8240 gcc_unreachable ();
8243 if (to == HARD_FRAME_POINTER_REGNUM)
8244 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8246 return offset;
8249 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8251 static void
8252 mips_extra_live_on_entry (bitmap regs)
8254 if (TARGET_USE_GOT)
8256 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8257 the global pointer. */
8258 if (!TARGET_ABSOLUTE_ABICALLS)
8259 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8261 /* See the comment above load_call<mode> for details. */
8262 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8266 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8267 previous frame. */
8269 rtx
8270 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8272 if (count != 0)
8273 return const0_rtx;
8275 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8278 /* Emit code to change the current function's return address to
8279 ADDRESS. SCRATCH is available as a scratch register, if needed.
8280 ADDRESS and SCRATCH are both word-mode GPRs. */
8282 void
8283 mips_set_return_address (rtx address, rtx scratch)
8285 rtx slot_address;
8287 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8288 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8289 cfun->machine->frame.gp_sp_offset);
8290 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8293 /* Restore $gp from its save slot. Valid only when using o32 or
8294 o64 abicalls. */
8296 void
8297 mips_restore_gp (void)
8299 rtx base, address;
8301 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8303 base = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
8304 address = mips_add_offset (pic_offset_table_rtx, base,
8305 crtl->outgoing_args_size);
8306 mips_emit_move (pic_offset_table_rtx, gen_frame_mem (Pmode, address));
8307 if (!TARGET_EXPLICIT_RELOCS)
8308 emit_insn (gen_blockage ());
8311 /* A function to save or store a register. The first argument is the
8312 register and the second is the stack slot. */
8313 typedef void (*mips_save_restore_fn) (rtx, rtx);
8315 /* Use FN to save or restore register REGNO. MODE is the register's
8316 mode and OFFSET is the offset of its save slot from the current
8317 stack pointer. */
8319 static void
8320 mips_save_restore_reg (enum machine_mode mode, int regno,
8321 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8323 rtx mem;
8325 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8326 fn (gen_rtx_REG (mode, regno), mem);
8329 /* Call FN for each register that is saved by the current function.
8330 SP_OFFSET is the offset of the current stack pointer from the start
8331 of the frame. */
8333 static void
8334 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8336 enum machine_mode fpr_mode;
8337 HOST_WIDE_INT offset;
8338 int regno;
8340 /* Save registers starting from high to low. Debuggers prefer that at
8341 least the return register be stored at func+4, and this also lets us
8342 avoid a nop in the epilogue if at least one register is reloaded in
8343 addition to the return address. */
8344 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8345 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8346 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8348 mips_save_restore_reg (word_mode, regno, offset, fn);
8349 offset -= UNITS_PER_WORD;
8352 /* This loop must iterate over the same space as its companion in
8353 mips_compute_frame_info. */
8354 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8355 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8356 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8357 regno >= FP_REG_FIRST;
8358 regno -= MAX_FPRS_PER_FMT)
8359 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8361 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8362 offset -= GET_MODE_SIZE (fpr_mode);
8366 /* If we're generating n32 or n64 abicalls, and the current function
8367 does not use $28 as its global pointer, emit a cplocal directive.
8368 Use pic_offset_table_rtx as the argument to the directive. */
8370 static void
8371 mips_output_cplocal (void)
8373 if (!TARGET_EXPLICIT_RELOCS
8374 && cfun->machine->global_pointer > 0
8375 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8376 output_asm_insn (".cplocal %+", 0);
8379 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8381 static void
8382 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8384 const char *fnname;
8386 #ifdef SDB_DEBUGGING_INFO
8387 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8388 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8389 #endif
8391 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8392 floating-point arguments. */
8393 if (TARGET_MIPS16
8394 && TARGET_HARD_FLOAT_ABI
8395 && crtl->args.info.fp_code != 0)
8396 mips16_build_function_stub ();
8398 /* Select the MIPS16 mode for this function. */
8399 if (TARGET_MIPS16)
8400 fprintf (file, "\t.set\tmips16\n");
8401 else
8402 fprintf (file, "\t.set\tnomips16\n");
8404 if (!FUNCTION_NAME_ALREADY_DECLARED)
8406 /* Get the function name the same way that toplev.c does before calling
8407 assemble_start_function. This is needed so that the name used here
8408 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8409 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8411 if (!flag_inhibit_size_directive)
8413 fputs ("\t.ent\t", file);
8414 assemble_name (file, fnname);
8415 fputs ("\n", file);
8418 assemble_name (file, fnname);
8419 fputs (":\n", file);
8422 /* Stop mips_file_end from treating this function as external. */
8423 if (TARGET_IRIX && mips_abi == ABI_32)
8424 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8426 /* Output MIPS-specific frame information. */
8427 if (!flag_inhibit_size_directive)
8429 const struct mips_frame_info *frame;
8431 frame = &cfun->machine->frame;
8433 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8434 fprintf (file,
8435 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8436 "# vars= " HOST_WIDE_INT_PRINT_DEC
8437 ", regs= %d/%d"
8438 ", args= " HOST_WIDE_INT_PRINT_DEC
8439 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8440 reg_names[frame_pointer_needed
8441 ? HARD_FRAME_POINTER_REGNUM
8442 : STACK_POINTER_REGNUM],
8443 (frame_pointer_needed
8444 ? frame->total_size - frame->hard_frame_pointer_offset
8445 : frame->total_size),
8446 reg_names[GP_REG_FIRST + 31],
8447 frame->var_size,
8448 frame->num_gp, frame->num_fp,
8449 frame->args_size,
8450 frame->cprestore_size);
8452 /* .mask MASK, OFFSET. */
8453 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8454 frame->mask, frame->gp_save_offset);
8456 /* .fmask MASK, OFFSET. */
8457 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8458 frame->fmask, frame->fp_save_offset);
8461 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8462 Also emit the ".set noreorder; .set nomacro" sequence for functions
8463 that need it. */
8464 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8466 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
8467 if (!cfun->machine->all_noreorder_p)
8468 output_asm_insn ("%(.cpload\t%^%)", 0);
8469 else
8470 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8472 else if (cfun->machine->all_noreorder_p)
8473 output_asm_insn ("%(%<", 0);
8475 /* Tell the assembler which register we're using as the global
8476 pointer. This is needed for thunks, since they can use either
8477 explicit relocs or assembler macros. */
8478 mips_output_cplocal ();
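/* Illustrative prologue header (added; the frame figures are assumptions):
   for a non-MIPS16 o32 function "foo" with a 32-byte frame that saves only
   $31, assuming a 16-byte outgoing argument area and an 8-byte cprestore
   area, the code above would emit roughly:

	.set	nomips16
	.ent	foo
   foo:
	.frame	$sp,32,$31		# vars= 0, regs= 1/0, args= 16, gp= 8
	.mask	0x80000000,-4
	.fmask	0x00000000,0
	.cpload	$25			# o32 SVR4 PIC only  */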
8481 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
8483 static void
8484 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8485 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8487 /* Reinstate the normal $gp. */
8488 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8489 mips_output_cplocal ();
8491 if (cfun->machine->all_noreorder_p)
8493 /* Avoid using %>%) since it adds excess whitespace. */
8494 output_asm_insn (".set\tmacro", 0);
8495 output_asm_insn (".set\treorder", 0);
8496 set_noreorder = set_nomacro = 0;
8499 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8501 const char *fnname;
8503 /* Get the function name the same way that toplev.c does before calling
8504 assemble_start_function. This is needed so that the name used here
8505 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8506 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8507 fputs ("\t.end\t", file);
8508 assemble_name (file, fnname);
8509 fputs ("\n", file);
8513 /* Save register REG to MEM. Make the instruction frame-related. */
8515 static void
8516 mips_save_reg (rtx reg, rtx mem)
8518 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8520 rtx x1, x2;
8522 if (mips_split_64bit_move_p (mem, reg))
8523 mips_split_doubleword_move (mem, reg);
8524 else
8525 mips_emit_move (mem, reg);
8527 x1 = mips_frame_set (mips_subword (mem, false),
8528 mips_subword (reg, false));
8529 x2 = mips_frame_set (mips_subword (mem, true),
8530 mips_subword (reg, true));
8531 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8533 else
8535 if (TARGET_MIPS16
8536 && REGNO (reg) != GP_REG_FIRST + 31
8537 && !M16_REG_P (REGNO (reg)))
8539 /* Save a non-MIPS16 register by moving it through a temporary.
8540 We don't need to do this for $31 since there's a special
8541 instruction for it. */
8542 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8543 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8545 else
8546 mips_emit_move (mem, reg);
8548 mips_set_frame_expr (mips_frame_set (mem, reg));
8552 /* The __gnu_local_gp symbol. */
8554 static GTY(()) rtx mips_gnu_local_gp;
8556 /* If we're generating n32 or n64 abicalls, emit instructions
8557 to set up the global pointer. */
8559 static void
8560 mips_emit_loadgp (void)
8562 rtx addr, offset, incoming_address, base, index, pic_reg;
8564 pic_reg = pic_offset_table_rtx;
8565 switch (mips_current_loadgp_style ())
8567 case LOADGP_ABSOLUTE:
8568 if (mips_gnu_local_gp == NULL)
8570 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8571 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8573 emit_insn (Pmode == SImode
8574 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
8575 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
8576 break;
8578 case LOADGP_NEWABI:
8579 addr = XEXP (DECL_RTL (current_function_decl), 0);
8580 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8581 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8582 emit_insn (Pmode == SImode
8583 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
8584 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
8585 break;
8587 case LOADGP_RTP:
8588 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8589 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8590 emit_insn (Pmode == SImode
8591 ? gen_loadgp_rtp_si (pic_reg, base, index)
8592 : gen_loadgp_rtp_di (pic_reg, base, index));
8593 break;
8595 default:
8596 return;
8598 /* Emit a blockage if there are implicit uses of the GP register.
8599 This includes profiled functions, because FUNCTION_PROFILER uses
8600 a jal macro. */
8601 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
8602 emit_insn (gen_loadgp_blockage ());
8605 /* Expand the "prologue" pattern. */
8607 void
8608 mips_expand_prologue (void)
8610 const struct mips_frame_info *frame;
8611 HOST_WIDE_INT size;
8612 unsigned int nargs;
8613 rtx insn;
8615 if (cfun->machine->global_pointer > 0)
8616 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8618 frame = &cfun->machine->frame;
8619 size = frame->total_size;
8621 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8622 bytes beforehand; this is enough to cover the register save area
8623 without going out of range. */
8624 if ((frame->mask | frame->fmask) != 0)
8626 HOST_WIDE_INT step1;
8628 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8629 if (GENERATE_MIPS16E_SAVE_RESTORE)
8631 HOST_WIDE_INT offset;
8632 unsigned int mask, regno;
8634 /* Try to merge argument stores into the save instruction. */
8635 nargs = mips16e_collect_argument_saves ();
8637 /* Build the save instruction. */
8638 mask = frame->mask;
8639 insn = mips16e_build_save_restore (false, &mask, &offset,
8640 nargs, step1);
8641 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8642 size -= step1;
8644 /* Check if we need to save other registers. */
8645 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8646 if (BITSET_P (mask, regno - GP_REG_FIRST))
8648 offset -= UNITS_PER_WORD;
8649 mips_save_restore_reg (word_mode, regno,
8650 offset, mips_save_reg);
8653 else
8655 insn = gen_add3_insn (stack_pointer_rtx,
8656 stack_pointer_rtx,
8657 GEN_INT (-step1));
8658 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8659 size -= step1;
8660 mips_for_each_saved_reg (size, mips_save_reg);
8664 /* Allocate the rest of the frame. */
8665 if (size > 0)
8667 if (SMALL_OPERAND (-size))
8668 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8669 stack_pointer_rtx,
8670 GEN_INT (-size)))) = 1;
8671 else
8673 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8674 if (TARGET_MIPS16)
8676 /* There are no instructions to add or subtract registers
8677 from the stack pointer, so use the frame pointer as a
8678 temporary. We should always be using a frame pointer
8679 in this case anyway. */
8680 gcc_assert (frame_pointer_needed);
8681 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8682 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8683 hard_frame_pointer_rtx,
8684 MIPS_PROLOGUE_TEMP (Pmode)));
8685 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8687 else
8688 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8689 stack_pointer_rtx,
8690 MIPS_PROLOGUE_TEMP (Pmode)));
8692 /* Describe the combined effect of the previous instructions. */
8693 mips_set_frame_expr
8694 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8695 plus_constant (stack_pointer_rtx, -size)));
8699 /* Set up the frame pointer, if we're using one. */
8700 if (frame_pointer_needed)
8702 HOST_WIDE_INT offset;
8704 offset = frame->hard_frame_pointer_offset;
8705 if (offset == 0)
8707 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8708 RTX_FRAME_RELATED_P (insn) = 1;
8710 else if (SMALL_OPERAND (offset))
8712 insn = gen_add3_insn (hard_frame_pointer_rtx,
8713 stack_pointer_rtx, GEN_INT (offset));
8714 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8716 else
8718 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8719 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8720 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8721 hard_frame_pointer_rtx,
8722 MIPS_PROLOGUE_TEMP (Pmode)));
8723 mips_set_frame_expr
8724 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8725 plus_constant (stack_pointer_rtx, offset)));
8729 mips_emit_loadgp ();
8731 /* Initialize the $gp save slot. */
8732 if (frame->cprestore_size > 0)
8733 emit_insn (gen_cprestore (GEN_INT (crtl->outgoing_args_size)));
8735 /* If we are profiling, make sure no instructions are scheduled before
8736 the call to mcount. */
8737 if (crtl->profile)
8738 emit_insn (gen_blockage ());
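/* Illustrative expansion (added; the frame figures are assumptions): for
   a 32-byte o32 frame that saves only $31, the prologue expanded above
   boils down to

	addiu	$sp,$sp,-32
	sw	$31,28($sp)

   i.e. a single SMALL_OPERAND stack adjustment followed by the saves done
   by mips_for_each_saved_reg.  */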
8741 /* Emit instructions to restore register REG from slot MEM. */
8743 static void
8744 mips_restore_reg (rtx reg, rtx mem)
8746 /* There's no MIPS16 instruction to load $31 directly. Load into
8747 $7 instead and adjust the return insn appropriately. */
8748 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8749 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
8751 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8753 /* Can't restore directly; move through a temporary. */
8754 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8755 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8757 else
8758 mips_emit_move (reg, mem);
8761 /* Emit any instructions needed before a return. */
8763 void
8764 mips_expand_before_return (void)
8766 /* When using a call-clobbered gp, we start out with unified call
8767 insns that include instructions to restore the gp. We then split
8768 these unified calls after reload. These split calls explicitly
8769 clobber gp, so there is no need to define
8770 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
8772 For consistency, we should also insert an explicit clobber of $28
8773 before return insns, so that the post-reload optimizers know that
8774 the register is not live on exit. */
8775 if (TARGET_CALL_CLOBBERED_GP)
8776 emit_clobber (pic_offset_table_rtx);
8779 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
8780 says which. */
8782 void
8783 mips_expand_epilogue (bool sibcall_p)
8785 const struct mips_frame_info *frame;
8786 HOST_WIDE_INT step1, step2;
8787 rtx base, target;
8789 if (!sibcall_p && mips_can_use_return_insn ())
8791 emit_jump_insn (gen_return ());
8792 return;
8795 /* In MIPS16 mode, if the return value should go into a floating-point
8796 register, we need to call a helper routine to copy it over. */
8797 if (mips16_cfun_returns_in_fpr_p ())
8798 mips16_copy_fpr_return_value ();
8800 /* Split the frame into two. STEP1 is the amount of stack we should
8801 deallocate before restoring the registers. STEP2 is the amount we
8802 should deallocate afterwards.
8804 Start off by assuming that no registers need to be restored. */
8805 frame = &cfun->machine->frame;
8806 step1 = frame->total_size;
8807 step2 = 0;
8809 /* Work out which register holds the frame address. */
8810 if (!frame_pointer_needed)
8811 base = stack_pointer_rtx;
8812 else
8814 base = hard_frame_pointer_rtx;
8815 step1 -= frame->hard_frame_pointer_offset;
8818 /* If we need to restore registers, deallocate as much stack as
8819 possible in the second step without going out of range. */
8820 if ((frame->mask | frame->fmask) != 0)
8822 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8823 step1 -= step2;
8826 /* Set TARGET to BASE + STEP1. */
8827 target = base;
8828 if (step1 > 0)
8830 rtx adjust;
8832 /* Get an rtx for STEP1 that we can add to BASE. */
8833 adjust = GEN_INT (step1);
8834 if (!SMALL_OPERAND (step1))
8836 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8837 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8840 /* Normal mode code can copy the result straight into $sp. */
8841 if (!TARGET_MIPS16)
8842 target = stack_pointer_rtx;
8844 emit_insn (gen_add3_insn (target, base, adjust));
8847 /* Copy TARGET into the stack pointer. */
8848 if (target != stack_pointer_rtx)
8849 mips_emit_move (stack_pointer_rtx, target);
8851 /* If we're using addressing macros, $gp is implicitly used by all
8852 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8853 from the stack. */
8854 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8855 emit_insn (gen_blockage ());
8857 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
8859 unsigned int regno, mask;
8860 HOST_WIDE_INT offset;
8861 rtx restore;
8863 /* Generate the restore instruction. */
8864 mask = frame->mask;
8865 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8867 /* Restore any other registers manually. */
8868 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8869 if (BITSET_P (mask, regno - GP_REG_FIRST))
8871 offset -= UNITS_PER_WORD;
8872 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
8875 /* Restore the remaining registers and deallocate the final bit
8876 of the frame. */
8877 emit_insn (restore);
8879 else
8881 /* Restore the registers. */
8882 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
8884 /* Deallocate the final bit of the frame. */
8885 if (step2 > 0)
8886 emit_insn (gen_add3_insn (stack_pointer_rtx,
8887 stack_pointer_rtx,
8888 GEN_INT (step2)));
8891 /* Add in the __builtin_eh_return stack adjustment. We need to
8892 use a temporary in MIPS16 code. */
8893 if (crtl->calls_eh_return)
8895 if (TARGET_MIPS16)
8897 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8898 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8899 MIPS_EPILOGUE_TEMP (Pmode),
8900 EH_RETURN_STACKADJ_RTX));
8901 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8903 else
8904 emit_insn (gen_add3_insn (stack_pointer_rtx,
8905 stack_pointer_rtx,
8906 EH_RETURN_STACKADJ_RTX));
8909 if (!sibcall_p)
8911 unsigned int regno;
8913 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8914 path will restore the return address into $7 rather than $31. */
8915 if (TARGET_MIPS16
8916 && !GENERATE_MIPS16E_SAVE_RESTORE
8917 && BITSET_P (frame->mask, 31))
8918 regno = GP_REG_FIRST + 7;
8919 else
8920 regno = GP_REG_FIRST + 31;
8921 mips_expand_before_return ();
8922 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
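/* Illustrative expansion (added; the frame figures are assumptions): for
   a 32-byte o32 frame that saves only $31, the non-MIPS16 epilogue
   expanded above amounts to

	lw	$31,28($sp)
	addiu	$sp,$sp,32
	jr	$31

   with the whole 32 bytes deallocated in STEP2 because the frame fits
   within MIPS_MAX_FIRST_STACK_STEP.  */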
8926 /* Return nonzero if this function is known to have a null epilogue.
8927 This allows the optimizer to omit jumps to jumps if no stack
8928 was created. */
8930 bool
8931 mips_can_use_return_insn (void)
8933 if (!reload_completed)
8934 return false;
8936 if (crtl->profile)
8937 return false;
8939 /* In MIPS16 mode, a function that returns a floating-point value
8940 needs to arrange to copy the return value into the floating-point
8941 registers. */
8942 if (mips16_cfun_returns_in_fpr_p ())
8943 return false;
8945 return cfun->machine->frame.total_size == 0;
8948 /* Return true if register REGNO can store a value of mode MODE.
8949 The result of this function is cached in mips_hard_regno_mode_ok. */
8951 static bool
8952 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
8954 unsigned int size;
8955 enum mode_class class;
8957 if (mode == CCV2mode)
8958 return (ISA_HAS_8CC
8959 && ST_REG_P (regno)
8960 && (regno - ST_REG_FIRST) % 2 == 0);
8962 if (mode == CCV4mode)
8963 return (ISA_HAS_8CC
8964 && ST_REG_P (regno)
8965 && (regno - ST_REG_FIRST) % 4 == 0);
8967 if (mode == CCmode)
8969 if (!ISA_HAS_8CC)
8970 return regno == FPSW_REGNUM;
8972 return (ST_REG_P (regno)
8973 || GP_REG_P (regno)
8974 || FP_REG_P (regno));
8977 size = GET_MODE_SIZE (mode);
8978 class = GET_MODE_CLASS (mode);
8980 if (GP_REG_P (regno))
8981 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
8983 if (FP_REG_P (regno)
8984 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
8985 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
8987 /* Allow TFmode for CCmode reloads. */
8988 if (mode == TFmode && ISA_HAS_8CC)
8989 return true;
8991 /* Allow 64-bit vector modes for Loongson-2E/2F. */
8992 if (TARGET_LOONGSON_VECTORS
8993 && (mode == V2SImode
8994 || mode == V4HImode
8995 || mode == V8QImode
8996 || mode == DImode))
8997 return true;
8999 if (class == MODE_FLOAT
9000 || class == MODE_COMPLEX_FLOAT
9001 || class == MODE_VECTOR_FLOAT)
9002 return size <= UNITS_PER_FPVALUE;
9004 /* Allow integer modes that fit into a single register. We need
9005 to put integers into FPRs when using instructions like CVT
9006 and TRUNC. There's no point allowing sizes smaller than a word,
9007 because the FPU has no appropriate load/store instructions. */
9008 if (class == MODE_INT)
9009 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
9012 if (ACC_REG_P (regno)
9013 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
9015 if (MD_REG_P (regno))
9017 /* After a multiplication or division, clobbering HI makes
9018 the value of LO unpredictable, and vice versa. This means
9019 that, for all interesting cases, HI and LO are effectively
9020 a single register.
9022 We model this by requiring that any value that uses HI
9023 also uses LO. */
9024 if (size <= UNITS_PER_WORD * 2)
9025 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
9027 else
9029 /* DSP accumulators do not have the same restrictions as
9030 HI and LO, so we can treat them as normal doubleword
9031 registers. */
9032 if (size <= UNITS_PER_WORD)
9033 return true;
9035 if (size <= UNITS_PER_WORD * 2
9036 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
9037 return true;
9041 if (ALL_COP_REG_P (regno))
9042 return class == MODE_INT && size <= UNITS_PER_WORD;
9044 if (regno == GOT_VERSION_REGNUM)
9045 return mode == SImode;
9047 return false;
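/* Example consequence (added for clarity): on a 32-bit -mdouble-float
   target where MAX_FPRS_PER_FMT is 2, the checks above allow DFmode
   values only in even-numbered FPRs such as $f0 or $f2; SFmode values
   additionally fit in odd-numbered FPRs only when MIN_FPRS_PER_FMT
   is 1.  */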
9050 /* Implement HARD_REGNO_NREGS. */
9052 unsigned int
9053 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9055 if (ST_REG_P (regno))
9056 /* The size of FP status registers is always 4, because they only hold
9057 CCmode values, and CCmode is always considered to be 4 bytes wide. */
9058 return (GET_MODE_SIZE (mode) + 3) / 4;
9060 if (FP_REG_P (regno))
9061 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9063 /* All other registers are word-sized. */
9064 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9067 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
9068 in mips_hard_regno_nregs. */
9070 int
9071 mips_class_max_nregs (enum reg_class class, enum machine_mode mode)
9073 int size;
9074 HARD_REG_SET left;
9076 size = 0x8000;
9077 COPY_HARD_REG_SET (left, reg_class_contents[(int) class]);
9078 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
9080 size = MIN (size, 4);
9081 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
9083 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
9085 size = MIN (size, UNITS_PER_FPREG);
9086 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
9088 if (!hard_reg_set_empty_p (left))
9089 size = MIN (size, UNITS_PER_WORD);
9090 return (GET_MODE_SIZE (mode) + size - 1) / size;
9093 /* Implement CANNOT_CHANGE_MODE_CLASS. */
9095 bool
9096 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
9097 enum machine_mode to ATTRIBUTE_UNUSED,
9098 enum reg_class class)
9100 /* There are several problems with changing the modes of values
9101 in floating-point registers:
9103 - When a multi-word value is stored in paired floating-point
9104 registers, the first register always holds the low word.
9105 We therefore can't allow FPRs to change between single-word
9106 and multi-word modes on big-endian targets.
9108 - GCC assumes that each word of a multiword register can be accessed
9109 individually using SUBREGs. This is not true for floating-point
9110 registers if they are bigger than a word.
9112 - Loading a 32-bit value into a 64-bit floating-point register
9113 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9114 We can't allow FPRs to change from SImode to a wider mode on
9115 64-bit targets.
9117 - If the FPU has already interpreted a value in one format, we must
9118 not ask it to treat the value as having a different format.
9120 We therefore disallow all mode changes involving FPRs. */
9121 return reg_classes_intersect_p (FP_REGS, class);
9124 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
9126 static bool
9127 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
9129 switch (mode)
9131 case SFmode:
9132 return TARGET_HARD_FLOAT;
9134 case DFmode:
9135 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
9137 case V2SFmode:
9138 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
9140 default:
9141 return false;
9145 /* Implement MODES_TIEABLE_P. */
9147 bool
9148 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9150 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
9151 prefer to put one of them in FPRs. */
9152 return (mode1 == mode2
9153 || (!mips_mode_ok_for_mov_fmt_p (mode1)
9154 && !mips_mode_ok_for_mov_fmt_p (mode2)));
9157 /* Implement PREFERRED_RELOAD_CLASS. */
9159 enum reg_class
9160 mips_preferred_reload_class (rtx x, enum reg_class class)
9162 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9163 return LEA_REGS;
9165 if (reg_class_subset_p (FP_REGS, class)
9166 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
9167 return FP_REGS;
9169 if (reg_class_subset_p (GR_REGS, class))
9170 class = GR_REGS;
9172 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9173 class = M16_REGS;
9175 return class;
9178 /* Implement REGISTER_MOVE_COST. */
9180 int
9181 mips_register_move_cost (enum machine_mode mode,
9182 enum reg_class to, enum reg_class from)
9184 if (TARGET_MIPS16)
9186 /* ??? We cannot move general registers into HI and LO because
9187 MIPS16 has no MTHI and MTLO instructions. Make the cost of
9188 moves in the opposite direction just as high, which stops the
9189 register allocators from using HI and LO for pseudos. */
9190 if (reg_class_subset_p (from, GENERAL_REGS)
9191 && reg_class_subset_p (to, GENERAL_REGS))
9193 if (reg_class_subset_p (from, M16_REGS)
9194 || reg_class_subset_p (to, M16_REGS))
9195 return 2;
9196 /* Two MOVEs. */
9197 return 4;
9200 else if (reg_class_subset_p (from, GENERAL_REGS))
9202 if (reg_class_subset_p (to, GENERAL_REGS))
9203 return 2;
9204 if (reg_class_subset_p (to, FP_REGS))
9205 return 4;
9206 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
9207 return 5;
9208 if (reg_class_subset_p (to, ACC_REGS))
9209 return 6;
9211 else if (reg_class_subset_p (to, GENERAL_REGS))
9213 if (reg_class_subset_p (from, FP_REGS))
9214 return 4;
9215 if (reg_class_subset_p (from, ST_REGS))
9216 /* LUI followed by MOVF. */
9217 return 4;
9218 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
9219 return 5;
9220 if (reg_class_subset_p (from, ACC_REGS))
9221 return 6;
9223 else if (reg_class_subset_p (from, FP_REGS))
9225 if (reg_class_subset_p (to, FP_REGS)
9226 && mips_mode_ok_for_mov_fmt_p (mode))
9227 return 4;
9228 if (reg_class_subset_p (to, ST_REGS))
9229 /* An expensive sequence. */
9230 return 8;
9233 return 12;
9236 /* Return the register class required for a secondary register when
9237 copying between one of the registers in CLASS and value X, which
9238 has mode MODE. X is the source of the move if IN_P, otherwise it
9239 is the destination. Return NO_REGS if no secondary register is
9240 needed. */
9242 enum reg_class
9243 mips_secondary_reload_class (enum reg_class class,
9244 enum machine_mode mode, rtx x, bool in_p)
9246 int regno;
9248 /* If X is a constant that cannot be loaded into $25, it must be loaded
9249 into some other GPR. No other register class allows a direct move. */
9250 if (mips_dangerous_for_la25_p (x))
9251 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
9253 regno = true_regnum (x);
9254 if (TARGET_MIPS16)
9256 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9257 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
9258 return M16_REGS;
9260 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9261 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9262 return M16_REGS;
9264 return NO_REGS;
9267 /* Copying from accumulator registers to anywhere other than a general
9268 register requires a temporary general register. */
9269 if (reg_class_subset_p (class, ACC_REGS))
9270 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9271 if (ACC_REG_P (regno))
9272 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9274 /* We can only copy a value to a condition code register from a
9275 floating-point register, and even then we require a scratch
9276 floating-point register. We can only copy a value out of a
9277 condition-code register into a general register. */
9278 if (reg_class_subset_p (class, ST_REGS))
9280 if (in_p)
9281 return FP_REGS;
9282 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9284 if (ST_REG_P (regno))
9286 if (!in_p)
9287 return FP_REGS;
9288 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9291 if (reg_class_subset_p (class, FP_REGS))
9293 if (MEM_P (x)
9294 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9295 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9296 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9297 return NO_REGS;
9299 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9300 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9301 return NO_REGS;
9303 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9304 /* We can force the constant to memory and use lwc1
9305 and ldc1. As above, we will use pairs of lwc1s if
9306 ldc1 is not supported. */
9307 return NO_REGS;
9309 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9310 /* In this case we can use mov.fmt. */
9311 return NO_REGS;
9313 /* Otherwise, we need to reload through an integer register. */
9314 return GR_REGS;
9316 if (FP_REG_P (regno))
9317 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9319 return NO_REGS;
9322 /* Implement TARGET_MODE_REP_EXTENDED. */
9324 static int
9325 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9327 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9328 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9329 return SIGN_EXTEND;
9331 return UNKNOWN;
9334 /* Implement TARGET_VALID_POINTER_MODE. */
9336 static bool
9337 mips_valid_pointer_mode (enum machine_mode mode)
9339 return mode == SImode || (TARGET_64BIT && mode == DImode);
9342 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
9344 static bool
9345 mips_vector_mode_supported_p (enum machine_mode mode)
9347 switch (mode)
9349 case V2SFmode:
9350 return TARGET_PAIRED_SINGLE_FLOAT;
9352 case V2HImode:
9353 case V4QImode:
9354 case V2HQmode:
9355 case V2UHQmode:
9356 case V2HAmode:
9357 case V2UHAmode:
9358 case V4QQmode:
9359 case V4UQQmode:
9360 return TARGET_DSP;
9362 case V2SImode:
9363 case V4HImode:
9364 case V8QImode:
9365 return TARGET_LOONGSON_VECTORS;
9367 default:
9368 return false;
9372 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9374 static bool
9375 mips_scalar_mode_supported_p (enum machine_mode mode)
9377 if (ALL_FIXED_POINT_MODE_P (mode)
9378 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9379 return true;
9381 return default_scalar_mode_supported_p (mode);
9384 /* Implement TARGET_INIT_LIBFUNCS. */
9386 #include "config/gofast.h"
9388 static void
9389 mips_init_libfuncs (void)
9391 if (TARGET_FIX_VR4120)
9393 /* Register the special divsi3 and modsi3 functions needed to work
9394 around VR4120 division errata. */
9395 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9396 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9399 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9401 /* Register the MIPS16 -mhard-float stubs. */
9402 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9403 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9404 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9405 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9407 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9408 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9409 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9410 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9411 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9412 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9413 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9415 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9416 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9417 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9419 if (TARGET_DOUBLE_FLOAT)
9421 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9422 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9423 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9424 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9426 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9427 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9428 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9429 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9430 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9431 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9432 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9434 set_conv_libfunc (sext_optab, DFmode, SFmode,
9435 "__mips16_extendsfdf2");
9436 set_conv_libfunc (trunc_optab, SFmode, DFmode,
9437 "__mips16_truncdfsf2");
9438 set_conv_libfunc (sfix_optab, SImode, DFmode,
9439 "__mips16_fix_truncdfsi");
9440 set_conv_libfunc (sfloat_optab, DFmode, SImode,
9441 "__mips16_floatsidf");
9442 set_conv_libfunc (ufloat_optab, DFmode, SImode,
9443 "__mips16_floatunsidf");
9446 else
9447 /* Register the gofast functions if selected using --enable-gofast. */
9448 gofast_maybe_init_libfuncs ();
9450 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
9451 on an external non-MIPS16 routine to implement __sync_synchronize. */
9452 if (TARGET_MIPS16)
9453 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
9456 /* Return the length of INSN. LENGTH is the initial length computed by
9457 attributes in the machine-description file. */
9460 mips_adjust_insn_length (rtx insn, int length)
9462 /* An unconditional jump has an unfilled delay slot if it is not part
9463 of a sequence. A conditional jump normally has a delay slot, but
9464 does not on MIPS16. */
9465 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9466 length += 4;
9468 /* See how many nops might be needed to avoid hardware hazards. */
9469 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9470 switch (get_attr_hazard (insn))
9472 case HAZARD_NONE:
9473 break;
9475 case HAZARD_DELAY:
9476 length += 4;
9477 break;
9479 case HAZARD_HILO:
9480 length += 8;
9481 break;
9484 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
9485 the .md file length attributes are 4-based for both modes.
9486 Adjust the MIPS16 ones here. */
9487 if (TARGET_MIPS16)
9488 length /= 2;
9490 return length;
9493 /* Return an asm sequence to start a noat block and load the address
9494 of a label into $1. */
9496 const char *
9497 mips_output_load_label (void)
9499 if (TARGET_EXPLICIT_RELOCS)
9500 switch (mips_abi)
9502 case ABI_N32:
9503 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9505 case ABI_64:
9506 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9508 default:
9509 if (ISA_HAS_LOAD_DELAY)
9510 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9511 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9513 else
9515 if (Pmode == DImode)
9516 return "%[dla\t%@,%0";
9517 else
9518 return "%[la\t%@,%0";
9522 /* Return the assembly code for INSN, which has the operands given by
9523 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9524 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9525 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9526 version of BRANCH_IF_TRUE. */
9528 const char *
9529 mips_output_conditional_branch (rtx insn, rtx *operands,
9530 const char *branch_if_true,
9531 const char *branch_if_false)
9533 unsigned int length;
9534 rtx taken, not_taken;
9536 length = get_attr_length (insn);
9537 if (length <= 8)
9539 /* Just a simple conditional branch. */
9540 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9541 return branch_if_true;
9544 /* Generate a reversed branch around a direct jump. This fallback does
9545 not use branch-likely instructions. */
9546 mips_branch_likely = false;
9547 not_taken = gen_label_rtx ();
9548 taken = operands[1];
9550 /* Generate the reversed branch to NOT_TAKEN. */
9551 operands[1] = not_taken;
9552 output_asm_insn (branch_if_false, operands);
9554 /* If INSN has a delay slot, we must provide delay slots for both the
9555 branch to NOT_TAKEN and the conditional jump. We must also ensure
9556 that INSN's delay slot is executed in the appropriate cases. */
9557 if (final_sequence)
9559 /* This first delay slot will always be executed, so use INSN's
9560 delay slot if it is not annulled. */
9561 if (!INSN_ANNULLED_BRANCH_P (insn))
9563 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9564 asm_out_file, optimize, 1, NULL);
9565 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9567 else
9568 output_asm_insn ("nop", 0);
9569 fprintf (asm_out_file, "\n");
9572 /* Output the unconditional branch to TAKEN. */
9573 if (length <= 16)
9574 output_asm_insn ("j\t%0%/", &taken);
9575 else
9577 output_asm_insn (mips_output_load_label (), &taken);
9578 output_asm_insn ("jr\t%@%]%/", 0);
9581 /* Now deal with its delay slot; see above. */
9582 if (final_sequence)
9584 /* This delay slot will only be executed if the branch is taken.
9585 Use INSN's delay slot if it is annulled. */
9586 if (INSN_ANNULLED_BRANCH_P (insn))
9588 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9589 asm_out_file, optimize, 1, NULL);
9590 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9592 else
9593 output_asm_insn ("nop", 0);
9594 fprintf (asm_out_file, "\n");
9597 /* Output NOT_TAKEN. */
9598 targetm.asm_out.internal_label (asm_out_file, "L",
9599 CODE_LABEL_NUMBER (not_taken));
9600 return "";
9603 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9604 if some ordering condition is true. The condition is given by
9605 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9606 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9607 its second is always zero. */
9609 const char *
9610 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9612 const char *branch[2];
9614 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9615 Make BRANCH[0] branch on the inverse condition. */
9616 switch (GET_CODE (operands[0]))
9618 /* These cases are equivalent to comparisons against zero. */
9619 case LEU:
9620 inverted_p = !inverted_p;
9621 /* Fall through. */
9622 case GTU:
9623 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9624 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9625 break;
9627 /* These cases are always true or always false. */
9628 case LTU:
9629 inverted_p = !inverted_p;
9630 /* Fall through. */
9631 case GEU:
9632 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9633 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9634 break;
9636 default:
9637 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9638 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9639 break;
9641 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9644 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
9645 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
9647 When working around R4000 and R4400 errata, we need to make sure that
9648 the division is not immediately followed by a shift[1][2]. We also
9649 need to stop the division from being put into a branch delay slot[3].
9650 The easiest way to avoid both problems is to add a nop after the
9651 division. When a divide-by-zero check is needed, this nop can be
9652 used to fill the branch delay slot.
9654 [1] If a double-word or a variable shift executes immediately
9655 after starting an integer division, the shift may give an
9656 incorrect result. See quotations of errata #16 and #28 from
9657 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9658 in mips.md for details.
9660 [2] A similar bug to [1] exists for all revisions of the
9661 R4000 and the R4400 when run in an MC configuration.
9662 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9664 "19. In this following sequence:
9666 ddiv (or ddivu or div or divu)
9667 dsll32 (or dsrl32, dsra32)
9669 if an MPT stall occurs, while the divide is slipping the cpu
9670 pipeline, then the following double shift would end up with an
9671 incorrect result.
9673 Workaround: The compiler needs to avoid generating any
9674 sequence with divide followed by extended double shift."
9676 This erratum is also present in "MIPS R4400MC Errata, Processor
9677 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9678 & 3.0" as errata #10 and #4, respectively.
9680 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9681 (also valid for MIPS R4000MC processors):
9683 "52. R4000SC: This bug does not apply for the R4000PC.
9685 There are two flavors of this bug:
9687 1) If the instruction just after divide takes an RF exception
9688 (tlb-refill, tlb-invalid) and gets an instruction cache
9689 miss (both primary and secondary) and the line which is
9690 currently in secondary cache at this index had the first
9691 data word, where the bits 5..2 are set, then R4000 would
9692 get a wrong result for the div.
9696 div r8, r9
9697 ------------------- # end-of page. -tlb-refill
9701 div r8, r9
9702 ------------------- # end-of page. -tlb-invalid
9705 2) If the divide is in the taken branch delay slot, where the
9706 target takes RF exception and gets an I-cache miss for the
9707 exception vector or where I-cache miss occurs for the
9708 target address, under the above mentioned scenarios, the
9709 div would get wrong results.
9712 j r2 # to next page mapped or unmapped
9713 div r8,r9 # this bug would be there as long
9714 # as there is an ICache miss and
9715 nop # the "data pattern" is present
9718 beq r0, r0, NextPage # to Next page
9719 div r8,r9
9722 This bug is present for div, divu, ddiv, and ddivu
9723 instructions.
9725 Workaround: For item 1), OS could make sure that the next page
9726 after the divide instruction is also mapped. For item 2), the
9727 compiler could make sure that the divide instruction is not in
9728 the branch delay slot."
9730 These processors have PRId values of 0x00004220 and 0x00004300 for
9731 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9733 const char *
9734 mips_output_division (const char *division, rtx *operands)
9736 const char *s;
9738 s = division;
9739 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9741 output_asm_insn (s, operands);
9742 s = "nop";
9744 if (TARGET_CHECK_ZERO_DIV)
9746 if (TARGET_MIPS16)
9748 output_asm_insn (s, operands);
9749 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9751 else if (GENERATE_DIVIDE_TRAPS)
9753 output_asm_insn (s, operands);
9754 s = "teq\t%2,%.,7";
9756 else
9758 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9759 output_asm_insn (s, operands);
9760 s = "break\t7%)\n1:";
9763 return s;
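   /* For example, when TARGET_CHECK_ZERO_DIV is set and GENERATE_DIVIDE_TRAPS
      holds, the division itself is printed by output_asm_insn above and the
      string handed back to the caller is "teq\t%2,%.,7", which traps with
      code 7 whenever the divisor (operand 2) is zero.  */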
9766 /* Return true if IN_INSN is a multiply-add or multiply-subtract
9767 instruction and if OUT_INSN assigns to the accumulator operand. */
9769 bool
9770 mips_linked_madd_p (rtx out_insn, rtx in_insn)
9772 rtx x;
9774 x = single_set (in_insn);
9775 if (x == 0)
9776 return false;
9778 x = SET_SRC (x);
9780 if (GET_CODE (x) == PLUS
9781 && GET_CODE (XEXP (x, 0)) == MULT
9782 && reg_set_p (XEXP (x, 1), out_insn))
9783 return true;
9785 if (GET_CODE (x) == MINUS
9786 && GET_CODE (XEXP (x, 1)) == MULT
9787 && reg_set_p (XEXP (x, 0), out_insn))
9788 return true;
9790 return false;
9793 /* True if the dependency between OUT_INSN and IN_INSN is on the store
9794 data rather than the address. We need this because the cprestore
9795 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
9796 which causes the default routine to abort. We just return false
9797 for that case. */
9799 bool
9800 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9802 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9803 return false;
9805 return !store_data_bypass_p (out_insn, in_insn);
9809 /* Variables and flags used in scheduler hooks when tuning for
9810 Loongson 2E/2F. */
9811 static struct
9813 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
9814 strategy. */
9816 /* If true, then next ALU1/2 instruction will go to ALU1. */
9817 bool alu1_turn_p;
9819 /* If true, then next FALU1/2 instruction will go to FALU1. */
9820 bool falu1_turn_p;
9822 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
9823 int alu1_core_unit_code;
9824 int alu2_core_unit_code;
9825 int falu1_core_unit_code;
9826 int falu2_core_unit_code;
9828 /* True if current cycle has a multi instruction.
9829 This flag is used in mips_ls2_dfa_post_advance_cycle. */
9830 bool cycle_has_multi_p;
9832 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
9833 These are used in mips_ls2_dfa_post_advance_cycle to initialize
9834 DFA state.
9835 E.g., when alu1_turn_enabled_insn is issued it makes the next ALU1/2
9836 instruction go to ALU1. */
9837 rtx alu1_turn_enabled_insn;
9838 rtx alu2_turn_enabled_insn;
9839 rtx falu1_turn_enabled_insn;
9840 rtx falu2_turn_enabled_insn;
9841 } mips_ls2;
9843 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9844 dependencies have no cost, except on the 20Kc where output-dependence
9845 is treated like input-dependence. */
9847 static int
9848 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9849 rtx dep ATTRIBUTE_UNUSED, int cost)
9851 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9852 && TUNE_20KC)
9853 return cost;
9854 if (REG_NOTE_KIND (link) != 0)
9855 return 0;
9856 return cost;
9859 /* Return the number of instructions that can be issued per cycle. */
9861 static int
9862 mips_issue_rate (void)
9864 switch (mips_tune)
9866 case PROCESSOR_74KC:
9867 case PROCESSOR_74KF2_1:
9868 case PROCESSOR_74KF1_1:
9869 case PROCESSOR_74KF3_2:
9870 /* The 74k is not strictly a quad-issue CPU, but can be seen as one
9871 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9872 but in reality only a maximum of 3 insns can be issued as
9873 floating-point loads and stores also require a slot in the
9874 AGEN pipe. */
9875 return 4;
9877 case PROCESSOR_20KC:
9878 case PROCESSOR_R4130:
9879 case PROCESSOR_R5400:
9880 case PROCESSOR_R5500:
9881 case PROCESSOR_R7000:
9882 case PROCESSOR_R9000:
9883 return 2;
9885 case PROCESSOR_SB1:
9886 case PROCESSOR_SB1A:
9887 /* This is actually 4, but we get better performance if we claim 3.
9888 This is partly because of unwanted speculative code motion with the
9889 larger number, and partly because in most common cases we can't
9890 reach the theoretical max of 4. */
9891 return 3;
9893 case PROCESSOR_LOONGSON_2E:
9894 case PROCESSOR_LOONGSON_2F:
9895 return 4;
9897 default:
9898 return 1;
9902 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
9904 static void
9905 mips_ls2_init_dfa_post_cycle_insn (void)
9907 start_sequence ();
9908 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
9909 mips_ls2.alu1_turn_enabled_insn = get_insns ();
9910 end_sequence ();
9912 start_sequence ();
9913 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
9914 mips_ls2.alu2_turn_enabled_insn = get_insns ();
9915 end_sequence ();
9917 start_sequence ();
9918 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
9919 mips_ls2.falu1_turn_enabled_insn = get_insns ();
9920 end_sequence ();
9922 start_sequence ();
9923 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
9924 mips_ls2.falu2_turn_enabled_insn = get_insns ();
9925 end_sequence ();
9927 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
9928 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
9929 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
9930 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
9933 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
9934 Init data used in mips_dfa_post_advance_cycle. */
9936 static void
9937 mips_init_dfa_post_cycle_insn (void)
9939 if (TUNE_LOONGSON_2EF)
9940 mips_ls2_init_dfa_post_cycle_insn ();
9943 /* Initialize STATE when scheduling for Loongson 2E/2F.
9944 Support round-robin dispatch scheme by enabling only one of
9945 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
9946 respectively. */
9948 static void
9949 mips_ls2_dfa_post_advance_cycle (state_t state)
9951 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
9953 /* Though there are no non-pipelined ALU1 insns,
9954 we can get an instruction of type 'multi' before reload. */
9955 gcc_assert (mips_ls2.cycle_has_multi_p);
9956 mips_ls2.alu1_turn_p = false;
9959 mips_ls2.cycle_has_multi_p = false;
9961 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
9962 /* We have a non-pipelined alu instruction in the core;
9963 adjust the round-robin counter. */
9964 mips_ls2.alu1_turn_p = true;
9966 if (mips_ls2.alu1_turn_p)
9968 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
9969 gcc_unreachable ();
9971 else
9973 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
9974 gcc_unreachable ();
9977 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
9979 /* There are no non-pipelined FALU1 insns. */
9980 gcc_unreachable ();
9981 mips_ls2.falu1_turn_p = false;
9984 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
9985 /* We have a non-pipelined falu instruction in the core;
9986 adjust the round-robin counter. */
9987 mips_ls2.falu1_turn_p = true;
9989 if (mips_ls2.falu1_turn_p)
9991 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
9992 gcc_unreachable ();
9994 else
9996 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
9997 gcc_unreachable ();
10001 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
10002 This hook is being called at the start of each cycle. */
10004 static void
10005 mips_dfa_post_advance_cycle (void)
10007 if (TUNE_LOONGSON_2EF)
10008 mips_ls2_dfa_post_advance_cycle (curr_state);
10011 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10012 be as wide as the scheduling freedom in the DFA. */
10014 static int
10015 mips_multipass_dfa_lookahead (void)
10017 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10018 if (TUNE_SB1)
10019 return 4;
10021 if (TUNE_LOONGSON_2EF)
10022 return 4;
10024 return 0;
10027 /* Remove the instruction at index LOWER from ready queue READY and
10028 reinsert it in front of the instruction at index HIGHER. LOWER must
10029 be <= HIGHER. */
10031 static void
10032 mips_promote_ready (rtx *ready, int lower, int higher)
10034 rtx new_head;
10035 int i;
10037 new_head = ready[lower];
10038 for (i = lower; i < higher; i++)
10039 ready[i] = ready[i + 1];
10040 ready[i] = new_head;
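   /* For instance, with READY = {A, B, C}, mips_promote_ready (ready, 0, 2)
      rotates the queue to {B, C, A}: A is removed from index 0 and
      reinserted at index 2, in front of C.  */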
10043 /* If the priority of the instruction at POS2 in the ready queue READY
10044 is within LIMIT units of that of the instruction at POS1, swap the
10045 instructions if POS2 is not already less than POS1. */
10047 static void
10048 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
10050 if (pos1 < pos2
10051 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
10053 rtx temp;
10055 temp = ready[pos1];
10056 ready[pos1] = ready[pos2];
10057 ready[pos2] = temp;
10061 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10062 that may clobber hi or lo. */
10063 static rtx mips_macc_chains_last_hilo;
10065 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10066 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10068 static void
10069 mips_macc_chains_record (rtx insn)
10071 if (get_attr_may_clobber_hilo (insn))
10072 mips_macc_chains_last_hilo = insn;
10075 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10076 has NREADY elements, looking for a multiply-add or multiply-subtract
10077 instruction that is cumulative with mips_macc_chains_last_hilo.
10078 If there is one, promote it ahead of anything else that might
10079 clobber hi or lo. */
10081 static void
10082 mips_macc_chains_reorder (rtx *ready, int nready)
10084 int i, j;
10086 if (mips_macc_chains_last_hilo != 0)
10087 for (i = nready - 1; i >= 0; i--)
10088 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10090 for (j = nready - 1; j > i; j--)
10091 if (recog_memoized (ready[j]) >= 0
10092 && get_attr_may_clobber_hilo (ready[j]))
10094 mips_promote_ready (ready, i, j);
10095 break;
10097 break;
10101 /* The last instruction to be scheduled. */
10102 static rtx vr4130_last_insn;
10104 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10105 points to an rtx that is initially an instruction. Nullify the rtx
10106 if the instruction uses the value of register X. */
10108 static void
10109 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
10110 void *data)
10112 rtx *insn_ptr;
10114 insn_ptr = (rtx *) data;
10115 if (REG_P (x)
10116 && *insn_ptr != 0
10117 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10118 *insn_ptr = 0;
10121 /* Return true if there is true register dependence between vr4130_last_insn
10122 and INSN. */
10124 static bool
10125 vr4130_true_reg_dependence_p (rtx insn)
10127 note_stores (PATTERN (vr4130_last_insn),
10128 vr4130_true_reg_dependence_p_1, &insn);
10129 return insn == 0;
10132 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10133 the ready queue and that INSN2 is the instruction after it, return
10134 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10135 in which INSN1 and INSN2 can probably issue in parallel, but for
10136 which (INSN2, INSN1) should be less sensitive to instruction
10137 alignment than (INSN1, INSN2). See 4130.md for more details. */
10139 static bool
10140 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10142 sd_iterator_def sd_it;
10143 dep_t dep;
10145 /* Check for the following case:
10147 1) there is some other instruction X with an anti dependence on INSN1;
10148 2) X has a higher priority than INSN2; and
10149 3) X is an arithmetic instruction (and thus has no unit restrictions).
10151 If INSN1 is the last instruction blocking X, it would be better to
10152 choose (INSN1, X) over (INSN2, INSN1). */
10153 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10154 if (DEP_TYPE (dep) == REG_DEP_ANTI
10155 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10156 && recog_memoized (DEP_CON (dep)) >= 0
10157 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10158 return false;
10160 if (vr4130_last_insn != 0
10161 && recog_memoized (insn1) >= 0
10162 && recog_memoized (insn2) >= 0)
10164 /* See whether INSN1 and INSN2 use different execution units,
10165 or if they are both ALU-type instructions. If so, they can
10166 probably execute in parallel. */
10167 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10168 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10169 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10171 /* If only one of the instructions has a dependence on
10172 vr4130_last_insn, prefer to schedule the other one first. */
10173 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
10174 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
10175 if (dep1_p != dep2_p)
10176 return dep1_p;
10178 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10179 is not an ALU-type instruction and if INSN1 uses the same
10180 execution unit. (Note that if this condition holds, we already
10181 know that INSN2 uses a different execution unit.) */
10182 if (class1 != VR4130_CLASS_ALU
10183 && recog_memoized (vr4130_last_insn) >= 0
10184 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10185 return true;
10188 return false;
10191 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10192 queue with at least two instructions. Swap the first two if
10193 vr4130_swap_insns_p says that it could be worthwhile. */
10195 static void
10196 vr4130_reorder (rtx *ready, int nready)
10198 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10199 mips_promote_ready (ready, nready - 2, nready - 1);
10202 /* Record whether last 74k AGEN instruction was a load or store. */
10203 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
10205 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10206 resets it to the TYPE_UNKNOWN state. */
10208 static void
10209 mips_74k_agen_init (rtx insn)
10211 if (!insn || !NONJUMP_INSN_P (insn))
10212 mips_last_74k_agen_insn = TYPE_UNKNOWN;
10213 else
10215 enum attr_type type = get_attr_type (insn);
10216 if (type == TYPE_LOAD || type == TYPE_STORE)
10217 mips_last_74k_agen_insn = type;
10221 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10222 loads to be grouped together, and multiple stores to be grouped
10223 together. Swap things around in the ready queue to make this happen. */
10225 static void
10226 mips_74k_agen_reorder (rtx *ready, int nready)
10228 int i;
10229 int store_pos, load_pos;
10231 store_pos = -1;
10232 load_pos = -1;
10234 for (i = nready - 1; i >= 0; i--)
10236 rtx insn = ready[i];
10237 if (USEFUL_INSN_P (insn))
10238 switch (get_attr_type (insn))
10240 case TYPE_STORE:
10241 if (store_pos == -1)
10242 store_pos = i;
10243 break;
10245 case TYPE_LOAD:
10246 if (load_pos == -1)
10247 load_pos = i;
10248 break;
10250 default:
10251 break;
10255 if (load_pos == -1 || store_pos == -1)
10256 return;
10258 switch (mips_last_74k_agen_insn)
10260 case TYPE_UNKNOWN:
10261 /* Prefer to schedule loads since they have a higher latency. */
10262 case TYPE_LOAD:
10263 /* Swap loads to the front of the queue. */
10264 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
10265 break;
10266 case TYPE_STORE:
10267 /* Swap stores to the front of the queue. */
10268 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
10269 break;
10270 default:
10271 break;
10275 /* Implement TARGET_SCHED_INIT. */
10277 static void
10278 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10279 int max_ready ATTRIBUTE_UNUSED)
10281 mips_macc_chains_last_hilo = 0;
10282 vr4130_last_insn = 0;
10283 mips_74k_agen_init (NULL_RTX);
10285 /* When scheduling for Loongson2, branch instructions go to ALU1,
10286 therefore a basic block is most likely to start with the round-robin
10287 counter pointing to ALU2. */
10288 mips_ls2.alu1_turn_p = false;
10289 mips_ls2.falu1_turn_p = true;
10292 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10294 static int
10295 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10296 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
10298 if (!reload_completed
10299 && TUNE_MACC_CHAINS
10300 && *nreadyp > 0)
10301 mips_macc_chains_reorder (ready, *nreadyp);
10303 if (reload_completed
10304 && TUNE_MIPS4130
10305 && !TARGET_VR4130_ALIGN
10306 && *nreadyp > 1)
10307 vr4130_reorder (ready, *nreadyp);
10309 if (TUNE_74K)
10310 mips_74k_agen_reorder (ready, *nreadyp);
10312 return mips_issue_rate ();
10315 /* Update round-robin counters for ALU1/2 and FALU1/2. */
10317 static void
10318 mips_ls2_variable_issue (rtx insn)
10320 if (mips_ls2.alu1_turn_p)
10322 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
10323 mips_ls2.alu1_turn_p = false;
10325 else
10327 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
10328 mips_ls2.alu1_turn_p = true;
10331 if (mips_ls2.falu1_turn_p)
10333 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
10334 mips_ls2.falu1_turn_p = false;
10336 else
10338 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
10339 mips_ls2.falu1_turn_p = true;
10342 if (recog_memoized (insn) >= 0)
10343 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
10346 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10348 static int
10349 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10350 rtx insn, int more)
10352 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
10353 if (USEFUL_INSN_P (insn))
10355 more--;
10356 if (!reload_completed && TUNE_MACC_CHAINS)
10357 mips_macc_chains_record (insn);
10358 vr4130_last_insn = insn;
10359 if (TUNE_74K)
10360 mips_74k_agen_init (insn);
10361 else if (TUNE_LOONGSON_2EF)
10362 mips_ls2_variable_issue (insn);
10365 /* Instructions of type 'multi' should all be split before
10366 the second scheduling pass. */
10367 gcc_assert (!reload_completed
10368 || recog_memoized (insn) < 0
10369 || get_attr_type (insn) != TYPE_MULTI);
10371 return more;
10374 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10375 return the first operand of the associated PREF or PREFX insn. */
10378 mips_prefetch_cookie (rtx write, rtx locality)
10380 /* store_streamed / load_streamed. */
10381 if (INTVAL (locality) <= 0)
10382 return GEN_INT (INTVAL (write) + 4);
10384 /* store / load. */
10385 if (INTVAL (locality) <= 2)
10386 return write;
10388 /* store_retained / load_retained. */
10389 return GEN_INT (INTVAL (write) + 6);
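   /* A note on the arithmetic above: WRITE is the PREFETCH rtx's write flag
      (0 for a load, 1 for a store), so the returned cookies correspond to
      the PREF hint values 0/1 (load/store), 4/5 (load_streamed/
      store_streamed) and 6/7 (load_retained/store_retained).  */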
10392 /* Flags that indicate when a built-in function is available.
10394 BUILTIN_AVAIL_NON_MIPS16
10395 The function is available on the current target, but only
10396 in non-MIPS16 mode. */
10397 #define BUILTIN_AVAIL_NON_MIPS16 1
10399 /* Declare an availability predicate for built-in functions that
10400 require non-MIPS16 mode and also require COND to be true.
10401 NAME is the main part of the predicate's name. */
10402 #define AVAIL_NON_MIPS16(NAME, COND) \
10403 static unsigned int \
10404 mips_builtin_avail_##NAME (void) \
10406 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
10409 /* This structure describes a single built-in function. */
10410 struct mips_builtin_description {
10411 /* The code of the main .md file instruction. See mips_builtin_type
10412 for more information. */
10413 enum insn_code icode;
10415 /* The floating-point comparison code to use with ICODE, if any. */
10416 enum mips_fp_condition cond;
10418 /* The name of the built-in function. */
10419 const char *name;
10421 /* Specifies how the function should be expanded. */
10422 enum mips_builtin_type builtin_type;
10424 /* The function's prototype. */
10425 enum mips_function_type function_type;
10427 /* Whether the function is available. */
10428 unsigned int (*avail) (void);
10431 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
10432 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
10433 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
10434 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
10435 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
10436 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
10437 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
10438 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
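/* Each invocation above defines one availability predicate; for example,
   AVAIL_NON_MIPS16 (dsp, TARGET_DSP) defines mips_builtin_avail_dsp, which
   returns BUILTIN_AVAIL_NON_MIPS16 when TARGET_DSP is true and 0 otherwise. */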
10440 /* Construct a mips_builtin_description from the given arguments.
10442 INSN is the name of the associated instruction pattern, without the
10443 leading CODE_FOR_mips_.
10445 CODE is the floating-point condition code associated with the
10446 function. It can be 'f' if the field is not applicable.
10448 NAME is the name of the function itself, without the leading
10449 "__builtin_mips_".
10451 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
10453 AVAIL is the name of the availability predicate, without the leading
10454 mips_builtin_avail_. */
10455 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
10456 FUNCTION_TYPE, AVAIL) \
10457 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
10458 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
10459 mips_builtin_avail_ ## AVAIL }
10461 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
10462 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
10463 are as for MIPS_BUILTIN. */
10464 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10465 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
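/* For illustration, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   paired_single) expands via MIPS_BUILTIN to:

     { CODE_FOR_mips_pll_ps, MIPS_FP_COND_f, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       mips_builtin_avail_paired_single }

   so both the user-visible name and the availability predicate follow
   directly from the macro arguments.  */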
10467 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
10468 are subject to mips_builtin_avail_<AVAIL>. */
10469 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
10470 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
10471 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
10472 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
10473 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
10475 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10476 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
10477 while the any and all forms are subject to mips_builtin_avail_mips3d. */
10478 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
10479 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
10480 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
10481 mips3d), \
10482 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
10483 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
10484 mips3d), \
10485 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
10486 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
10487 AVAIL), \
10488 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
10489 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
10490 AVAIL)
10492 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10493 are subject to mips_builtin_avail_mips3d. */
10494 #define CMP_4S_BUILTINS(INSN, COND) \
10495 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
10496 MIPS_BUILTIN_CMP_ANY, \
10497 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
10498 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
10499 MIPS_BUILTIN_CMP_ALL, \
10500 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
10502 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10503 instruction requires mips_builtin_avail_<AVAIL>. */
10504 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
10505 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
10506 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10507 AVAIL), \
10508 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
10509 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10510 AVAIL)
10512 /* Define all the built-in functions related to C.cond.fmt condition COND. */
10513 #define CMP_BUILTINS(COND) \
10514 MOVTF_BUILTINS (c, COND, paired_single), \
10515 MOVTF_BUILTINS (cabs, COND, mips3d), \
10516 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
10517 CMP_PS_BUILTINS (c, COND, paired_single), \
10518 CMP_PS_BUILTINS (cabs, COND, mips3d), \
10519 CMP_4S_BUILTINS (c, COND), \
10520 CMP_4S_BUILTINS (cabs, COND)
10522 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
10523 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
10524 and AVAIL are as for MIPS_BUILTIN. */
10525 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10526 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
10527 FUNCTION_TYPE, AVAIL)
10529 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10530 branch instruction. AVAIL is as for MIPS_BUILTIN. */
10531 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
10532 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
10533 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
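/* For example, BPOSGE_BUILTIN (32, dsp) declares __builtin_mips_bposge32
   as a MIPS_BUILTIN_BPOSGE32 built-in of type MIPS_SI_FTYPE_VOID, guarded
   by mips_builtin_avail_dsp.  */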
10535 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
10536 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10537 builtin_description field. */
10538 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
10539 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
10540 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
10542 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
10543 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10544 builtin_description field. */
10545 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
10546 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
10548 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
10549 We use functions of this form when the same insn can be usefully applied
10550 to more than one datatype. */
10551 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
10552 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
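/* As an example of the three macros above, LOONGSON_BUILTIN_SUFFIX (paddw,
   u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI) declares __builtin_loongson_paddw_u and
   maps it to CODE_FOR_loongson_paddw, which the #defines below redirect to
   the generic CODE_FOR_addv2si3 pattern.  */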
10554 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10555 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10556 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10557 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10558 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10559 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10561 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
10562 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
10563 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
10564 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
10565 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
10566 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
10567 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
10568 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
10569 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
10570 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
10571 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
10572 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
10573 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
10574 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
10575 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
10576 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
10577 #define CODE_FOR_loongson_biadd CODE_FOR_reduc_uplus_v8qi
10578 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
10579 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
10580 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
10581 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
10582 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
10583 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
10584 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
10585 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
10586 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
10587 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
10588 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
10589 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
10590 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
10592 static const struct mips_builtin_description mips_builtins[] = {
10593 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10594 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10595 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10596 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10597 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
10598 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
10599 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
10600 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
10602 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
10603 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10604 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10605 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10606 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
10608 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
10609 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
10610 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10611 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10612 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10613 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10615 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
10616 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
10617 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10618 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10619 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10620 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10622 MIPS_FP_CONDITIONS (CMP_BUILTINS),
10624 /* Built-in functions for the SB-1 processor. */
10625 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
10627 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
10628 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10629 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10630 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10631 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10632 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10633 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10634 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10635 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10636 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10637 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10638 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
10639 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
10640 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
10641 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
10642 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
10643 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
10644 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10645 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10646 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10647 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10648 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
10649 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
10650 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10651 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10652 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10653 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10654 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10655 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10656 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10657 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10658 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10659 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10660 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10661 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10662 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10663 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10664 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10665 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
10666 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10667 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10668 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10669 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10670 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10671 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
10672 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
10673 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
10674 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
10675 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10676 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10677 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10678 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10679 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10680 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10681 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10682 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10683 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10684 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10685 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10686 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10687 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
10688 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
10689 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
10690 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10691 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10692 BPOSGE_BUILTIN (32, dsp),
10694 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
10695 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
10696 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10697 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10698 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10699 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10700 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10701 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10702 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10703 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10704 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10705 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10706 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10707 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10708 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10709 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10710 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
10711 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10712 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10713 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10714 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10715 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10716 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
10717 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10718 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10719 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10720 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10721 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10722 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10723 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10724 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10725 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10726 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10727 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10728 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10730 /* Built-in functions for the DSP ASE (32-bit only). */
10731 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10732 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10733 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10734 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10735 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10736 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10737 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10738 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10739 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10740 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10741 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10742 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10743 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10744 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10745 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10746 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10747 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
10748 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10749 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10750 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
10751 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
10753 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
10754 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10755 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10756 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10757 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10758 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10759 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10760 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10761 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
10762 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
10763 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10764 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10765 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10766 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10767 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10768 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10770 /* Built-in functions for ST Microelectronics Loongson-2E/2F cores. */
10771 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
10772 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
10773 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
10774 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10775 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10776 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10777 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10778 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10779 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10780 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
10781 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
10782 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10783 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10784 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10785 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10786 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
10787 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10788 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10789 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10790 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
10791 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
10792 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10793 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10794 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10795 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10796 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10797 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10798 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10799 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10800 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10801 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10802 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10803 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10804 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10805 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10806 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10807 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10808 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
10809 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
10810 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10811 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10812 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10813 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10814 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10815 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10816 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10817 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10818 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
10819 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10820 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10821 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10822 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10823 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
10824 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
10825 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10826 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10827 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10828 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
10829 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10830 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
10831 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
10832 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
10833 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
10834 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10835 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10836 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10837 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10838 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10839 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10840 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10841 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10842 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10843 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10844 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10845 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10846 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10847 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10848 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10849 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10850 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10851 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10852 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
10853 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
10854 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10855 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10856 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10857 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10858 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10859 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10860 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10861 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10862 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10863 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10864 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10865 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10866 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10867 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10868 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10869 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI)
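/* Editorial note, not part of the original file: each MIPS_*_FTYPE_*
   code above encodes a prototype as the return type followed by the
   argument types; mips_build_function_type below turns it into a real
   function type.  For example, the entry

     LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI)

   describes a built-in that takes two V4HI (4 x 16-bit) vectors and
   returns a V4HI vector.  The exact user-visible name depends on how
   LOONGSON_BUILTIN_SUFFIX constructs it, which is defined earlier in
   this file and not repeated here.  */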
10872 /* MODE is a vector mode whose elements have type TYPE. Return the type
10873 of the vector itself. */
10875 static tree
10876 mips_builtin_vector_type (tree type, enum machine_mode mode)
10878 static tree types[2 * (int) MAX_MACHINE_MODE];
10879 int mode_index;
10881 mode_index = (int) mode;
10883 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
10884 mode_index += MAX_MACHINE_MODE;
10886 if (types[mode_index] == NULL_TREE)
10887 types[mode_index] = build_vector_type_for_mode (type, mode);
10888 return types[mode_index];
10891 /* Source-level argument types. */
10892 #define MIPS_ATYPE_VOID void_type_node
10893 #define MIPS_ATYPE_INT integer_type_node
10894 #define MIPS_ATYPE_POINTER ptr_type_node
10896 /* Standard mode-based argument types. */
10897 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
10898 #define MIPS_ATYPE_SI intSI_type_node
10899 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10900 #define MIPS_ATYPE_DI intDI_type_node
10901 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
10902 #define MIPS_ATYPE_SF float_type_node
10903 #define MIPS_ATYPE_DF double_type_node
10905 /* Vector argument types. */
10906 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10907 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10908 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
10909 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10910 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
10911 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
10912 #define MIPS_ATYPE_UV2SI \
10913 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
10914 #define MIPS_ATYPE_UV4HI \
10915 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
10916 #define MIPS_ATYPE_UV8QI \
10917 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
10919 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10920 their associated MIPS_ATYPEs. */
10921 #define MIPS_FTYPE_ATYPES1(A, B) \
10922 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10924 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10925 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10927 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10928 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10930 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10931 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10932 MIPS_ATYPE_##E
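/* Worked example (editorial, not in the original source): a two-argument
   signature such as MIPS_FTYPE_ATYPES2 (SI, V2HI, V2HI) expands to

     MIPS_ATYPE_SI, MIPS_ATYPE_V2HI, MIPS_ATYPE_V2HI

   which, using the definitions above, is

     intSI_type_node,
     mips_builtin_vector_type (intHI_type_node, V2HImode),
     mips_builtin_vector_type (intHI_type_node, V2HImode)

   i.e. the return type followed by the argument types, in the form
   expected by build_function_type_list in mips_build_function_type
   below.  */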
10934 /* Return the function type associated with function prototype TYPE. */
10936 static tree
10937 mips_build_function_type (enum mips_function_type type)
10939 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10941 if (types[(int) type] == NULL_TREE)
10942 switch (type)
10944 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10945 case MIPS_FTYPE_NAME##NUM ARGS: \
10946 types[(int) type] \
10947 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10948 NULL_TREE); \
10949 break;
10950 #include "config/mips/mips-ftypes.def"
10951 #undef DEF_MIPS_FTYPE
10952 default:
10953 gcc_unreachable ();
10956 return types[(int) type];
10959 /* Implement TARGET_INIT_BUILTINS. */
10961 static void
10962 mips_init_builtins (void)
10964 const struct mips_builtin_description *d;
10965 unsigned int i;
10967 /* Iterate over the mips_builtins array, registering each builtin
10968 function that is available on the current target. */
10969 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
10971 d = &mips_builtins[i];
10972 if (d->avail ())
10973 add_builtin_function (d->name,
10974 mips_build_function_type (d->function_type),
10975 i, BUILT_IN_MD, NULL, NULL);
10979 /* Take argument ARGNO from EXP's argument list and convert it into a
10980 form suitable for input operand OPNO of instruction ICODE. Return the
10981 value. */
10983 static rtx
10984 mips_prepare_builtin_arg (enum insn_code icode,
10985 unsigned int opno, tree exp, unsigned int argno)
10987 rtx value;
10988 enum machine_mode mode;
10990 value = expand_normal (CALL_EXPR_ARG (exp, argno));
10991 mode = insn_data[icode].operand[opno].mode;
10992 if (!insn_data[icode].operand[opno].predicate (value, mode))
10994 value = copy_to_mode_reg (mode, value);
10995 /* Check the predicate again. */
10996 if (!insn_data[icode].operand[opno].predicate (value, mode))
10998 error ("invalid argument to built-in function");
10999 return const0_rtx;
11003 return value;
11006 /* Return an rtx suitable for output operand OP of instruction ICODE.
11007 If TARGET is non-null, try to use it where possible. */
11009 static rtx
11010 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11012 enum machine_mode mode;
11014 mode = insn_data[icode].operand[op].mode;
11015 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11016 target = gen_reg_rtx (mode);
11018 return target;
11021 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
11022 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
11023 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
11024 suggests a good place to put the result. */
11026 static rtx
11027 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11028 bool has_target_p)
11030 rtx ops[MAX_RECOG_OPERANDS];
11031 int opno, argno;
11033 /* Map any target to operand 0. */
11034 opno = 0;
11035 if (has_target_p)
11037 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
11038 opno++;
11041 /* Map the arguments to the other operands. The n_operands value
11042 for an expander includes match_dups and match_scratches as well as
11043 match_operands, so n_operands is only an upper bound on the number
11044 of arguments to the expander function. */
11045 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
11046 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
11047 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
11049 switch (opno)
11051 case 2:
11052 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11053 break;
11055 case 3:
11056 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11057 break;
11059 case 4:
11060 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11061 break;
11063 default:
11064 gcc_unreachable ();
11066 return target;
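/* Illustrative note (editorial): for a MIPS_BUILTIN_DIRECT built-in
   with a result and two arguments, OPNO ends at 3, so the "case 3" arm
   above emits

     GEN_FCN (icode) (ops[0], ops[1], ops[2])

   with ops[0] holding the target and ops[1]/ops[2] the prepared
   arguments.  A two-argument MIPS_BUILTIN_DIRECT_NO_TARGET built-in
   instead reaches the "case 2" arm.  */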
11069 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
11070 function; TYPE says which. EXP is the CALL_EXPR that calls the
11071 function, ICODE is the instruction that should be used to compare
11072 the first two arguments, and COND is the condition it should test.
11073 TARGET, if nonnull, suggests a good place to put the result. */
11075 static rtx
11076 mips_expand_builtin_movtf (enum mips_builtin_type type,
11077 enum insn_code icode, enum mips_fp_condition cond,
11078 rtx target, tree exp)
11080 rtx cmp_result, op0, op1;
11082 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11083 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11084 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11085 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11087 icode = CODE_FOR_mips_cond_move_tf_ps;
11088 target = mips_prepare_builtin_target (icode, 0, target);
11089 if (type == MIPS_BUILTIN_MOVT)
11091 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11092 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11094 else
11096 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11097 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11099 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11100 return target;
11103 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11104 into TARGET otherwise. Return TARGET. */
11106 static rtx
11107 mips_builtin_branch_and_move (rtx condition, rtx target,
11108 rtx value_if_true, rtx value_if_false)
11110 rtx true_label, done_label;
11112 true_label = gen_label_rtx ();
11113 done_label = gen_label_rtx ();
11115 /* First assume that CONDITION is false. */
11116 mips_emit_move (target, value_if_false);
11118 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11119 emit_jump_insn (gen_condjump (condition, true_label));
11120 emit_jump_insn (gen_jump (done_label));
11121 emit_barrier ();
11123 /* Fix TARGET if CONDITION is true. */
11124 emit_label (true_label);
11125 mips_emit_move (target, value_if_true);
11127 emit_label (done_label);
11128 return target;
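/* For reference (editorial sketch): the sequence emitted above is
   roughly

       target = value_if_false;
       if (condition) goto true_label;
       goto done_label;
     true_label:
       target = value_if_true;
     done_label:

   Later passes are free to simplify this further.  */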
11131 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
11132 the CALL_EXPR that calls the function, ICODE is the code of the
11133 comparison instruction, and COND is the condition it should test.
11134 TARGET, if nonnull, suggests a good place to put the boolean result. */
11136 static rtx
11137 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11138 enum insn_code icode, enum mips_fp_condition cond,
11139 rtx target, tree exp)
11141 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
11142 int argno;
11144 if (target == 0 || GET_MODE (target) != SImode)
11145 target = gen_reg_rtx (SImode);
11147 /* The instruction should have a target operand, an operand for each
11148 argument, and an operand for COND. */
11149 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
11151 /* Prepare the operands to the comparison. */
11152 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11153 for (argno = 0; argno < call_expr_nargs (exp); argno++)
11154 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
11156 switch (insn_data[icode].n_operands)
11158 case 4:
11159 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11160 GEN_INT (cond)));
11161 break;
11163 case 6:
11164 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11165 args[2], args[3], GEN_INT (cond)));
11166 break;
11168 default:
11169 gcc_unreachable ();
11172 /* If the comparison sets more than one register, we define the result
11173 to be 0 if all registers are false and -1 if all registers are true.
11174 The value of the complete result is indeterminate otherwise. */
11175 switch (builtin_type)
11177 case MIPS_BUILTIN_CMP_ALL:
11178 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11179 return mips_builtin_branch_and_move (condition, target,
11180 const0_rtx, const1_rtx);
11182 case MIPS_BUILTIN_CMP_UPPER:
11183 case MIPS_BUILTIN_CMP_LOWER:
11184 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11185 condition = gen_single_cc (cmp_result, offset);
11186 return mips_builtin_branch_and_move (condition, target,
11187 const1_rtx, const0_rtx);
11189 default:
11190 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11191 return mips_builtin_branch_and_move (condition, target,
11192 const1_rtx, const0_rtx);
11196 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
11197 if nonnull, suggests a good place to put the boolean result. */
11199 static rtx
11200 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11202 rtx condition, cmp_result;
11203 int cmp_value;
11205 if (target == 0 || GET_MODE (target) != SImode)
11206 target = gen_reg_rtx (SImode);
11208 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11210 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11211 cmp_value = 32;
11212 else
11213 gcc_assert (0);
11215 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11216 return mips_builtin_branch_and_move (condition, target,
11217 const1_rtx, const0_rtx);
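/* Editorial note (the user-level spelling is an assumption): the only
   type handled here is MIPS_BUILTIN_BPOSGE32, so a call along the lines
   of

     if (__builtin_mips_bposge32 ())
       ...

   becomes a read of the DSP "pos" field (CCDSP_PO_REGNUM) compared
   against 32, with the boolean result materialized by
   mips_builtin_branch_and_move.  */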
11220 /* Implement TARGET_EXPAND_BUILTIN. */
11222 static rtx
11223 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11224 enum machine_mode mode ATTRIBUTE_UNUSED,
11225 int ignore ATTRIBUTE_UNUSED)
11227 tree fndecl;
11228 unsigned int fcode, avail;
11229 const struct mips_builtin_description *d;
11231 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11232 fcode = DECL_FUNCTION_CODE (fndecl);
11233 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
11234 d = &mips_builtins[fcode];
11235 avail = d->avail ();
11236 gcc_assert (avail != 0);
11237 if (TARGET_MIPS16)
11239 error ("built-in function %qs not supported for MIPS16",
11240 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
11241 return const0_rtx;
11243 switch (d->builtin_type)
11245 case MIPS_BUILTIN_DIRECT:
11246 return mips_expand_builtin_direct (d->icode, target, exp, true);
11248 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11249 return mips_expand_builtin_direct (d->icode, target, exp, false);
11251 case MIPS_BUILTIN_MOVT:
11252 case MIPS_BUILTIN_MOVF:
11253 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
11254 d->cond, target, exp);
11256 case MIPS_BUILTIN_CMP_ANY:
11257 case MIPS_BUILTIN_CMP_ALL:
11258 case MIPS_BUILTIN_CMP_UPPER:
11259 case MIPS_BUILTIN_CMP_LOWER:
11260 case MIPS_BUILTIN_CMP_SINGLE:
11261 return mips_expand_builtin_compare (d->builtin_type, d->icode,
11262 d->cond, target, exp);
11264 case MIPS_BUILTIN_BPOSGE32:
11265 return mips_expand_builtin_bposge (d->builtin_type, target);
11267 gcc_unreachable ();
11270 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
11271 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
11272 struct mips16_constant {
11273 struct mips16_constant *next;
11274 rtx value;
11275 rtx label;
11276 enum machine_mode mode;
11279 /* Information about an incomplete MIPS16 constant pool. FIRST is the
11280 first constant, HIGHEST_ADDRESS is the highest address that the first
11281 byte of the pool can have, and INSN_ADDRESS is the current instruction
11282 address. */
11283 struct mips16_constant_pool {
11284 struct mips16_constant *first;
11285 int highest_address;
11286 int insn_address;
11289 /* Add constant VALUE to POOL and return its label. MODE is the
11290 value's mode (used for CONST_INTs, etc.). */
11292 static rtx
11293 mips16_add_constant (struct mips16_constant_pool *pool,
11294 rtx value, enum machine_mode mode)
11296 struct mips16_constant **p, *c;
11297 bool first_of_size_p;
11299 /* See whether the constant is already in the pool. If so, return the
11300 existing label, otherwise leave P pointing to the place where the
11301 constant should be added.
11303 Keep the pool sorted in increasing order of mode size so that we can
11304 reduce the number of alignments needed. */
11305 first_of_size_p = true;
11306 for (p = &pool->first; *p != 0; p = &(*p)->next)
11308 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
11309 return (*p)->label;
11310 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
11311 break;
11312 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
11313 first_of_size_p = false;
11316 /* In the worst case, the constant needed by the earliest instruction
11317 will end up at the end of the pool. The entire pool must then be
11318 accessible from that instruction.
11320 When adding the first constant, set the pool's highest address to
11321 the address of the first out-of-range byte. Adjust this address
11322 downwards each time a new constant is added. */
11323 if (pool->first == 0)
11324 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
11325 of the instruction with the lowest two bits clear. The base PC
11326 value for LDPC has the lowest three bits clear. Assume the worst
11327 case here; namely that the PC-relative instruction occupies the
11328 last 2 bytes in an aligned word. */
11329 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
11330 pool->highest_address -= GET_MODE_SIZE (mode);
11331 if (first_of_size_p)
11332 /* Take into account the worst possible padding due to alignment. */
11333 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
11335 /* Create a new entry. */
11336 c = XNEW (struct mips16_constant);
11337 c->value = value;
11338 c->mode = mode;
11339 c->label = gen_label_rtx ();
11340 c->next = *p;
11341 *p = c;
11343 return c->label;
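/* Worked example (editorial, with assumed numbers): on a 32-bit target
   with UNITS_PER_WORD == 4, if the first constant is added when
   pool->insn_address is 0x100, the initial highest_address is
   0x100 - 2 + 0x8000 = 0x80fe.  Adding an 8-byte DImode constant then
   subtracts 8 and, because it is the first constant of that size, a
   further 7 bytes of worst-case alignment padding, leaving 0x80ef.  */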
11346 /* Output constant VALUE after instruction INSN and return the last
11347 instruction emitted. MODE is the mode of the constant. */
11349 static rtx
11350 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
11352 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
11354 rtx size = GEN_INT (GET_MODE_SIZE (mode));
11355 return emit_insn_after (gen_consttable_int (value, size), insn);
11358 if (SCALAR_FLOAT_MODE_P (mode))
11359 return emit_insn_after (gen_consttable_float (value), insn);
11361 if (VECTOR_MODE_P (mode))
11363 int i;
11365 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
11366 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
11367 CONST_VECTOR_ELT (value, i), insn);
11368 return insn;
11371 gcc_unreachable ();
11374 /* Dump out the constants in CONSTANTS after INSN. */
11376 static void
11377 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
11379 struct mips16_constant *c, *next;
11380 int align;
11382 align = 0;
11383 for (c = constants; c != NULL; c = next)
11385 /* If necessary, increase the alignment of PC. */
11386 if (align < GET_MODE_SIZE (c->mode))
11388 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
11389 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
11391 align = GET_MODE_SIZE (c->mode);
11393 insn = emit_label_after (c->label, insn);
11394 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
11396 next = c->next;
11397 free (c);
11400 emit_barrier_after (insn);
11403 /* Return the length of instruction INSN. */
11405 static int
11406 mips16_insn_length (rtx insn)
11408 if (JUMP_P (insn))
11410 rtx body = PATTERN (insn);
11411 if (GET_CODE (body) == ADDR_VEC)
11412 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
11413 if (GET_CODE (body) == ADDR_DIFF_VEC)
11414 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
11416 return get_attr_length (insn);
11419 /* If *X is a symbolic constant that refers to the constant pool, add
11420 the constant to POOL and rewrite *X to use the constant's label. */
11422 static void
11423 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
11425 rtx base, offset, label;
11427 split_const (*x, &base, &offset);
11428 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
11430 label = mips16_add_constant (pool, get_pool_constant (base),
11431 get_pool_mode (base));
11432 base = gen_rtx_LABEL_REF (Pmode, label);
11433 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
11437 /* This structure is used to communicate with mips16_rewrite_pool_refs.
11438 INSN is the instruction we're rewriting and POOL points to the current
11439 constant pool. */
11440 struct mips16_rewrite_pool_refs_info {
11441 rtx insn;
11442 struct mips16_constant_pool *pool;
11445 /* Rewrite *X so that constant pool references refer to the constant's
11446 label instead. DATA points to a mips16_rewrite_pool_refs_info
11447 structure. */
11449 static int
11450 mips16_rewrite_pool_refs (rtx *x, void *data)
11452 struct mips16_rewrite_pool_refs_info *info =
11453 (struct mips16_rewrite_pool_refs_info *) data;
11455 if (force_to_mem_operand (*x, Pmode))
11457 rtx mem = force_const_mem (GET_MODE (*x), *x);
11458 validate_change (info->insn, x, mem, false);
11461 if (MEM_P (*x))
11463 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
11464 return -1;
11467 if (TARGET_MIPS16_TEXT_LOADS)
11468 mips16_rewrite_pool_constant (info->pool, x);
11470 return GET_CODE (*x) == CONST ? -1 : 0;
11473 /* Build MIPS16 constant pools. */
11475 static void
11476 mips16_lay_out_constants (void)
11478 struct mips16_constant_pool pool;
11479 struct mips16_rewrite_pool_refs_info info;
11480 rtx insn, barrier;
11482 if (!TARGET_MIPS16_PCREL_LOADS)
11483 return;
11485 barrier = 0;
11486 memset (&pool, 0, sizeof (pool));
11487 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11489 /* Rewrite constant pool references in INSN. */
11490 if (INSN_P (insn))
11492 info.insn = insn;
11493 info.pool = &pool;
11494 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
11497 pool.insn_address += mips16_insn_length (insn);
11499 if (pool.first != NULL)
11501 /* If there are no natural barriers between the first user of
11502 the pool and the highest acceptable address, we'll need to
11503 create a new instruction to jump around the constant pool.
11504 In the worst case, this instruction will be 4 bytes long.
11506 If it's too late to do this transformation after INSN,
11507 do it immediately before INSN. */
11508 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
11510 rtx label, jump;
11512 label = gen_label_rtx ();
11514 jump = emit_jump_insn_before (gen_jump (label), insn);
11515 JUMP_LABEL (jump) = label;
11516 LABEL_NUSES (label) = 1;
11517 barrier = emit_barrier_after (jump);
11519 emit_label_after (label, barrier);
11520 pool.insn_address += 4;
11523 /* See whether the constant pool is now out of range of the first
11524 user. If so, output the constants after the previous barrier.
11525 Note that any instructions between BARRIER and INSN (inclusive)
11526 will use negative offsets to refer to the pool. */
11527 if (pool.insn_address > pool.highest_address)
11529 mips16_emit_constants (pool.first, barrier);
11530 pool.first = NULL;
11531 barrier = 0;
11533 else if (BARRIER_P (insn))
11534 barrier = insn;
11537 mips16_emit_constants (pool.first, get_last_insn ());
11540 /* A temporary variable used by for_each_rtx callbacks, etc. */
11541 static rtx mips_sim_insn;
11543 /* A structure representing the state of the processor pipeline.
11544 Used by the mips_sim_* family of functions. */
11545 struct mips_sim {
11546 /* The maximum number of instructions that can be issued in a cycle.
11547 (Caches mips_issue_rate.) */
11548 unsigned int issue_rate;
11550 /* The current simulation time. */
11551 unsigned int time;
11553 /* How many more instructions can be issued in the current cycle. */
11554 unsigned int insns_left;
11556 /* LAST_SET[X].INSN is the last instruction to set register X.
11557 LAST_SET[X].TIME is the time at which that instruction was issued.
11558 INSN is null if no instruction has yet set register X. */
11559 struct {
11560 rtx insn;
11561 unsigned int time;
11562 } last_set[FIRST_PSEUDO_REGISTER];
11564 /* The pipeline's current DFA state. */
11565 state_t dfa_state;
11568 /* Reset STATE to the initial simulation state. */
11570 static void
11571 mips_sim_reset (struct mips_sim *state)
11573 state->time = 0;
11574 state->insns_left = state->issue_rate;
11575 memset (&state->last_set, 0, sizeof (state->last_set));
11576 state_reset (state->dfa_state);
11579 /* Initialize STATE before its first use. DFA_STATE points to an
11580 allocated but uninitialized DFA state. */
11582 static void
11583 mips_sim_init (struct mips_sim *state, state_t dfa_state)
11585 state->issue_rate = mips_issue_rate ();
11586 state->dfa_state = dfa_state;
11587 mips_sim_reset (state);
11590 /* Advance STATE by one clock cycle. */
11592 static void
11593 mips_sim_next_cycle (struct mips_sim *state)
11595 state->time++;
11596 state->insns_left = state->issue_rate;
11597 state_transition (state->dfa_state, 0);
11600 /* Advance simulation state STATE until instruction INSN can read
11601 register REG. */
11603 static void
11604 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
11606 unsigned int regno, end_regno;
11608 end_regno = END_REGNO (reg);
11609 for (regno = REGNO (reg); regno < end_regno; regno++)
11610 if (state->last_set[regno].insn != 0)
11612 unsigned int t;
11614 t = (state->last_set[regno].time
11615 + insn_latency (state->last_set[regno].insn, insn));
11616 while (state->time < t)
11617 mips_sim_next_cycle (state);
11621 /* A for_each_rtx callback. If *X is a register, advance simulation state
11622 DATA until mips_sim_insn can read the register's value. */
11624 static int
11625 mips_sim_wait_regs_2 (rtx *x, void *data)
11627 if (REG_P (*x))
11628 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
11629 return 0;
11632 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11634 static void
11635 mips_sim_wait_regs_1 (rtx *x, void *data)
11637 for_each_rtx (x, mips_sim_wait_regs_2, data);
11640 /* Advance simulation state STATE until all of INSN's register
11641 dependencies are satisfied. */
11643 static void
11644 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11646 mips_sim_insn = insn;
11647 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11650 /* Advance simulation state STATE until the units required by
11651 instruction INSN are available. */
11653 static void
11654 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11656 state_t tmp_state;
11658 tmp_state = alloca (state_size ());
11659 while (state->insns_left == 0
11660 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11661 state_transition (tmp_state, insn) >= 0))
11662 mips_sim_next_cycle (state);
11665 /* Advance simulation state STATE until INSN is ready to issue. */
11667 static void
11668 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11670 mips_sim_wait_regs (state, insn);
11671 mips_sim_wait_units (state, insn);
11674 /* mips_sim_insn has just set X. Update the LAST_SET array
11675 in simulation state DATA. */
11677 static void
11678 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11680 struct mips_sim *state;
11682 state = (struct mips_sim *) data;
11683 if (REG_P (x))
11685 unsigned int regno, end_regno;
11687 end_regno = END_REGNO (x);
11688 for (regno = REGNO (x); regno < end_regno; regno++)
11690 state->last_set[regno].insn = mips_sim_insn;
11691 state->last_set[regno].time = state->time;
11696 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11697 can issue immediately (i.e., that mips_sim_wait_insn has already
11698 been called). */
11700 static void
11701 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11703 state_transition (state->dfa_state, insn);
11704 state->insns_left--;
11706 mips_sim_insn = insn;
11707 note_stores (PATTERN (insn), mips_sim_record_set, state);
11710 /* Simulate issuing a NOP in state STATE. */
11712 static void
11713 mips_sim_issue_nop (struct mips_sim *state)
11715 if (state->insns_left == 0)
11716 mips_sim_next_cycle (state);
11717 state->insns_left--;
11720 /* Update simulation state STATE so that it's ready to accept the instruction
11721 after INSN. INSN should be part of the main rtl chain, not a member of a
11722 SEQUENCE. */
11724 static void
11725 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11727 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11728 if (JUMP_P (insn))
11729 mips_sim_issue_nop (state);
11731 switch (GET_CODE (SEQ_BEGIN (insn)))
11733 case CODE_LABEL:
11734 case CALL_INSN:
11735 /* We can't predict the processor state after a call or label. */
11736 mips_sim_reset (state);
11737 break;
11739 case JUMP_INSN:
11740 /* The delay slots of branch likely instructions are only executed
11741 when the branch is taken. Therefore, if the caller has simulated
11742 the delay slot instruction, STATE does not really reflect the state
11743 of the pipeline for the instruction after the delay slot. Also,
11744 branch likely instructions tend to incur a penalty when not taken,
11745 so there will probably be an extra delay between the branch and
11746 the instruction after the delay slot. */
11747 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11748 mips_sim_reset (state);
11749 break;
11751 default:
11752 break;
11756 /* The VR4130 pipeline issues aligned pairs of instructions together,
11757 but it stalls the second instruction if it depends on the first.
11758 In order to cut down the amount of logic required, this dependence
11759 check is not based on a full instruction decode. Instead, any non-SPECIAL
11760 instruction is assumed to modify the register specified by bits 20-16
11761 (which is usually the "rt" field).
11763 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
11764 input, so we can end up with a false dependence between the branch
11765 and its delay slot. If this situation occurs in instruction INSN,
11766 try to avoid it by swapping rs and rt. */
11768 static void
11769 vr4130_avoid_branch_rt_conflict (rtx insn)
11771 rtx first, second;
11773 first = SEQ_BEGIN (insn);
11774 second = SEQ_END (insn);
11775 if (JUMP_P (first)
11776 && NONJUMP_INSN_P (second)
11777 && GET_CODE (PATTERN (first)) == SET
11778 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11779 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11781 /* Check for the right kind of condition. */
11782 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11783 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11784 && REG_P (XEXP (cond, 0))
11785 && REG_P (XEXP (cond, 1))
11786 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11787 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11789 /* SECOND mentions the rt register but not the rs register. */
11790 rtx tmp = XEXP (cond, 0);
11791 XEXP (cond, 0) = XEXP (cond, 1);
11792 XEXP (cond, 1) = tmp;
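/* Concrete illustration (editorial, not from the original source): in a
   pair such as

     beq  $4,$5,label
      addu $6,$5,$7     <-- delay slot reads $5, the branch's rt field

   the VR4130's simplified hazard check assumes the branch writes $5,
   so the delay slot stalls on a false dependence.  EQ and NE are
   symmetric, so swapping the operands to give "beq $5,$4,label"
   removes the conflict.  */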
11797 /* Implement -mvr4130-align. Go through each basic block and simulate the
11798 processor pipeline. If we find that a pair of instructions could execute
11799 in parallel, and the first of those instructions is not 8-byte aligned,
11800 insert a nop to make it aligned. */
11802 static void
11803 vr4130_align_insns (void)
11805 struct mips_sim state;
11806 rtx insn, subinsn, last, last2, next;
11807 bool aligned_p;
11809 dfa_start ();
11811 /* LAST is the last instruction before INSN to have a nonzero length.
11812 LAST2 is the last such instruction before LAST. */
11813 last = 0;
11814 last2 = 0;
11816 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11817 aligned_p = true;
11819 mips_sim_init (&state, alloca (state_size ()));
11820 for (insn = get_insns (); insn != 0; insn = next)
11822 unsigned int length;
11824 next = NEXT_INSN (insn);
11826 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11827 This isn't really related to the alignment pass, but we do it on
11828 the fly to avoid a separate instruction walk. */
11829 vr4130_avoid_branch_rt_conflict (insn);
11831 if (USEFUL_INSN_P (insn))
11832 FOR_EACH_SUBINSN (subinsn, insn)
11834 mips_sim_wait_insn (&state, subinsn);
11836 /* If we want this instruction to issue in parallel with the
11837 previous one, make sure that the previous instruction is
11838 aligned. There are several reasons why this isn't worthwhile
11839 when the second instruction is a call:
11841 - Calls are less likely to be performance critical,
11842 - There's a good chance that the delay slot can execute
11843 in parallel with the call.
11844 - The return address would then be unaligned.
11846 In general, if we're going to insert a nop between instructions
11847 X and Y, it's better to insert it immediately after X. That
11848 way, if the nop makes Y aligned, it will also align any labels
11849 between X and Y. */
11850 if (state.insns_left != state.issue_rate
11851 && !CALL_P (subinsn))
11853 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11855 /* SUBINSN is the first instruction in INSN and INSN is
11856 aligned. We want to align the previous instruction
11857 instead, so insert a nop between LAST2 and LAST.
11859 Note that LAST could be either a single instruction
11860 or a branch with a delay slot. In the latter case,
11861 LAST, like INSN, is already aligned, but the delay
11862 slot must have some extra delay that stops it from
11863 issuing at the same time as the branch. We therefore
11864 insert a nop before the branch in order to align its
11865 delay slot. */
11866 emit_insn_after (gen_nop (), last2);
11867 aligned_p = false;
11869 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11871 /* SUBINSN is the delay slot of INSN, but INSN is
11872 currently unaligned. Insert a nop between
11873 LAST and INSN to align it. */
11874 emit_insn_after (gen_nop (), last);
11875 aligned_p = true;
11878 mips_sim_issue_insn (&state, subinsn);
11880 mips_sim_finish_insn (&state, insn);
11882 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11883 length = get_attr_length (insn);
11884 if (length > 0)
11886 /* If the instruction is an asm statement or multi-instruction
11887 mips.md pattern, the length is only an estimate. Insert an
11888 8-byte alignment after it so that the following instructions
11889 can be handled correctly. */
11890 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11891 && (recog_memoized (insn) < 0 || length >= 8))
11893 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11894 next = NEXT_INSN (next);
11895 mips_sim_next_cycle (&state);
11896 aligned_p = true;
11898 else if (length & 4)
11899 aligned_p = !aligned_p;
11900 last2 = last;
11901 last = insn;
11904 /* See whether INSN is an aligned label. */
11905 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11906 aligned_p = true;
11908 dfa_finish ();
11911 /* This structure records that the current function has a LO_SUM
11912 involving SYMBOL_REF or LABEL_REF BASE and that OFFSET is
11913 the largest offset applied to BASE by all such LO_SUMs. */
11914 struct mips_lo_sum_offset {
11915 rtx base;
11916 HOST_WIDE_INT offset;
11919 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
11921 static hashval_t
11922 mips_hash_base (rtx base)
11924 int do_not_record_p;
11926 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
11929 /* Hash-table callbacks for mips_lo_sum_offsets. */
11931 static hashval_t
11932 mips_lo_sum_offset_hash (const void *entry)
11934 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
11937 static int
11938 mips_lo_sum_offset_eq (const void *entry, const void *value)
11940 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
11941 (const_rtx) value);
11944 /* Look up symbolic constant X in HTAB, which is a hash table of
11945 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
11946 paired with a recorded LO_SUM, otherwise record X in the table. */
11948 static bool
11949 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
11951 rtx base, offset;
11952 void **slot;
11953 struct mips_lo_sum_offset *entry;
11955 /* Split X into a base and offset. */
11956 split_const (x, &base, &offset);
11957 if (UNSPEC_ADDRESS_P (base))
11958 base = UNSPEC_ADDRESS (base);
11960 /* Look up the base in the hash table. */
11961 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
11962 if (slot == NULL)
11963 return false;
11965 entry = (struct mips_lo_sum_offset *) *slot;
11966 if (option == INSERT)
11968 if (entry == NULL)
11970 entry = XNEW (struct mips_lo_sum_offset);
11971 entry->base = base;
11972 entry->offset = INTVAL (offset);
11973 *slot = entry;
11975 else
11977 if (INTVAL (offset) > entry->offset)
11978 entry->offset = INTVAL (offset);
11981 return INTVAL (offset) <= entry->offset;
11984 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
11985 Record every LO_SUM in *LOC. */
11987 static int
11988 mips_record_lo_sum (rtx *loc, void *data)
11990 if (GET_CODE (*loc) == LO_SUM)
11991 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
11992 return 0;
11995 /* Return true if INSN is a SET of an orphaned high-part relocation.
11996 HTAB is a hash table of mips_lo_sum_offsets that describes all the
11997 LO_SUMs in the current function. */
11999 static bool
12000 mips_orphaned_high_part_p (htab_t htab, rtx insn)
12002 enum mips_symbol_type type;
12003 rtx x, set;
12005 set = single_set (insn);
12006 if (set)
12008 /* Check for %his. */
12009 x = SET_SRC (set);
12010 if (GET_CODE (x) == HIGH
12011 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
12012 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
12014 /* Check for local %gots (and %got_pages, which is redundant but OK). */
12015 if (GET_CODE (x) == UNSPEC
12016 && XINT (x, 1) == UNSPEC_LOAD_GOT
12017 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
12018 SYMBOL_CONTEXT_LEA, &type)
12019 && type == SYMBOL_GOTOFF_PAGE)
12020 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
12022 return false;
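/* Editorial example: if optimization deletes the only user of a %lo,
   a high-part load such as

     lui $2,%hi(sym)

   can be left behind with no matching "addiu $2,$2,%lo(sym)".  No
   LO_SUM for "sym" is then recorded in HTAB, so this function reports
   the SET as orphaned, and mips_reorg_process_insns below deletes it
   (or turns it into a NOP if it sits in a delay slot).  */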
12025 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
12026 INSN and a previous instruction, avoid it by inserting nops after
12027 instruction AFTER.
12029 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
12030 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
12031 before using the value of that register. *HILO_DELAY counts the
12032 number of instructions since the last hilo hazard (that is,
12033 the number of instructions since the last MFLO or MFHI).
12035 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
12036 for the next instruction.
12038 LO_REG is an rtx for the LO register, used in dependence checking. */
12040 static void
12041 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
12042 rtx *delayed_reg, rtx lo_reg)
12044 rtx pattern, set;
12045 int nops, ninsns;
12047 pattern = PATTERN (insn);
12049 /* Do not put the whole function in .set noreorder if it contains
12050 an asm statement. We don't know whether there will be hazards
12051 between the asm statement and the gcc-generated code. */
12052 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
12053 cfun->machine->all_noreorder_p = false;
12055 /* Ignore zero-length instructions (barriers and the like). */
12056 ninsns = get_attr_length (insn) / 4;
12057 if (ninsns == 0)
12058 return;
12060 /* Work out how many nops are needed. Note that we only care about
12061 registers that are explicitly mentioned in the instruction's pattern.
12062 It doesn't matter that calls use the argument registers or that they
12063 clobber hi and lo. */
12064 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
12065 nops = 2 - *hilo_delay;
12066 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
12067 nops = 1;
12068 else
12069 nops = 0;
12071 /* Insert the nops between this instruction and the previous one.
12072 Each new nop takes us further from the last hilo hazard. */
12073 *hilo_delay += nops;
12074 while (nops-- > 0)
12075 emit_insn_after (gen_hazard_nop (), after);
12077 /* Set up the state for the next instruction. */
12078 *hilo_delay += ninsns;
12079 *delayed_reg = 0;
12080 if (INSN_CODE (insn) >= 0)
12081 switch (get_attr_hazard (insn))
12083 case HAZARD_NONE:
12084 break;
12086 case HAZARD_HILO:
12087 *hilo_delay = 0;
12088 break;
12090 case HAZARD_DELAY:
12091 set = single_set (insn);
12092 gcc_assert (set);
12093 *delayed_reg = SET_DEST (set);
12094 break;
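/* Editorial example (the exact hazard depends on the target's .md
   attributes): on a processor with the HI/LO hazard, a sequence like

     mflo $2
     mult $4,$5

   must separate the MFLO from the next write to LO by two instructions.
   When the MULT is reached with *HILO_DELAY == 0, the code above
   inserts 2 - 0 = 2 hazard NOPs after the preceding instruction.  */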
12098 /* Go through the instruction stream and insert nops where necessary.
12099 Also delete any high-part relocations whose partnering low parts
12100 are now all dead. See if the whole function can then be put into
12101 .set noreorder and .set nomacro. */
12103 static void
12104 mips_reorg_process_insns (void)
12106 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
12107 int hilo_delay;
12108 htab_t htab;
12110 /* Force all instructions to be split into their final form. */
12111 split_all_insns_noflow ();
12113 /* Recalculate instruction lengths without taking nops into account. */
12114 cfun->machine->ignore_hazard_length_p = true;
12115 shorten_branches (get_insns ());
12117 cfun->machine->all_noreorder_p = true;
12119 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
12120 if (!TARGET_EXPLICIT_RELOCS)
12121 cfun->machine->all_noreorder_p = false;
12123 /* Profiled functions can't be all noreorder because the profiler
12124 support uses assembler macros. */
12125 if (crtl->profile)
12126 cfun->machine->all_noreorder_p = false;
12128 /* Code compiled with -mfix-vr4120 can't be all noreorder because
12129 we rely on the assembler to work around some errata. */
12130 if (TARGET_FIX_VR4120)
12131 cfun->machine->all_noreorder_p = false;
12133 /* The same is true for -mfix-vr4130 if we might generate MFLO or
12134 MFHI instructions. Note that we avoid using MFLO and MFHI if
12135 the VR4130 MACC and DMACC instructions are available instead;
12136 see the *mfhilo_{si,di}_macc patterns. */
12137 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
12138 cfun->machine->all_noreorder_p = false;
12140 htab = htab_create (37, mips_lo_sum_offset_hash,
12141 mips_lo_sum_offset_eq, free);
12143 /* Make a first pass over the instructions, recording all the LO_SUMs. */
12144 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
12145 FOR_EACH_SUBINSN (subinsn, insn)
12146 if (INSN_P (subinsn))
12147 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
12149 last_insn = 0;
12150 hilo_delay = 2;
12151 delayed_reg = 0;
12152 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
12154 /* Make a second pass over the instructions. Delete orphaned
12155 high-part relocations or turn them into NOPs. Avoid hazards
12156 by inserting NOPs. */
12157 for (insn = get_insns (); insn != 0; insn = next_insn)
12159 next_insn = NEXT_INSN (insn);
12160 if (INSN_P (insn))
12162 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
12164 /* If we find an orphaned high-part relocation in a delay
12165 slot, it's easier to turn that instruction into a NOP than
12166 to delete it. The delay slot will be a NOP either way. */
12167 FOR_EACH_SUBINSN (subinsn, insn)
12168 if (INSN_P (subinsn))
12170 if (mips_orphaned_high_part_p (htab, subinsn))
12172 PATTERN (subinsn) = gen_nop ();
12173 INSN_CODE (subinsn) = CODE_FOR_nop;
12175 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
12176 &delayed_reg, lo_reg);
12178 last_insn = insn;
12180 else
12182 /* INSN is a single instruction. Delete it if it's an
12183 orphaned high-part relocation. */
12184 if (mips_orphaned_high_part_p (htab, insn))
12185 delete_insn (insn);
12186 else
12188 mips_avoid_hazard (last_insn, insn, &hilo_delay,
12189 &delayed_reg, lo_reg);
12190 last_insn = insn;
12196 htab_delete (htab);
12199 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
12201 static void
12202 mips_reorg (void)
12204 mips16_lay_out_constants ();
12205 if (mips_base_delayed_branch)
12206 dbr_schedule (get_insns ());
12207 mips_reorg_process_insns ();
12208 if (TARGET_EXPLICIT_RELOCS && TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
12209 vr4130_align_insns ();
12212 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
12213 in order to avoid duplicating too much logic from elsewhere. */
12215 static void
12216 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12217 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12218 tree function)
12220 rtx this, temp1, temp2, insn, fnaddr;
12221 bool use_sibcall_p;
12223 /* Pretend to be a post-reload pass while generating rtl. */
12224 reload_completed = 1;
12226 /* Mark the end of the (empty) prologue. */
12227 emit_note (NOTE_INSN_PROLOGUE_END);
12229 /* Determine if we can use a sibcall to call FUNCTION directly. */
12230 fnaddr = XEXP (DECL_RTL (function), 0);
12231 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
12232 && const_call_insn_operand (fnaddr, Pmode));
12234 /* Determine if we need to load FNADDR from the GOT. */
12235 if (!use_sibcall_p)
12236 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
12238 case SYMBOL_GOT_PAGE_OFST:
12239 case SYMBOL_GOT_DISP:
12240 /* Pick a global pointer. Use a call-clobbered register if
12241 TARGET_CALL_SAVED_GP. */
12242 cfun->machine->global_pointer =
12243 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
12244 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
12246 /* Set up the global pointer for n32 or n64 abicalls. */
12247 mips_emit_loadgp ();
12248 break;
12250 default:
12251 break;
12254 /* We need two temporary registers in some cases. */
12255 temp1 = gen_rtx_REG (Pmode, 2);
12256 temp2 = gen_rtx_REG (Pmode, 3);
12258 /* Find out which register contains the "this" pointer. */
12259 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12260 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
12261 else
12262 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
12264 /* Add DELTA to THIS. */
12265 if (delta != 0)
12267 rtx offset = GEN_INT (delta);
12268 if (!SMALL_OPERAND (delta))
12270 mips_emit_move (temp1, offset);
12271 offset = temp1;
12273 emit_insn (gen_add3_insn (this, this, offset));
12276 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
12277 if (vcall_offset != 0)
12279 rtx addr;
12281 /* Set TEMP1 to *THIS. */
12282 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
12284 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
12285 addr = mips_add_offset (temp2, temp1, vcall_offset);
12287 /* Load the offset and add it to THIS. */
12288 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
12289 emit_insn (gen_add3_insn (this, this, temp1));
12292 /* Jump to the target function. Use a sibcall if direct jumps are
12293 allowed, otherwise load the address into a register first. */
12294 if (use_sibcall_p)
12296 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
12297 SIBLING_CALL_P (insn) = 1;
12299 else
12301 /* This is messy. GAS treats "la $25,foo" as part of a call
12302 sequence and may allow a global "foo" to be lazily bound.
12303 The general move patterns therefore reject this combination.
12305 In this context, lazy binding would actually be OK
12306 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
12307 TARGET_CALL_SAVED_GP; see mips_load_call_address.
12308 We must therefore load the address via a temporary
12309 register if mips_dangerous_for_la25_p.
12311 If we jump to the temporary register rather than $25, the assembler
12312 can use the move insn to fill the jump's delay slot. */
12313 if (TARGET_USE_PIC_FN_ADDR_REG
12314 && !mips_dangerous_for_la25_p (fnaddr))
12315 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
12316 mips_load_call_address (temp1, fnaddr, true);
12318 if (TARGET_USE_PIC_FN_ADDR_REG
12319 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
12320 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
12321 emit_jump_insn (gen_indirect_jump (temp1));
12324 /* Run just enough of rest_of_compilation. This sequence was
12325 "borrowed" from alpha.c. */
12326 insn = get_insns ();
12327 insn_locators_alloc ();
12328 split_all_insns_noflow ();
12329 mips16_lay_out_constants ();
12330 shorten_branches (insn);
12331 final_start_function (insn, file, 1);
12332 final (insn, file, 1);
12333 final_end_function ();
12334 free_after_compilation (cfun);
12336 /* Clean up the vars set above. Note that final_end_function resets
12337 the global pointer for us. */
12338 reload_completed = 0;
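/* Editorial sketch (register numbers and final assembly are
   assumptions): for a thunk with a small DELTA, VCALL_OFFSET == 0 and a
   directly callable FUNCTION, the code above boils down to "add DELTA
   to the this pointer in $4 and tail-call FUNCTION", roughly

     addiu $4,$4,DELTA
     j     FUNCTION

   modulo delay-slot scheduling and the PIC cases handled above.  */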
12341 /* The last argument passed to mips_set_mips16_mode, or negative if the
12342 function hasn't been called yet.
12344 There are two copies of this information. One is saved and restored
12345 by the PCH process while the other is specific to this compiler
12346 invocation. The information calculated by mips_set_mips16_mode
12347 is invalid unless the two variables are the same. */
12348 static int was_mips16_p = -1;
12349 static GTY(()) int was_mips16_pch_p = -1;
12351 /* Set up the target-dependent global state so that it matches the
12352 current function's ISA mode. */
12354 static void
12355 mips_set_mips16_mode (int mips16_p)
12357 if (mips16_p == was_mips16_p
12358 && mips16_p == was_mips16_pch_p)
12359 return;
12361 /* Restore base settings of various flags. */
12362 target_flags = mips_base_target_flags;
12363 flag_schedule_insns = mips_base_schedule_insns;
12364 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
12365 flag_move_loop_invariants = mips_base_move_loop_invariants;
12366 align_loops = mips_base_align_loops;
12367 align_jumps = mips_base_align_jumps;
12368 align_functions = mips_base_align_functions;
12370 if (mips16_p)
12372 /* Switch to MIPS16 mode. */
12373 target_flags |= MASK_MIPS16;
12375 /* Don't run the scheduler before reload, since it tends to
12376 increase register pressure. */
12377 flag_schedule_insns = 0;
12379 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
12380 the whole function to be in a single section. */
12381 flag_reorder_blocks_and_partition = 0;
12383 /* Don't move loop invariants, because it tends to increase
12384 register pressure. It also introduces an extra move in cases
12385 where the constant is the first operand in a two-operand binary
12386 instruction, or when it forms a register argument to a function
12387 call. */
12388 flag_move_loop_invariants = 0;
12390 /* Silently disable -mexplicit-relocs since it doesn't apply
12391 to MIPS16 code. Even so, it would be overly pedantic to warn
12392 about "-mips16 -mexplicit-relocs", especially given that
12393 we use a %gprel() operator. */
12394 target_flags &= ~MASK_EXPLICIT_RELOCS;
12396 /* Experiments suggest we get the best overall section-anchor
12397 results from using the range of an unextended LW or SW. Code
12398 that makes heavy use of byte or short accesses can do better
12399 with ranges of 0...31 and 0...63 respectively, but most code is
12400 sensitive to the range of LW and SW instead. */
12401 targetm.min_anchor_offset = 0;
12402 targetm.max_anchor_offset = 127;
12404 if (flag_pic || TARGET_ABICALLS)
12405 sorry ("MIPS16 PIC");
12407 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
12408 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
12410 else
12412 /* Switch to normal (non-MIPS16) mode. */
12413 target_flags &= ~MASK_MIPS16;
12415 /* Provide default values for align_* for 64-bit targets. */
12416 if (TARGET_64BIT)
12418 if (align_loops == 0)
12419 align_loops = 8;
12420 if (align_jumps == 0)
12421 align_jumps = 8;
12422 if (align_functions == 0)
12423 align_functions = 8;
12426 targetm.min_anchor_offset = -32768;
12427 targetm.max_anchor_offset = 32767;
12430 /* (Re)initialize MIPS target internals for new ISA. */
12431 mips_init_relocs ();
12433 if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
12434 /* Reinitialize target-dependent state. */
12435 target_reinit ();
12437 was_mips16_p = mips16_p;
12438 was_mips16_pch_p = mips16_p;
12441 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
12442 function should use the MIPS16 ISA and switch modes accordingly. */
12444 static void
12445 mips_set_current_function (tree fndecl)
12447 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
12450 /* Allocate a chunk of memory for per-function machine-dependent data. */
12452 static struct machine_function *
12453 mips_init_machine_status (void)
12455 return ((struct machine_function *)
12456 ggc_alloc_cleared (sizeof (struct machine_function)));
12459 /* Return the processor associated with the given ISA level, or null
12460 if the ISA isn't valid. */
12462 static const struct mips_cpu_info *
12463 mips_cpu_info_from_isa (int isa)
12465 unsigned int i;
12467 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12468 if (mips_cpu_info_table[i].isa == isa)
12469 return mips_cpu_info_table + i;
12471 return NULL;
12474 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
12475 with a final "000" replaced by "k". Ignore case.
12477 Note: this function is shared between GCC and GAS. */
12479 static bool
12480 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
12482 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
12483 given++, canonical++;
12485 return ((*given == 0 && *canonical == 0)
12486 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
12489 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
12490 CPU name. We've traditionally allowed a lot of variation here.
12492 Note: this function is shared between GCC and GAS. */
12494 static bool
12495 mips_matching_cpu_name_p (const char *canonical, const char *given)
12497 /* First see if the name matches exactly, or with a final "000"
12498 turned into "k". */
12499 if (mips_strict_matching_cpu_name_p (canonical, given))
12500 return true;
12502 /* If not, try comparing based on numerical designation alone.
12503 See if GIVEN is an unadorned number, or 'r' followed by a number. */
12504 if (TOLOWER (*given) == 'r')
12505 given++;
12506 if (!ISDIGIT (*given))
12507 return false;
12509 /* Skip over some well-known prefixes in the canonical name,
12510 hoping to find a number there too. */
12511 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
12512 canonical += 2;
12513 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
12514 canonical += 2;
12515 else if (TOLOWER (canonical[0]) == 'r')
12516 canonical += 1;
12518 return mips_strict_matching_cpu_name_p (canonical, given);
12521 /* Return the mips_cpu_info entry for the processor or ISA given
12522 by CPU_STRING. Return null if the string isn't recognized.
12524 A similar function exists in GAS. */
12526 static const struct mips_cpu_info *
12527 mips_parse_cpu (const char *cpu_string)
12529 unsigned int i;
12530 const char *s;
12532 /* In the past, we allowed upper-case CPU names, but it doesn't
12533 work well with the multilib machinery. */
12534 for (s = cpu_string; *s != 0; s++)
12535 if (ISUPPER (*s))
12536 {
12537 warning (0, "CPU names must be lower case");
12538 break;
12539 }
12541 /* 'from-abi' selects the most compatible architecture for the given
12542 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
12543 EABIs, we have to decide whether we're using the 32-bit or 64-bit
12544 version. */
12545 if (strcasecmp (cpu_string, "from-abi") == 0)
12546 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
12547 : ABI_NEEDS_64BIT_REGS ? 3
12548 : (TARGET_64BIT ? 3 : 1));
12550 /* 'default' has traditionally been a no-op. Probably not very useful. */
12551 if (strcasecmp (cpu_string, "default") == 0)
12552 return NULL;
12554 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12555 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
12556 return mips_cpu_info_table + i;
12558 return NULL;
12561 /* Set up globals to generate code for the ISA or processor
12562 described by INFO. */
12564 static void
12565 mips_set_architecture (const struct mips_cpu_info *info)
12567 if (info != 0)
12568 {
12569 mips_arch_info = info;
12570 mips_arch = info->cpu;
12571 mips_isa = info->isa;
12572 }
12575 /* Likewise for tuning. */
12577 static void
12578 mips_set_tune (const struct mips_cpu_info *info)
12580 if (info != 0)
12581 {
12582 mips_tune_info = info;
12583 mips_tune = info->cpu;
12584 }
12587 /* Implement TARGET_HANDLE_OPTION. */
12589 static bool
12590 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
12592 switch (code)
12594 case OPT_mabi_:
12595 if (strcmp (arg, "32") == 0)
12596 mips_abi = ABI_32;
12597 else if (strcmp (arg, "o64") == 0)
12598 mips_abi = ABI_O64;
12599 else if (strcmp (arg, "n32") == 0)
12600 mips_abi = ABI_N32;
12601 else if (strcmp (arg, "64") == 0)
12602 mips_abi = ABI_64;
12603 else if (strcmp (arg, "eabi") == 0)
12604 mips_abi = ABI_EABI;
12605 else
12606 return false;
12607 return true;
12609 case OPT_march_:
12610 case OPT_mtune_:
12611 return mips_parse_cpu (arg) != 0;
12613 case OPT_mips:
12614 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
12615 return mips_isa_option_info != 0;
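/* For example, -mips32r2 reaches this point with ARG == "32r2" and is
   looked up as the ISA name "mips32r2".  */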
12617 case OPT_mno_flush_func:
12618 mips_cache_flush_func = NULL;
12619 return true;
12621 case OPT_mcode_readable_:
12622 if (strcmp (arg, "yes") == 0)
12623 mips_code_readable = CODE_READABLE_YES;
12624 else if (strcmp (arg, "pcrel") == 0)
12625 mips_code_readable = CODE_READABLE_PCREL;
12626 else if (strcmp (arg, "no") == 0)
12627 mips_code_readable = CODE_READABLE_NO;
12628 else
12629 return false;
12630 return true;
12632 default:
12633 return true;
12637 /* Implement OVERRIDE_OPTIONS. */
12639 void
12640 mips_override_options (void)
12642 int i, start, regno, mode;
12644 /* Process flags as though we were generating non-MIPS16 code. */
12645 mips_base_mips16 = TARGET_MIPS16;
12646 target_flags &= ~MASK_MIPS16;
12648 #ifdef SUBTARGET_OVERRIDE_OPTIONS
12649 SUBTARGET_OVERRIDE_OPTIONS;
12650 #endif
12652 /* Set the small data limit. */
12653 mips_small_data_threshold = (g_switch_set
12654 ? g_switch_value
12655 : MIPS_DEFAULT_GVALUE);
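/* For example, "-G 8" allows objects of up to 8 bytes to be placed in
   the small data or small BSS sections; without a -G option the target's
   MIPS_DEFAULT_GVALUE is used instead.  */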
12657 /* The following code determines the architecture and register size.
12658 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
12659 The GAS and GCC code should be kept in sync as much as possible. */
12661 if (mips_arch_string != 0)
12662 mips_set_architecture (mips_parse_cpu (mips_arch_string));
12664 if (mips_isa_option_info != 0)
12666 if (mips_arch_info == 0)
12667 mips_set_architecture (mips_isa_option_info);
12668 else if (mips_arch_info->isa != mips_isa_option_info->isa)
12669 error ("%<-%s%> conflicts with the other architecture options, "
12670 "which specify a %s processor",
12671 mips_isa_option_info->name,
12672 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
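/* For example, "-mips3 -march=mips32" is rejected here: -march=mips32
   selects a MIPS32 ISA while -mips3 asks for MIPS III.  */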
12675 if (mips_arch_info == 0)
12677 #ifdef MIPS_CPU_STRING_DEFAULT
12678 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
12679 #else
12680 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
12681 #endif
12684 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
12685 error ("%<-march=%s%> is not compatible with the selected ABI",
12686 mips_arch_info->name);
12688 /* Optimize for mips_arch, unless -mtune selects a different processor. */
12689 if (mips_tune_string != 0)
12690 mips_set_tune (mips_parse_cpu (mips_tune_string));
12692 if (mips_tune_info == 0)
12693 mips_set_tune (mips_arch_info);
12695 if ((target_flags_explicit & MASK_64BIT) != 0)
12696 {
12697 /* The user specified the size of the integer registers. Make sure
12698 it agrees with the ABI and ISA. */
12699 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
12700 error ("%<-mgp64%> used with a 32-bit processor");
12701 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
12702 error ("%<-mgp32%> used with a 64-bit ABI");
12703 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
12704 error ("%<-mgp64%> used with a 32-bit ABI");
12705 }
12706 else
12707 {
12708 /* Infer the integer register size from the ABI and processor.
12709 Restrict ourselves to 32-bit registers if that's all the
12710 processor has, or if the ABI cannot handle 64-bit registers. */
12711 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
12712 target_flags &= ~MASK_64BIT;
12713 else
12714 target_flags |= MASK_64BIT;
12715 }
12717 if ((target_flags_explicit & MASK_FLOAT64) != 0)
12718 {
12719 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
12720 error ("unsupported combination: %s", "-mfp64 -msingle-float");
12721 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
12722 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
12723 else if (!TARGET_64BIT && TARGET_FLOAT64)
12724 {
12725 if (!ISA_HAS_MXHC1)
12726 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
12727 " the target supports the mfhc1 and mthc1 instructions");
12728 else if (mips_abi != ABI_32)
12729 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
12730 " the o32 ABI");
12733 else
12735 /* -msingle-float selects 32-bit float registers. Otherwise the
12736 float registers should be the same size as the integer ones. */
12737 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12738 target_flags |= MASK_FLOAT64;
12739 else
12740 target_flags &= ~MASK_FLOAT64;
12741 }
12743 /* End of code shared with GAS. */
12745 /* If no -mlong* option was given, infer it from the other options. */
12746 if ((target_flags_explicit & MASK_LONG64) == 0)
12748 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12749 target_flags |= MASK_LONG64;
12750 else
12751 target_flags &= ~MASK_LONG64;
12754 if (!TARGET_OLDABI)
12755 flag_pcc_struct_return = 0;
12757 /* Decide which rtx_costs structure to use. */
12758 if (optimize_size)
12759 mips_cost = &mips_rtx_cost_optimize_size;
12760 else
12761 mips_cost = &mips_rtx_cost_data[mips_tune];
12763 /* If the user hasn't specified a branch cost, use the processor's
12764 default. */
12765 if (mips_branch_cost == 0)
12766 mips_branch_cost = mips_cost->branch_cost;
12768 /* If neither -mbranch-likely nor -mno-branch-likely was given
12769 on the command line, set MASK_BRANCHLIKELY based on the target
12770 architecture and tuning flags. Annulled delay slots are a
12771 size win, so we only consider the processor-specific tuning
12772 for !optimize_size. */
12773 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12774 {
12775 if (ISA_HAS_BRANCHLIKELY
12776 && (optimize_size
12777 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12778 target_flags |= MASK_BRANCHLIKELY;
12779 else
12780 target_flags &= ~MASK_BRANCHLIKELY;
12781 }
12782 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12783 warning (0, "the %qs architecture does not support branch-likely"
12784 " instructions", mips_arch_info->name);
12786 /* The effect of -mabicalls isn't defined for the EABI. */
12787 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12788 {
12789 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12790 target_flags &= ~MASK_ABICALLS;
12791 }
12793 if (TARGET_ABICALLS)
12794 /* We need to set flag_pic for executables as well as DSOs
12795 because we may reference symbols that are not defined in
12796 the final executable. (MIPS does not use things like
12797 copy relocs, for example.)
12799 Also, there is a body of code that uses __PIC__ to distinguish
12800 between -mabicalls and -mno-abicalls code. */
12801 flag_pic = 1;
12803 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12804 faster code, but at the expense of more nops. Enable it at -O3 and
12805 above. */
12806 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12807 target_flags |= MASK_VR4130_ALIGN;
12809 /* Prefer a call to memcpy over inline code when optimizing for size,
12810 though see MOVE_RATIO in mips.h. */
12811 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12812 target_flags |= MASK_MEMCPY;
12814 /* If we have a nonzero small-data limit, check that the -mgpopt
12815 setting is consistent with the other target flags. */
12816 if (mips_small_data_threshold > 0)
12817 {
12818 if (!TARGET_GPOPT)
12819 {
12820 if (!TARGET_EXPLICIT_RELOCS)
12821 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12823 TARGET_LOCAL_SDATA = false;
12824 TARGET_EXTERN_SDATA = false;
12825 }
12826 else
12827 {
12828 if (TARGET_VXWORKS_RTP)
12829 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12831 if (TARGET_ABICALLS)
12832 warning (0, "cannot use small-data accesses for %qs",
12833 "-mabicalls");
12834 }
12835 }
12837 #ifdef MIPS_TFMODE_FORMAT
12838 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12839 #endif
12841 /* Make sure that the user didn't turn off paired single support when
12842 MIPS-3D support is requested. */
12843 if (TARGET_MIPS3D
12844 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12845 && !TARGET_PAIRED_SINGLE_FLOAT)
12846 error ("%<-mips3d%> requires %<-mpaired-single%>");
12848 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12849 if (TARGET_MIPS3D)
12850 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12852 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12853 and TARGET_HARD_FLOAT_ABI are both true. */
12854 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12855 error ("%qs must be used with %qs",
12856 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
12857 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
12859 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12860 enabled. */
12861 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
12862 warning (0, "the %qs architecture does not support paired-single"
12863 " instructions", mips_arch_info->name);
12865 /* If TARGET_DSPR2, enable MASK_DSP. */
12866 if (TARGET_DSPR2)
12867 target_flags |= MASK_DSP;
12869 mips_init_print_operand_punct ();
12871 /* Set up array to map GCC register number to debug register number.
12872 Ignore the special purpose register numbers. */
12874 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12875 {
12876 mips_dbx_regno[i] = INVALID_REGNUM;
12877 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12878 mips_dwarf_regno[i] = i;
12879 else
12880 mips_dwarf_regno[i] = INVALID_REGNUM;
12881 }
12883 start = GP_DBX_FIRST - GP_REG_FIRST;
12884 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12885 mips_dbx_regno[i] = i + start;
12887 start = FP_DBX_FIRST - FP_REG_FIRST;
12888 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12889 mips_dbx_regno[i] = i + start;
12891 /* Accumulator debug registers use big-endian ordering. */
12892 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12893 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12894 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12895 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12896 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12897 {
12898 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12899 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12900 }
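/* In effect, big-endian targets get the identity mapping for each
   accumulator pair, while little-endian targets swap the two DWARF
   numbers within a pair, so the DWARF numbering follows the big-endian
   ordering used for the debug registers above.  */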
12902 /* Set up mips_hard_regno_mode_ok. */
12903 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
12904 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12905 mips_hard_regno_mode_ok[(int)mode][regno]
12906 = mips_hard_regno_mode_ok_p (regno, mode);
12908 /* Function to allocate machine-dependent function status. */
12909 init_machine_status = &mips_init_machine_status;
12911 /* Default to working around R4000 errata only if the processor
12912 was selected explicitly. */
12913 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12914 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12915 target_flags |= MASK_FIX_R4000;
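/* For example, -march=r4000 (or an equivalent spelling such as
   -march=r4k) enables -mfix-r4000 here unless -mfix-r4000 or
   -mno-fix-r4000 was given explicitly on the command line.  */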
12917 /* Default to working around R4400 errata only if the processor
12918 was selected explicitly. */
12919 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12920 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12921 target_flags |= MASK_FIX_R4400;
12923 /* Save base state of options. */
12924 mips_base_target_flags = target_flags;
12925 mips_base_delayed_branch = flag_delayed_branch;
12926 mips_base_schedule_insns = flag_schedule_insns;
12927 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12928 mips_base_move_loop_invariants = flag_move_loop_invariants;
12929 mips_base_align_loops = align_loops;
12930 mips_base_align_jumps = align_jumps;
12931 mips_base_align_functions = align_functions;
12933 /* Now select the ISA mode.
12935 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
12936 MIPS16 mode afterwards if need be. */
12937 mips_set_mips16_mode (false);
12939 /* We call dbr_schedule from within mips_reorg. */
12940 flag_delayed_branch = 0;
12943 /* Swap the register information for registers I and I + 1, which
12944 currently have the wrong endianness. Note that the registers'
12945 fixedness and call-clobberedness might have been set on the
12946 command line. */
12948 static void
12949 mips_swap_registers (unsigned int i)
12951 int tmpi;
12952 const char *tmps;
12954 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12955 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12957 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12958 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12959 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12960 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12962 #undef SWAP_STRING
12963 #undef SWAP_INT
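/* mips_conditional_register_usage below calls this for the HI/LO pair
   and for each DSP accumulator pair when targeting little-endian.  */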
12966 /* Implement CONDITIONAL_REGISTER_USAGE. */
12968 void
12969 mips_conditional_register_usage (void)
12971 if (!ISA_HAS_DSP)
12972 {
12973 int regno;
12975 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12976 fixed_regs[regno] = call_used_regs[regno] = 1;
12977 }
12978 if (!TARGET_HARD_FLOAT)
12979 {
12980 int regno;
12982 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12983 fixed_regs[regno] = call_used_regs[regno] = 1;
12984 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12985 fixed_regs[regno] = call_used_regs[regno] = 1;
12986 }
12987 else if (! ISA_HAS_8CC)
12988 {
12989 int regno;
12991 /* We only have a single condition-code register. We implement
12992 this by fixing all the condition-code registers and generating
12993 RTL that refers directly to ST_REG_FIRST. */
12994 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12995 fixed_regs[regno] = call_used_regs[regno] = 1;
12996 }
12997 /* In MIPS16 mode, we permit the $t temporary registers to be used
12998 for reload. We prohibit the unused $s registers, since they
12999 are call-saved, and saving them via a MIPS16 register would
13000 probably waste more time than just reloading the value. */
13001 if (TARGET_MIPS16)
13002 {
13003 fixed_regs[18] = call_used_regs[18] = 1;
13004 fixed_regs[19] = call_used_regs[19] = 1;
13005 fixed_regs[20] = call_used_regs[20] = 1;
13006 fixed_regs[21] = call_used_regs[21] = 1;
13007 fixed_regs[22] = call_used_regs[22] = 1;
13008 fixed_regs[23] = call_used_regs[23] = 1;
13009 fixed_regs[26] = call_used_regs[26] = 1;
13010 fixed_regs[27] = call_used_regs[27] = 1;
13011 fixed_regs[30] = call_used_regs[30] = 1;
13012 }
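/* In GPR terms the registers fixed above are $s2-$s7 ($18-$23), $k0 and
   $k1 ($26, $27) and $fp/$s8 ($30); $s0, $s1 and the $t registers are
   left alone so that reload can still use them.  */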
13013 /* $f20-$f23 are call-clobbered for n64. */
13014 if (mips_abi == ABI_64)
13015 {
13016 int regno;
13017 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
13018 call_really_used_regs[regno] = call_used_regs[regno] = 1;
13019 }
13020 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
13021 for n32. */
13022 if (mips_abi == ABI_N32)
13023 {
13024 int regno;
13025 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
13026 call_really_used_regs[regno] = call_used_regs[regno] = 1;
13027 }
13028 /* Make sure that double-register accumulator values are correctly
13029 ordered for the current endianness. */
13030 if (TARGET_LITTLE_ENDIAN)
13031 {
13032 unsigned int regno;
13034 mips_swap_registers (MD_REG_FIRST);
13035 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
13036 mips_swap_registers (regno);
13037 }
13040 /* Initialize vector TARGET to VALS. */
13042 void
13043 mips_expand_vector_init (rtx target, rtx vals)
13045 enum machine_mode mode;
13046 enum machine_mode inner;
13047 unsigned int i, n_elts;
13048 rtx mem;
13050 mode = GET_MODE (target);
13051 inner = GET_MODE_INNER (mode);
13052 n_elts = GET_MODE_NUNITS (mode);
13054 gcc_assert (VECTOR_MODE_P (mode));
13056 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
13057 for (i = 0; i < n_elts; i++)
13058 emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
13059 XVECEXP (vals, 0, i));
13061 emit_move_insn (target, mem);
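/* For a V2SF vector, for example, the two SFmode elements are written to
   byte offsets 0 and 4 of the stack temporary and the whole slot is then
   copied into TARGET with a single V2SF move.  */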
13064 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
13065 other registers for instructions for which it is possible. This
13066 encourages the compiler to use CMP in cases where an XOR would
13067 require some register shuffling. */
13069 void
13070 mips_order_regs_for_local_alloc (void)
13072 int i;
13074 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
13075 reg_alloc_order[i] = i;
13077 if (TARGET_MIPS16)
13078 {
13079 /* It really doesn't matter where we put register 0, since it is
13080 a fixed register anyhow. */
13081 reg_alloc_order[0] = 24;
13082 reg_alloc_order[24] = 0;
13083 }
13086 /* Initialize the GCC target structure. */
13087 #undef TARGET_ASM_ALIGNED_HI_OP
13088 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
13089 #undef TARGET_ASM_ALIGNED_SI_OP
13090 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
13091 #undef TARGET_ASM_ALIGNED_DI_OP
13092 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
13094 #undef TARGET_ASM_FUNCTION_PROLOGUE
13095 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
13096 #undef TARGET_ASM_FUNCTION_EPILOGUE
13097 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
13098 #undef TARGET_ASM_SELECT_RTX_SECTION
13099 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
13100 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
13101 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
13103 #undef TARGET_SCHED_INIT
13104 #define TARGET_SCHED_INIT mips_sched_init
13105 #undef TARGET_SCHED_REORDER
13106 #define TARGET_SCHED_REORDER mips_sched_reorder
13107 #undef TARGET_SCHED_REORDER2
13108 #define TARGET_SCHED_REORDER2 mips_sched_reorder
13109 #undef TARGET_SCHED_VARIABLE_ISSUE
13110 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
13111 #undef TARGET_SCHED_ADJUST_COST
13112 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
13113 #undef TARGET_SCHED_ISSUE_RATE
13114 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
13115 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
13116 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
13117 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
13118 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
13119 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
13120 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
13121 mips_multipass_dfa_lookahead
13123 #undef TARGET_DEFAULT_TARGET_FLAGS
13124 #define TARGET_DEFAULT_TARGET_FLAGS \
13125 (TARGET_DEFAULT \
13126 | TARGET_CPU_DEFAULT \
13127 | TARGET_ENDIAN_DEFAULT \
13128 | TARGET_FP_EXCEPTIONS_DEFAULT \
13129 | MASK_CHECK_ZERO_DIV \
13130 | MASK_FUSED_MADD)
13131 #undef TARGET_HANDLE_OPTION
13132 #define TARGET_HANDLE_OPTION mips_handle_option
13134 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
13135 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
13137 #undef TARGET_INSERT_ATTRIBUTES
13138 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
13139 #undef TARGET_MERGE_DECL_ATTRIBUTES
13140 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
13141 #undef TARGET_SET_CURRENT_FUNCTION
13142 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
13144 #undef TARGET_VALID_POINTER_MODE
13145 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
13146 #undef TARGET_RTX_COSTS
13147 #define TARGET_RTX_COSTS mips_rtx_costs
13148 #undef TARGET_ADDRESS_COST
13149 #define TARGET_ADDRESS_COST mips_address_cost
13151 #undef TARGET_IN_SMALL_DATA_P
13152 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
13154 #undef TARGET_MACHINE_DEPENDENT_REORG
13155 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
13157 #undef TARGET_ASM_FILE_START
13158 #define TARGET_ASM_FILE_START mips_file_start
13159 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
13160 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
13162 #undef TARGET_INIT_LIBFUNCS
13163 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
13165 #undef TARGET_BUILD_BUILTIN_VA_LIST
13166 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
13167 #undef TARGET_EXPAND_BUILTIN_VA_START
13168 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
13169 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
13170 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
13172 #undef TARGET_PROMOTE_FUNCTION_ARGS
13173 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
13174 #undef TARGET_PROMOTE_FUNCTION_RETURN
13175 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
13176 #undef TARGET_PROMOTE_PROTOTYPES
13177 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
13179 #undef TARGET_RETURN_IN_MEMORY
13180 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
13181 #undef TARGET_RETURN_IN_MSB
13182 #define TARGET_RETURN_IN_MSB mips_return_in_msb
13184 #undef TARGET_ASM_OUTPUT_MI_THUNK
13185 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
13186 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
13187 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
13189 #undef TARGET_SETUP_INCOMING_VARARGS
13190 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
13191 #undef TARGET_STRICT_ARGUMENT_NAMING
13192 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
13193 #undef TARGET_MUST_PASS_IN_STACK
13194 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
13195 #undef TARGET_PASS_BY_REFERENCE
13196 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
13197 #undef TARGET_CALLEE_COPIES
13198 #define TARGET_CALLEE_COPIES mips_callee_copies
13199 #undef TARGET_ARG_PARTIAL_BYTES
13200 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
13202 #undef TARGET_MODE_REP_EXTENDED
13203 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
13205 #undef TARGET_VECTOR_MODE_SUPPORTED_P
13206 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
13208 #undef TARGET_SCALAR_MODE_SUPPORTED_P
13209 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
13211 #undef TARGET_INIT_BUILTINS
13212 #define TARGET_INIT_BUILTINS mips_init_builtins
13213 #undef TARGET_EXPAND_BUILTIN
13214 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
13216 #undef TARGET_HAVE_TLS
13217 #define TARGET_HAVE_TLS HAVE_AS_TLS
13219 #undef TARGET_CANNOT_FORCE_CONST_MEM
13220 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
13222 #undef TARGET_ENCODE_SECTION_INFO
13223 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
13225 #undef TARGET_ATTRIBUTE_TABLE
13226 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
13227 /* All our function attributes are related to how out-of-line copies should
13228 be compiled or called. They don't in themselves prevent inlining. */
13229 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
13230 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
13232 #undef TARGET_EXTRA_LIVE_ON_ENTRY
13233 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
13235 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
13236 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
13237 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
13238 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
13240 #undef TARGET_COMP_TYPE_ATTRIBUTES
13241 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
13243 #ifdef HAVE_AS_DTPRELWORD
13244 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
13245 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
13246 #endif
13247 #undef TARGET_DWARF_REGISTER_SPAN
13248 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
13250 struct gcc_target targetm = TARGET_INITIALIZER;
13252 #include "gt-mips.h"