/* gcc/config/mips/mips.c -- merge from trunk @ 138209.  */
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "libfuncs.h"
46 #include "flags.h"
47 #include "reload.h"
48 #include "tm_p.h"
49 #include "ggc.h"
50 #include "gstab.h"
51 #include "hashtab.h"
52 #include "debug.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "gimple.h"
60 #include "bitmap.h"
61 #include "diagnostic.h"
63 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
64 #define UNSPEC_ADDRESS_P(X) \
65 (GET_CODE (X) == UNSPEC \
66 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
67 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
69 /* Extract the symbol or label from UNSPEC wrapper X. */
70 #define UNSPEC_ADDRESS(X) \
71 XVECEXP (X, 0, 0)
73 /* Extract the symbol type from UNSPEC wrapper X. */
74 #define UNSPEC_ADDRESS_TYPE(X) \
75 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
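/* For example (an illustrative note, not from the original source): a
   symbol of type T is wrapped as

     (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + T)

   so UNSPEC_ADDRESS_P recognizes the wrapper, UNSPEC_ADDRESS extracts
   the SYMBOL_REF or LABEL_REF, and UNSPEC_ADDRESS_TYPE recovers T.  */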
77 /* The maximum distance between the top of the stack frame and the
78 value $sp has when we save and restore registers.
80 The value for normal-mode code must be a SMALL_OPERAND and must
81 preserve the maximum stack alignment. We therefore use a value
82 of 0x7ff0 in this case.
84 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
85 up to 0x7f8 bytes and can usually save or restore all the registers
86 that we need to save or restore. (Note that we can only use these
87 instructions for o32, for which the stack alignment is 8 bytes.)
89 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
90 RESTORE are not available. We can then use unextended instructions
91 to save and restore registers, and to allocate and deallocate the top
92 part of the frame. */
93 #define MIPS_MAX_FIRST_STACK_STEP \
94 (!TARGET_MIPS16 ? 0x7ff0 \
95 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
96 : TARGET_64BIT ? 0x100 : 0x400)
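/* Worked example (illustrative, not from the original source): for a
   normal-mode frame of 0x9000 bytes, the first stack adjustment is
   capped at 0x7ff0 bytes, and the remaining 0x1010 bytes are allocated
   by a second adjustment after the register saves, keeping every save
   slot within SMALL_OPERAND range of $sp.  */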
98 /* True if INSN is a mips.md pattern or asm statement. */
99 #define USEFUL_INSN_P(INSN) \
100 (INSN_P (INSN) \
101 && GET_CODE (PATTERN (INSN)) != USE \
102 && GET_CODE (PATTERN (INSN)) != CLOBBER \
103 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
104 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
106 /* If INSN is a delayed branch sequence, return the first instruction
107 in the sequence, otherwise return INSN itself. */
108 #define SEQ_BEGIN(INSN) \
109 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
110 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 : (INSN))
113 /* Likewise for the last instruction in a delayed branch sequence. */
114 #define SEQ_END(INSN) \
115 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
116 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
117 : (INSN))
119 /* Execute the following loop body with SUBINSN set to each instruction
120 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
121 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
122 for ((SUBINSN) = SEQ_BEGIN (INSN); \
123 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
124 (SUBINSN) = NEXT_INSN (SUBINSN))
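/* Illustrative sketch (not part of the original file): count the
   useful patterns within INSN, looking through any delayed-branch
   SEQUENCE by means of the macros above.  */

static int
mips_count_useful_subinsns (rtx insn)
{
  rtx subinsn;
  int count;

  count = 0;
  FOR_EACH_SUBINSN (subinsn, insn)
    if (USEFUL_INSN_P (subinsn))
      count++;
  return count;
}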
126 /* True if bit BIT is set in VALUE. */
127 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
129 /* Classifies an address.
131 ADDRESS_REG
132 A natural register + offset address. The register satisfies
133 mips_valid_base_register_p and the offset is a const_arith_operand.
135 ADDRESS_LO_SUM
136 A LO_SUM rtx. The first operand is a valid base register and
137 the second operand is a symbolic address.
139 ADDRESS_CONST_INT
140 A signed 16-bit constant address.
142 ADDRESS_SYMBOLIC:
143 A constant symbolic address. */
144 enum mips_address_type {
145 ADDRESS_REG,
146 ADDRESS_LO_SUM,
147 ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
151 /* Macros to create an enumeration identifier for a function prototype. */
152 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
153 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
154 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
155 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
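/* For example (illustrative): MIPS_FTYPE_NAME2 (SI, SI, SI) expands to
   MIPS_SI_FTYPE_SI_SI, so an entry such as DEF_MIPS_FTYPE (2, (SI, SI, SI))
   in mips-ftypes.def contributes that identifier to the enumeration
   below.  */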
157 /* Classifies the prototype of a built-in function. */
158 enum mips_function_type {
159 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
160 #include "config/mips/mips-ftypes.def"
161 #undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};
165 /* Specifies how a built-in function should be converted into rtl. */
166 enum mips_builtin_type {
167 /* The function corresponds directly to an .md pattern. The return
168 value is mapped to operand 0 and the arguments are mapped to
169 operands 1 and above. */
170 MIPS_BUILTIN_DIRECT,
172 /* The function corresponds directly to an .md pattern. There is no return
173 value and the arguments are mapped to operands 0 and above. */
174 MIPS_BUILTIN_DIRECT_NO_TARGET,
176 /* The function corresponds to a comparison instruction followed by
177 a mips_cond_move_tf_ps pattern. The first two arguments are the
178 values to compare and the second two arguments are the vector
179 operands for the movt.ps or movf.ps instruction (in assembly order). */
180 MIPS_BUILTIN_MOVF,
181 MIPS_BUILTIN_MOVT,
183 /* The function corresponds to a V2SF comparison instruction. Operand 0
184 of this instruction is the result of the comparison, which has mode
185 CCV2 or CCV4. The function arguments are mapped to operands 1 and
186 above. The function's return value is an SImode boolean that is
187 true under the following conditions:
189 MIPS_BUILTIN_CMP_ANY: one of the registers is true
190 MIPS_BUILTIN_CMP_ALL: all of the registers are true
191 MIPS_BUILTIN_CMP_LOWER: the first register is true
192 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
193 MIPS_BUILTIN_CMP_ANY,
194 MIPS_BUILTIN_CMP_ALL,
195 MIPS_BUILTIN_CMP_UPPER,
196 MIPS_BUILTIN_CMP_LOWER,
198 /* As above, but the instruction only sets a single $fcc register. */
199 MIPS_BUILTIN_CMP_SINGLE,
201 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
  MIPS_BUILTIN_BPOSGE32
};
205 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
206 #define MIPS_FP_CONDITIONS(MACRO) \
207 MACRO (f), \
208 MACRO (un), \
209 MACRO (eq), \
210 MACRO (ueq), \
211 MACRO (olt), \
212 MACRO (ult), \
213 MACRO (ole), \
214 MACRO (ule), \
215 MACRO (sf), \
216 MACRO (ngle), \
217 MACRO (seq), \
218 MACRO (ngl), \
219 MACRO (lt), \
220 MACRO (nge), \
221 MACRO (le), \
222 MACRO (ngt)
224 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
225 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
226 enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};
230 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
231 #define STRINGIFY(X) #X
232 static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
236 /* Information about a function's frame layout. */
237 struct mips_frame_info GTY(()) {
238 /* The size of the frame in bytes. */
239 HOST_WIDE_INT total_size;
241 /* The number of bytes allocated to variables. */
242 HOST_WIDE_INT var_size;
244 /* The number of bytes allocated to outgoing function arguments. */
245 HOST_WIDE_INT args_size;
247 /* The number of bytes allocated to the .cprestore slot, or 0 if there
248 is no such slot. */
249 HOST_WIDE_INT cprestore_size;
251 /* Bit X is set if the function saves or restores GPR X. */
252 unsigned int mask;
254 /* Likewise FPR X. */
255 unsigned int fmask;
257 /* The number of GPRs and FPRs saved. */
258 unsigned int num_gp;
259 unsigned int num_fp;
261 /* The offset of the topmost GPR and FPR save slots from the top of
262 the frame, or zero if no such slots are needed. */
263 HOST_WIDE_INT gp_save_offset;
264 HOST_WIDE_INT fp_save_offset;
266 /* Likewise, but giving offsets from the bottom of the frame. */
267 HOST_WIDE_INT gp_sp_offset;
268 HOST_WIDE_INT fp_sp_offset;
270 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
271 HOST_WIDE_INT arg_pointer_offset;
273 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
  HOST_WIDE_INT hard_frame_pointer_offset;
};
277 struct machine_function GTY(()) {
278 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
279 rtx mips16_gp_pseudo_rtx;
281 /* The number of extra stack bytes taken up by register varargs.
282 This area is allocated by the callee at the very top of the frame. */
283 int varargs_size;
285 /* The current frame information, calculated by mips_compute_frame_info. */
286 struct mips_frame_info frame;
288 /* The register to use as the function's global pointer. */
289 unsigned int global_pointer;
291 /* True if mips_adjust_insn_length should ignore an instruction's
292 hazard attribute. */
293 bool ignore_hazard_length_p;
295 /* True if the whole function is suitable for .set noreorder and
296 .set nomacro. */
297 bool all_noreorder_p;
299 /* True if the function is known to have an instruction that needs $gp. */
300 bool has_gp_insn_p;
302 /* True if we have emitted an instruction to initialize
303 mips16_gp_pseudo_rtx. */
  bool initialized_mips16_gp_pseudo_p;
};
307 /* Information about a single argument. */
308 struct mips_arg_info {
309 /* True if the argument is passed in a floating-point register, or
310 would have been if we hadn't run out of registers. */
311 bool fpr_p;
313 /* The number of words passed in registers, rounded up. */
314 unsigned int reg_words;
316 /* For EABI, the offset of the first register from GP_ARG_FIRST or
317 FP_ARG_FIRST. For other ABIs, the offset of the first register from
318 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
319 comment for details).
321 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
322 on the stack. */
323 unsigned int reg_offset;
325 /* The number of words that must be passed on the stack, rounded up. */
326 unsigned int stack_words;
328 /* The offset from the start of the stack overflow area of the argument's
329 first stack word. Only meaningful when STACK_WORDS is nonzero. */
  unsigned int stack_offset;
};
333 /* Information about an address described by mips_address_type.
335 ADDRESS_CONST_INT
336 No fields are used.
338 ADDRESS_REG
339 REG is the base register and OFFSET is the constant offset.
341 ADDRESS_LO_SUM
342 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
343 is the type of symbol it references.
345 ADDRESS_SYMBOLIC
346 SYMBOL_TYPE is the type of symbol that the address references. */
347 struct mips_address_info {
348 enum mips_address_type type;
349 rtx reg;
350 rtx offset;
  enum mips_symbol_type symbol_type;
};
354 /* One stage in a constant building sequence. These sequences have
355 the form:
357 A = VALUE[0]
358 A = A CODE[1] VALUE[1]
359 A = A CODE[2] VALUE[2]
362 where A is an accumulator, each CODE[i] is a binary rtl operation
363 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
364 struct mips_integer_op {
365 enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};
369 /* The largest number of operations needed to load an integer constant.
370 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
371 When the lowest bit is clear, we can try, but reject a sequence with
372 an extra SLL at the end. */
373 #define MIPS_MAX_INTEGER_OPS 7
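/* Worked example (illustrative, not from the original source): loading
   0x12345678 takes two steps in the form described above:

     A = 0x12340000       (LUI)
     A = A IOR 0x5678     (ORI)

   The worst accepted 64-bit sequence needs six operations; the array is
   one entry larger so that a candidate ending in an extra SLL can be
   built before being rejected.  */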
375 /* Information about a MIPS16e SAVE or RESTORE instruction. */
376 struct mips16e_save_restore_info {
377 /* The number of argument registers saved by a SAVE instruction.
378 0 for RESTORE instructions. */
379 unsigned int nargs;
381 /* Bit X is set if the instruction saves or restores GPR X. */
382 unsigned int mask;
384 /* The total number of bytes to allocate. */
  HOST_WIDE_INT size;
};
388 /* Global variables for machine-dependent things. */
390 /* The -G setting, or the configuration's default small-data limit if
391 no -G option is given. */
392 static unsigned int mips_small_data_threshold;
394 /* The number of file directives written by mips_output_filename. */
395 int num_source_filenames;
397 /* The name that appeared in the last .file directive written by
398 mips_output_filename, or "" if mips_output_filename hasn't
399 written anything yet. */
400 const char *current_function_file = "";
402 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
403 int sdb_label_count;
405 /* Arrays that map GCC register numbers to debugger register numbers. */
406 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
407 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
409 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
410 int set_noreorder;
411 int set_nomacro;
412 static int set_noat;
414 /* True if we're writing out a branch-likely instruction rather than a
415 normal branch. */
416 static bool mips_branch_likely;
418 /* The operands passed to the last cmpMM expander. */
419 rtx cmp_operands[2];
421 /* The current instruction-set architecture. */
422 enum processor_type mips_arch;
423 const struct mips_cpu_info *mips_arch_info;
425 /* The processor that we should tune the code for. */
426 enum processor_type mips_tune;
427 const struct mips_cpu_info *mips_tune_info;
429 /* The ISA level associated with mips_arch. */
430 int mips_isa;
432 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
433 static const struct mips_cpu_info *mips_isa_option_info;
435 /* Which ABI to use. */
436 int mips_abi = MIPS_ABI_DEFAULT;
438 /* Which cost information to use. */
439 const struct mips_rtx_cost_data *mips_cost;
441 /* The ambient target flags, excluding MASK_MIPS16. */
442 static int mips_base_target_flags;
444 /* True if MIPS16 is the default mode. */
445 bool mips_base_mips16;
447 /* The ambient values of other global variables. */
448 static int mips_base_delayed_branch; /* flag_delayed_branch */
449 static int mips_base_schedule_insns; /* flag_schedule_insns */
450 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
451 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
452 static int mips_base_align_loops; /* align_loops */
453 static int mips_base_align_jumps; /* align_jumps */
454 static int mips_base_align_functions; /* align_functions */
456 /* The -mcode-readable setting. */
457 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
459 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
460 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character.  */
464 bool mips_print_operand_punct[256];
466 static GTY (()) int mips_output_filename_first_time = 1;
468 /* mips_split_p[X] is true if symbols of type X can be split by
469 mips_split_symbol. */
470 bool mips_split_p[NUM_SYMBOL_TYPES];
472 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
473 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
474 if they are matched by a special .md file pattern. */
475 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
477 /* Likewise for HIGHs. */
478 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
480 /* Index R is the smallest register class that contains register R. */
481 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
482 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
483 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
484 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
485 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
486 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
487 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
488 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
489 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
490 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
491 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
492 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
493 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
494 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
495 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
496 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
497 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
498 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
499 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
500 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
501 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
502 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
503 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
504 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
505 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
506 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
507 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
508 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
509 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
510 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
511 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
512 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
513 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
514 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
515 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
516 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
517 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
518 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
519 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
520 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
521 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
522 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
523 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
524 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
525 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
526 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
527 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
  ALL_REGS,	ALL_REGS,	ALL_REGS,	ALL_REGS
};
531 /* The value of TARGET_ATTRIBUTE_TABLE. */
532 const struct attribute_spec mips_attribute_table[] = {
533 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
534 { "long_call", 0, 0, false, true, true, NULL },
535 { "far", 0, 0, false, true, true, NULL },
536 { "near", 0, 0, false, true, true, NULL },
537 /* We would really like to treat "mips16" and "nomips16" as type
538 attributes, but GCC doesn't provide the hooks we need to support
539 the right conversion rules. As declaration attributes, they affect
540 code generation but don't carry other semantics. */
541 { "mips16", 0, 0, true, false, false, NULL },
542 { "nomips16", 0, 0, true, false, false, NULL },
  { NULL,	   0, 0, false, false, false, NULL }
};
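/* Illustrative usage (not from the original source):

     void compact (void) __attribute__ ((mips16));
     void critical (void) __attribute__ ((nomips16));
     int helper (int) __attribute__ ((long_call));

   "mips16" and "nomips16" are declaration attributes, as noted above,
   while "long_call", "far" and "near" apply to function types.  */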
546 /* A table describing all the processors GCC knows about. Names are
547 matched in the order listed. The first mention of an ISA level is
548 taken as the canonical name for that ISA.
550 To ease comparison, please keep this table in the same order
551 as GAS's mips_cpu_info_table. Please also make sure that
552 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
553 options correctly. */
554 static const struct mips_cpu_info mips_cpu_info_table[] = {
555 /* Entries for generic ISAs. */
556 { "mips1", PROCESSOR_R3000, 1, 0 },
557 { "mips2", PROCESSOR_R6000, 2, 0 },
558 { "mips3", PROCESSOR_R4000, 3, 0 },
559 { "mips4", PROCESSOR_R8000, 4, 0 },
560 /* Prefer not to use branch-likely instructions for generic MIPS32rX
561 and MIPS64rX code. The instructions were officially deprecated
562 in revisions 2 and earlier, but revision 3 is likely to downgrade
563 that to a recommendation to avoid the instructions in code that
564 isn't tuned to a specific processor. */
565 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
566 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
567 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
569 /* MIPS I processors. */
570 { "r3000", PROCESSOR_R3000, 1, 0 },
571 { "r2000", PROCESSOR_R3000, 1, 0 },
572 { "r3900", PROCESSOR_R3900, 1, 0 },
574 /* MIPS II processors. */
575 { "r6000", PROCESSOR_R6000, 2, 0 },
577 /* MIPS III processors. */
578 { "r4000", PROCESSOR_R4000, 3, 0 },
579 { "vr4100", PROCESSOR_R4100, 3, 0 },
580 { "vr4111", PROCESSOR_R4111, 3, 0 },
581 { "vr4120", PROCESSOR_R4120, 3, 0 },
582 { "vr4130", PROCESSOR_R4130, 3, 0 },
583 { "vr4300", PROCESSOR_R4300, 3, 0 },
584 { "r4400", PROCESSOR_R4000, 3, 0 },
585 { "r4600", PROCESSOR_R4600, 3, 0 },
586 { "orion", PROCESSOR_R4600, 3, 0 },
587 { "r4650", PROCESSOR_R4650, 3, 0 },
588 /* ST Loongson 2E/2F processors. */
589 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
590 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
592 /* MIPS IV processors. */
593 { "r8000", PROCESSOR_R8000, 4, 0 },
594 { "vr5000", PROCESSOR_R5000, 4, 0 },
595 { "vr5400", PROCESSOR_R5400, 4, 0 },
596 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
597 { "rm7000", PROCESSOR_R7000, 4, 0 },
598 { "rm9000", PROCESSOR_R9000, 4, 0 },
600 /* MIPS32 processors. */
601 { "4kc", PROCESSOR_4KC, 32, 0 },
602 { "4km", PROCESSOR_4KC, 32, 0 },
603 { "4kp", PROCESSOR_4KP, 32, 0 },
604 { "4ksc", PROCESSOR_4KC, 32, 0 },
606 /* MIPS32 Release 2 processors. */
607 { "m4k", PROCESSOR_M4K, 33, 0 },
608 { "4kec", PROCESSOR_4KC, 33, 0 },
609 { "4kem", PROCESSOR_4KC, 33, 0 },
610 { "4kep", PROCESSOR_4KP, 33, 0 },
611 { "4ksd", PROCESSOR_4KC, 33, 0 },
613 { "24kc", PROCESSOR_24KC, 33, 0 },
614 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
615 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
616 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
617 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
618 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
620 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
621 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
622 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
623 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
624 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
625 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
627 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
628 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
629 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
630 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
631 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
632 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
634 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
635 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
636 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
637 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
638 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
639 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
640 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
642 /* MIPS64 processors. */
643 { "5kc", PROCESSOR_5KC, 64, 0 },
644 { "5kf", PROCESSOR_5KF, 64, 0 },
645 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
646 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
647 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
648 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
  { "xlr", PROCESSOR_XLR, 64, 0 }
};
652 /* Default costs. If these are used for a processor we should look
653 up the actual costs. */
654 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
655 COSTS_N_INSNS (7), /* fp_mult_sf */ \
656 COSTS_N_INSNS (8), /* fp_mult_df */ \
657 COSTS_N_INSNS (23), /* fp_div_sf */ \
658 COSTS_N_INSNS (36), /* fp_div_df */ \
659 COSTS_N_INSNS (10), /* int_mult_si */ \
660 COSTS_N_INSNS (10), /* int_mult_di */ \
661 COSTS_N_INSNS (69), /* int_div_si */ \
662 COSTS_N_INSNS (69), /* int_div_di */ \
663 2, /* branch_cost */ \
664 4 /* memory_latency */
666 /* Floating-point costs for processors without an FPU. Just assume that
667 all floating-point libcalls are very expensive. */
668 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
669 COSTS_N_INSNS (256), /* fp_mult_sf */ \
670 COSTS_N_INSNS (256), /* fp_mult_df */ \
671 COSTS_N_INSNS (256), /* fp_div_sf */ \
672 COSTS_N_INSNS (256) /* fp_div_df */
674 /* Costs to use when optimizing for size. */
675 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
676 COSTS_N_INSNS (1), /* fp_add */
677 COSTS_N_INSNS (1), /* fp_mult_sf */
678 COSTS_N_INSNS (1), /* fp_mult_df */
679 COSTS_N_INSNS (1), /* fp_div_sf */
680 COSTS_N_INSNS (1), /* fp_div_df */
681 COSTS_N_INSNS (1), /* int_mult_si */
682 COSTS_N_INSNS (1), /* int_mult_di */
683 COSTS_N_INSNS (1), /* int_div_si */
684 COSTS_N_INSNS (1), /* int_div_di */
685 2, /* branch_cost */
  4                     /* memory_latency */
};
689 /* Costs to use when optimizing for speed, indexed by processor. */
690 static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
691 { /* R3000 */
692 COSTS_N_INSNS (2), /* fp_add */
693 COSTS_N_INSNS (4), /* fp_mult_sf */
694 COSTS_N_INSNS (5), /* fp_mult_df */
695 COSTS_N_INSNS (12), /* fp_div_sf */
696 COSTS_N_INSNS (19), /* fp_div_df */
697 COSTS_N_INSNS (12), /* int_mult_si */
698 COSTS_N_INSNS (12), /* int_mult_di */
699 COSTS_N_INSNS (35), /* int_div_si */
700 COSTS_N_INSNS (35), /* int_div_di */
701 1, /* branch_cost */
702 4 /* memory_latency */
704 { /* 4KC */
705 SOFT_FP_COSTS,
706 COSTS_N_INSNS (6), /* int_mult_si */
707 COSTS_N_INSNS (6), /* int_mult_di */
708 COSTS_N_INSNS (36), /* int_div_si */
709 COSTS_N_INSNS (36), /* int_div_di */
710 1, /* branch_cost */
711 4 /* memory_latency */
713 { /* 4KP */
714 SOFT_FP_COSTS,
715 COSTS_N_INSNS (36), /* int_mult_si */
716 COSTS_N_INSNS (36), /* int_mult_di */
717 COSTS_N_INSNS (37), /* int_div_si */
718 COSTS_N_INSNS (37), /* int_div_di */
719 1, /* branch_cost */
720 4 /* memory_latency */
722 { /* 5KC */
723 SOFT_FP_COSTS,
724 COSTS_N_INSNS (4), /* int_mult_si */
725 COSTS_N_INSNS (11), /* int_mult_di */
726 COSTS_N_INSNS (36), /* int_div_si */
727 COSTS_N_INSNS (68), /* int_div_di */
728 1, /* branch_cost */
729 4 /* memory_latency */
731 { /* 5KF */
732 COSTS_N_INSNS (4), /* fp_add */
733 COSTS_N_INSNS (4), /* fp_mult_sf */
734 COSTS_N_INSNS (5), /* fp_mult_df */
735 COSTS_N_INSNS (17), /* fp_div_sf */
736 COSTS_N_INSNS (32), /* fp_div_df */
737 COSTS_N_INSNS (4), /* int_mult_si */
738 COSTS_N_INSNS (11), /* int_mult_di */
739 COSTS_N_INSNS (36), /* int_div_si */
740 COSTS_N_INSNS (68), /* int_div_di */
741 1, /* branch_cost */
742 4 /* memory_latency */
744 { /* 20KC */
745 COSTS_N_INSNS (4), /* fp_add */
746 COSTS_N_INSNS (4), /* fp_mult_sf */
747 COSTS_N_INSNS (5), /* fp_mult_df */
748 COSTS_N_INSNS (17), /* fp_div_sf */
749 COSTS_N_INSNS (32), /* fp_div_df */
750 COSTS_N_INSNS (4), /* int_mult_si */
751 COSTS_N_INSNS (7), /* int_mult_di */
752 COSTS_N_INSNS (42), /* int_div_si */
753 COSTS_N_INSNS (72), /* int_div_di */
754 1, /* branch_cost */
755 4 /* memory_latency */
757 { /* 24KC */
758 SOFT_FP_COSTS,
759 COSTS_N_INSNS (5), /* int_mult_si */
760 COSTS_N_INSNS (5), /* int_mult_di */
761 COSTS_N_INSNS (41), /* int_div_si */
762 COSTS_N_INSNS (41), /* int_div_di */
763 1, /* branch_cost */
764 4 /* memory_latency */
766 { /* 24KF2_1 */
767 COSTS_N_INSNS (8), /* fp_add */
768 COSTS_N_INSNS (8), /* fp_mult_sf */
769 COSTS_N_INSNS (10), /* fp_mult_df */
770 COSTS_N_INSNS (34), /* fp_div_sf */
771 COSTS_N_INSNS (64), /* fp_div_df */
772 COSTS_N_INSNS (5), /* int_mult_si */
773 COSTS_N_INSNS (5), /* int_mult_di */
774 COSTS_N_INSNS (41), /* int_div_si */
775 COSTS_N_INSNS (41), /* int_div_di */
776 1, /* branch_cost */
777 4 /* memory_latency */
779 { /* 24KF1_1 */
780 COSTS_N_INSNS (4), /* fp_add */
781 COSTS_N_INSNS (4), /* fp_mult_sf */
782 COSTS_N_INSNS (5), /* fp_mult_df */
783 COSTS_N_INSNS (17), /* fp_div_sf */
784 COSTS_N_INSNS (32), /* fp_div_df */
785 COSTS_N_INSNS (5), /* int_mult_si */
786 COSTS_N_INSNS (5), /* int_mult_di */
787 COSTS_N_INSNS (41), /* int_div_si */
788 COSTS_N_INSNS (41), /* int_div_di */
789 1, /* branch_cost */
790 4 /* memory_latency */
792 { /* 74KC */
793 SOFT_FP_COSTS,
794 COSTS_N_INSNS (5), /* int_mult_si */
795 COSTS_N_INSNS (5), /* int_mult_di */
796 COSTS_N_INSNS (41), /* int_div_si */
797 COSTS_N_INSNS (41), /* int_div_di */
798 1, /* branch_cost */
799 4 /* memory_latency */
801 { /* 74KF2_1 */
802 COSTS_N_INSNS (8), /* fp_add */
803 COSTS_N_INSNS (8), /* fp_mult_sf */
804 COSTS_N_INSNS (10), /* fp_mult_df */
805 COSTS_N_INSNS (34), /* fp_div_sf */
806 COSTS_N_INSNS (64), /* fp_div_df */
807 COSTS_N_INSNS (5), /* int_mult_si */
808 COSTS_N_INSNS (5), /* int_mult_di */
809 COSTS_N_INSNS (41), /* int_div_si */
810 COSTS_N_INSNS (41), /* int_div_di */
811 1, /* branch_cost */
812 4 /* memory_latency */
814 { /* 74KF1_1 */
815 COSTS_N_INSNS (4), /* fp_add */
816 COSTS_N_INSNS (4), /* fp_mult_sf */
817 COSTS_N_INSNS (5), /* fp_mult_df */
818 COSTS_N_INSNS (17), /* fp_div_sf */
819 COSTS_N_INSNS (32), /* fp_div_df */
820 COSTS_N_INSNS (5), /* int_mult_si */
821 COSTS_N_INSNS (5), /* int_mult_di */
822 COSTS_N_INSNS (41), /* int_div_si */
823 COSTS_N_INSNS (41), /* int_div_di */
824 1, /* branch_cost */
825 4 /* memory_latency */
827 { /* 74KF3_2 */
828 COSTS_N_INSNS (6), /* fp_add */
829 COSTS_N_INSNS (6), /* fp_mult_sf */
830 COSTS_N_INSNS (7), /* fp_mult_df */
831 COSTS_N_INSNS (25), /* fp_div_sf */
832 COSTS_N_INSNS (48), /* fp_div_df */
833 COSTS_N_INSNS (5), /* int_mult_si */
834 COSTS_N_INSNS (5), /* int_mult_di */
835 COSTS_N_INSNS (41), /* int_div_si */
836 COSTS_N_INSNS (41), /* int_div_di */
837 1, /* branch_cost */
838 4 /* memory_latency */
840 { /* Loongson-2E */
841 DEFAULT_COSTS
843 { /* Loongson-2F */
844 DEFAULT_COSTS
846 { /* M4k */
847 DEFAULT_COSTS
849 { /* R3900 */
850 COSTS_N_INSNS (2), /* fp_add */
851 COSTS_N_INSNS (4), /* fp_mult_sf */
852 COSTS_N_INSNS (5), /* fp_mult_df */
853 COSTS_N_INSNS (12), /* fp_div_sf */
854 COSTS_N_INSNS (19), /* fp_div_df */
855 COSTS_N_INSNS (2), /* int_mult_si */
856 COSTS_N_INSNS (2), /* int_mult_di */
857 COSTS_N_INSNS (35), /* int_div_si */
858 COSTS_N_INSNS (35), /* int_div_di */
859 1, /* branch_cost */
860 4 /* memory_latency */
862 { /* R6000 */
863 COSTS_N_INSNS (3), /* fp_add */
864 COSTS_N_INSNS (5), /* fp_mult_sf */
865 COSTS_N_INSNS (6), /* fp_mult_df */
866 COSTS_N_INSNS (15), /* fp_div_sf */
867 COSTS_N_INSNS (16), /* fp_div_df */
868 COSTS_N_INSNS (17), /* int_mult_si */
869 COSTS_N_INSNS (17), /* int_mult_di */
870 COSTS_N_INSNS (38), /* int_div_si */
871 COSTS_N_INSNS (38), /* int_div_di */
872 2, /* branch_cost */
873 6 /* memory_latency */
875 { /* R4000 */
876 COSTS_N_INSNS (6), /* fp_add */
877 COSTS_N_INSNS (7), /* fp_mult_sf */
878 COSTS_N_INSNS (8), /* fp_mult_df */
879 COSTS_N_INSNS (23), /* fp_div_sf */
880 COSTS_N_INSNS (36), /* fp_div_df */
881 COSTS_N_INSNS (10), /* int_mult_si */
882 COSTS_N_INSNS (10), /* int_mult_di */
883 COSTS_N_INSNS (69), /* int_div_si */
884 COSTS_N_INSNS (69), /* int_div_di */
885 2, /* branch_cost */
886 6 /* memory_latency */
888 { /* R4100 */
889 DEFAULT_COSTS
891 { /* R4111 */
892 DEFAULT_COSTS
894 { /* R4120 */
895 DEFAULT_COSTS
897 { /* R4130 */
898 /* The only costs that appear to be updated here are
899 integer multiplication. */
900 SOFT_FP_COSTS,
901 COSTS_N_INSNS (4), /* int_mult_si */
902 COSTS_N_INSNS (6), /* int_mult_di */
903 COSTS_N_INSNS (69), /* int_div_si */
904 COSTS_N_INSNS (69), /* int_div_di */
905 1, /* branch_cost */
906 4 /* memory_latency */
908 { /* R4300 */
909 DEFAULT_COSTS
911 { /* R4600 */
912 DEFAULT_COSTS
914 { /* R4650 */
915 DEFAULT_COSTS
917 { /* R5000 */
918 COSTS_N_INSNS (6), /* fp_add */
919 COSTS_N_INSNS (4), /* fp_mult_sf */
920 COSTS_N_INSNS (5), /* fp_mult_df */
921 COSTS_N_INSNS (23), /* fp_div_sf */
922 COSTS_N_INSNS (36), /* fp_div_df */
923 COSTS_N_INSNS (5), /* int_mult_si */
924 COSTS_N_INSNS (5), /* int_mult_di */
925 COSTS_N_INSNS (36), /* int_div_si */
926 COSTS_N_INSNS (36), /* int_div_di */
927 1, /* branch_cost */
928 4 /* memory_latency */
930 { /* R5400 */
931 COSTS_N_INSNS (6), /* fp_add */
932 COSTS_N_INSNS (5), /* fp_mult_sf */
933 COSTS_N_INSNS (6), /* fp_mult_df */
934 COSTS_N_INSNS (30), /* fp_div_sf */
935 COSTS_N_INSNS (59), /* fp_div_df */
936 COSTS_N_INSNS (3), /* int_mult_si */
937 COSTS_N_INSNS (4), /* int_mult_di */
938 COSTS_N_INSNS (42), /* int_div_si */
939 COSTS_N_INSNS (74), /* int_div_di */
940 1, /* branch_cost */
941 4 /* memory_latency */
943 { /* R5500 */
944 COSTS_N_INSNS (6), /* fp_add */
945 COSTS_N_INSNS (5), /* fp_mult_sf */
946 COSTS_N_INSNS (6), /* fp_mult_df */
947 COSTS_N_INSNS (30), /* fp_div_sf */
948 COSTS_N_INSNS (59), /* fp_div_df */
949 COSTS_N_INSNS (5), /* int_mult_si */
950 COSTS_N_INSNS (9), /* int_mult_di */
951 COSTS_N_INSNS (42), /* int_div_si */
952 COSTS_N_INSNS (74), /* int_div_di */
953 1, /* branch_cost */
954 4 /* memory_latency */
956 { /* R7000 */
957 /* The only costs that are changed here are
958 integer multiplication. */
959 COSTS_N_INSNS (6), /* fp_add */
960 COSTS_N_INSNS (7), /* fp_mult_sf */
961 COSTS_N_INSNS (8), /* fp_mult_df */
962 COSTS_N_INSNS (23), /* fp_div_sf */
963 COSTS_N_INSNS (36), /* fp_div_df */
964 COSTS_N_INSNS (5), /* int_mult_si */
965 COSTS_N_INSNS (9), /* int_mult_di */
966 COSTS_N_INSNS (69), /* int_div_si */
967 COSTS_N_INSNS (69), /* int_div_di */
968 1, /* branch_cost */
969 4 /* memory_latency */
971 { /* R8000 */
972 DEFAULT_COSTS
974 { /* R9000 */
975 /* The only costs that are changed here are
976 integer multiplication. */
977 COSTS_N_INSNS (6), /* fp_add */
978 COSTS_N_INSNS (7), /* fp_mult_sf */
979 COSTS_N_INSNS (8), /* fp_mult_df */
980 COSTS_N_INSNS (23), /* fp_div_sf */
981 COSTS_N_INSNS (36), /* fp_div_df */
982 COSTS_N_INSNS (3), /* int_mult_si */
983 COSTS_N_INSNS (8), /* int_mult_di */
984 COSTS_N_INSNS (69), /* int_div_si */
985 COSTS_N_INSNS (69), /* int_div_di */
986 1, /* branch_cost */
987 4 /* memory_latency */
989 { /* SB1 */
990 /* These costs are the same as the SB-1A below. */
991 COSTS_N_INSNS (4), /* fp_add */
992 COSTS_N_INSNS (4), /* fp_mult_sf */
993 COSTS_N_INSNS (4), /* fp_mult_df */
994 COSTS_N_INSNS (24), /* fp_div_sf */
995 COSTS_N_INSNS (32), /* fp_div_df */
996 COSTS_N_INSNS (3), /* int_mult_si */
997 COSTS_N_INSNS (4), /* int_mult_di */
998 COSTS_N_INSNS (36), /* int_div_si */
999 COSTS_N_INSNS (68), /* int_div_di */
1000 1, /* branch_cost */
1001 4 /* memory_latency */
1003 { /* SB1-A */
1004 /* These costs are the same as the SB-1 above. */
1005 COSTS_N_INSNS (4), /* fp_add */
1006 COSTS_N_INSNS (4), /* fp_mult_sf */
1007 COSTS_N_INSNS (4), /* fp_mult_df */
1008 COSTS_N_INSNS (24), /* fp_div_sf */
1009 COSTS_N_INSNS (32), /* fp_div_df */
1010 COSTS_N_INSNS (3), /* int_mult_si */
1011 COSTS_N_INSNS (4), /* int_mult_di */
1012 COSTS_N_INSNS (36), /* int_div_si */
1013 COSTS_N_INSNS (68), /* int_div_di */
1014 1, /* branch_cost */
1015 4 /* memory_latency */
1017 { /* SR71000 */
1018 DEFAULT_COSTS
1020 { /* XLR */
1021 /* Need to replace first five with the costs of calling the appropriate
1022 libgcc routine. */
1023 COSTS_N_INSNS (256), /* fp_add */
1024 COSTS_N_INSNS (256), /* fp_mult_sf */
1025 COSTS_N_INSNS (256), /* fp_mult_df */
1026 COSTS_N_INSNS (256), /* fp_div_sf */
1027 COSTS_N_INSNS (256), /* fp_div_df */
1028 COSTS_N_INSNS (8), /* int_mult_si */
1029 COSTS_N_INSNS (8), /* int_mult_di */
1030 COSTS_N_INSNS (72), /* int_div_si */
1031 COSTS_N_INSNS (72), /* int_div_di */
1032 1, /* branch_cost */
    4                     /* memory_latency */
  }
};
1037 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1038 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1039 struct mflip_mips16_entry GTY (()) {
1040 const char *name;
  bool mips16_p;
};
1043 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1045 /* Hash table callbacks for mflip_mips16_htab. */
1047 static hashval_t
1048 mflip_mips16_htab_hash (const void *entry)
1050 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1053 static int
1054 mflip_mips16_htab_eq (const void *entry, const void *name)
1056 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1057 (const char *) name) == 0;
1060 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1061 mode, false if it should next add an attribute for the opposite mode. */
1062 static GTY(()) bool mips16_flipper;
1064 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1065 for -mflip-mips16. Return true if it should use "mips16" and false if
1066 it should use "nomips16". */
1068 static bool
1069 mflip_mips16_use_mips16_p (tree decl)
1071 struct mflip_mips16_entry *entry;
1072 const char *name;
1073 hashval_t hash;
1074 void **slot;
1076 /* Use the opposite of the command-line setting for anonymous decls. */
1077 if (!DECL_NAME (decl))
1078 return !mips_base_mips16;
1080 if (!mflip_mips16_htab)
1081 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1082 mflip_mips16_htab_eq, NULL);
1084 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1085 hash = htab_hash_string (name);
1086 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1087 entry = (struct mflip_mips16_entry *) *slot;
1088 if (!entry)
1090 mips16_flipper = !mips16_flipper;
1091 entry = GGC_NEW (struct mflip_mips16_entry);
1092 entry->name = name;
1093 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1094 *slot = entry;
1096 return entry->mips16_p;
1099 /* Predicates to test for presence of "near" and "far"/"long_call"
1100 attributes on the given TYPE. */
1102 static bool
1103 mips_near_type_p (const_tree type)
1105 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1108 static bool
1109 mips_far_type_p (const_tree type)
1111 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1112 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1115 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1117 static bool
1118 mips_mips16_decl_p (const_tree decl)
1120 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1123 static bool
1124 mips_nomips16_decl_p (const_tree decl)
1126 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1129 /* Return true if function DECL is a MIPS16 function. Return the ambient
1130 setting if DECL is null. */
1132 static bool
1133 mips_use_mips16_mode_p (tree decl)
1135 if (decl)
1137 /* Nested functions must use the same frame pointer as their
1138 parent and must therefore use the same ISA mode. */
1139 tree parent = decl_function_context (decl);
1140 if (parent)
1141 decl = parent;
1142 if (mips_mips16_decl_p (decl))
1143 return true;
1144 if (mips_nomips16_decl_p (decl))
1145 return false;
1147 return mips_base_mips16;
1150 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1152 static int
1153 mips_comp_type_attributes (const_tree type1, const_tree type2)
1155 /* Disallow mixed near/far attributes. */
1156 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1157 return 0;
1158 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1159 return 0;
1160 return 1;
1163 /* Implement TARGET_INSERT_ATTRIBUTES. */
1165 static void
1166 mips_insert_attributes (tree decl, tree *attributes)
1168 const char *name;
1169 bool mips16_p, nomips16_p;
1171 /* Check for "mips16" and "nomips16" attributes. */
1172 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1173 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1174 if (TREE_CODE (decl) != FUNCTION_DECL)
1176 if (mips16_p)
1177 error ("%qs attribute only applies to functions", "mips16");
1178 if (nomips16_p)
1179 error ("%qs attribute only applies to functions", "nomips16");
1181 else
1183 mips16_p |= mips_mips16_decl_p (decl);
1184 nomips16_p |= mips_nomips16_decl_p (decl);
1185 if (mips16_p || nomips16_p)
1187 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1188 if (mips16_p && nomips16_p)
1189 error ("%qs cannot have both %<mips16%> and "
1190 "%<nomips16%> attributes",
1191 IDENTIFIER_POINTER (DECL_NAME (decl)));
1193 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1195 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1196 "mips16" attribute, arbitrarily pick one. We must pick the same
1197 setting for duplicate declarations of a function. */
1198 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1199 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1204 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1206 static tree
1207 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1209 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1210 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1211 error ("%qs redeclared with conflicting %qs attributes",
1212 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1213 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1214 error ("%qs redeclared with conflicting %qs attributes",
1215 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1217 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1218 DECL_ATTRIBUTES (newdecl));
1221 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1222 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1224 static void
1225 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1227 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1229 *base_ptr = XEXP (x, 0);
1230 *offset_ptr = INTVAL (XEXP (x, 1));
1232 else
1234 *base_ptr = x;
1235 *offset_ptr = 0;
1239 static unsigned int mips_build_integer (struct mips_integer_op *,
1240 unsigned HOST_WIDE_INT);
1242 /* A subroutine of mips_build_integer, with the same interface.
1243 Assume that the final action in the sequence should be a left shift. */
1245 static unsigned int
1246 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1248 unsigned int i, shift;
1250 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1251 since signed numbers are easier to load than unsigned ones. */
1252 shift = 0;
1253 while ((value & 1) == 0)
1254 value /= 2, shift++;
1256 i = mips_build_integer (codes, value);
1257 codes[i].code = ASHIFT;
1258 codes[i].value = shift;
1259 return i + 1;
1262 /* As for mips_build_shift, but assume that the final action will be
1263 an IOR or PLUS operation. */
1265 static unsigned int
1266 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1268 unsigned HOST_WIDE_INT high;
1269 unsigned int i;
1271 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1272 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1274 /* The constant is too complex to load with a simple LUI/ORI pair,
1275 so we want to give the recursive call as many trailing zeros as
1276 possible. In this case, we know bit 16 is set and that the
1277 low 16 bits form a negative number. If we subtract that number
1278 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1279 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1280 codes[i].code = PLUS;
1281 codes[i].value = CONST_LOW_PART (value);
1283 else
1285 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1286 bits gives a value with at least 17 trailing zeros. */
1287 i = mips_build_integer (codes, high);
1288 codes[i].code = IOR;
1289 codes[i].value = value & 0xffff;
1291 return i + 1;
1294 /* Fill CODES with a sequence of rtl operations to load VALUE.
1295 Return the number of operations needed. */
1297 static unsigned int
1298 mips_build_integer (struct mips_integer_op *codes,
1299 unsigned HOST_WIDE_INT value)
1301 if (SMALL_OPERAND (value)
1302 || SMALL_OPERAND_UNSIGNED (value)
1303 || LUI_OPERAND (value))
1305 /* The value can be loaded with a single instruction. */
1306 codes[0].code = UNKNOWN;
1307 codes[0].value = value;
1308 return 1;
1310 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1312 /* Either the constant is a simple LUI/ORI combination or its
1313 lowest bit is set. We don't want to shift in this case. */
1314 return mips_build_lower (codes, value);
1316 else if ((value & 0xffff) == 0)
1318 /* The constant will need at least three actions. The lowest
1319 16 bits are clear, so the final action will be a shift. */
1320 return mips_build_shift (codes, value);
1322 else
1324 /* The final action could be a shift, add or inclusive OR.
1325 Rather than use a complex condition to select the best
1326 approach, try both mips_build_shift and mips_build_lower
1327 and pick the one that gives the shortest sequence.
1328 Note that this case is only used once per constant. */
1329 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1330 unsigned int cost, alt_cost;
1332 cost = mips_build_shift (codes, value);
1333 alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
	{
	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
	  cost = alt_cost;
	}
      return cost;
    }
}
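/* Illustrative sketch (not part of the original file): one way the
   sequence built by mips_build_integer could be turned into moves.
   TEMP is assumed to be a register of mode MODE; the real consumer of
   these sequences elsewhere in this file may differ in detail.  */

static void
mips_emit_integer_sketch (rtx temp, enum machine_mode mode,
                          unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  unsigned int i, num_ops;

  num_ops = mips_build_integer (codes, value);

  /* A = VALUE[0]; CODE[0] is unused.  */
  emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (codes[0].value)));

  /* A = A CODE[i] VALUE[i] for each remaining step.  */
  for (i = 1; i < num_ops; i++)
    emit_insn (gen_rtx_SET (VOIDmode, temp,
                            gen_rtx_fmt_ee (codes[i].code, mode, temp,
                                            GEN_INT (codes[i].value))));
}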
1343 /* Return true if X is a thread-local symbol. */
1345 static bool
1346 mips_tls_symbol_p (rtx x)
1348 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1351 /* Return true if SYMBOL_REF X is associated with a global symbol
1352 (in the STB_GLOBAL sense). */
1354 static bool
1355 mips_global_symbol_p (const_rtx x)
1357 const_tree decl = SYMBOL_REF_DECL (x);
1359 if (!decl)
1360 return !SYMBOL_REF_LOCAL_P (x);
1362 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1363 or weak symbols. Relocations in the object file will be against
1364 the target symbol, so it's that symbol's binding that matters here. */
1365 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1368 /* Return true if SYMBOL_REF X binds locally. */
1370 static bool
1371 mips_symbol_binds_local_p (const_rtx x)
1373 return (SYMBOL_REF_DECL (x)
1374 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1375 : SYMBOL_REF_LOCAL_P (x));
1378 /* Return true if rtx constants of mode MODE should be put into a small
1379 data section. */
1381 static bool
1382 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1384 return (!TARGET_EMBEDDED_DATA
1385 && TARGET_LOCAL_SDATA
1386 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1389 /* Return true if X should not be moved directly into register $25.
1390 We need this because many versions of GAS will treat "la $25,foo" as
1391 part of a call sequence and so allow a global "foo" to be lazily bound. */
1393 bool
1394 mips_dangerous_for_la25_p (rtx x)
1396 return (!TARGET_EXPLICIT_RELOCS
1397 && TARGET_USE_GOT
1398 && GET_CODE (x) == SYMBOL_REF
1399 && mips_global_symbol_p (x));
1402 /* Return the method that should be used to access SYMBOL_REF or
1403 LABEL_REF X in context CONTEXT. */
1405 static enum mips_symbol_type
1406 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1408 if (TARGET_RTP_PIC)
1409 return SYMBOL_GOT_DISP;
1411 if (GET_CODE (x) == LABEL_REF)
1413 /* LABEL_REFs are used for jump tables as well as text labels.
1414 Only return SYMBOL_PC_RELATIVE if we know the label is in
1415 the text section. */
1416 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1417 return SYMBOL_PC_RELATIVE;
1419 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1420 return SYMBOL_GOT_PAGE_OFST;
1422 return SYMBOL_ABSOLUTE;
1425 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1427 if (SYMBOL_REF_TLS_MODEL (x))
1428 return SYMBOL_TLS;
1430 if (CONSTANT_POOL_ADDRESS_P (x))
1432 if (TARGET_MIPS16_TEXT_LOADS)
1433 return SYMBOL_PC_RELATIVE;
1435 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1436 return SYMBOL_PC_RELATIVE;
1438 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1439 return SYMBOL_GP_RELATIVE;
1442 /* Do not use small-data accesses for weak symbols; they may end up
1443 being zero. */
1444 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1445 return SYMBOL_GP_RELATIVE;
1447 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1448 is in effect. */
1449 if (TARGET_ABICALLS
1450 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1452 /* There are three cases to consider:
1454 - o32 PIC (either with or without explicit relocs)
1455 - n32/n64 PIC without explicit relocs
1456 - n32/n64 PIC with explicit relocs
1458 In the first case, both local and global accesses will use an
1459 R_MIPS_GOT16 relocation. We must correctly predict which of
1460 the two semantics (local or global) the assembler and linker
1461 will apply. The choice depends on the symbol's binding rather
1462 than its visibility.
1464 In the second case, the assembler will not use R_MIPS_GOT16
1465 relocations, but it chooses between local and global accesses
1466 in the same way as for o32 PIC.
1468 In the third case we have more freedom since both forms of
1469 access will work for any kind of symbol. However, there seems
1470 little point in doing things differently. */
1471 if (mips_global_symbol_p (x))
1472 return SYMBOL_GOT_DISP;
1474 return SYMBOL_GOT_PAGE_OFST;
1477 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1478 return SYMBOL_FORCE_TO_MEM;
1480 return SYMBOL_ABSOLUTE;
1483 /* Classify the base of symbolic expression X, given that X appears in
1484 context CONTEXT. */
1486 static enum mips_symbol_type
1487 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1489 rtx offset;
1491 split_const (x, &x, &offset);
1492 if (UNSPEC_ADDRESS_P (x))
1493 return UNSPEC_ADDRESS_TYPE (x);
1495 return mips_classify_symbol (x, context);
1498 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1499 is the alignment in bytes of SYMBOL_REF X. */
1501 static bool
1502 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1504 HOST_WIDE_INT align;
1506 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1507 return IN_RANGE (offset, 0, align - 1);
1510 /* Return true if X is a symbolic constant that can be used in context
1511 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1513 bool
1514 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1515 enum mips_symbol_type *symbol_type)
1517 rtx offset;
1519 split_const (x, &x, &offset);
1520 if (UNSPEC_ADDRESS_P (x))
1522 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1523 x = UNSPEC_ADDRESS (x);
1525 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1527 *symbol_type = mips_classify_symbol (x, context);
1528 if (*symbol_type == SYMBOL_TLS)
1529 return false;
1531 else
1532 return false;
1534 if (offset == const0_rtx)
1535 return true;
1537 /* Check whether a nonzero offset is valid for the underlying
1538 relocations. */
1539 switch (*symbol_type)
1541 case SYMBOL_ABSOLUTE:
1542 case SYMBOL_FORCE_TO_MEM:
1543 case SYMBOL_32_HIGH:
1544 case SYMBOL_64_HIGH:
1545 case SYMBOL_64_MID:
1546 case SYMBOL_64_LOW:
1547 /* If the target has 64-bit pointers and the object file only
1548 supports 32-bit symbols, the values of those symbols will be
1549 sign-extended. In this case we can't allow an arbitrary offset
1550 in case the 32-bit value X + OFFSET has a different sign from X. */
1551 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1552 return offset_within_block_p (x, INTVAL (offset));
1554 /* In other cases the relocations can handle any offset. */
1555 return true;
1557 case SYMBOL_PC_RELATIVE:
1558 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1559 In this case, we no longer have access to the underlying constant,
1560 but the original symbol-based access was known to be valid. */
1561 if (GET_CODE (x) == LABEL_REF)
1562 return true;
1564 /* Fall through. */
1566 case SYMBOL_GP_RELATIVE:
1567 /* Make sure that the offset refers to something within the
1568 same object block. This should guarantee that the final
1569 PC- or GP-relative offset is within the 16-bit limit. */
1570 return offset_within_block_p (x, INTVAL (offset));
1572 case SYMBOL_GOT_PAGE_OFST:
1573 case SYMBOL_GOTOFF_PAGE:
1574 /* If the symbol is global, the GOT entry will contain the symbol's
1575 address, and we will apply a 16-bit offset after loading it.
1576 If the symbol is local, the linker should provide enough local
1577 GOT entries for a 16-bit offset, but larger offsets may lead
1578 to GOT overflow. */
1579 return SMALL_INT (offset);
1581 case SYMBOL_TPREL:
1582 case SYMBOL_DTPREL:
1583 /* There is no carry between the HI and LO REL relocations, so the
1584 offset is only valid if we know it won't lead to such a carry. */
1585 return mips_offset_within_alignment_p (x, INTVAL (offset));
1587 case SYMBOL_GOT_DISP:
1588 case SYMBOL_GOTOFF_DISP:
1589 case SYMBOL_GOTOFF_CALL:
1590 case SYMBOL_GOTOFF_LOADGP:
1591 case SYMBOL_TLSGD:
1592 case SYMBOL_TLSLDM:
1593 case SYMBOL_GOTTPREL:
1594 case SYMBOL_TLS:
1595 case SYMBOL_HALF:
1596 return false;
1598 gcc_unreachable ();
1601 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1602 single instruction. We rely on the fact that, in the worst case,
1603 all instructions involved in a MIPS16 address calculation are usually
1604 extended ones. */
1606 static int
1607 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1609 switch (type)
1611 case SYMBOL_ABSOLUTE:
1612 /* When using 64-bit symbols, we need 5 preparatory instructions,
1613 such as:
1615 lui $at,%highest(symbol)
1616 daddiu $at,$at,%higher(symbol)
1617 dsll $at,$at,16
1618 daddiu $at,$at,%hi(symbol)
1619 dsll $at,$at,16
1621 The final address is then $at + %lo(symbol). With 32-bit
1622 symbols we just need a preparatory LUI for normal mode and
1623 a preparatory LI and SLL for MIPS16. */
1624 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1626 case SYMBOL_GP_RELATIVE:
1627 /* Treat GP-relative accesses as taking a single instruction on
1628 MIPS16 too; the copy of $gp can often be shared. */
1629 return 1;
1631 case SYMBOL_PC_RELATIVE:
      /* PC-relative constants can only be used with ADDIUPC,
	 DADDIUPC, LWPC and LDPC.  */
1634 if (mode == MAX_MACHINE_MODE
1635 || GET_MODE_SIZE (mode) == 4
1636 || GET_MODE_SIZE (mode) == 8)
1637 return 1;
1639 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1640 return 0;
1642 case SYMBOL_FORCE_TO_MEM:
1643 /* LEAs will be converted into constant-pool references by
1644 mips_reorg. */
1645 if (mode == MAX_MACHINE_MODE)
1646 return 1;
1648 /* The constant must be loaded and then dereferenced. */
1649 return 0;
1651 case SYMBOL_GOT_DISP:
1652 /* The constant will have to be loaded from the GOT before it
1653 is used in an address. */
1654 if (mode != MAX_MACHINE_MODE)
1655 return 0;
1657 /* Fall through. */
1659 case SYMBOL_GOT_PAGE_OFST:
1660 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1661 local/global classification is accurate. The worst cases are:
1663 (1) For local symbols when generating o32 or o64 code. The assembler
1664 will use:
1666 lw $at,%got(symbol)
1669 ...and the final address will be $at + %lo(symbol).
1671 (2) For global symbols when -mxgot. The assembler will use:
1673 lui $at,%got_hi(symbol)
1674 (d)addu $at,$at,$gp
1676 ...and the final address will be $at + %got_lo(symbol). */
1677 return 3;
1679 case SYMBOL_GOTOFF_PAGE:
1680 case SYMBOL_GOTOFF_DISP:
1681 case SYMBOL_GOTOFF_CALL:
1682 case SYMBOL_GOTOFF_LOADGP:
1683 case SYMBOL_32_HIGH:
1684 case SYMBOL_64_HIGH:
1685 case SYMBOL_64_MID:
1686 case SYMBOL_64_LOW:
1687 case SYMBOL_TLSGD:
1688 case SYMBOL_TLSLDM:
1689 case SYMBOL_DTPREL:
1690 case SYMBOL_GOTTPREL:
1691 case SYMBOL_TPREL:
1692 case SYMBOL_HALF:
1693 /* A 16-bit constant formed by a single relocation, or a 32-bit
1694 constant formed from a high 16-bit relocation and a low 16-bit
1695 relocation. Use mips_split_p to determine which. 32-bit
1696 constants need an "lui; addiu" sequence for normal mode and
1697 an "li; sll; addiu" sequence for MIPS16 mode. */
1698 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1700 case SYMBOL_TLS:
1701 /* We don't treat a bare TLS symbol as a constant. */
1702 return 0;
1704 gcc_unreachable ();
1707 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1708 to load symbols of type TYPE into a register. Return 0 if the given
1709 type of symbol cannot be used as an immediate operand.
1711 Otherwise, return the number of instructions needed to load or store
1712 values of mode MODE to or from addresses of type TYPE. Return 0 if
1713 the given type of symbol is not valid in addresses.
1715 In both cases, treat extended MIPS16 instructions as two instructions. */
1717 static int
1718 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1720 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
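/* For example, a MIPS16 load of a 32-bit SYMBOL_ABSOLUTE address counts
as 3 in mips_symbol_insns_1 and therefore as 6 here, since the LI,
SLL and final ADDIU may all need to be extended.  */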
1723 /* A for_each_rtx callback. Stop the search if *X references a
1724 thread-local symbol. */
1726 static int
1727 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1729 return mips_tls_symbol_p (*x);
1732 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1734 static bool
1735 mips_cannot_force_const_mem (rtx x)
1737 rtx base, offset;
1739 if (!TARGET_MIPS16)
1741 /* As an optimization, reject constants that mips_legitimize_move
1742 can expand inline.
1744 Suppose we have a multi-instruction sequence that loads constant C
1745 into register R. If R does not get allocated a hard register, and
1746 R is used in an operand that allows both registers and memory
1747 references, reload will consider forcing C into memory and using
1748 one of the instruction's memory alternatives. Returning false
1749 here will force it to use an input reload instead. */
1750 if (GET_CODE (x) == CONST_INT)
1751 return true;
1753 split_const (x, &base, &offset);
1754 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1755 return true;
1758 /* TLS symbols must be computed by mips_legitimize_move. */
1759 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1760 return true;
1762 return false;
1765 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1766 constants when we're using a per-function constant pool. */
1768 static bool
1769 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1770 const_rtx x ATTRIBUTE_UNUSED)
1772 return !TARGET_MIPS16_PCREL_LOADS;
1775 /* Return true if register REGNO is a valid base register for mode MODE.
1776 STRICT_P is true if REG_OK_STRICT is in effect. */
1779 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1780 bool strict_p)
1782 if (!HARD_REGISTER_NUM_P (regno))
1784 if (!strict_p)
1785 return true;
1786 regno = reg_renumber[regno];
1789 /* These fake registers will be eliminated to either the stack or
1790 hard frame pointer, both of which are usually valid base registers.
1791 Reload deals with the cases where the eliminated form isn't valid. */
1792 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1793 return true;
1795 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1796 values, nothing smaller. There are two problems here:
1798 (a) Instantiating virtual registers can introduce new uses of the
1799 stack pointer. If these virtual registers are valid addresses,
1800 the stack pointer should be too.
1802 (b) Most uses of the stack pointer are not made explicit until
1803 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1804 We don't know until that stage whether we'll be eliminating to the
1805 stack pointer (which needs the restriction) or the hard frame
1806 pointer (which doesn't).
1808 All in all, it seems more consistent to only enforce this restriction
1809 during and after reload. */
1810 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1811 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1813 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1816 /* Return true if X is a valid base register for mode MODE.
1817 STRICT_P is true if REG_OK_STRICT is in effect. */
1819 static bool
1820 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1822 if (!strict_p && GET_CODE (x) == SUBREG)
1823 x = SUBREG_REG (x);
1825 return (REG_P (x)
1826 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1829 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1830 can address a value of mode MODE. */
1832 static bool
1833 mips_valid_offset_p (rtx x, enum machine_mode mode)
1835 /* Check that X is a signed 16-bit number. */
1836 if (!const_arith_operand (x, Pmode))
1837 return false;
1839 /* We may need to split multiword moves, so make sure that every word
1840 is accessible. */
1841 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1842 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1843 return false;
1845 return true;
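/* For example, on a 32-bit target the offset 0x7ffc is itself a signed
16-bit value, but a DImode access would also need 0x7ffc + 8 - 4 =
0x8000, which is not a SMALL_OPERAND, so the offset is rejected for
DImode.  */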
1848 /* Return true if a LO_SUM can address a value of mode MODE when the
1849 LO_SUM symbol has type SYMBOL_TYPE. */
1851 static bool
1852 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
1854 /* Check that symbols of type SYMBOL_TYPE can be used to access values
1855 of mode MODE. */
1856 if (mips_symbol_insns (symbol_type, mode) == 0)
1857 return false;
1859 /* Check that there is a known low-part relocation. */
1860 if (mips_lo_relocs[symbol_type] == NULL)
1861 return false;
1863 /* We may need to split multiword moves, so make sure that each word
1864 can be accessed without inducing a carry. This is mainly needed
1865 for o64, which has historically only guaranteed 64-bit alignment
1866 for 128-bit types. */
1867 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1868 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1869 return false;
1871 return true;
1874 /* Return true if X is a valid address for machine mode MODE. If it is,
1875 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
1876 effect. */
1878 static bool
1879 mips_classify_address (struct mips_address_info *info, rtx x,
1880 enum machine_mode mode, bool strict_p)
1882 switch (GET_CODE (x))
1884 case REG:
1885 case SUBREG:
1886 info->type = ADDRESS_REG;
1887 info->reg = x;
1888 info->offset = const0_rtx;
1889 return mips_valid_base_register_p (info->reg, mode, strict_p);
1891 case PLUS:
1892 info->type = ADDRESS_REG;
1893 info->reg = XEXP (x, 0);
1894 info->offset = XEXP (x, 1);
1895 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1896 && mips_valid_offset_p (info->offset, mode));
1898 case LO_SUM:
1899 info->type = ADDRESS_LO_SUM;
1900 info->reg = XEXP (x, 0);
1901 info->offset = XEXP (x, 1);
1902 /* We have to trust the creator of the LO_SUM to do something vaguely
1903 sane. Target-independent code that creates a LO_SUM should also
1904 create and verify the matching HIGH. Target-independent code that
1905 adds an offset to a LO_SUM must prove that the offset will not
1906 induce a carry. Failure to do either of these things would be
1907 a bug, and we are not required to check for it here. The MIPS
1908 backend itself should only create LO_SUMs for valid symbolic
1909 constants, with the high part being either a HIGH or a copy
1910 of _gp. */
1911 info->symbol_type
1912 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1913 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1914 && mips_valid_lo_sum_p (info->symbol_type, mode));
1916 case CONST_INT:
1917 /* Small-integer addresses don't occur very often, but they
1918 are legitimate if $0 is a valid base register. */
1919 info->type = ADDRESS_CONST_INT;
1920 return !TARGET_MIPS16 && SMALL_INT (x);
1922 case CONST:
1923 case LABEL_REF:
1924 case SYMBOL_REF:
1925 info->type = ADDRESS_SYMBOLIC;
1926 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1927 &info->symbol_type)
1928 && mips_symbol_insns (info->symbol_type, mode) > 0
1929 && !mips_split_p[info->symbol_type]);
1931 default:
1932 return false;
1936 /* Return true if X is a legitimate address for a memory operand of mode
1937 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
1939 bool
1940 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1942 struct mips_address_info addr;
1944 return mips_classify_address (&addr, x, mode, strict_p);
1947 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1949 bool
1950 mips_stack_address_p (rtx x, enum machine_mode mode)
1952 struct mips_address_info addr;
1954 return (mips_classify_address (&addr, x, mode, false)
1955 && addr.type == ADDRESS_REG
1956 && addr.reg == stack_pointer_rtx);
1959 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
1960 address instruction. Note that such addresses are not considered
1961 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
1962 is so restricted. */
1964 static bool
1965 mips_lwxs_address_p (rtx addr)
1967 if (ISA_HAS_LWXS
1968 && GET_CODE (addr) == PLUS
1969 && REG_P (XEXP (addr, 1)))
1971 rtx offset = XEXP (addr, 0);
1972 if (GET_CODE (offset) == MULT
1973 && REG_P (XEXP (offset, 0))
1974 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1975 && INTVAL (XEXP (offset, 1)) == 4)
1976 return true;
1978 return false;
1981 /* Return true if a value at OFFSET bytes from base register BASE can be
1982 accessed using an unextended MIPS16 instruction. MODE is the mode of
1983 the value.
1985 Usually the offset in an unextended instruction is a 5-bit field.
1986 The offset is unsigned and shifted left once for LH and SH, twice
1987 for LW and SW, and so on. Exceptions are LWSP and SWSP, which have
1988 an 8-bit immediate field that's shifted left twice. */
1990 static bool
1991 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
1992 unsigned HOST_WIDE_INT offset)
1994 if (offset % GET_MODE_SIZE (mode) == 0)
1996 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1997 return offset < 256U * GET_MODE_SIZE (mode);
1998 return offset < 32U * GET_MODE_SIZE (mode);
2000 return false;
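/* For example, SImode references are unextended for offsets 0, 4, ...,
124 from an ordinary base register and 0, 4, ..., 1020 from $sp;
misaligned offsets always count as extended here.  */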
2003 /* Return the number of instructions needed to load or store a value
2004 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2005 Assume that multiword moves may need to be split into word moves
2006 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2007 enough.
2009 For MIPS16 code, count extended instructions as two instructions. */
2012 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2014 struct mips_address_info addr;
2015 int factor;
2017 /* BLKmode is used for single unaligned loads and stores and should
2018 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2019 meaningless, so we have to single it out as a special case one way
2020 or the other.) */
2021 if (mode != BLKmode && might_split_p)
2022 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2023 else
2024 factor = 1;
2026 if (mips_classify_address (&addr, x, mode, false))
2027 switch (addr.type)
2029 case ADDRESS_REG:
2030 if (TARGET_MIPS16
2031 && !mips16_unextended_reference_p (mode, addr.reg,
2032 UINTVAL (addr.offset)))
2033 return factor * 2;
2034 return factor;
2036 case ADDRESS_LO_SUM:
2037 return TARGET_MIPS16 ? factor * 2 : factor;
2039 case ADDRESS_CONST_INT:
2040 return factor;
2042 case ADDRESS_SYMBOLIC:
2043 return factor * mips_symbol_insns (addr.symbol_type, mode);
2045 return 0;
2048 /* Return the number of instructions needed to load constant X.
2049 Return 0 if X isn't a valid constant. */
2052 mips_const_insns (rtx x)
2054 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2055 enum mips_symbol_type symbol_type;
2056 rtx offset;
2058 switch (GET_CODE (x))
2060 case HIGH:
2061 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2062 &symbol_type)
2063 || !mips_split_p[symbol_type])
2064 return 0;
2066 /* This is simply an LUI for normal mode. It is an extended
2067 LI followed by an extended SLL for MIPS16. */
2068 return TARGET_MIPS16 ? 4 : 1;
2070 case CONST_INT:
2071 if (TARGET_MIPS16)
2072 /* Unsigned 8-bit constants can be loaded using an unextended
2073 LI instruction. Unsigned 16-bit constants can be loaded
2074 using an extended LI. Negative constants must be loaded
2075 using LI and then negated. */
2076 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2077 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2078 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2079 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2080 : 0);
2082 return mips_build_integer (codes, INTVAL (x));
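/* Worked examples of the MIPS16 case above: 200 needs one unextended LI;
300 counts as 2 (a single extended LI); -5 counts as 2 (LI then a
negation); -300 counts as 3; and a value outside the unsigned 16-bit
range, such as 0x12345, returns 0 and is loaded from the constant
pool instead.  */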
2084 case CONST_DOUBLE:
2085 case CONST_VECTOR:
2086 /* Allow zeros for normal mode, where we can use $0. */
2087 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2089 case CONST:
2090 if (CONST_GP_P (x))
2091 return 1;
2093 /* See if we can refer to X directly. */
2094 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2095 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2097 /* Otherwise try splitting the constant into a base and offset.
2098 16-bit offsets can be added using an extra ADDIU. Larger offsets
2099 must be calculated separately and then added to the base. */
2100 split_const (x, &x, &offset);
2101 if (offset != 0)
2103 int n = mips_const_insns (x);
2104 if (n != 0)
2106 if (SMALL_INT (offset))
2107 return n + 1;
2108 else
2109 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2112 return 0;
2114 case SYMBOL_REF:
2115 case LABEL_REF:
2116 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2117 MAX_MACHINE_MODE);
2119 default:
2120 return 0;
2124 /* X is a doubleword constant that can be handled by splitting it into
2125 two words and loading each word separately. Return the number of
2126 instructions required to do this. */
2129 mips_split_const_insns (rtx x)
2131 unsigned int low, high;
2133 low = mips_const_insns (mips_subword (x, false));
2134 high = mips_const_insns (mips_subword (x, true));
2135 gcc_assert (low > 0 && high > 0);
2136 return low + high;
2139 /* Return the number of instructions needed to implement INSN,
2140 given that it loads from or stores to MEM. Count extended
2141 MIPS16 instructions as two instructions. */
2144 mips_load_store_insns (rtx mem, rtx insn)
2146 enum machine_mode mode;
2147 bool might_split_p;
2148 rtx set;
2150 gcc_assert (MEM_P (mem));
2151 mode = GET_MODE (mem);
2153 /* Try to prove that INSN does not need to be split. */
2154 might_split_p = true;
2155 if (GET_MODE_BITSIZE (mode) == 64)
2157 set = single_set (insn);
2158 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2159 might_split_p = false;
2162 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2165 /* Return the number of instructions needed for an integer division. */
2168 mips_idiv_insns (void)
2170 int count;
2172 count = 1;
2173 if (TARGET_CHECK_ZERO_DIV)
2175 if (GENERATE_DIVIDE_TRAPS)
2176 count++;
2177 else
2178 count += 2;
2181 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2182 count++;
2183 return count;
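/* For example, with TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS the
count is 2 (the divide plus a conditional trap); without trap support
it is typically 3 (the divide plus a branch around a break); and
-mfix-r4000 or -mfix-r4400 adds one more instruction to either
figure.  */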
2186 /* Emit a move from SRC to DEST. Assume that the move expanders can
2187 handle all moves if !can_create_pseudo_p (). The distinction is
2188 important because, unlike emit_move_insn, the move expanders know
2189 how to force Pmode objects into the constant pool even when the
2190 constant pool address is not itself legitimate. */
2193 mips_emit_move (rtx dest, rtx src)
2195 return (can_create_pseudo_p ()
2196 ? emit_move_insn (dest, src)
2197 : emit_move_insn_1 (dest, src));
2200 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2202 static void
2203 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2205 emit_insn (gen_rtx_SET (VOIDmode, target,
2206 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2209 /* Compute (CODE OP0 OP1) and store the result in a new register
2210 of mode MODE. Return that new register. */
2212 static rtx
2213 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2215 rtx reg;
2217 reg = gen_reg_rtx (mode);
2218 mips_emit_binary (code, reg, op0, op1);
2219 return reg;
2222 /* Copy VALUE to a register and return that register. If new pseudos
2223 are allowed, copy it into a new register, otherwise use DEST. */
2225 static rtx
2226 mips_force_temporary (rtx dest, rtx value)
2228 if (can_create_pseudo_p ())
2229 return force_reg (Pmode, value);
2230 else
2232 mips_emit_move (dest, value);
2233 return dest;
2237 /* Emit a call sequence with call pattern PATTERN and return the call
2238 instruction itself (which is not necessarily the last instruction
2239 emitted). LAZY_P is true if the call address is lazily-bound. */
2241 static rtx
2242 mips_emit_call_insn (rtx pattern, bool lazy_p)
2244 rtx insn;
2246 insn = emit_call_insn (pattern);
2248 /* Lazy-binding stubs require $gp to be valid on entry. */
2249 if (lazy_p)
2250 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2252 if (TARGET_USE_GOT)
2254 /* See the comment above load_call<mode> for details. */
2255 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2256 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2257 emit_insn (gen_update_got_version ());
2259 return insn;
2262 /* Return an instruction that copies $gp into register REG. We want
2263 GCC to treat the register's value as constant, so that its value
2264 can be rematerialized on demand. */
2266 static rtx
2267 gen_load_const_gp (rtx reg)
2269 return (Pmode == SImode
2270 ? gen_load_const_gp_si (reg)
2271 : gen_load_const_gp_di (reg));
2274 /* Return a pseudo register that contains the value of $gp throughout
2275 the current function. Such registers are needed by MIPS16 functions,
2276 for which $gp itself is not a valid base register or addition operand. */
2278 static rtx
2279 mips16_gp_pseudo_reg (void)
2281 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2282 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2284 /* Don't emit an instruction to initialize the pseudo register if
2285 we are being called from the tree optimizers' cost-calculation
2286 routines. */
2287 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2288 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2290 rtx insn, scan, after;
2292 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2294 push_topmost_sequence ();
2295 /* We need to emit the initialization after the FUNCTION_BEG
2296 note, so that it will be integrated. */
2297 after = get_insns ();
2298 for (scan = after; scan != NULL_RTX; scan = NEXT_INSN (scan))
2299 if (NOTE_P (scan) && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2301 after = scan;
2302 break;
2304 insn = emit_insn_after (insn, after);
2305 pop_topmost_sequence ();
2307 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2310 return cfun->machine->mips16_gp_pseudo_rtx;
2313 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2314 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2315 constant in that context and can be split into a high part and a LO_SUM.
2316 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2317 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2319 TEMP is as for mips_force_temporary and is used to load the high
2320 part into a register. */
2322 bool
2323 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2325 enum mips_symbol_context context;
2326 enum mips_symbol_type symbol_type;
2327 rtx high;
2329 context = (mode == MAX_MACHINE_MODE
2330 ? SYMBOL_CONTEXT_LEA
2331 : SYMBOL_CONTEXT_MEM);
2332 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2333 || mips_symbol_insns (symbol_type, mode) == 0
2334 || !mips_split_p[symbol_type])
2335 return false;
2337 if (lo_sum_out)
2339 if (symbol_type == SYMBOL_GP_RELATIVE)
2341 if (!can_create_pseudo_p ())
2343 emit_insn (gen_load_const_gp (temp));
2344 high = temp;
2346 else
2347 high = mips16_gp_pseudo_reg ();
2349 else
2351 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2352 high = mips_force_temporary (temp, high);
2354 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2356 return true;
2359 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2360 then add CONST_INT OFFSET to the result. */
2362 static rtx
2363 mips_unspec_address_offset (rtx base, rtx offset,
2364 enum mips_symbol_type symbol_type)
2366 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2367 UNSPEC_ADDRESS_FIRST + symbol_type);
2368 if (offset != const0_rtx)
2369 base = gen_rtx_PLUS (Pmode, base, offset);
2370 return gen_rtx_CONST (Pmode, base);
2373 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2374 type SYMBOL_TYPE. */
2377 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2379 rtx base, offset;
2381 split_const (address, &base, &offset);
2382 return mips_unspec_address_offset (base, offset, symbol_type);
2385 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2386 high part to BASE and return the result. Just return BASE otherwise.
2387 TEMP is as for mips_force_temporary.
2389 The returned expression can be used as the first operand to a LO_SUM. */
2391 static rtx
2392 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2393 enum mips_symbol_type symbol_type)
2395 if (mips_split_p[symbol_type])
2397 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2398 addr = mips_force_temporary (temp, addr);
2399 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2401 return base;
2404 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2405 mips_force_temporary; it is only needed when OFFSET is not a
2406 SMALL_OPERAND. */
2408 static rtx
2409 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2411 if (!SMALL_OPERAND (offset))
2413 rtx high;
2415 if (TARGET_MIPS16)
2417 /* Load the full offset into a register so that we can use
2418 an unextended instruction for the address itself. */
2419 high = GEN_INT (offset);
2420 offset = 0;
2422 else
2424 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2425 high = GEN_INT (CONST_HIGH_PART (offset));
2426 offset = CONST_LOW_PART (offset);
2428 high = mips_force_temporary (temp, high);
2429 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2431 return plus_constant (reg, offset);
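/* Worked example for the non-MIPS16 path above, assuming the usual
definitions of CONST_HIGH_PART and CONST_LOW_PART: for OFFSET 0x18000
the high part is 0x20000 and the low part is -0x8000, so 0x20000 is
added to REG in TEMP and the returned address is TEMP + -0x8000.
In MIPS16 mode the whole 0x18000 goes into TEMP instead and the
remaining offset is 0.  */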
2434 /* The __tls_get_addr symbol. */
2435 static GTY(()) rtx mips_tls_symbol;
2437 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2438 the TLS symbol we are referencing and TYPE is the symbol type to use
2439 (either global dynamic or local dynamic). V0 is an RTX for the
2440 return value location. */
2442 static rtx
2443 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2445 rtx insn, loc, a0;
2447 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2449 if (!mips_tls_symbol)
2450 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2452 loc = mips_unspec_address (sym, type);
2454 start_sequence ();
2456 emit_insn (gen_rtx_SET (Pmode, a0,
2457 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2458 insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false);
2459 RTL_CONST_CALL_P (insn) = 1;
2460 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2461 insn = get_insns ();
2463 end_sequence ();
2465 return insn;
2468 /* Return a pseudo register that contains the current thread pointer. */
2470 static rtx
2471 mips_get_tp (void)
2473 rtx tp;
2475 tp = gen_reg_rtx (Pmode);
2476 if (Pmode == DImode)
2477 emit_insn (gen_tls_get_tp_di (tp));
2478 else
2479 emit_insn (gen_tls_get_tp_si (tp));
2480 return tp;
2483 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2484 its address. The return value will be both a valid address and a valid
2485 SET_SRC (either a REG or a LO_SUM). */
2487 static rtx
2488 mips_legitimize_tls_address (rtx loc)
2490 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2491 enum tls_model model;
2493 if (TARGET_MIPS16)
2495 sorry ("MIPS16 TLS");
2496 return gen_reg_rtx (Pmode);
2499 model = SYMBOL_REF_TLS_MODEL (loc);
2500 /* Only TARGET_ABICALLS code can have more than one module; other
2501 code must be static and should not use a GOT. All TLS models
2502 reduce to local exec in this situation. */
2503 if (!TARGET_ABICALLS)
2504 model = TLS_MODEL_LOCAL_EXEC;
2506 switch (model)
2508 case TLS_MODEL_GLOBAL_DYNAMIC:
2509 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2510 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2511 dest = gen_reg_rtx (Pmode);
2512 emit_libcall_block (insn, dest, v0, loc);
2513 break;
2515 case TLS_MODEL_LOCAL_DYNAMIC:
2516 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2517 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2518 tmp1 = gen_reg_rtx (Pmode);
2520 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2521 share the LDM result with other LD model accesses. */
2522 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2523 UNSPEC_TLS_LDM);
2524 emit_libcall_block (insn, tmp1, v0, eqv);
2526 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2527 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2528 mips_unspec_address (loc, SYMBOL_DTPREL));
2529 break;
2531 case TLS_MODEL_INITIAL_EXEC:
2532 tp = mips_get_tp ();
2533 tmp1 = gen_reg_rtx (Pmode);
2534 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2535 if (Pmode == DImode)
2536 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2537 else
2538 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2539 dest = gen_reg_rtx (Pmode);
2540 emit_insn (gen_add3_insn (dest, tmp1, tp));
2541 break;
2543 case TLS_MODEL_LOCAL_EXEC:
2544 tp = mips_get_tp ();
2545 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2546 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2547 mips_unspec_address (loc, SYMBOL_TPREL));
2548 break;
2550 default:
2551 gcc_unreachable ();
2553 return dest;
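/* As a rough sketch (assuming the usual rdhwr-based thread pointer and
%tprel relocations), the TLS_MODEL_LOCAL_EXEC case above ends up as
something like:

rdhwr	$3,$29
lui	$2,%tprel_hi(x)
addu	$2,$2,$3

with the final access using $2 + %tprel_lo(x).  */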
2556 /* If X is not a valid address for mode MODE, force it into a register. */
2558 static rtx
2559 mips_force_address (rtx x, enum machine_mode mode)
2561 if (!mips_legitimate_address_p (mode, x, false))
2562 x = force_reg (Pmode, x);
2563 return x;
2566 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2567 be legitimized in a way that the generic machinery might not expect,
2568 put the new address in *XLOC and return true. MODE is the mode of
2569 the memory being accessed. */
2571 bool
2572 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2574 rtx base, addr;
2575 HOST_WIDE_INT offset;
2577 if (mips_tls_symbol_p (*xloc))
2579 *xloc = mips_legitimize_tls_address (*xloc);
2580 return true;
2583 /* See if the address can split into a high part and a LO_SUM. */
2584 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2586 *xloc = mips_force_address (addr, mode);
2587 return true;
2590 /* Handle BASE + OFFSET using mips_add_offset. */
2591 mips_split_plus (*xloc, &base, &offset);
2592 if (offset != 0)
2594 if (!mips_valid_base_register_p (base, mode, false))
2595 base = copy_to_mode_reg (Pmode, base);
2596 addr = mips_add_offset (NULL, base, offset);
2597 *xloc = mips_force_address (addr, mode);
2598 return true;
2600 return false;
2603 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2605 void
2606 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2608 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2609 enum machine_mode mode;
2610 unsigned int i, num_ops;
2611 rtx x;
2613 mode = GET_MODE (dest);
2614 num_ops = mips_build_integer (codes, value);
2616 /* Apply each binary operation to X. Invariant: X is a legitimate
2617 source operand for a SET pattern. */
2618 x = GEN_INT (codes[0].value);
2619 for (i = 1; i < num_ops; i++)
2621 if (!can_create_pseudo_p ())
2623 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2624 x = temp;
2626 else
2627 x = force_reg (mode, x);
2628 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2631 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2634 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2635 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2636 move_operand. */
2638 static void
2639 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2641 rtx base, offset;
2643 /* Split moves of big integers into smaller pieces. */
2644 if (splittable_const_int_operand (src, mode))
2646 mips_move_integer (dest, dest, INTVAL (src));
2647 return;
2650 /* Split moves of symbolic constants into high/low pairs. */
2651 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2653 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2654 return;
2657 /* Generate the appropriate access sequences for TLS symbols. */
2658 if (mips_tls_symbol_p (src))
2660 mips_emit_move (dest, mips_legitimize_tls_address (src));
2661 return;
2664 /* If we have (const (plus symbol offset)), and that expression cannot
2665 be forced into memory, load the symbol first and add in the offset.
2666 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2667 forced into memory, as it usually produces better code. */
2668 split_const (src, &base, &offset);
2669 if (offset != const0_rtx
2670 && (targetm.cannot_force_const_mem (src)
2671 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2673 base = mips_force_temporary (dest, base);
2674 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2675 return;
2678 src = force_const_mem (mode, src);
2680 /* When using explicit relocs, constant pool references are sometimes
2681 not legitimate addresses. */
2682 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2683 mips_emit_move (dest, src);
2686 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2687 sequence that is valid. */
2689 bool
2690 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2692 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2694 mips_emit_move (dest, force_reg (mode, src));
2695 return true;
2698 /* We need to deal with constants that would be legitimate
2699 immediate_operands but aren't legitimate move_operands. */
2700 if (CONSTANT_P (src) && !move_operand (src, mode))
2702 mips_legitimize_const_move (mode, dest, src);
2703 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2704 return true;
2706 return false;
2709 /* Return true if value X in context CONTEXT is a small-data address
2710 that can be rewritten as a LO_SUM. */
2712 static bool
2713 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2715 enum mips_symbol_type symbol_type;
2717 return (TARGET_EXPLICIT_RELOCS
2718 && mips_symbolic_constant_p (x, context, &symbol_type)
2719 && symbol_type == SYMBOL_GP_RELATIVE);
2722 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2723 containing MEM, or null if none. */
2725 static int
2726 mips_small_data_pattern_1 (rtx *loc, void *data)
2728 enum mips_symbol_context context;
2730 if (GET_CODE (*loc) == LO_SUM)
2731 return -1;
2733 if (MEM_P (*loc))
2735 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2736 return 1;
2737 return -1;
2740 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2741 return mips_rewrite_small_data_p (*loc, context);
2744 /* Return true if OP refers to small data symbols directly, not through
2745 a LO_SUM. */
2747 bool
2748 mips_small_data_pattern_p (rtx op)
2750 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2753 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2754 DATA is the containing MEM, or null if none. */
2756 static int
2757 mips_rewrite_small_data_1 (rtx *loc, void *data)
2759 enum mips_symbol_context context;
2761 if (MEM_P (*loc))
2763 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2764 return -1;
2767 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2768 if (mips_rewrite_small_data_p (*loc, context))
2769 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2771 if (GET_CODE (*loc) == LO_SUM)
2772 return -1;
2774 return 0;
2777 /* Rewrite instruction pattern PATTERN so that it refers to small data
2778 using explicit relocations. */
2781 mips_rewrite_small_data (rtx pattern)
2783 pattern = copy_insn (pattern);
2784 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
2785 return pattern;
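/* For example, with explicit relocs a load from small-data variable x
ends up as something like

lw	$2,%gp_rel(x)($28)

rather than relying on assembler macro expansion of the address.  */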
2788 /* We need a lot of little routines to check the range of MIPS16 immediate
2789 operands. */
2791 static int
2792 m16_check_op (rtx op, int low, int high, int mask)
2794 return (GET_CODE (op) == CONST_INT
2795 && IN_RANGE (INTVAL (op), low, high)
2796 && (INTVAL (op) & mask) == 0);
2800 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2802 return m16_check_op (op, 0x1, 0x8, 0);
2806 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2808 return m16_check_op (op, -0x8, 0x7, 0);
2812 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2814 return m16_check_op (op, -0x7, 0x8, 0);
2818 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2820 return m16_check_op (op, -0x10, 0xf, 0);
2824 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2826 return m16_check_op (op, -0xf, 0x10, 0);
2830 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2832 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
2836 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2838 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
2842 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2844 return m16_check_op (op, -0x80, 0x7f, 0);
2848 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2850 return m16_check_op (op, -0x7f, 0x80, 0);
2854 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2856 return m16_check_op (op, 0x0, 0xff, 0);
2860 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2862 return m16_check_op (op, -0xff, 0x0, 0);
2866 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2868 return m16_check_op (op, -0x1, 0xfe, 0);
2872 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2874 return m16_check_op (op, 0x0, 0xff << 2, 3);
2878 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2880 return m16_check_op (op, -0xff << 2, 0x0, 3);
2884 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2886 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
2890 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2892 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
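/* For example, m16_uimm8_4 accepts multiples of 4 in [0, 0x3fc] and
m16_simm8_8 accepts multiples of 8 in [-0x400, 0x3f8], matching the
scaled immediate fields of the corresponding MIPS16 instructions.  */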
2895 /* The cost of loading values from the constant pool. It should be
2896 larger than the cost of any constant we want to synthesize inline. */
2897 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2899 /* Return the cost of X when used as an operand to the MIPS16 instruction
2900 that implements CODE. Return -1 if there is no such instruction, or if
2901 X is not a valid immediate operand for it. */
2903 static int
2904 mips16_constant_cost (int code, HOST_WIDE_INT x)
2906 switch (code)
2908 case ASHIFT:
2909 case ASHIFTRT:
2910 case LSHIFTRT:
2911 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2912 other shifts are extended. The shift patterns truncate the shift
2913 count to the right size, so there are no out-of-range values. */
2914 if (IN_RANGE (x, 1, 8))
2915 return 0;
2916 return COSTS_N_INSNS (1);
2918 case PLUS:
2919 if (IN_RANGE (x, -128, 127))
2920 return 0;
2921 if (SMALL_OPERAND (x))
2922 return COSTS_N_INSNS (1);
2923 return -1;
2925 case LEU:
2926 /* Like LE, but reject the always-true case. */
2927 if (x == -1)
2928 return -1;
2929 case LE:
2930 /* We add 1 to the immediate and use SLT. */
2931 x += 1;
2932 case XOR:
2933 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2934 case LT:
2935 case LTU:
2936 if (IN_RANGE (x, 0, 255))
2937 return 0;
2938 if (SMALL_OPERAND_UNSIGNED (x))
2939 return COSTS_N_INSNS (1);
2940 return -1;
2942 case EQ:
2943 case NE:
2944 /* Equality comparisons with 0 are cheap. */
2945 if (x == 0)
2946 return 0;
2947 return -1;
2949 default:
2950 return -1;
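/* For example, for PLUS an immediate of 100 is free (it fits the
unextended range), 1000 costs one extra instruction (extended), and
0x12345 cannot be used as an immediate at all, so -1 is returned.  */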
2954 /* Return true if there is a non-MIPS16 instruction that implements CODE
2955 and if that instruction accepts X as an immediate operand. */
2957 static int
2958 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2960 switch (code)
2962 case ASHIFT:
2963 case ASHIFTRT:
2964 case LSHIFTRT:
2965 /* All shift counts are truncated to a valid constant. */
2966 return true;
2968 case ROTATE:
2969 case ROTATERT:
2970 /* Likewise rotates, if the target supports rotates at all. */
2971 return ISA_HAS_ROR;
2973 case AND:
2974 case IOR:
2975 case XOR:
2976 /* These instructions take 16-bit unsigned immediates. */
2977 return SMALL_OPERAND_UNSIGNED (x);
2979 case PLUS:
2980 case LT:
2981 case LTU:
2982 /* These instructions take 16-bit signed immediates. */
2983 return SMALL_OPERAND (x);
2985 case EQ:
2986 case NE:
2987 case GT:
2988 case GTU:
2989 /* The "immediate" forms of these instructions are really
2990 implemented as comparisons with register 0. */
2991 return x == 0;
2993 case GE:
2994 case GEU:
2995 /* Likewise, meaning that the only valid immediate operand is 1. */
2996 return x == 1;
2998 case LE:
2999 /* We add 1 to the immediate and use SLT. */
3000 return SMALL_OPERAND (x + 1);
3002 case LEU:
3003 /* Likewise SLTU, but reject the always-true case. */
3004 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3006 case SIGN_EXTRACT:
3007 case ZERO_EXTRACT:
3008 /* The bit position and size are immediate operands. */
3009 return ISA_HAS_EXT_INS;
3011 default:
3012 /* By default assume that $0 can be used for 0. */
3013 return x == 0;
3017 /* Return the cost of binary operation X, given that the instruction
3018 sequence for a word-sized or smaller operation has cost SINGLE_COST
3019 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3021 static int
3022 mips_binary_cost (rtx x, int single_cost, int double_cost)
3024 int cost;
3026 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3027 cost = double_cost;
3028 else
3029 cost = single_cost;
3030 return (cost
3031 + rtx_cost (XEXP (x, 0), 0)
3032 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3035 /* Return the cost of floating-point multiplications of mode MODE. */
3037 static int
3038 mips_fp_mult_cost (enum machine_mode mode)
3040 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3043 /* Return the cost of floating-point divisions of mode MODE. */
3045 static int
3046 mips_fp_div_cost (enum machine_mode mode)
3048 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3051 /* Return the cost of sign-extending OP to mode MODE, not including the
3052 cost of OP itself. */
3054 static int
3055 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3057 if (MEM_P (op))
3058 /* Extended loads are as cheap as unextended ones. */
3059 return 0;
3061 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3062 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3063 return 0;
3065 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3066 /* We can use SEB or SEH. */
3067 return COSTS_N_INSNS (1);
3069 /* We need to use a shift left and a shift right. */
3070 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3073 /* Return the cost of zero-extending OP to mode MODE, not including the
3074 cost of OP itself. */
3076 static int
3077 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3079 if (MEM_P (op))
3080 /* Extended loads are as cheap as unextended ones. */
3081 return 0;
3083 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3084 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3085 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3087 if (GENERATE_MIPS16E)
3088 /* We can use ZEB or ZEH. */
3089 return COSTS_N_INSNS (1);
3091 if (TARGET_MIPS16)
3092 /* We need to load 0xff or 0xffff into a register and use AND. */
3093 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3095 /* We can use ANDI. */
3096 return COSTS_N_INSNS (1);
3099 /* Implement TARGET_RTX_COSTS. */
3101 static bool
3102 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3104 enum machine_mode mode = GET_MODE (x);
3105 bool float_mode_p = FLOAT_MODE_P (mode);
3106 int cost;
3107 rtx addr;
3109 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3110 appear in the instruction stream, and the cost of a comparison is
3111 really the cost of the branch or scc condition. At the time of
3112 writing, GCC only uses an explicit outer COMPARE code when optabs
3113 is testing whether a constant is expensive enough to force into a
3114 register. We want optabs to pass such constants through the MIPS
3115 expanders instead, so make all constants very cheap here. */
3116 if (outer_code == COMPARE)
3118 gcc_assert (CONSTANT_P (x));
3119 *total = 0;
3120 return true;
3123 switch (code)
3125 case CONST_INT:
3126 /* Treat *clear_upper32-style ANDs as having zero cost in the
3127 second operand. The cost is entirely in the first operand.
3129 ??? This is needed because we would otherwise try to CSE
3130 the constant operand. Although that's the right thing for
3131 instructions that continue to be a register operation throughout
3132 compilation, it is disastrous for instructions that could
3133 later be converted into a memory operation. */
3134 if (TARGET_64BIT
3135 && outer_code == AND
3136 && UINTVAL (x) == 0xffffffff)
3138 *total = 0;
3139 return true;
3142 if (TARGET_MIPS16)
3144 cost = mips16_constant_cost (outer_code, INTVAL (x));
3145 if (cost >= 0)
3147 *total = cost;
3148 return true;
3151 else
3153 /* When not optimizing for size, we care more about the cost
3154 of hot code, and hot code is often in a loop. If a constant
3155 operand needs to be forced into a register, we will often be
3156 able to hoist the constant load out of the loop, so the load
3157 should not contribute to the cost. */
3158 if (!optimize_size
3159 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3161 *total = 0;
3162 return true;
3165 /* Fall through. */
3167 case CONST:
3168 case SYMBOL_REF:
3169 case LABEL_REF:
3170 case CONST_DOUBLE:
3171 if (force_to_mem_operand (x, VOIDmode))
3173 *total = COSTS_N_INSNS (1);
3174 return true;
3176 cost = mips_const_insns (x);
3177 if (cost > 0)
3179 /* If the constant is likely to be stored in a GPR, SETs of
3180 single-insn constants are as cheap as register sets; we
3181 never want to CSE them.
3183 Don't reduce the cost of storing a floating-point zero in
3184 FPRs. If we have a zero in an FPR for other reasons, we
3185 can get better cfg-cleanup and delayed-branch results by
3186 using it consistently, rather than using $0 sometimes and
3187 an FPR at other times. Also, moves between floating-point
3188 registers are sometimes cheaper than (D)MTC1 $0. */
3189 if (cost == 1
3190 && outer_code == SET
3191 && !(float_mode_p && TARGET_HARD_FLOAT))
3192 cost = 0;
3193 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3194 want to CSE the constant itself. It is usually better to
3195 have N copies of the last operation in the sequence and one
3196 shared copy of the other operations. (Note that this is
3197 not true for MIPS16 code, where the final operation in the
3198 sequence is often an extended instruction.)
3200 Also, if we have a CONST_INT, we don't know whether it is
3201 for a word or doubleword operation, so we cannot rely on
3202 the result of mips_build_integer. */
3203 else if (!TARGET_MIPS16
3204 && (outer_code == SET || mode == VOIDmode))
3205 cost = 1;
3206 *total = COSTS_N_INSNS (cost);
3207 return true;
3209 /* The value will need to be fetched from the constant pool. */
3210 *total = CONSTANT_POOL_COST;
3211 return true;
3213 case MEM:
3214 /* If the address is legitimate, return the number of
3215 instructions it needs. */
3216 addr = XEXP (x, 0);
3217 cost = mips_address_insns (addr, mode, true);
3218 if (cost > 0)
3220 *total = COSTS_N_INSNS (cost + 1);
3221 return true;
3223 /* Check for a scaled indexed address. */
3224 if (mips_lwxs_address_p (addr))
3226 *total = COSTS_N_INSNS (2);
3227 return true;
3229 /* Otherwise use the default handling. */
3230 return false;
3232 case FFS:
3233 *total = COSTS_N_INSNS (6);
3234 return false;
3236 case NOT:
3237 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3238 return false;
3240 case AND:
3241 /* Check for a *clear_upper32 pattern and treat it like a zero
3242 extension. See the pattern's comment for details. */
3243 if (TARGET_64BIT
3244 && mode == DImode
3245 && CONST_INT_P (XEXP (x, 1))
3246 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3248 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3249 + rtx_cost (XEXP (x, 0), 0));
3250 return true;
3252 /* Fall through. */
3254 case IOR:
3255 case XOR:
3256 /* Double-word operations use two single-word operations. */
3257 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3258 return true;
3260 case ASHIFT:
3261 case ASHIFTRT:
3262 case LSHIFTRT:
3263 case ROTATE:
3264 case ROTATERT:
3265 if (CONSTANT_P (XEXP (x, 1)))
3266 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3267 else
3268 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3269 return true;
3271 case ABS:
3272 if (float_mode_p)
3273 *total = mips_cost->fp_add;
3274 else
3275 *total = COSTS_N_INSNS (4);
3276 return false;
3278 case LO_SUM:
3279 /* Low-part immediates need an extended MIPS16 instruction. */
3280 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3281 + rtx_cost (XEXP (x, 0), 0));
3282 return true;
3284 case LT:
3285 case LTU:
3286 case LE:
3287 case LEU:
3288 case GT:
3289 case GTU:
3290 case GE:
3291 case GEU:
3292 case EQ:
3293 case NE:
3294 case UNORDERED:
3295 case LTGT:
3296 /* Branch comparisons have VOIDmode, so use the first operand's
3297 mode instead. */
3298 mode = GET_MODE (XEXP (x, 0));
3299 if (FLOAT_MODE_P (mode))
3301 *total = mips_cost->fp_add;
3302 return false;
3304 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3305 return true;
3307 case MINUS:
3308 if (float_mode_p
3309 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3310 && TARGET_FUSED_MADD
3311 && !HONOR_NANS (mode)
3312 && !HONOR_SIGNED_ZEROS (mode))
3314 /* See if we can use NMADD or NMSUB. See mips.md for the
3315 associated patterns. */
3316 rtx op0 = XEXP (x, 0);
3317 rtx op1 = XEXP (x, 1);
3318 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3320 *total = (mips_fp_mult_cost (mode)
3321 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3322 + rtx_cost (XEXP (op0, 1), 0)
3323 + rtx_cost (op1, 0));
3324 return true;
3326 if (GET_CODE (op1) == MULT)
3328 *total = (mips_fp_mult_cost (mode)
3329 + rtx_cost (op0, 0)
3330 + rtx_cost (XEXP (op1, 0), 0)
3331 + rtx_cost (XEXP (op1, 1), 0));
3332 return true;
3335 /* Fall through. */
3337 case PLUS:
3338 if (float_mode_p)
3340 /* If this is part of a MADD or MSUB, treat the PLUS as
3341 being free. */
3342 if (ISA_HAS_FP4
3343 && TARGET_FUSED_MADD
3344 && GET_CODE (XEXP (x, 0)) == MULT)
3345 *total = 0;
3346 else
3347 *total = mips_cost->fp_add;
3348 return false;
3351 /* Double-word operations require three single-word operations and
3352 an SLTU. The MIPS16 version then needs to move the result of
3353 the SLTU from $24 to a MIPS16 register. */
3354 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3355 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3356 return true;
3358 case NEG:
3359 if (float_mode_p
3360 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3361 && TARGET_FUSED_MADD
3362 && !HONOR_NANS (mode)
3363 && HONOR_SIGNED_ZEROS (mode))
3365 /* See if we can use NMADD or NMSUB. See mips.md for the
3366 associated patterns. */
3367 rtx op = XEXP (x, 0);
3368 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3369 && GET_CODE (XEXP (op, 0)) == MULT)
3371 *total = (mips_fp_mult_cost (mode)
3372 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3373 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3374 + rtx_cost (XEXP (op, 1), 0));
3375 return true;
3379 if (float_mode_p)
3380 *total = mips_cost->fp_add;
3381 else
3382 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3383 return false;
3385 case MULT:
3386 if (float_mode_p)
3387 *total = mips_fp_mult_cost (mode);
3388 else if (mode == DImode && !TARGET_64BIT)
3389 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3390 where the mulsidi3 always includes an MFHI and an MFLO. */
3391 *total = (optimize_size
3392 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3393 : mips_cost->int_mult_si * 3 + 6);
3394 else if (optimize_size)
3395 *total = (ISA_HAS_MUL3 ? 1 : 2);
3396 else if (mode == DImode)
3397 *total = mips_cost->int_mult_di;
3398 else
3399 *total = mips_cost->int_mult_si;
3400 return false;
3402 case DIV:
3403 /* Check for a reciprocal. */
3404 if (float_mode_p
3405 && ISA_HAS_FP4
3406 && flag_unsafe_math_optimizations
3407 && XEXP (x, 0) == CONST1_RTX (mode))
3409 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3410 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3411 division as being free. */
3412 *total = rtx_cost (XEXP (x, 1), 0);
3413 else
3414 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3415 return true;
3417 /* Fall through. */
3419 case SQRT:
3420 case MOD:
3421 if (float_mode_p)
3423 *total = mips_fp_div_cost (mode);
3424 return false;
3426 /* Fall through. */
3428 case UDIV:
3429 case UMOD:
3430 if (optimize_size)
3432 /* It is our responsibility to make division by a power of 2
3433 as cheap as 2 register additions if we want the division
3434 expanders to be used for such operations; see the setting
3435 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3436 should always produce shorter code than using
3437 expand_sdiv2_pow2. */
3438 if (TARGET_MIPS16
3439 && CONST_INT_P (XEXP (x, 1))
3440 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3442 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3443 return true;
3445 *total = COSTS_N_INSNS (mips_idiv_insns ());
3447 else if (mode == DImode)
3448 *total = mips_cost->int_div_di;
3449 else
3450 *total = mips_cost->int_div_si;
3451 return false;
3453 case SIGN_EXTEND:
3454 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3455 return false;
3457 case ZERO_EXTEND:
3458 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3459 return false;
3461 case FLOAT:
3462 case UNSIGNED_FLOAT:
3463 case FIX:
3464 case FLOAT_EXTEND:
3465 case FLOAT_TRUNCATE:
3466 *total = mips_cost->fp_add;
3467 return false;
3469 default:
3470 return false;
3474 /* Implement TARGET_ADDRESS_COST. */
3476 static int
3477 mips_address_cost (rtx addr)
3479 return mips_address_insns (addr, SImode, false);
3482 /* Return one word of double-word value OP, taking into account the fixed
3483 endianness of certain registers. HIGH_P is true to select the high part,
3484 false to select the low part. */
3487 mips_subword (rtx op, bool high_p)
3489 unsigned int byte, offset;
3490 enum machine_mode mode;
3492 mode = GET_MODE (op);
3493 if (mode == VOIDmode)
3494 mode = TARGET_64BIT ? TImode : DImode;
3496 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3497 byte = UNITS_PER_WORD;
3498 else
3499 byte = 0;
3501 if (FP_REG_RTX_P (op))
3503 /* Paired FPRs are always ordered little-endian. */
3504 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3505 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3508 if (MEM_P (op))
3509 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3511 return simplify_gen_subreg (word_mode, op, mode, byte);
3514 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3516 bool
3517 mips_split_64bit_move_p (rtx dest, rtx src)
3519 if (TARGET_64BIT)
3520 return false;
3522 /* FPR-to-FPR moves can be done in a single instruction, if they're
3523 allowed at all. */
3524 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3525 return false;
3527 /* Check for floating-point loads and stores. */
3528 if (ISA_HAS_LDC1_SDC1)
3530 if (FP_REG_RTX_P (dest) && MEM_P (src))
3531 return false;
3532 if (FP_REG_RTX_P (src) && MEM_P (dest))
3533 return false;
3535 return true;
3538 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3539 this function handles 64-bit moves for which mips_split_64bit_move_p
3540 holds. For 64-bit targets, this function handles 128-bit moves. */
3542 void
3543 mips_split_doubleword_move (rtx dest, rtx src)
3545 rtx low_dest;
3547 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3549 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3550 emit_insn (gen_move_doubleword_fprdi (dest, src));
3551 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3552 emit_insn (gen_move_doubleword_fprdf (dest, src));
3553 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3554 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3555 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
3556 emit_insn (gen_move_doubleword_fprv2si (dest, src));
3557 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
3558 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
3559 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
3560 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
3561 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3562 emit_insn (gen_move_doubleword_fprtf (dest, src));
3563 else
3564 gcc_unreachable ();
3566 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3568 low_dest = mips_subword (dest, false);
3569 mips_emit_move (low_dest, mips_subword (src, false));
3570 if (TARGET_64BIT)
3571 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3572 else
3573 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3575 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3577 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3578 if (TARGET_64BIT)
3579 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3580 else
3581 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3583 else
3585 /* The operation can be split into two normal moves. Decide in
3586 which order to do them. */
3587 low_dest = mips_subword (dest, false);
3588 if (REG_P (low_dest)
3589 && reg_overlap_mentioned_p (low_dest, src))
3591 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3592 mips_emit_move (low_dest, mips_subword (src, false));
3594 else
3596 mips_emit_move (low_dest, mips_subword (src, false));
3597 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3602 /* Return the appropriate instructions to move SRC into DEST. Assume
3603 that SRC is operand 1 and DEST is operand 0. */
3605 const char *
3606 mips_output_move (rtx dest, rtx src)
3608 enum rtx_code dest_code, src_code;
3609 enum machine_mode mode;
3610 enum mips_symbol_type symbol_type;
3611 bool dbl_p;
3613 dest_code = GET_CODE (dest);
3614 src_code = GET_CODE (src);
3615 mode = GET_MODE (dest);
3616 dbl_p = (GET_MODE_SIZE (mode) == 8);
3618 if (dbl_p && mips_split_64bit_move_p (dest, src))
3619 return "#";
3621 if ((src_code == REG && GP_REG_P (REGNO (src)))
3622 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3624 if (dest_code == REG)
3626 if (GP_REG_P (REGNO (dest)))
3627 return "move\t%0,%z1";
3629 /* Moves to HI are handled by special .md insns. */
3630 if (REGNO (dest) == LO_REGNUM)
3631 return "mtlo\t%z1";
3633 if (DSP_ACC_REG_P (REGNO (dest)))
3635 static char retval[] = "mt__\t%z1,%q0";
3637 retval[2] = reg_names[REGNO (dest)][4];
3638 retval[3] = reg_names[REGNO (dest)][5];
3639 return retval;
3642 if (FP_REG_P (REGNO (dest)))
3643 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3645 if (ALL_COP_REG_P (REGNO (dest)))
3647 static char retval[] = "dmtc_\t%z1,%0";
3649 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3650 return dbl_p ? retval : retval + 1;
3653 if (dest_code == MEM)
3654 switch (GET_MODE_SIZE (mode))
3656 case 1: return "sb\t%z1,%0";
3657 case 2: return "sh\t%z1,%0";
3658 case 4: return "sw\t%z1,%0";
3659 case 8: return "sd\t%z1,%0";
3662 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3664 if (src_code == REG)
3666 /* Moves from HI are handled by special .md insns. */
3667 if (REGNO (src) == LO_REGNUM)
3669 /* When generating VR4120 or VR4130 code, we use MACC and
3670 DMACC instead of MFLO. This avoids both the normal
3671 MIPS III HI/LO hazards and the errata related to
3672 -mfix-vr4130. */
3673 if (ISA_HAS_MACCHI)
3674 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3675 return "mflo\t%0";
3678 if (DSP_ACC_REG_P (REGNO (src)))
3680 static char retval[] = "mf__\t%0,%q1";
3682 retval[2] = reg_names[REGNO (src)][4];
3683 retval[3] = reg_names[REGNO (src)][5];
3684 return retval;
3687 if (FP_REG_P (REGNO (src)))
3688 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3690 if (ALL_COP_REG_P (REGNO (src)))
3692 static char retval[] = "dmfc_\t%0,%1";
3694 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3695 return dbl_p ? retval : retval + 1;
3698 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3699 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3702 if (src_code == MEM)
3703 switch (GET_MODE_SIZE (mode))
3705 case 1: return "lbu\t%0,%1";
3706 case 2: return "lhu\t%0,%1";
3707 case 4: return "lw\t%0,%1";
3708 case 8: return "ld\t%0,%1";
3711 if (src_code == CONST_INT)
3713 /* Don't use the X format for the operand itself, because that
3714 will give out-of-range numbers for 64-bit hosts and 32-bit
3715 targets. */
3716 if (!TARGET_MIPS16)
3717 return "li\t%0,%1\t\t\t# %X1";
3719 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3720 return "li\t%0,%1";
3722 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3723 return "#";
3726 if (src_code == HIGH)
3727 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3729 if (CONST_GP_P (src))
3730 return "move\t%0,%1";
3732 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3733 && mips_lo_relocs[symbol_type] != 0)
3735 /* A signed 16-bit constant formed by applying a relocation
3736 operator to a symbolic address. */
3737 gcc_assert (!mips_split_p[symbol_type]);
3738 return "li\t%0,%R1";
3741 if (symbolic_operand (src, VOIDmode))
3743 gcc_assert (TARGET_MIPS16
3744 ? TARGET_MIPS16_TEXT_LOADS
3745 : !TARGET_EXPLICIT_RELOCS);
3746 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3749 if (src_code == REG && FP_REG_P (REGNO (src)))
3751 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3753 if (GET_MODE (dest) == V2SFmode)
3754 return "mov.ps\t%0,%1";
3755 else
3756 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3759 if (dest_code == MEM)
3760 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3762 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3764 if (src_code == MEM)
3765 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3767 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3769 static char retval[] = "l_c_\t%0,%1";
3771 retval[1] = (dbl_p ? 'd' : 'w');
3772 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3773 return retval;
3775 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3777 static char retval[] = "s_c_\t%1,%0";
3779 retval[1] = (dbl_p ? 'd' : 'w');
3780 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3781 return retval;
3783 gcc_unreachable ();
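/* Illustrative examples of the templates returned above (a sketch, not an
   exhaustive list): a SImode copy between two GPRs yields "move"; a SImode
   load from memory into a GPR yields "lw"; and a DFmode copy between two
   FPRs on a double-float target yields "mov.d".  */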
3786 /* Return true if CMP1 is a suitable second operand for integer ordering
3787 test CODE. See also the *sCC patterns in mips.md. */
3789 static bool
3790 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
3792 switch (code)
3794 case GT:
3795 case GTU:
3796 return reg_or_0_operand (cmp1, VOIDmode);
3798 case GE:
3799 case GEU:
3800 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3802 case LT:
3803 case LTU:
3804 return arith_operand (cmp1, VOIDmode);
3806 case LE:
3807 return sle_operand (cmp1, VOIDmode);
3809 case LEU:
3810 return sleu_operand (cmp1, VOIDmode);
3812 default:
3813 gcc_unreachable ();
3817 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
3818 integer ordering test *CODE, or if an equivalent combination can
3819 be formed by adjusting *CODE and *CMP1. When returning true, update
3820 *CODE and *CMP1 with the chosen code and operand, otherwise leave
3821 them alone. */
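/* Worked example (illustrative): a signed LE test against the constant 4
   can be canonicalized as an LT test against 5, since 4 < 5 holds in the
   given mode.  An unsigned LEU test against an all-ones constant cannot be
   rewritten this way, because the incremented value wraps to zero, so the
   function returns false and leaves *CODE and *CMP1 untouched.  */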
3823 static bool
3824 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
3825 enum machine_mode mode)
3827 HOST_WIDE_INT plus_one;
3829 if (mips_int_order_operand_ok_p (*code, *cmp1))
3830 return true;
3832 if (GET_CODE (*cmp1) == CONST_INT)
3833 switch (*code)
3835 case LE:
3836 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3837 if (INTVAL (*cmp1) < plus_one)
3839 *code = LT;
3840 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3841 return true;
3843 break;
3845 case LEU:
3846 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
3847 if (plus_one != 0)
3849 *code = LTU;
3850 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3851 return true;
3853 break;
3855 default:
3856 break;
3858 return false;
3861 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
3862 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
3863 is nonnull, it's OK to set TARGET to the inverse of the result and
3864 flip *INVERT_PTR instead. */
3866 static void
3867 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
3868 rtx target, rtx cmp0, rtx cmp1)
3870 enum machine_mode mode;
3872 /* First see if there is a MIPS instruction that can do this operation.
3873 If not, try doing the same for the inverse operation. If that also
3874 fails, force CMP1 into a register and try again. */
3875 mode = GET_MODE (cmp0);
3876 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
3877 mips_emit_binary (code, target, cmp0, cmp1);
3878 else
3880 enum rtx_code inv_code = reverse_condition (code);
3881 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
3883 cmp1 = force_reg (mode, cmp1);
3884 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
3886 else if (invert_ptr == 0)
3888 rtx inv_target;
3890 inv_target = mips_force_binary (GET_MODE (target),
3891 inv_code, cmp0, cmp1);
3892 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3894 else
3896 *invert_ptr = !*invert_ptr;
3897 mips_emit_binary (inv_code, target, cmp0, cmp1);
3902 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3903 The register will have the same mode as CMP0. */
3905 static rtx
3906 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3908 if (cmp1 == const0_rtx)
3909 return cmp0;
3911 if (uns_arith_operand (cmp1, VOIDmode))
3912 return expand_binop (GET_MODE (cmp0), xor_optab,
3913 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3915 return expand_binop (GET_MODE (cmp0), sub_optab,
3916 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3919 /* Convert *CODE into a code that can be used in a floating-point
3920 scc instruction (C.cond.fmt). Return true if the values of
3921 the condition code registers will be inverted, with 0 indicating
3922 that the condition holds. */
3924 static bool
3925 mips_reversed_fp_cond (enum rtx_code *code)
3927 switch (*code)
3929 case NE:
3930 case LTGT:
3931 case ORDERED:
3932 *code = reverse_condition_maybe_unordered (*code);
3933 return true;
3935 default:
3936 return false;
3940 /* Convert a comparison into something that can be used in a branch or
3941 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3942 being compared and *CODE is the code used to compare them.
3944 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3945 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
3946 otherwise any standard branch condition can be used. The standard branch
3947 conditions are:
3949 - EQ or NE between two registers.
3950 - any comparison between a register and zero. */
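/* Example (a sketch): a signed "a < b" test between two nonzero registers
   is not a standard branch condition, so the integer case below emits a
   set-on-less-than result into a temporary register and branches on that
   register being nonzero.  On MIPS16, where only EQ/NE against zero is
   allowed, an "a == b" test is reduced to comparing the XOR (or difference)
   of the two values against zero via mips_zero_if_equal.  */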
3952 static void
3953 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3955 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3957 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3959 *op0 = cmp_operands[0];
3960 *op1 = cmp_operands[1];
3962 else if (*code == EQ || *code == NE)
3964 if (need_eq_ne_p)
3966 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3967 *op1 = const0_rtx;
3969 else
3971 *op0 = cmp_operands[0];
3972 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3975 else
3977 /* The comparison needs a separate scc instruction. Store the
3978 result of the scc in *OP0 and compare it against zero. */
3979 bool invert = false;
3980 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3981 mips_emit_int_order_test (*code, &invert, *op0,
3982 cmp_operands[0], cmp_operands[1]);
3983 *code = (invert ? EQ : NE);
3984 *op1 = const0_rtx;
3987 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3989 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3990 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3991 *code = NE;
3992 *op1 = const0_rtx;
3994 else
3996 enum rtx_code cmp_code;
3998 /* Floating-point tests use a separate C.cond.fmt comparison to
3999 set a condition code register. The branch or conditional move
4000 will then compare that register against zero.
4002 Set CMP_CODE to the code of the comparison instruction and
4003 *CODE to the code that the branch or move should use. */
4004 cmp_code = *code;
4005 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4006 *op0 = (ISA_HAS_8CC
4007 ? gen_reg_rtx (CCmode)
4008 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4009 *op1 = const0_rtx;
4010 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4014 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4015 Store the result in TARGET and return true if successful.
4017 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
4019 bool
4020 mips_expand_scc (enum rtx_code code, rtx target)
4022 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4023 return false;
4025 if (code == EQ || code == NE)
4027 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4028 mips_emit_binary (code, target, zie, const0_rtx);
4030 else
4031 mips_emit_int_order_test (code, 0, target,
4032 cmp_operands[0], cmp_operands[1]);
4033 return true;
4036 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4037 CODE and jump to OPERANDS[0] if the condition holds. */
4039 void
4040 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4042 rtx op0, op1, condition;
4044 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4045 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4046 emit_jump_insn (gen_condjump (condition, operands[0]));
4049 /* Implement:
4051 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4052 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4054 void
4055 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4056 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4058 rtx cmp_result;
4059 bool reversed_p;
4061 reversed_p = mips_reversed_fp_cond (&cond);
4062 cmp_result = gen_reg_rtx (CCV2mode);
4063 emit_insn (gen_scc_ps (cmp_result,
4064 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4065 if (reversed_p)
4066 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4067 cmp_result));
4068 else
4069 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4070 cmp_result));
4073 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4074 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4075 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4077 void
4078 mips_expand_conditional_move (rtx *operands)
4080 enum rtx_code code;
4081 rtx cond, op0, op1;
4083 code = GET_CODE (operands[1]);
4084 mips_emit_compare (&code, &op0, &op1, true);
4085 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1),
4086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4087 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4088 operands[2], operands[3])));
4091 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4092 then trap if the condition holds. */
4094 void
4095 mips_expand_conditional_trap (enum rtx_code code)
4097 rtx op0, op1;
4098 enum machine_mode mode;
4100 /* MIPS conditional trap instructions don't have GT or LE flavors,
4101 so we must swap the operands and convert to LT and GE respectively. */
4102 switch (code)
4104 case GT:
4105 case LE:
4106 case GTU:
4107 case LEU:
4108 code = swap_condition (code);
4109 op0 = cmp_operands[1];
4110 op1 = cmp_operands[0];
4111 break;
4113 default:
4114 op0 = cmp_operands[0];
4115 op1 = cmp_operands[1];
4116 break;
4119 mode = GET_MODE (cmp_operands[0]);
4120 op0 = force_reg (mode, op0);
4121 if (!arith_operand (op1, mode))
4122 op1 = force_reg (mode, op1);
4124 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4125 gen_rtx_fmt_ee (code, mode, op0, op1),
4126 const0_rtx));
4129 /* Initialize *CUM for a call to a function of type FNTYPE. */
4131 void
4132 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4134 memset (cum, 0, sizeof (*cum));
4135 cum->prototype = (fntype && prototype_p (fntype));
4136 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4139 /* Fill INFO with information about a single argument. CUM is the
4140 cumulative state for earlier arguments. MODE is the mode of this
4141 argument and TYPE is its type (if known). NAMED is true if this
4142 is a named (fixed) argument rather than a variable one. */
4144 static void
4145 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4146 enum machine_mode mode, tree type, int named)
4148 bool doubleword_aligned_p;
4149 unsigned int num_bytes, num_words, max_regs;
4151 /* Work out the size of the argument. */
4152 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4153 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4155 /* Decide whether it should go in a floating-point register, assuming
4156 one is free. Later code checks for availability.
4158 The checks against UNITS_PER_FPVALUE handle the soft-float and
4159 single-float cases. */
4160 switch (mips_abi)
4162 case ABI_EABI:
4163 /* The EABI conventions have traditionally been defined in terms
4164 of TYPE_MODE, regardless of the actual type. */
4165 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4166 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4167 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4168 break;
4170 case ABI_32:
4171 case ABI_O64:
4172 /* Only leading floating-point scalars are passed in
4173 floating-point registers. We also handle vector floats the same
4174          way, which is OK because they are not covered by the standard ABI.  */
4175 info->fpr_p = (!cum->gp_reg_found
4176 && cum->arg_number < 2
4177 && (type == 0
4178 || SCALAR_FLOAT_TYPE_P (type)
4179 || VECTOR_FLOAT_TYPE_P (type))
4180 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4181 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4182 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4183 break;
4185 case ABI_N32:
4186 case ABI_64:
4187 /* Scalar, complex and vector floating-point types are passed in
4188 floating-point registers, as long as this is a named rather
4189 than a variable argument. */
4190 info->fpr_p = (named
4191 && (type == 0 || FLOAT_TYPE_P (type))
4192 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4193 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4194 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4195 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4197 /* ??? According to the ABI documentation, the real and imaginary
4198 parts of complex floats should be passed in individual registers.
4199 The real and imaginary parts of stack arguments are supposed
4200 to be contiguous and there should be an extra word of padding
4201 at the end.
4203 This has two problems. First, it makes it impossible to use a
4204 single "void *" va_list type, since register and stack arguments
4205 are passed differently. (At the time of writing, MIPSpro cannot
4206 handle complex float varargs correctly.) Second, it's unclear
4207 what should happen when there is only one register free.
4209 For now, we assume that named complex floats should go into FPRs
4210 if there are two FPRs free, otherwise they should be passed in the
4211 same way as a struct containing two floats. */
4212 if (info->fpr_p
4213 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4214 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4216 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4217 info->fpr_p = false;
4218 else
4219 num_words = 2;
4221 break;
4223 default:
4224 gcc_unreachable ();
4227 /* See whether the argument has doubleword alignment. */
4228 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4230 /* Set REG_OFFSET to the register count we're interested in.
4231 The EABI allocates the floating-point registers separately,
4232 but the other ABIs allocate them like integer registers. */
4233 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4234 ? cum->num_fprs
4235 : cum->num_gprs);
4237 /* Advance to an even register if the argument is doubleword-aligned. */
4238 if (doubleword_aligned_p)
4239 info->reg_offset += info->reg_offset & 1;
4241 /* Work out the offset of a stack argument. */
4242 info->stack_offset = cum->stack_words;
4243 if (doubleword_aligned_p)
4244 info->stack_offset += info->stack_offset & 1;
4246 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4248 /* Partition the argument between registers and stack. */
4249 info->reg_words = MIN (num_words, max_regs);
4250 info->stack_words = num_words - info->reg_words;
4253 /* INFO describes a register argument that has the normal format for the
4254 argument's mode. Return the register it uses, assuming that FPRs are
4255 available if HARD_FLOAT_P. */
4257 static unsigned int
4258 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4260 if (!info->fpr_p || !hard_float_p)
4261 return GP_ARG_FIRST + info->reg_offset;
4262 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4263 /* In o32, the second argument is always passed in $f14
4264 for TARGET_DOUBLE_FLOAT, regardless of whether the
4265 first argument was a word or doubleword. */
4266 return FP_ARG_FIRST + 2;
4267 else
4268 return FP_ARG_FIRST + info->reg_offset;
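/* Example (a sketch, o32 with -mdouble-float): for a call such as
   "f (float a, double b)", A is passed in $f12 and B in $f14; the special
   case above is what skips $f13 even though A only occupied a single
   word.  */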
4271 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4273 static bool
4274 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4276 return !TARGET_OLDABI;
4279 /* Implement FUNCTION_ARG. */
4282 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4283 tree type, int named)
4285 struct mips_arg_info info;
4287 /* We will be called with a mode of VOIDmode after the last argument
4288 has been seen. Whatever we return will be passed to the call expander.
4289 If we need a MIPS16 fp_code, return a REG with the code stored as
4290 the mode. */
4291 if (mode == VOIDmode)
4293 if (TARGET_MIPS16 && cum->fp_code != 0)
4294 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4295 else
4296 return NULL;
4299 mips_get_arg_info (&info, cum, mode, type, named);
4301 /* Return straight away if the whole argument is passed on the stack. */
4302 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4303 return NULL;
4305 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4306 contains a double in its entirety, then that 64-bit chunk is passed
4307 in a floating-point register. */
4308 if (TARGET_NEWABI
4309 && TARGET_HARD_FLOAT
4310 && named
4311 && type != 0
4312 && TREE_CODE (type) == RECORD_TYPE
4313 && TYPE_SIZE_UNIT (type)
4314 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4316 tree field;
4318 /* First check to see if there is any such field. */
4319 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4320 if (TREE_CODE (field) == FIELD_DECL
4321 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4322 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4323 && host_integerp (bit_position (field), 0)
4324 && int_bit_position (field) % BITS_PER_WORD == 0)
4325 break;
4327 if (field != 0)
4329 /* Now handle the special case by returning a PARALLEL
4330 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4331 chunks are passed in registers. */
4332 unsigned int i;
4333 HOST_WIDE_INT bitpos;
4334 rtx ret;
4336 /* assign_parms checks the mode of ENTRY_PARM, so we must
4337 use the actual mode here. */
4338 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4340 bitpos = 0;
4341 field = TYPE_FIELDS (type);
4342 for (i = 0; i < info.reg_words; i++)
4344 rtx reg;
4346 for (; field; field = TREE_CHAIN (field))
4347 if (TREE_CODE (field) == FIELD_DECL
4348 && int_bit_position (field) >= bitpos)
4349 break;
4351 if (field
4352 && int_bit_position (field) == bitpos
4353 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4354 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4355 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4356 else
4357 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4359 XVECEXP (ret, 0, i)
4360 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4361 GEN_INT (bitpos / BITS_PER_UNIT));
4363 bitpos += BITS_PER_WORD;
4365 return ret;
4369 /* Handle the n32/n64 conventions for passing complex floating-point
4370 arguments in FPR pairs. The real part goes in the lower register
4371 and the imaginary part goes in the upper register. */
4372 if (TARGET_NEWABI
4373 && info.fpr_p
4374 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4376 rtx real, imag;
4377 enum machine_mode inner;
4378 unsigned int regno;
4380 inner = GET_MODE_INNER (mode);
4381 regno = FP_ARG_FIRST + info.reg_offset;
4382 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4384 /* Real part in registers, imaginary part on stack. */
4385 gcc_assert (info.stack_words == info.reg_words);
4386 return gen_rtx_REG (inner, regno);
4388 else
4390 gcc_assert (info.stack_words == 0);
4391 real = gen_rtx_EXPR_LIST (VOIDmode,
4392 gen_rtx_REG (inner, regno),
4393 const0_rtx);
4394 imag = gen_rtx_EXPR_LIST (VOIDmode,
4395 gen_rtx_REG (inner,
4396 regno + info.reg_words / 2),
4397 GEN_INT (GET_MODE_SIZE (inner)));
4398 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4402 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4405 /* Implement FUNCTION_ARG_ADVANCE. */
4407 void
4408 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4409 tree type, int named)
4411 struct mips_arg_info info;
4413 mips_get_arg_info (&info, cum, mode, type, named);
4415 if (!info.fpr_p)
4416 cum->gp_reg_found = true;
4418 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4419 an explanation of what this code does. It assumes that we're using
4420 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4421 in FPRs. */
4422 if (cum->arg_number < 2 && info.fpr_p)
4423 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4425 /* Advance the register count. This has the effect of setting
4426 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4427 argument required us to skip the final GPR and pass the whole
4428 argument on the stack. */
4429 if (mips_abi != ABI_EABI || !info.fpr_p)
4430 cum->num_gprs = info.reg_offset + info.reg_words;
4431 else if (info.reg_words > 0)
4432 cum->num_fprs += MAX_FPRS_PER_FMT;
4434 /* Advance the stack word count. */
4435 if (info.stack_words > 0)
4436 cum->stack_words = info.stack_offset + info.stack_words;
4438 cum->arg_number++;
4441 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4443 static int
4444 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4445 enum machine_mode mode, tree type, bool named)
4447 struct mips_arg_info info;
4449 mips_get_arg_info (&info, cum, mode, type, named);
4450 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
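/* Worked example (illustrative): if an argument needs four words but only
   two argument registers remain, INFO.REG_WORDS is 2 and INFO.STACK_WORDS
   is 2, so the hook above reports 2 * UNITS_PER_WORD bytes as the part
   passed in registers.  */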
4453 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4454 PARM_BOUNDARY bits of alignment, but will be given anything up
4455 to STACK_BOUNDARY bits if the type requires it. */
4458 mips_function_arg_boundary (enum machine_mode mode, tree type)
4460 unsigned int alignment;
4462 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4463 if (alignment < PARM_BOUNDARY)
4464 alignment = PARM_BOUNDARY;
4465 if (alignment > STACK_BOUNDARY)
4466 alignment = STACK_BOUNDARY;
4467 return alignment;
4470 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4471 upward rather than downward. In other words, return true if the
4472 first byte of the stack slot has useful data, false if the last
4473 byte does. */
4475 bool
4476 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4478 /* On little-endian targets, the first byte of every stack argument
4479 is passed in the first byte of the stack slot. */
4480 if (!BYTES_BIG_ENDIAN)
4481 return true;
4483 /* Otherwise, integral types are padded downward: the last byte of a
4484 stack argument is passed in the last byte of the stack slot. */
4485 if (type != 0
4486 ? (INTEGRAL_TYPE_P (type)
4487 || POINTER_TYPE_P (type)
4488 || FIXED_POINT_TYPE_P (type))
4489 : (SCALAR_INT_MODE_P (mode)
4490 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4491 return false;
4493 /* Big-endian o64 pads floating-point arguments downward. */
4494 if (mips_abi == ABI_O64)
4495 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4496 return false;
4498 /* Other types are padded upward for o32, o64, n32 and n64. */
4499 if (mips_abi != ABI_EABI)
4500 return true;
4502 /* Arguments smaller than a stack slot are padded downward. */
4503 if (mode != BLKmode)
4504 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4505 else
4506 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
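/* Example (a sketch): on a big-endian o32 target, a "char" argument passed
   on the stack is padded downward, so its value occupies the last byte of
   its 4-byte slot, whereas a small structure argument is padded upward and
   starts at the first byte of its slot.  On little-endian targets both
   start at the first byte.  */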
4509 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4510 if the least significant byte of the register has useful data. Return
4511 the opposite if the most significant byte does. */
4513 bool
4514 mips_pad_reg_upward (enum machine_mode mode, tree type)
4516 /* No shifting is required for floating-point arguments. */
4517 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4518 return !BYTES_BIG_ENDIAN;
4520 /* Otherwise, apply the same padding to register arguments as we do
4521 to stack arguments. */
4522 return mips_pad_arg_upward (mode, type);
4525 /* Return nonzero when an argument must be passed by reference. */
4527 static bool
4528 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4529 enum machine_mode mode, const_tree type,
4530 bool named ATTRIBUTE_UNUSED)
4532 if (mips_abi == ABI_EABI)
4534 int size;
4536 /* ??? How should SCmode be handled? */
4537 if (mode == DImode || mode == DFmode
4538 || mode == DQmode || mode == UDQmode
4539 || mode == DAmode || mode == UDAmode)
4540 return 0;
4542 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4543 return size == -1 || size > UNITS_PER_WORD;
4545 else
4547 /* If we have a variable-sized parameter, we have no choice. */
4548 return targetm.calls.must_pass_in_stack (mode, type);
4552 /* Implement TARGET_CALLEE_COPIES. */
4554 static bool
4555 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4556 enum machine_mode mode ATTRIBUTE_UNUSED,
4557 const_tree type ATTRIBUTE_UNUSED, bool named)
4559 return mips_abi == ABI_EABI && named;
4562 /* See whether VALTYPE is a record whose fields should be returned in
4563 floating-point registers. If so, return the number of fields and
4564 list them in FIELDS (which should have two elements). Return 0
4565 otherwise.
4567 For n32 & n64, a structure with one or two fields is returned in
4568 floating-point registers as long as every field has a floating-point
4569 type. */
4571 static int
4572 mips_fpr_return_fields (const_tree valtype, tree *fields)
4574 tree field;
4575 int i;
4577 if (!TARGET_NEWABI)
4578 return 0;
4580 if (TREE_CODE (valtype) != RECORD_TYPE)
4581 return 0;
4583 i = 0;
4584 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4586 if (TREE_CODE (field) != FIELD_DECL)
4587 continue;
4589 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4590 return 0;
4592 if (i == 2)
4593 return 0;
4595 fields[i++] = field;
4597 return i;
4600 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4601 a value in the most significant part of $2/$3 if:
4603 - the target is big-endian;
4605 - the value has a structure or union type (we generalize this to
4606 cover aggregates from other languages too); and
4608 - the structure is not returned in floating-point registers. */
4610 static bool
4611 mips_return_in_msb (const_tree valtype)
4613 tree fields[2];
4615 return (TARGET_NEWABI
4616 && TARGET_BIG_ENDIAN
4617 && AGGREGATE_TYPE_P (valtype)
4618 && mips_fpr_return_fields (valtype, fields) == 0);
4621 /* Return true if the function return value MODE will get returned in a
4622 floating-point register. */
4624 static bool
4625 mips_return_mode_in_fpr_p (enum machine_mode mode)
4627 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4628 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4629 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4630 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4633 /* Return the representation of an FPR return register when the
4634 value being returned in FP_RETURN has mode VALUE_MODE and the
4635 return type itself has mode TYPE_MODE. On NewABI targets,
4636 the two modes may be different for structures like:
4638 struct __attribute__((packed)) foo { float f; }
4640 where we return the SFmode value of "f" in FP_RETURN, but where
4641 the structure itself has mode BLKmode. */
4643 static rtx
4644 mips_return_fpr_single (enum machine_mode type_mode,
4645 enum machine_mode value_mode)
4647 rtx x;
4649 x = gen_rtx_REG (value_mode, FP_RETURN);
4650 if (type_mode != value_mode)
4652 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4653 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4655 return x;
4658 /* Return a composite value in a pair of floating-point registers.
4659 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4660 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4661 complete value.
4663 For n32 & n64, $f0 always holds the first value and $f2 the second.
4664 Otherwise the values are packed together as closely as possible. */
4666 static rtx
4667 mips_return_fpr_pair (enum machine_mode mode,
4668 enum machine_mode mode1, HOST_WIDE_INT offset1,
4669 enum machine_mode mode2, HOST_WIDE_INT offset2)
4671 int inc;
4673 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4674 return gen_rtx_PARALLEL
4675 (mode,
4676 gen_rtvec (2,
4677 gen_rtx_EXPR_LIST (VOIDmode,
4678 gen_rtx_REG (mode1, FP_RETURN),
4679 GEN_INT (offset1)),
4680 gen_rtx_EXPR_LIST (VOIDmode,
4681 gen_rtx_REG (mode2, FP_RETURN + inc),
4682 GEN_INT (offset2))));
4686 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4687 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4688 VALTYPE is null and MODE is the mode of the return value. */
4691 mips_function_value (const_tree valtype, enum machine_mode mode)
4693 if (valtype)
4695 tree fields[2];
4696 int unsigned_p;
4698 mode = TYPE_MODE (valtype);
4699 unsigned_p = TYPE_UNSIGNED (valtype);
4701 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4702 we must promote the mode just as PROMOTE_MODE does. */
4703 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4705 /* Handle structures whose fields are returned in $f0/$f2. */
4706 switch (mips_fpr_return_fields (valtype, fields))
4708 case 1:
4709 return mips_return_fpr_single (mode,
4710 TYPE_MODE (TREE_TYPE (fields[0])));
4712 case 2:
4713 return mips_return_fpr_pair (mode,
4714 TYPE_MODE (TREE_TYPE (fields[0])),
4715 int_byte_position (fields[0]),
4716 TYPE_MODE (TREE_TYPE (fields[1])),
4717 int_byte_position (fields[1]));
4720 /* If a value is passed in the most significant part of a register, see
4721 whether we have to round the mode up to a whole number of words. */
4722 if (mips_return_in_msb (valtype))
4724 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4725 if (size % UNITS_PER_WORD != 0)
4727 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4728 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4732 /* For EABI, the class of return register depends entirely on MODE.
4733 For example, "struct { some_type x; }" and "union { some_type x; }"
4734 are returned in the same way as a bare "some_type" would be.
4735 Other ABIs only use FPRs for scalar, complex or vector types. */
4736 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4737 return gen_rtx_REG (mode, GP_RETURN);
4740 if (!TARGET_MIPS16)
4742 /* Handle long doubles for n32 & n64. */
4743 if (mode == TFmode)
4744 return mips_return_fpr_pair (mode,
4745 DImode, 0,
4746 DImode, GET_MODE_SIZE (mode) / 2);
4748 if (mips_return_mode_in_fpr_p (mode))
4750 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4751 return mips_return_fpr_pair (mode,
4752 GET_MODE_INNER (mode), 0,
4753 GET_MODE_INNER (mode),
4754 GET_MODE_SIZE (mode) / 2);
4755 else
4756 return gen_rtx_REG (mode, FP_RETURN);
4760 return gen_rtx_REG (mode, GP_RETURN);
4763 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4764 all BLKmode objects are returned in memory. Under the n32, n64
4765 and embedded ABIs, small structures are returned in a register.
4766 Objects with varying size must still be returned in memory, of
4767 course. */
4769 static bool
4770 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4772 return (TARGET_OLDABI
4773 ? TYPE_MODE (type) == BLKmode
4774 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
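/* Example (a sketch): under n32 and n64, where UNITS_PER_WORD is 8, a
   16-byte structure is small enough to be returned in registers, while a
   larger structure, or one of variable size (for which int_size_in_bytes
   returns -1), is returned in memory.  Under o32 and o64, every BLKmode
   return value goes in memory.  */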
4777 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
4779 static void
4780 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4781 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4782 int no_rtl)
4784 CUMULATIVE_ARGS local_cum;
4785 int gp_saved, fp_saved;
4787 /* The caller has advanced CUM up to, but not beyond, the last named
4788 argument. Advance a local copy of CUM past the last "real" named
4789 argument, to find out how many registers are left over. */
4790 local_cum = *cum;
4791 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
4793   /* Find out how many registers we need to save.  */
4794 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4795 fp_saved = (EABI_FLOAT_VARARGS_P
4796 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4797 : 0);
4799 if (!no_rtl)
4801 if (gp_saved > 0)
4803 rtx ptr, mem;
4805 ptr = plus_constant (virtual_incoming_args_rtx,
4806 REG_PARM_STACK_SPACE (cfun->decl)
4807 - gp_saved * UNITS_PER_WORD);
4808 mem = gen_frame_mem (BLKmode, ptr);
4809 set_mem_alias_set (mem, get_varargs_alias_set ());
4811 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4812 mem, gp_saved);
4814 if (fp_saved > 0)
4816 /* We can't use move_block_from_reg, because it will use
4817 the wrong mode. */
4818 enum machine_mode mode;
4819 int off, i;
4821 /* Set OFF to the offset from virtual_incoming_args_rtx of
4822 the first float register. The FP save area lies below
4823 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4824 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
4825 off -= fp_saved * UNITS_PER_FPREG;
4827 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4829 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4830 i += MAX_FPRS_PER_FMT)
4832 rtx ptr, mem;
4834 ptr = plus_constant (virtual_incoming_args_rtx, off);
4835 mem = gen_frame_mem (mode, ptr);
4836 set_mem_alias_set (mem, get_varargs_alias_set ());
4837 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4838 off += UNITS_PER_HWFPVALUE;
4842 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4843 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4844 + fp_saved * UNITS_PER_FPREG);
4847 /* Implement TARGET_BUILTIN_VA_LIST. */
4849 static tree
4850 mips_build_builtin_va_list (void)
4852 if (EABI_FLOAT_VARARGS_P)
4854 /* We keep 3 pointers, and two offsets.
4856 Two pointers are to the overflow area, which starts at the CFA.
4857 One of these is constant, for addressing into the GPR save area
4858 below it. The other is advanced up the stack through the
4859 overflow region.
4861 The third pointer is to the bottom of the GPR save area.
4862 Since the FPR save area is just below it, we can address
4863 FPR slots off this pointer.
4865 We also keep two one-byte offsets, which are to be subtracted
4866 from the constant pointers to yield addresses in the GPR and
4867 FPR save areas. These are downcounted as float or non-float
4868 arguments are used, and when they get to zero, the argument
4869 must be obtained from the overflow region. */
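      /* For illustration only: the record built below is roughly
	 equivalent to this C structure (the field names are the real
	 ones; the size of __reserved is just the explicit padding
	 described above):

	     struct {
	       void *__overflow_argptr;
	       void *__gpr_top;
	       void *__fpr_top;
	       unsigned char __gpr_offset;
	       unsigned char __fpr_offset;
	       unsigned char __reserved[sizeof (void *) - 2];
	     };  */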
4870 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4871 tree array, index;
4873 record = lang_hooks.types.make_type (RECORD_TYPE);
4875 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4876 ptr_type_node);
4877 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4878 ptr_type_node);
4879 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4880 ptr_type_node);
4881 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4882 unsigned_char_type_node);
4883 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4884 unsigned_char_type_node);
4885 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4886 warn on every user file. */
4887 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4888 array = build_array_type (unsigned_char_type_node,
4889 build_index_type (index));
4890 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4892 DECL_FIELD_CONTEXT (f_ovfl) = record;
4893 DECL_FIELD_CONTEXT (f_gtop) = record;
4894 DECL_FIELD_CONTEXT (f_ftop) = record;
4895 DECL_FIELD_CONTEXT (f_goff) = record;
4896 DECL_FIELD_CONTEXT (f_foff) = record;
4897 DECL_FIELD_CONTEXT (f_res) = record;
4899 TYPE_FIELDS (record) = f_ovfl;
4900 TREE_CHAIN (f_ovfl) = f_gtop;
4901 TREE_CHAIN (f_gtop) = f_ftop;
4902 TREE_CHAIN (f_ftop) = f_goff;
4903 TREE_CHAIN (f_goff) = f_foff;
4904 TREE_CHAIN (f_foff) = f_res;
4906 layout_type (record);
4907 return record;
4909 else if (TARGET_IRIX && TARGET_IRIX6)
4910 /* On IRIX 6, this type is 'char *'. */
4911 return build_pointer_type (char_type_node);
4912 else
4913 /* Otherwise, we use 'void *'. */
4914 return ptr_type_node;
4917 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
4919 static void
4920 mips_va_start (tree valist, rtx nextarg)
4922 if (EABI_FLOAT_VARARGS_P)
4924 const CUMULATIVE_ARGS *cum;
4925 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4926 tree ovfl, gtop, ftop, goff, foff;
4927 tree t;
4928 int gpr_save_area_size;
4929 int fpr_save_area_size;
4930 int fpr_offset;
4932 cum = &crtl->args.info;
4933 gpr_save_area_size
4934 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4935 fpr_save_area_size
4936 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4938 f_ovfl = TYPE_FIELDS (va_list_type_node);
4939 f_gtop = TREE_CHAIN (f_ovfl);
4940 f_ftop = TREE_CHAIN (f_gtop);
4941 f_goff = TREE_CHAIN (f_ftop);
4942 f_foff = TREE_CHAIN (f_goff);
4944 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4945 NULL_TREE);
4946 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4947 NULL_TREE);
4948 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4949 NULL_TREE);
4950 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4951 NULL_TREE);
4952 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4953 NULL_TREE);
4955 /* Emit code to initialize OVFL, which points to the next varargs
4956 stack argument. CUM->STACK_WORDS gives the number of stack
4957 words used by named arguments. */
4958 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4959 if (cum->stack_words > 0)
4960 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4961 size_int (cum->stack_words * UNITS_PER_WORD));
4962 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4963 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4965 /* Emit code to initialize GTOP, the top of the GPR save area. */
4966 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4967 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4968 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4970 /* Emit code to initialize FTOP, the top of the FPR save area.
4971 This address is gpr_save_area_bytes below GTOP, rounded
4972 down to the next fp-aligned boundary. */
4973 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4974 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4975 fpr_offset &= -UNITS_PER_FPVALUE;
4976 if (fpr_offset)
4977 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4978 size_int (-fpr_offset));
4979 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4980 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4982 /* Emit code to initialize GOFF, the offset from GTOP of the
4983 next GPR argument. */
4984 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4985 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
4986 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4988 /* Likewise emit code to initialize FOFF, the offset from FTOP
4989 of the next FPR argument. */
4990 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4991 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
4992 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4994 else
4996 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4997 std_expand_builtin_va_start (valist, nextarg);
5001 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5003 static tree
5004 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5005 gimple_seq *post_p)
5007 tree addr;
5008 bool indirect_p;
5010 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5011 if (indirect_p)
5012 type = build_pointer_type (type);
5014 if (!EABI_FLOAT_VARARGS_P)
5015 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5016 else
5018 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5019 tree ovfl, top, off, align;
5020 HOST_WIDE_INT size, rsize, osize;
5021 tree t, u;
5023 f_ovfl = TYPE_FIELDS (va_list_type_node);
5024 f_gtop = TREE_CHAIN (f_ovfl);
5025 f_ftop = TREE_CHAIN (f_gtop);
5026 f_goff = TREE_CHAIN (f_ftop);
5027 f_foff = TREE_CHAIN (f_goff);
5029 /* Let:
5031 TOP be the top of the GPR or FPR save area;
5032 OFF be the offset from TOP of the next register;
5033 ADDR_RTX be the address of the argument;
5034 SIZE be the number of bytes in the argument type;
5035 RSIZE be the number of bytes used to store the argument
5036 when it's in the register save area; and
5037 OSIZE be the number of bytes used to store it when it's
5038 in the stack overflow area.
5040 The code we want is:
5042 1: off &= -rsize; // round down
5043 2: if (off != 0)
5044 3: {
5045 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5046 5: off -= rsize;
5047 6: }
5048 7: else
5049 8: {
5050 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5051 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5052 11: ovfl += osize;
5053 14: }
5055 [1] and [9] can sometimes be optimized away. */
5057 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5058 NULL_TREE);
5059 size = int_size_in_bytes (type);
5061 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5062 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5064 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5065 NULL_TREE);
5066 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5067 NULL_TREE);
5069 /* When va_start saves FPR arguments to the stack, each slot
5070 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5071 argument's precision. */
5072 rsize = UNITS_PER_HWFPVALUE;
5074 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5075 (= PARM_BOUNDARY bits). This can be different from RSIZE
5076 in two cases:
5078 (1) On 32-bit targets when TYPE is a structure such as:
5080 struct s { float f; };
5082 Such structures are passed in paired FPRs, so RSIZE
5083 will be 8 bytes. However, the structure only takes
5084 up 4 bytes of memory, so OSIZE will only be 4.
5086 (2) In combinations such as -mgp64 -msingle-float
5087 -fshort-double. Doubles passed in registers will then take
5088 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5089 stack take up UNITS_PER_WORD bytes. */
5090 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5092 else
5094 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5095 NULL_TREE);
5096 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5097 NULL_TREE);
5098 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5099 if (rsize > UNITS_PER_WORD)
5101 /* [1] Emit code for: off &= -rsize. */
5102 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5103 build_int_cst (NULL_TREE, -rsize));
5104 gimplify_assign (off, t, pre_p);
5106 osize = rsize;
5109 /* [2] Emit code to branch if off == 0. */
5110 t = build2 (NE_EXPR, boolean_type_node, off,
5111 build_int_cst (TREE_TYPE (off), 0));
5112 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5114 /* [5] Emit code for: off -= rsize. We do this as a form of
5115 post-decrement not available to C. */
5116 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5117 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5119 /* [4] Emit code for:
5120 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5121 t = fold_convert (sizetype, t);
5122 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5123 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5124 if (BYTES_BIG_ENDIAN && rsize > size)
5126 u = size_int (rsize - size);
5127 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5129 COND_EXPR_THEN (addr) = t;
5131 if (osize > UNITS_PER_WORD)
5133 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5134 u = size_int (osize - 1);
5135 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5136 t = fold_convert (sizetype, t);
5137 u = size_int (-osize);
5138 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5139 t = fold_convert (TREE_TYPE (ovfl), t);
5140 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5142 else
5143 align = NULL;
5145 /* [10, 11] Emit code for:
5146 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5147 ovfl += osize. */
5148 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5149 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5150 if (BYTES_BIG_ENDIAN && osize > size)
5152 u = size_int (osize - size);
5153 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5156 /* String [9] and [10, 11] together. */
5157 if (align)
5158 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5159 COND_EXPR_ELSE (addr) = t;
5161 addr = fold_convert (build_pointer_type (type), addr);
5162 addr = build_va_arg_indirect_ref (addr);
5165 if (indirect_p)
5166 addr = build_va_arg_indirect_ref (addr);
5168 return addr;
5171 /* A chained list of functions for which mips16_build_call_stub has already
5172 generated a stub. NAME is the name of the function and FP_RET_P is true
5173 if the function returns a value in floating-point registers. */
5174 struct mips16_stub {
5175 struct mips16_stub *next;
5176 char *name;
5177 bool fp_ret_p;
5179 static struct mips16_stub *mips16_stubs;
5181 /* Return the two-character string that identifies floating-point
5182 return mode MODE in the name of a MIPS16 function stub. */
5184 static const char *
5185 mips16_call_stub_mode_suffix (enum machine_mode mode)
5187 if (mode == SFmode)
5188 return "sf";
5189 else if (mode == DFmode)
5190 return "df";
5191 else if (mode == SCmode)
5192 return "sc";
5193 else if (mode == DCmode)
5194 return "dc";
5195 else if (mode == V2SFmode)
5196 return "df";
5197 else
5198 gcc_unreachable ();
5201 /* Write instructions to move a 32-bit value between general register
5202 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5203 from GPREG to FPREG and 'f' to move in the opposite direction. */
5205 static void
5206 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5208 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5209 reg_names[gpreg], reg_names[fpreg]);
5212 /* Likewise for 64-bit values. */
5214 static void
5215 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5217 if (TARGET_64BIT)
5218 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5219 reg_names[gpreg], reg_names[fpreg]);
5220 else if (TARGET_FLOAT64)
5222 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5223 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5224 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5225 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5227 else
5229 /* Move the least-significant word. */
5230 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5231 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5232 /* ...then the most significant word. */
5233 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5234 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
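/* Illustrative output (a sketch): with DIRECTION 't', GPREG $4 and FPREG
   $f12, a 64-bit target emits a single "dmtc1 $4,$f12", while a 32-bit
   little-endian -mfp32 target emits the pair "mtc1 $4,$f12" followed by
   "mtc1 $5,$f13".  */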
5238 /* Write out code to move floating-point arguments into or out of
5239 general registers. FP_CODE is the code describing which arguments
5240 are present (see the comment above the definition of CUMULATIVE_ARGS
5241 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5243 static void
5244 mips_output_args_xfer (int fp_code, char direction)
5246 unsigned int gparg, fparg, f;
5247 CUMULATIVE_ARGS cum;
5249 /* This code only works for o32 and o64. */
5250 gcc_assert (TARGET_OLDABI);
5252 mips_init_cumulative_args (&cum, NULL);
5254 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5256 enum machine_mode mode;
5257 struct mips_arg_info info;
5259 if ((f & 3) == 1)
5260 mode = SFmode;
5261 else if ((f & 3) == 2)
5262 mode = DFmode;
5263 else
5264 gcc_unreachable ();
5266 mips_get_arg_info (&info, &cum, mode, NULL, true);
5267 gparg = mips_arg_regno (&info, false);
5268 fparg = mips_arg_regno (&info, true);
5270 if (mode == SFmode)
5271 mips_output_32bit_xfer (direction, gparg, fparg);
5272 else
5273 mips_output_64bit_xfer (direction, gparg, fparg);
5275 mips_function_arg_advance (&cum, mode, NULL, true);
5279 /* Write a MIPS16 stub for the current function. This stub is used
5280 for functions which take arguments in the floating-point registers.
5281 It is normal-mode code that moves the floating-point arguments
5282 into the general registers and then jumps to the MIPS16 code. */
5284 static void
5285 mips16_build_function_stub (void)
5287 const char *fnname, *separator;
5288 char *secname, *stubname;
5289 tree stubdecl;
5290 unsigned int f;
5292 /* Create the name of the stub, and its unique section. */
5293 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5294 fnname = targetm.strip_name_encoding (fnname);
5295 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5296 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5298 /* Build a decl for the stub. */
5299 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5300 build_function_type (void_type_node, NULL_TREE));
5301 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5302 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5304 /* Output a comment. */
5305 fprintf (asm_out_file, "\t# Stub function for %s (",
5306 current_function_name ());
5307 separator = "";
5308 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5310 fprintf (asm_out_file, "%s%s", separator,
5311 (f & 3) == 1 ? "float" : "double");
5312 separator = ", ";
5314 fprintf (asm_out_file, ")\n");
5316 /* Write the preamble leading up to the function declaration. */
5317 fprintf (asm_out_file, "\t.set\tnomips16\n");
5318 switch_to_section (function_section (stubdecl));
5319 ASM_OUTPUT_ALIGN (asm_out_file,
5320 floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5322 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5323 within a .ent, and we cannot emit another .ent. */
5324 if (!FUNCTION_NAME_ALREADY_DECLARED)
5326 fputs ("\t.ent\t", asm_out_file);
5327 assemble_name (asm_out_file, stubname);
5328 fputs ("\n", asm_out_file);
5331 /* Start the definition proper. */
5332 assemble_name (asm_out_file, stubname);
5333 fputs (":\n", asm_out_file);
5335 /* Load the address of the MIPS16 function into $at. Do this first so
5336 that targets with coprocessor interlocks can use an MFC1 to fill the
5337 delay slot. */
5338 fprintf (asm_out_file, "\t.set\tnoat\n");
5339 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5340 assemble_name (asm_out_file, fnname);
5341 fprintf (asm_out_file, "\n");
5343 /* Move the arguments from floating-point registers to general registers. */
5344 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5346 /* Jump to the MIPS16 function. */
5347 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5348 fprintf (asm_out_file, "\t.set\tat\n");
5350 if (!FUNCTION_NAME_ALREADY_DECLARED)
5352 fputs ("\t.end\t", asm_out_file);
5353 assemble_name (asm_out_file, stubname);
5354 fputs ("\n", asm_out_file);
5357 switch_to_section (function_section (current_function_decl));
5360 /* The current function is a MIPS16 function that returns a value in an FPR.
5361 Copy the return value from its soft-float to its hard-float location.
5362 libgcc2 has special non-MIPS16 helper functions for each case. */
5364 static void
5365 mips16_copy_fpr_return_value (void)
5367 rtx fn, insn, arg, call;
5368 tree id, return_type;
5369 enum machine_mode return_mode;
5371 return_type = DECL_RESULT (current_function_decl);
5372 return_mode = DECL_MODE (return_type);
5374 id = get_identifier (ACONCAT (("__mips16_ret_",
5375 mips16_call_stub_mode_suffix (return_mode),
5376 NULL)));
5377 fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5378 arg = gen_rtx_REG (return_mode, GP_RETURN);
5379 call = gen_call_value_internal (arg, fn, const0_rtx);
5380 insn = mips_emit_call_insn (call, false);
5381 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
5384 /* Consider building a stub for a MIPS16 call to function FN.
5385 RETVAL is the location of the return value, or null if this is
5386 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5387 arguments and FP_CODE is the code built by mips_function_arg;
5388 see the comment above CUMULATIVE_ARGS for details.
5390 If a stub was needed, emit the call and return the call insn itself.
5391 Return null otherwise.
5393 A stub is needed for calls to functions that, in normal mode,
5394 receive arguments in FPRs or return values in FPRs. The stub
5395 copies the arguments from their soft-float positions to their
5396 hard-float positions, calls the real function, then copies the
5397 return value from its hard-float position to its soft-float
5398 position.
5400 We emit a JAL to FN even when FN might need a stub. If FN turns out
5401    to be a non-MIPS16 function, the linker automatically redirects
5402 the JAL to the stub, otherwise the JAL continues to call FN directly. */
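/* Illustrative example (a sketch): a MIPS16 call to a non-MIPS16
   "double sqrt (double)" under o32 gets a stub named __call_stub_fp_sqrt
   in section .mips16.call.fp.sqrt.  The stub moves the argument from
   $4/$5 into $f12, saves the return address in $18, JALs sqrt, copies the
   DFmode result from $f0 back into $2/$3, and finally jumps back through
   $18.  */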
5404 static rtx
5405 mips16_build_call_stub (rtx retval, rtx fn, rtx args_size, int fp_code)
5407 const char *fnname;
5408 bool fp_ret_p;
5409 struct mips16_stub *l;
5410 rtx insn;
5412 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5413 we were invoked with the -msoft-float option. */
5414 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5415 return NULL_RTX;
5417 /* Figure out whether the value might come back in a floating-point
5418 register. */
5419 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5421 /* We don't need to do anything if there were no floating-point
5422 arguments and the value will not be returned in a floating-point
5423 register. */
5424 if (fp_code == 0 && !fp_ret_p)
5425 return NULL_RTX;
5427 /* We don't need to do anything if this is a call to a special
5428 MIPS16 support function. */
5429 if (GET_CODE (fn) == SYMBOL_REF
5430 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5431 return NULL_RTX;
5433   /* This code only works for the o32 and o64 ABIs.  The other ABIs
5434 require more sophisticated support. */
5435 gcc_assert (TARGET_OLDABI);
5437 /* If we're calling via a function pointer, use one of the magic
5438 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5439 Each stub expects the function address to arrive in register $2. */
5440 if (GET_CODE (fn) != SYMBOL_REF)
5442 char buf[30];
5443 tree id;
5444 rtx stub_fn, insn;
5446 /* Create a SYMBOL_REF for the libgcc.a function. */
5447 if (fp_ret_p)
5448 sprintf (buf, "__mips16_call_stub_%s_%d",
5449 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5450 fp_code);
5451 else
5452 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5453 id = get_identifier (buf);
5454 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5456 /* Load the target function into $2. */
5457 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5459 /* Emit the call. */
5460 if (retval == NULL_RTX)
5461 insn = gen_call_internal (stub_fn, args_size);
5462 else
5463 insn = gen_call_value_internal (retval, stub_fn, args_size);
5464 insn = mips_emit_call_insn (insn, false);
5466 /* Tell GCC that this call does indeed use the value of $2. */
5467 CALL_INSN_FUNCTION_USAGE (insn) =
5468 gen_rtx_EXPR_LIST (VOIDmode,
5469 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5470 CALL_INSN_FUNCTION_USAGE (insn));
5472 /* If we are handling a floating-point return value, we need to
5473 save $18 in the function prologue. Putting a note on the
5474 call will mean that df_regs_ever_live_p ($18) will be true if the
5475 call is not eliminated, and we can check that in the prologue
5476 code. */
5477 if (fp_ret_p)
5478 CALL_INSN_FUNCTION_USAGE (insn) =
5479 gen_rtx_EXPR_LIST (VOIDmode,
5480 gen_rtx_USE (VOIDmode,
5481 gen_rtx_REG (word_mode, 18)),
5482 CALL_INSN_FUNCTION_USAGE (insn));
5484 return insn;
5487 /* We know the function we are going to call. If we have already
5488 built a stub, we don't need to do anything further. */
5489 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5490 for (l = mips16_stubs; l != NULL; l = l->next)
5491 if (strcmp (l->name, fnname) == 0)
5492 break;
5494 if (l == NULL)
5496 const char *separator;
5497 char *secname, *stubname;
5498 tree stubid, stubdecl;
5499 unsigned int f;
5501 /* If the function does not return in FPRs, the special stub
5502 section is named
5503 .mips16.call.FNNAME
5505 If the function does return in FPRs, the stub section is named
5506 .mips16.call.fp.FNNAME
5508 Build a decl for the stub. */
5509 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5510 fnname, NULL));
5511 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5512 fnname, NULL));
5513 stubid = get_identifier (stubname);
5514 stubdecl = build_decl (FUNCTION_DECL, stubid,
5515 build_function_type (void_type_node, NULL_TREE));
5516 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5517 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5518 void_type_node);
5520 /* Output a comment. */
5521 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5522 (fp_ret_p
5523 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5524 : ""),
5525 fnname);
5526 separator = "";
5527 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5529 fprintf (asm_out_file, "%s%s", separator,
5530 (f & 3) == 1 ? "float" : "double");
5531 separator = ", ";
5533 fprintf (asm_out_file, ")\n");
5535 /* Write the preamble leading up to the function declaration. */
5536 fprintf (asm_out_file, "\t.set\tnomips16\n");
5537 assemble_start_function (stubdecl, stubname);
5539 if (!FUNCTION_NAME_ALREADY_DECLARED)
5541 fputs ("\t.ent\t", asm_out_file);
5542 assemble_name (asm_out_file, stubname);
5543 fputs ("\n", asm_out_file);
5545 assemble_name (asm_out_file, stubname);
5546 fputs (":\n", asm_out_file);
5549 if (!fp_ret_p)
5551 /* Load the address of the MIPS16 function into $at. Do this
5552 first so that targets with coprocessor interlocks can use
5553 an MFC1 to fill the delay slot. */
5554 fprintf (asm_out_file, "\t.set\tnoat\n");
5555 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5556 fnname);
5559 /* Move the arguments from general registers to floating-point
5560 registers. */
5561 mips_output_args_xfer (fp_code, 't');
5563 if (!fp_ret_p)
5565 /* Jump to the previously-loaded address. */
5566 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5567 fprintf (asm_out_file, "\t.set\tat\n");
5569 else
5571 /* Save the return address in $18 and call the non-MIPS16 function.
5572 The stub's caller knows that $18 might be clobbered, even though
5573 $18 is usually a call-saved register. */
5574 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5575 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5576 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5578 /* Move the result from floating-point registers to
5579 general registers. */
5580 switch (GET_MODE (retval))
5582 case SCmode:
5583 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5584 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5585 /* Fall through. */
5586 case SFmode:
5587 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5588 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5590 /* On 64-bit targets, complex floats are returned in
5591 a single GPR, such that "sd" on a suitably-aligned
5592 target would store the value correctly. */
5593 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5594 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5595 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5596 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5597 reg_names[GP_RETURN],
5598 reg_names[GP_RETURN],
5599 reg_names[GP_RETURN + 1]);
5601 break;
5603 case DCmode:
5604 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5605 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5606 /* Fall through. */
5607 case DFmode:
5608 case V2SFmode:
5609 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5610 break;
5612 default:
5613 gcc_unreachable ();
5615 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
5618 #ifdef ASM_DECLARE_FUNCTION_SIZE
5619 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5620 #endif
5622 if (!FUNCTION_NAME_ALREADY_DECLARED)
5624 fputs ("\t.end\t", asm_out_file);
5625 assemble_name (asm_out_file, stubname);
5626 fputs ("\n", asm_out_file);
5629 /* Record this stub. */
5630 l = XNEW (struct mips16_stub);
5631 l->name = xstrdup (fnname);
5632 l->fp_ret_p = fp_ret_p;
5633 l->next = mips16_stubs;
5634 mips16_stubs = l;
5637 /* If we expect a floating-point return value, but we've built a
5638 stub which does not expect one, then we're in trouble. We can't
5639 use the existing stub, because it won't handle the floating-point
5640 value. We can't build a new stub, because the linker won't know
5641 which stub to use for the various calls in this object file.
5642 Fortunately, this case is illegal, since it means that a function
5643 was declared in two different ways in a single compilation. */
5644 if (fp_ret_p && !l->fp_ret_p)
5645 error ("cannot handle inconsistent calls to %qs", fnname);
5647 if (retval == NULL_RTX)
5648 insn = gen_call_internal_direct (fn, args_size);
5649 else
5650 insn = gen_call_value_internal_direct (retval, fn, args_size);
5651 insn = mips_emit_call_insn (insn, false);
5653 /* If we are calling a stub which handles a floating-point return
5654 value, we need to arrange to save $18 in the prologue. We do this
5655 by marking the function call as using the register. The prologue
5656 will later see that it is used, and emit code to save it. */
5657 if (fp_ret_p)
5658 CALL_INSN_FUNCTION_USAGE (insn) =
5659 gen_rtx_EXPR_LIST (VOIDmode,
5660 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5661 CALL_INSN_FUNCTION_USAGE (insn));
5663 return insn;
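/* A rough sketch of the stub emitted above, for a hypothetical function
   "frob" that takes a double argument and returns a double (register and
   transfer details depend on the ABI and on mips_output_args_xfer):

	.section .mips16.call.fp.frob
   __call_stub_fp_frob:
	# copy the incoming GPR arguments into FPRs
	move	$18,$31		# save the MIPS16 caller's return address
	jal	frob		# call the non-MIPS16 implementation
	# copy the FPR return value back into GPRs
	jr	$18		# return to the MIPS16 caller

   The non-FP-return variant instead loads "frob" into $1 before the
   argument transfer and ends with "jr $1", as in the !fp_ret_p branches
   above.  */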
5666 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5668 static bool
5669 mips_ok_for_lazy_binding_p (rtx x)
5671 return (TARGET_USE_GOT
5672 && GET_CODE (x) == SYMBOL_REF
5673 && !mips_symbol_binds_local_p (x));
5676 /* Load function address ADDR into register DEST. SIBCALL_P is true
5677 if the address is needed for a sibling call. Return true if we
5678 used an explicit lazy-binding sequence. */
5680 static bool
5681 mips_load_call_address (rtx dest, rtx addr, bool sibcall_p)
5683 /* If we're generating PIC, and this call is to a global function,
5684 try to allow its address to be resolved lazily. This isn't
5685 possible for sibcalls when $gp is call-saved because the value
5686 of $gp on entry to the stub would be our caller's gp, not ours. */
5687 if (TARGET_EXPLICIT_RELOCS
5688 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5689 && mips_ok_for_lazy_binding_p (addr))
5691 rtx high, lo_sum_symbol;
5693 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5694 addr, SYMBOL_GOTOFF_CALL);
5695 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5696 if (Pmode == SImode)
5697 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5698 else
5699 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5700 return true;
5702 else
5704 mips_emit_move (dest, addr);
5705 return false;
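/* For a sense of the lazy-binding sequence above: with a standard 16-bit
   GOT, SYMBOL_GOTOFF_CALL does not need a separate high part, so the
   load_call pattern typically ends up as a single GOT load such as

	lw	$25,%call16(foo)($28)

   whereas -mxgot splits it into %call_hi/%call_lo halves (see
   mips_init_relocs below).  "foo" and the register numbers here are
   only placeholders.  */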
5709 /* Expand a "call", "sibcall", "call_value" or "sibcall_value" instruction.
5710 RESULT is where the result will go (null for "call"s and "sibcall"s),
5711 ADDR is the address of the function, ARGS_SIZE is the size of the
5712 arguments and AUX is the value passed to us by mips_function_arg.
5713 SIBCALL_P is true if we are expanding a sibling call, false if we're
5714 expanding a normal call.
5716 Return the call itself. */
5718 rtx
5719 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, bool sibcall_p)
5721 rtx orig_addr, pattern, insn;
5722 bool lazy_p;
5724 orig_addr = addr;
5725 lazy_p = false;
5726 if (!call_insn_operand (addr, VOIDmode))
5728 addr = gen_reg_rtx (Pmode);
5729 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5732 insn = mips16_build_call_stub (result, addr, args_size,
5733 aux == 0 ? 0 : (int) GET_MODE (aux));
5734 if (insn)
5736 gcc_assert (!sibcall_p && !lazy_p);
5737 return insn;
5740 if (result == 0)
5741 pattern = (sibcall_p
5742 ? gen_sibcall_internal (addr, args_size)
5743 : gen_call_internal (addr, args_size));
5744 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5746 /* Handle return values created by mips_return_fpr_pair. */
5747 rtx reg1, reg2;
5749 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5750 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5751 pattern =
5752 (sibcall_p
5753 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5754 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5756 else
5758 /* Handle return values created by mips_return_fpr_single. */
5759 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
5760 result = XEXP (XVECEXP (result, 0, 0), 0);
5761 pattern = (sibcall_p
5762 ? gen_sibcall_value_internal (result, addr, args_size)
5763 : gen_call_value_internal (result, addr, args_size));
5766 return mips_emit_call_insn (pattern, lazy_p);
5769 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5771 static bool
5772 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5774 if (!TARGET_SIBCALLS)
5775 return false;
5777 /* We can't do a sibcall if the called function is a MIPS16 function
5778 because there is no direct "jx" instruction equivalent to "jalx" to
5779 switch the ISA mode. We only care about cases where the sibling
5780 and normal calls would both be direct. */
5781 if (mips_use_mips16_mode_p (decl)
5782 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5783 return false;
5785 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
5786 functions could be MIPS16 ones unless an attribute explicitly tells
5787 us otherwise. */
5788 if (TARGET_INTERLINK_MIPS16
5789 && decl
5790 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
5791 && !mips_nomips16_decl_p (decl)
5792 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5793 return false;
5795 /* Otherwise OK. */
5796 return true;
5799 /* Emit code to move general operand SRC into condition-code
5800 register DEST given that SCRATCH is a scratch TFmode FPR.
5801 The sequence is:
5803 FP1 = SRC
5804 FP2 = 0.0f
5805 DEST = FP2 < FP1
5807 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
5809 void
5810 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
5812 rtx fp1, fp2;
5814 /* Change the source to SFmode. */
5815 if (MEM_P (src))
5816 src = adjust_address (src, SFmode, 0);
5817 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5818 src = gen_rtx_REG (SFmode, true_regnum (src));
5820 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5821 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5823 mips_emit_move (copy_rtx (fp1), src);
5824 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5825 emit_insn (gen_slt_sf (dest, fp2, fp1));
5828 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5829 Assume that the areas do not overlap. */
5831 static void
5832 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5834 HOST_WIDE_INT offset, delta;
5835 unsigned HOST_WIDE_INT bits;
5836 int i;
5837 enum machine_mode mode;
5838 rtx *regs;
5840 /* Work out how many bits to move at a time. If both operands have
5841 half-word alignment, it is usually better to move in half words.
5842 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5843 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5844 Otherwise move word-sized chunks. */
5845 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5846 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5847 bits = BITS_PER_WORD / 2;
5848 else
5849 bits = BITS_PER_WORD;
5851 mode = mode_for_size (bits, MODE_INT, 0);
5852 delta = bits / BITS_PER_UNIT;
5854 /* Allocate a buffer for the temporary registers. */
5855 regs = XALLOCAVEC (rtx, length / delta);
5857 /* Load as many BITS-sized chunks as possible. Use a normal load if
5858 the source has enough alignment, otherwise use left/right pairs. */
5859 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5861 regs[i] = gen_reg_rtx (mode);
5862 if (MEM_ALIGN (src) >= bits)
5863 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5864 else
5866 rtx part = adjust_address (src, BLKmode, offset);
5867 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
5868 gcc_unreachable ();
5872 /* Copy the chunks to the destination. */
5873 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5874 if (MEM_ALIGN (dest) >= bits)
5875 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5876 else
5878 rtx part = adjust_address (dest, BLKmode, offset);
5879 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
5880 gcc_unreachable ();
5883 /* Mop up any left-over bytes. */
5884 if (offset < length)
5886 src = adjust_address (src, BLKmode, offset);
5887 dest = adjust_address (dest, BLKmode, offset);
5888 move_by_pieces (dest, src, length - offset,
5889 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
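/* A small worked example of the straight-line copy: for LENGTH == 16 with
   word-aligned operands on a 32-bit target, the loops above allocate four
   SImode temporaries and emit four loads followed by four stores.  If
   either operand only has byte alignment, each access is instead expanded
   through the unaligned lwl/lwr or swl/swr helpers defined later in this
   file.  */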
5893 /* Helper function for doing a loop-based block operation on memory
5894 reference MEM. Each iteration of the loop will operate on LENGTH
5895 bytes of MEM.
5897 Create a new base register for use within the loop and point it to
5898 the start of MEM. Create a new memory reference that uses this
5899 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5901 static void
5902 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5903 rtx *loop_reg, rtx *loop_mem)
5905 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5907 /* Although the new mem does not refer to a known location,
5908 it does keep up to LENGTH bytes of alignment. */
5909 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5910 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5913 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
5914 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
5915 the memory regions do not overlap. */
5917 static void
5918 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
5919 HOST_WIDE_INT bytes_per_iter)
5921 rtx label, src_reg, dest_reg, final_src;
5922 HOST_WIDE_INT leftover;
5924 leftover = length % bytes_per_iter;
5925 length -= leftover;
5927 /* Create registers and memory references for use within the loop. */
5928 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
5929 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
5931 /* Calculate the value that SRC_REG should have after the last iteration
5932 of the loop. */
5933 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5934 0, 0, OPTAB_WIDEN);
5936 /* Emit the start of the loop. */
5937 label = gen_label_rtx ();
5938 emit_label (label);
5940 /* Emit the loop body. */
5941 mips_block_move_straight (dest, src, bytes_per_iter);
5943 /* Move on to the next block. */
5944 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
5945 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
5947 /* Emit the loop condition. */
5948 if (Pmode == DImode)
5949 emit_insn (gen_cmpdi (src_reg, final_src));
5950 else
5951 emit_insn (gen_cmpsi (src_reg, final_src));
5952 emit_jump_insn (gen_bne (label));
5954 /* Mop up any left-over bytes. */
5955 if (leftover)
5956 mips_block_move_straight (dest, src, leftover);
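/* Schematically, the code emitted above looks like this for LENGTH == 100
   and BYTES_PER_ITER == 32 (a made-up iteration size):

	final_src = src + 96;
     loop:
	copy 32 bytes straight from src to dest;
	src += 32; dest += 32;
	if (src != final_src) goto loop;
	copy the remaining 4 bytes straight;

   that is, three full iterations followed by a straight-line copy of the
   leftover.  */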
5959 /* Expand a movmemsi instruction, which copies LENGTH bytes from
5960 memory reference SRC to memory reference DEST. */
5962 bool
5963 mips_expand_block_move (rtx dest, rtx src, rtx length)
5965 if (GET_CODE (length) == CONST_INT)
5967 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
5969 mips_block_move_straight (dest, src, INTVAL (length));
5970 return true;
5972 else if (optimize)
5974 mips_block_move_loop (dest, src, INTVAL (length),
5975 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
5976 return true;
5979 return false;
5982 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5984 void
5985 mips_expand_synci_loop (rtx begin, rtx end)
5987 rtx inc, label, cmp, cmp_result;
5989 /* Load INC with the cache line size (rdhwr INC,$1). */
5990 inc = gen_reg_rtx (SImode);
5991 emit_insn (gen_rdhwr (inc, const1_rtx));
5993 /* Loop back to here. */
5994 label = gen_label_rtx ();
5995 emit_label (label);
5997 emit_insn (gen_synci (begin));
5999 cmp = mips_force_binary (Pmode, GTU, begin, end);
6001 mips_emit_binary (PLUS, begin, begin, inc);
6003 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6004 emit_jump_insn (gen_condjump (cmp_result, label));
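/* The loop above corresponds roughly to the following assembly (register
   names are illustrative):

	rdhwr	$inc,$1			# read the cache line size
     1:	synci	0($begin)
	sltu	$cmp,$end,$begin	# was this address already past END?
	addu	$begin,$begin,$inc
	beq	$cmp,$0,1b		# if not, flush the next line  */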
6007 /* Expand a QI or HI mode atomic memory operation.
6009 GENERATOR contains a pointer to the gen_* function that generates
6010 the SI mode underlying atomic operation using masks that we
6011 calculate.
6013 RESULT is the return register for the operation. Its value is NULL
6014 if unused.
6016 MEM is the location of the atomic access.
6018 OLDVAL is the first operand for the operation.
6020 NEWVAL is the optional second operand for the operation. Its value
6021 is NULL if unused. */
6023 void
6024 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6025 rtx result, rtx mem, rtx oldval, rtx newval)
6027 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6028 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6029 rtx res = NULL;
6030 enum machine_mode mode;
6032 mode = GET_MODE (mem);
6034 /* Compute the address of the containing SImode value. */
6035 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6036 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6037 force_reg (Pmode, GEN_INT (-4)));
6039 /* Create a memory reference for it. */
6040 memsi = gen_rtx_MEM (SImode, memsi_addr);
6041 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6042 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6044 /* Work out the byte offset of the QImode or HImode value,
6045 counting from the least significant byte. */
6046 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6047 if (TARGET_BIG_ENDIAN)
6048 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6050 /* Multiply by eight to convert the shift value from bytes to bits. */
6051 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6053 /* Make the final shift an SImode value, so that it can be used in
6054 SImode operations. */
6055 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6057 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6058 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6059 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6060 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6062 /* Compute the equivalent exclusive mask. */
6063 inverted_mask = gen_reg_rtx (SImode);
6064 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6065 gen_rtx_NOT (SImode, mask)));
6067 /* Shift the old value into place. */
6068 if (oldval != const0_rtx)
6070 oldval = convert_modes (SImode, mode, oldval, true);
6071 oldval = force_reg (SImode, oldval);
6072 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6075 /* Do the same for the new value. */
6076 if (newval && newval != const0_rtx)
6078 newval = convert_modes (SImode, mode, newval, true);
6079 newval = force_reg (SImode, newval);
6080 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6083 /* Do the SImode atomic access. */
6084 if (result)
6085 res = gen_reg_rtx (SImode);
6086 if (newval)
6087 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6088 else if (result)
6089 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6090 else
6091 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6093 emit_insn (si_op);
6095 if (result)
6097 /* Shift and convert the result. */
6098 mips_emit_binary (AND, res, res, mask);
6099 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6100 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
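/* Worked example for the masking above: a QImode access one byte into its
   word on a little-endian target gives shift = 1 * 8 = 8, so
   MASK = 0xff << 8 = 0x0000ff00 and INVERTED_MASK = 0xffff00ff.  OLDVAL and
   NEWVAL are shifted left by 8 before the SImode operation runs, and, if a
   result is wanted, RES is ANDed with MASK and shifted right by 8 before
   being truncated back to QImode.  On a big-endian target the same byte
   would instead use shift = (1 ^ 3) * 8 = 16.  */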
6104 /* Return true if it is possible to use left/right accesses for a
6105 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6106 returning true, update *OP, *LEFT and *RIGHT as follows:
6108 *OP is a BLKmode reference to the whole field.
6110 *LEFT is a QImode reference to the first byte if big endian or
6111 the last byte if little endian. This address can be used in the
6112 left-side instructions (LWL, SWL, LDL, SDL).
6114 *RIGHT is a QImode reference to the opposite end of the field and
6115 can be used in the partnering right-side instructions (LWR, SWR, LDR, SDR). */
6117 static bool
6118 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6119 rtx *left, rtx *right)
6121 rtx first, last;
6123 /* Check that the operand really is a MEM; not all of the extv and
6124 extzv predicates enforce this. */
6125 if (!MEM_P (*op))
6126 return false;
6128 /* Check that the size is valid. */
6129 if (width != 32 && (!TARGET_64BIT || width != 64))
6130 return false;
6132 /* We can only access byte-aligned values. Since we are always passed
6133 a reference to the first byte of the field, it is not necessary to
6134 do anything with BITPOS after this check. */
6135 if (bitpos % BITS_PER_UNIT != 0)
6136 return false;
6138 /* Reject aligned bitfields: we want to use a normal load or store
6139 instead of a left/right pair. */
6140 if (MEM_ALIGN (*op) >= width)
6141 return false;
6143 /* Adjust *OP to refer to the whole field. This also has the effect
6144 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6145 *op = adjust_address (*op, BLKmode, 0);
6146 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6148 /* Get references to both ends of the field. We deliberately don't
6149 use the original QImode *OP for FIRST since the new BLKmode one
6150 might have a simpler address. */
6151 first = adjust_address (*op, QImode, 0);
6152 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6154 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6155 correspond to the MSB and RIGHT to the LSB. */
6156 if (TARGET_BIG_ENDIAN)
6157 *left = first, *right = last;
6158 else
6159 *left = last, *right = first;
6161 return true;
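/* For example, a 32-bit field starting one byte into a word-aligned buffer
   occupies bytes 1..4 of that buffer.  On a big-endian target *LEFT then
   refers to byte 1 (the most significant byte, fed to LWL/SWL/LDL/SDL) and
   *RIGHT to byte 4 (fed to LWR/SWR/LDR/SDR); on a little-endian target the
   two are swapped.  */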
6164 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6165 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6166 the operation is the equivalent of:
6168 (set DEST (*_extract SRC WIDTH BITPOS))
6170 Return true on success. */
6172 bool
6173 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6174 HOST_WIDE_INT bitpos)
6176 rtx left, right, temp;
6178 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
6179 be a paradoxical word_mode subreg. This is the only case in which
6180 we allow the destination to be larger than the source. */
6181 if (GET_CODE (dest) == SUBREG
6182 && GET_MODE (dest) == DImode
6183 && GET_MODE (SUBREG_REG (dest)) == SImode)
6184 dest = SUBREG_REG (dest);
6186 /* After the above adjustment, the destination must be the same
6187 width as the source. */
6188 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6189 return false;
6191 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6192 return false;
6194 temp = gen_reg_rtx (GET_MODE (dest));
6195 if (GET_MODE (dest) == DImode)
6197 emit_insn (gen_mov_ldl (temp, src, left));
6198 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6200 else
6202 emit_insn (gen_mov_lwl (temp, src, left));
6203 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6205 return true;
6208 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6209 BITPOS and SRC are the operands passed to the expander; the operation
6210 is the equivalent of:
6212 (set (zero_extract DEST WIDTH BITPOS) SRC)
6214 Return true on success. */
6216 bool
6217 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6218 HOST_WIDE_INT bitpos)
6220 rtx left, right;
6221 enum machine_mode mode;
6223 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6224 return false;
6226 mode = mode_for_size (width, MODE_INT, 0);
6227 src = gen_lowpart (mode, src);
6228 if (mode == DImode)
6230 emit_insn (gen_mov_sdl (dest, src, left));
6231 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6233 else
6235 emit_insn (gen_mov_swl (dest, src, left));
6236 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6238 return true;
6241 /* Return true if X is a MEM with the same size as MODE. */
6243 bool
6244 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6246 rtx size;
6248 if (!MEM_P (x))
6249 return false;
6251 size = MEM_SIZE (x);
6252 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6255 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6256 source of an "ext" instruction or the destination of an "ins"
6257 instruction. OP must be a register operand and the following
6258 conditions must hold:
6260 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6261 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6262 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6264 Also reject lengths equal to a word as they are better handled
6265 by the move patterns. */
6267 bool
6268 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6270 if (!ISA_HAS_EXT_INS
6271 || !register_operand (op, VOIDmode)
6272 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6273 return false;
6275 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6276 return false;
6278 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6279 return false;
6281 return true;
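/* For instance, with an SImode register operand, WIDTH == 8 and
   BITPOS == 16 satisfy all of the conditions above and can become a single
   "ext" or "ins", whereas WIDTH == 32 falls outside the 1 to
   GET_MODE_BITSIZE - 1 range and is rejected, since a full-word extract is
   better handled as an ordinary move.  */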
6284 /* Return true if -msplit-addresses is selected and should be honored.
6286 -msplit-addresses is a half-way house between explicit relocations
6287 and the traditional assembler macros. It can split absolute 32-bit
6288 symbolic constants into a high/lo_sum pair but uses macros for other
6289 sorts of access.
6291 Like explicit relocation support for REL targets, it relies
6292 on GNU extensions in the assembler and the linker.
6294 Although this code should work for -O0, it has traditionally
6295 been treated as an optimization. */
6297 static bool
6298 mips_split_addresses_p (void)
6300 return (TARGET_SPLIT_ADDRESSES
6301 && optimize
6302 && !TARGET_MIPS16
6303 && !flag_pic
6304 && !ABI_HAS_64BIT_SYMBOLS);
6307 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6309 static void
6310 mips_init_relocs (void)
6312 memset (mips_split_p, '\0', sizeof (mips_split_p));
6313 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6314 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6316 if (ABI_HAS_64BIT_SYMBOLS)
6318 if (TARGET_EXPLICIT_RELOCS)
6320 mips_split_p[SYMBOL_64_HIGH] = true;
6321 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6322 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6324 mips_split_p[SYMBOL_64_MID] = true;
6325 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6326 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6328 mips_split_p[SYMBOL_64_LOW] = true;
6329 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6330 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6332 mips_split_p[SYMBOL_ABSOLUTE] = true;
6333 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6336 else
6338 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6340 mips_split_p[SYMBOL_ABSOLUTE] = true;
6341 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6342 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6344 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6348 if (TARGET_MIPS16)
6350 /* The high part is provided by a pseudo copy of $gp. */
6351 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6352 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6355 if (TARGET_EXPLICIT_RELOCS)
6357 /* Small data constants are kept whole until after reload,
6358 then lowered by mips_rewrite_small_data. */
6359 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6361 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6362 if (TARGET_NEWABI)
6364 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6365 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6367 else
6369 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6370 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6373 if (TARGET_XGOT)
6375 /* The HIGH and LO_SUM are matched by special .md patterns. */
6376 mips_split_p[SYMBOL_GOT_DISP] = true;
6378 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6379 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6380 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6382 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6383 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6384 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6386 else
6388 if (TARGET_NEWABI)
6389 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6390 else
6391 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6392 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6396 if (TARGET_NEWABI)
6398 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6399 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6400 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6403 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6404 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6406 mips_split_p[SYMBOL_DTPREL] = true;
6407 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6408 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6410 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6412 mips_split_p[SYMBOL_TPREL] = true;
6413 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6414 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6416 mips_lo_relocs[SYMBOL_HALF] = "%half(";
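/* As a concrete example of these tables: with explicit relocs on a 32-bit
   SVR4 PIC target (TARGET_NEWABI false, no -mxgot), a page-relative symbol
   "x" uses the "%got(" and "%lo(" strings set above, so taking its address
   expands to something like

	lw	$2,%got(x)($28)
	addiu	$2,$2,%lo(x)

   while n32/n64 would use %got_page and %got_ofst instead.  The symbol and
   register numbers here are illustrative.  */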
6419 /* If OP is an UNSPEC address, return the address to which it refers,
6420 otherwise return OP itself. */
6422 static rtx
6423 mips_strip_unspec_address (rtx op)
6425 rtx base, offset;
6427 split_const (op, &base, &offset);
6428 if (UNSPEC_ADDRESS_P (base))
6429 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6430 return op;
6433 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6434 in context CONTEXT. RELOCS is the array of relocations to use. */
6436 static void
6437 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6438 const char **relocs)
6440 enum mips_symbol_type symbol_type;
6441 const char *p;
6443 symbol_type = mips_classify_symbolic_expression (op, context);
6444 gcc_assert (relocs[symbol_type]);
6446 fputs (relocs[symbol_type], file);
6447 output_addr_const (file, mips_strip_unspec_address (op));
6448 for (p = relocs[symbol_type]; *p != 0; p++)
6449 if (*p == '(')
6450 fputc (')', file);
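/* For example, if RELOCS[symbol_type] is "%hi(" and OP is the address
   "foo+4", this prints "%hi(foo+4)".  The closing loop emits one ')' per
   '(' in the prefix, which matters for nested strings such as
   "%hi(%neg(%gp_rel(" above, where three closing parentheses are needed.
   "foo" is just a placeholder symbol.  */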
6453 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6454 The punctuation characters are:
6456 '(' Start a nested ".set noreorder" block.
6457 ')' End a nested ".set noreorder" block.
6458 '[' Start a nested ".set noat" block.
6459 ']' End a nested ".set noat" block.
6460 '<' Start a nested ".set nomacro" block.
6461 '>' End a nested ".set nomacro" block.
6462 '*' Behave like %(%< if generating a delayed-branch sequence.
6463 '#' Print a nop if in a ".set noreorder" block.
6464 '/' Like '#', but do nothing within a delayed-branch sequence.
6465 '?' Print "l" if mips_branch_likely is true
6466 '.' Print the name of the register with a hard-wired zero (zero or $0).
6467 '@' Print the name of the assembler temporary register (at or $1).
6468 '^' Print the name of the pic call-through register (t9 or $25).
6469 '+' Print the name of the gp register (usually gp or $28).
6470 '$' Print the name of the stack pointer register (sp or $29).
6471 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6472 '-' Print ".set pop" under the same conditions as '|'.
6474 See also mips_init_print_operand_punct. */
6476 static void
6477 mips_print_operand_punctuation (FILE *file, int ch)
6479 switch (ch)
6481 case '(':
6482 if (set_noreorder++ == 0)
6483 fputs (".set\tnoreorder\n\t", file);
6484 break;
6486 case ')':
6487 gcc_assert (set_noreorder > 0);
6488 if (--set_noreorder == 0)
6489 fputs ("\n\t.set\treorder", file);
6490 break;
6492 case '[':
6493 if (set_noat++ == 0)
6494 fputs (".set\tnoat\n\t", file);
6495 break;
6497 case ']':
6498 gcc_assert (set_noat > 0);
6499 if (--set_noat == 0)
6500 fputs ("\n\t.set\tat", file);
6501 break;
6503 case '<':
6504 if (set_nomacro++ == 0)
6505 fputs (".set\tnomacro\n\t", file);
6506 break;
6508 case '>':
6509 gcc_assert (set_nomacro > 0);
6510 if (--set_nomacro == 0)
6511 fputs ("\n\t.set\tmacro", file);
6512 break;
6514 case '*':
6515 if (final_sequence != 0)
6517 mips_print_operand_punctuation (file, '(');
6518 mips_print_operand_punctuation (file, '<');
6520 break;
6522 case '#':
6523 if (set_noreorder != 0)
6524 fputs ("\n\tnop", file);
6525 break;
6527 case '/':
6528 /* Print an extra newline so that the delayed insn is separated
6529 from the following ones. This looks neater and is consistent
6530 with non-nop delayed sequences. */
6531 if (set_noreorder != 0 && final_sequence == 0)
6532 fputs ("\n\tnop\n", file);
6533 break;
6535 case '?':
6536 if (mips_branch_likely)
6537 putc ('l', file);
6538 break;
6540 case '.':
6541 fputs (reg_names[GP_REG_FIRST + 0], file);
6542 break;
6544 case '@':
6545 fputs (reg_names[GP_REG_FIRST + 1], file);
6546 break;
6548 case '^':
6549 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6550 break;
6552 case '+':
6553 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6554 break;
6556 case '$':
6557 fputs (reg_names[STACK_POINTER_REGNUM], file);
6558 break;
6560 case '|':
6561 if (!ISA_HAS_LL_SC)
6562 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6563 break;
6565 case '-':
6566 if (!ISA_HAS_LL_SC)
6567 fputs ("\n\t.set\tpop", file);
6568 break;
6570 default:
6571 gcc_unreachable ();
6572 break;
6576 /* Initialize mips_print_operand_punct. */
6578 static void
6579 mips_init_print_operand_punct (void)
6581 const char *p;
6583 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6584 mips_print_operand_punct[(unsigned char) *p] = true;
6587 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6588 associated with condition CODE. Print the condition part of the
6589 opcode to FILE. */
6591 static void
6592 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6594 switch (code)
6596 case EQ:
6597 case NE:
6598 case GT:
6599 case GE:
6600 case LT:
6601 case LE:
6602 case GTU:
6603 case GEU:
6604 case LTU:
6605 case LEU:
6606 /* Conveniently, the MIPS names for these conditions are the same
6607 as their RTL equivalents. */
6608 fputs (GET_RTX_NAME (code), file);
6609 break;
6611 default:
6612 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6613 break;
6617 /* Likewise floating-point branches. */
6619 static void
6620 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6622 switch (code)
6624 case EQ:
6625 fputs ("c1f", file);
6626 break;
6628 case NE:
6629 fputs ("c1t", file);
6630 break;
6632 default:
6633 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6634 break;
6638 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6640 'X' Print CONST_INT OP in hexadecimal format.
6641 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6642 'd' Print CONST_INT OP in decimal.
6643 'h' Print the high-part relocation associated with OP, after stripping
6644 any outermost HIGH.
6645 'R' Print the low-part relocation associated with OP.
6646 'C' Print the integer branch condition for comparison OP.
6647 'N' Print the inverse of the integer branch condition for comparison OP.
6648 'F' Print the FPU branch condition for comparison OP.
6649 'W' Print the inverse of the FPU branch condition for comparison OP.
6650 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6651 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6652 't' Like 'T', but with the EQ/NE cases reversed
6653 'Y' Print mips_fp_conditions[INTVAL (OP)]
6654 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6655 'q' Print a DSP accumulator register.
6656 'D' Print the second part of a double-word register or memory operand.
6657 'L' Print the low-order register in a double-word register operand.
6658 'M' Print high-order register in a double-word register operand.
6659 'z' Print $0 if OP is zero, otherwise print OP normally. */
6661 void
6662 mips_print_operand (FILE *file, rtx op, int letter)
6664 enum rtx_code code;
6666 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6668 mips_print_operand_punctuation (file, letter);
6669 return;
6672 gcc_assert (op);
6673 code = GET_CODE (op);
6675 switch (letter)
6677 case 'X':
6678 if (GET_CODE (op) == CONST_INT)
6679 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6680 else
6681 output_operand_lossage ("invalid use of '%%%c'", letter);
6682 break;
6684 case 'x':
6685 if (GET_CODE (op) == CONST_INT)
6686 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6687 else
6688 output_operand_lossage ("invalid use of '%%%c'", letter);
6689 break;
6691 case 'd':
6692 if (GET_CODE (op) == CONST_INT)
6693 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6694 else
6695 output_operand_lossage ("invalid use of '%%%c'", letter);
6696 break;
6698 case 'h':
6699 if (code == HIGH)
6700 op = XEXP (op, 0);
6701 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6702 break;
6704 case 'R':
6705 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6706 break;
6708 case 'C':
6709 mips_print_int_branch_condition (file, code, letter);
6710 break;
6712 case 'N':
6713 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6714 break;
6716 case 'F':
6717 mips_print_float_branch_condition (file, code, letter);
6718 break;
6720 case 'W':
6721 mips_print_float_branch_condition (file, reverse_condition (code),
6722 letter);
6723 break;
6725 case 'T':
6726 case 't':
6728 int truth = (code == NE) == (letter == 'T');
6729 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6731 break;
6733 case 'Y':
6734 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6735 fputs (mips_fp_conditions[UINTVAL (op)], file);
6736 else
6737 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6738 letter);
6739 break;
6741 case 'Z':
6742 if (ISA_HAS_8CC)
6744 mips_print_operand (file, op, 0);
6745 fputc (',', file);
6747 break;
6749 case 'q':
6750 if (code == REG && MD_REG_P (REGNO (op)))
6751 fprintf (file, "$ac0");
6752 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6753 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6754 else
6755 output_operand_lossage ("invalid use of '%%%c'", letter);
6756 break;
6758 default:
6759 switch (code)
6761 case REG:
6763 unsigned int regno = REGNO (op);
6764 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6765 || (letter == 'L' && TARGET_BIG_ENDIAN)
6766 || letter == 'D')
6767 regno++;
6768 fprintf (file, "%s", reg_names[regno]);
6770 break;
6772 case MEM:
6773 if (letter == 'D')
6774 output_address (plus_constant (XEXP (op, 0), 4));
6775 else
6776 output_address (XEXP (op, 0));
6777 break;
6779 default:
6780 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6781 fputs (reg_names[GP_REG_FIRST], file);
6782 else if (CONST_GP_P (op))
6783 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6784 else
6785 output_addr_const (file, mips_strip_unspec_address (op));
6786 break;
6791 /* Output address operand X to FILE. */
6793 void
6794 mips_print_operand_address (FILE *file, rtx x)
6796 struct mips_address_info addr;
6798 if (mips_classify_address (&addr, x, word_mode, true))
6799 switch (addr.type)
6801 case ADDRESS_REG:
6802 mips_print_operand (file, addr.offset, 0);
6803 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6804 return;
6806 case ADDRESS_LO_SUM:
6807 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6808 mips_lo_relocs);
6809 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6810 return;
6812 case ADDRESS_CONST_INT:
6813 output_addr_const (file, x);
6814 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
6815 return;
6817 case ADDRESS_SYMBOLIC:
6818 output_addr_const (file, mips_strip_unspec_address (x));
6819 return;
6821 gcc_unreachable ();
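/* Typical outputs from the cases above (register spellings depend on the
   reg_names table): an ADDRESS_REG operand for sp+16 prints as "16($29)",
   an ADDRESS_LO_SUM prints as "%lo(x)($2)", and an ADDRESS_CONST_INT prints
   the constant with the hard-wired zero register as its base, for example
   "123($0)".  */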
6824 /* Implement TARGET_ENCODE_SECTION_INFO. */
6826 static void
6827 mips_encode_section_info (tree decl, rtx rtl, int first)
6829 default_encode_section_info (decl, rtl, first);
6831 if (TREE_CODE (decl) == FUNCTION_DECL)
6833 rtx symbol = XEXP (rtl, 0);
6834 tree type = TREE_TYPE (decl);
6836 /* Encode whether the symbol is short or long. */
6837 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6838 || mips_far_type_p (type))
6839 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6843 /* Implement TARGET_SELECT_RTX_SECTION. */
6845 static section *
6846 mips_select_rtx_section (enum machine_mode mode, rtx x,
6847 unsigned HOST_WIDE_INT align)
6849 /* ??? Consider using mergeable small data sections. */
6850 if (mips_rtx_constant_in_small_data_p (mode))
6851 return get_named_section (NULL, ".sdata", 0);
6853 return default_elf_select_rtx_section (mode, x, align);
6856 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6858 The complication here is that, with the combination TARGET_ABICALLS
6859 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6860 therefore not be included in the read-only part of a DSO. Handle such
6861 cases by selecting a normal data section instead of a read-only one.
6862 The logic apes that in default_function_rodata_section. */
6864 static section *
6865 mips_function_rodata_section (tree decl)
6867 if (!TARGET_ABICALLS || TARGET_GPWORD)
6868 return default_function_rodata_section (decl);
6870 if (decl && DECL_SECTION_NAME (decl))
6872 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6873 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6875 char *rname = ASTRDUP (name);
6876 rname[14] = 'd';
6877 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6879 else if (flag_function_sections
6880 && flag_data_sections
6881 && strncmp (name, ".text.", 6) == 0)
6883 char *rname = ASTRDUP (name);
6884 memcpy (rname + 1, "data", 4);
6885 return get_section (rname, SECTION_WRITE, decl);
6888 return data_section;
6891 /* Implement TARGET_IN_SMALL_DATA_P. */
6893 static bool
6894 mips_in_small_data_p (const_tree decl)
6896 unsigned HOST_WIDE_INT size;
6898 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6899 return false;
6901 /* We don't yet generate small-data references for -mabicalls
6902 or VxWorks RTP code. See the related -G handling in
6903 mips_override_options. */
6904 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6905 return false;
6907 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6909 const char *name;
6911 /* Reject anything that isn't in a known small-data section. */
6912 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6913 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6914 return false;
6916 /* If a symbol is defined externally, the assembler will use the
6917 usual -G rules when deciding how to implement macros. */
6918 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6919 return true;
6921 else if (TARGET_EMBEDDED_DATA)
6923 /* Don't put constants into the small data section: we want them
6924 to be in ROM rather than RAM. */
6925 if (TREE_CODE (decl) != VAR_DECL)
6926 return false;
6928 if (TREE_READONLY (decl)
6929 && !TREE_SIDE_EFFECTS (decl)
6930 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6931 return false;
6934 /* Enforce -mlocal-sdata. */
6935 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6936 return false;
6938 /* Enforce -mextern-sdata. */
6939 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6941 if (DECL_EXTERNAL (decl))
6942 return false;
6943 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6944 return false;
6947 /* We have traditionally not treated zero-sized objects as small data,
6948 so this is now effectively part of the ABI. */
6949 size = int_size_in_bytes (TREE_TYPE (decl));
6950 return size > 0 && size <= mips_small_data_threshold;
6953 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6954 anchors for small data: the GP register acts as an anchor in that
6955 case. We also don't want to use them for PC-relative accesses,
6956 where the PC acts as an anchor. */
6958 static bool
6959 mips_use_anchors_for_symbol_p (const_rtx symbol)
6961 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6963 case SYMBOL_PC_RELATIVE:
6964 case SYMBOL_GP_RELATIVE:
6965 return false;
6967 default:
6968 return default_use_anchors_for_symbol_p (symbol);
6972 /* The MIPS debug format wants all automatic variables and arguments
6973 to be in terms of the virtual frame pointer (stack pointer before
6974 any adjustment in the function), while the MIPS 3.0 linker wants
6975 the frame pointer to be the stack pointer after the initial
6976 adjustment. So, we do the adjustment here. The arg pointer (which
6977 is eliminated) points to the virtual frame pointer, while the frame
6978 pointer (which may be eliminated) points to the stack pointer after
6979 the initial adjustments. */
6981 HOST_WIDE_INT
6982 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6984 rtx offset2 = const0_rtx;
6985 rtx reg = eliminate_constant_term (addr, &offset2);
6987 if (offset == 0)
6988 offset = INTVAL (offset2);
6990 if (reg == stack_pointer_rtx
6991 || reg == frame_pointer_rtx
6992 || reg == hard_frame_pointer_rtx)
6994 offset -= cfun->machine->frame.total_size;
6995 if (reg == hard_frame_pointer_rtx)
6996 offset += cfun->machine->frame.hard_frame_pointer_offset;
6999 /* sdbout_parms does not want this to crash for unrecognized cases. */
7000 #if 0
7001 else if (reg != arg_pointer_rtx)
7002 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7003 addr);
7004 #endif
7006 return offset;
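/* A quick example of the adjustment above: if the frame is 32 bytes and
   ADDR is 8 bytes above the adjusted $sp, the debugger offset becomes
   8 - 32 = -24, i.e. the slot's position relative to the virtual frame
   pointer (the incoming $sp).  */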
7009 /* Implement ASM_OUTPUT_EXTERNAL. */
7011 void
7012 mips_output_external (FILE *file, tree decl, const char *name)
7014 default_elf_asm_output_external (file, decl, name);
7016 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7017 set in order to avoid putting out names that are never really
7018 used. */
7019 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7021 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7023 /* When using assembler macros, emit .extern directives for
7024 all small-data externs so that the assembler knows how
7025 big they are.
7027 In most cases it would be safe (though pointless) to emit
7028 .externs for other symbols too. One exception is when an
7029 object is within the -G limit but declared by the user to
7030 be in a section other than .sbss or .sdata. */
7031 fputs ("\t.extern\t", file);
7032 assemble_name (file, name);
7033 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7034 int_size_in_bytes (TREE_TYPE (decl)));
7036 else if (TARGET_IRIX
7037 && mips_abi == ABI_32
7038 && TREE_CODE (decl) == FUNCTION_DECL)
7040 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7041 `.global name .text' directive for every used but
7042 undefined function. If we don't, the linker may perform
7043 an optimization (skipping over the insns that set $gp)
7044 when it is unsafe. */
7045 fputs ("\t.globl ", file);
7046 assemble_name (file, name);
7047 fputs (" .text\n", file);
7052 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7054 void
7055 mips_output_filename (FILE *stream, const char *name)
7057 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7058 directives. */
7059 if (write_symbols == DWARF2_DEBUG)
7060 return;
7061 else if (mips_output_filename_first_time)
7063 mips_output_filename_first_time = 0;
7064 num_source_filenames += 1;
7065 current_function_file = name;
7066 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7067 output_quoted_string (stream, name);
7068 putc ('\n', stream);
7070 /* If we are emitting stabs, let dbxout.c handle this (except for
7071 the mips_output_filename_first_time case). */
7072 else if (write_symbols == DBX_DEBUG)
7073 return;
7074 else if (name != current_function_file
7075 && strcmp (name, current_function_file) != 0)
7077 num_source_filenames += 1;
7078 current_function_file = name;
7079 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7080 output_quoted_string (stream, name);
7081 putc ('\n', stream);
7085 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7087 static void ATTRIBUTE_UNUSED
7088 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7090 switch (size)
7092 case 4:
7093 fputs ("\t.dtprelword\t", file);
7094 break;
7096 case 8:
7097 fputs ("\t.dtpreldword\t", file);
7098 break;
7100 default:
7101 gcc_unreachable ();
7103 output_addr_const (file, x);
7104 fputs ("+0x8000", file);
7107 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7109 static rtx
7110 mips_dwarf_register_span (rtx reg)
7112 rtx high, low;
7113 enum machine_mode mode;
7115 /* By default, GCC maps increasing register numbers to increasing
7116 memory locations, but paired FPRs are always little-endian,
7117 regardless of the prevailing endianness. */
7118 mode = GET_MODE (reg);
7119 if (FP_REG_P (REGNO (reg))
7120 && TARGET_BIG_ENDIAN
7121 && MAX_FPRS_PER_FMT > 1
7122 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7124 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7125 high = mips_subword (reg, true);
7126 low = mips_subword (reg, false);
7127 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7130 return NULL_RTX;
7133 /* Implement ASM_OUTPUT_ASCII. */
7135 void
7136 mips_output_ascii (FILE *stream, const char *string, size_t len)
7138 size_t i;
7139 int cur_pos;
7141 cur_pos = 17;
7142 fprintf (stream, "\t.ascii\t\"");
7143 for (i = 0; i < len; i++)
7145 int c;
7147 c = (unsigned char) string[i];
7148 if (ISPRINT (c))
7150 if (c == '\\' || c == '\"')
7152 putc ('\\', stream);
7153 cur_pos++;
7155 putc (c, stream);
7156 cur_pos++;
7158 else
7160 fprintf (stream, "\\%03o", c);
7161 cur_pos += 4;
7164 if (cur_pos > 72 && i+1 < len)
7166 cur_pos = 17;
7167 fprintf (stream, "\"\n\t.ascii\t\"");
7170 fprintf (stream, "\"\n");
7173 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7174 macros, mark the symbol as written so that mips_asm_output_external
7175 won't emit an .extern for it. STREAM is the output file, NAME is the
7176 name of the symbol, INIT_STRING is the string that should be written
7177 before the symbol and FINAL_STRING is the string that should be
7178 written after it. FINAL_STRING is a printf format that consumes the
7179 remaining arguments. */
7181 void
7182 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7183 const char *final_string, ...)
7185 va_list ap;
7187 fputs (init_string, stream);
7188 assemble_name (stream, name);
7189 va_start (ap, final_string);
7190 vfprintf (stream, final_string, ap);
7191 va_end (ap);
7193 if (!TARGET_EXPLICIT_RELOCS)
7195 tree name_tree = get_identifier (name);
7196 TREE_ASM_WRITTEN (name_tree) = 1;
7200 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7201 NAME is the name of the object and ALIGN is the required alignment
7202 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7203 alignment argument. */
7205 void
7206 mips_declare_common_object (FILE *stream, const char *name,
7207 const char *init_string,
7208 unsigned HOST_WIDE_INT size,
7209 unsigned int align, bool takes_alignment_p)
7211 if (!takes_alignment_p)
7213 size += (align / BITS_PER_UNIT) - 1;
7214 size -= size % (align / BITS_PER_UNIT);
7215 mips_declare_object (stream, name, init_string,
7216 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7218 else
7219 mips_declare_object (stream, name, init_string,
7220 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7221 size, align / BITS_PER_UNIT);
7224 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7225 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7227 void
7228 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7229 unsigned HOST_WIDE_INT size,
7230 unsigned int align)
7232 /* If the target wants uninitialized const declarations in
7233 .rdata then don't put them in .comm. */
7234 if (TARGET_EMBEDDED_DATA
7235 && TARGET_UNINIT_CONST_IN_RODATA
7236 && TREE_CODE (decl) == VAR_DECL
7237 && TREE_READONLY (decl)
7238 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7240 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7241 targetm.asm_out.globalize_label (stream, name);
7243 switch_to_section (readonly_data_section);
7244 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7245 mips_declare_object (stream, name, "",
7246 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7247 size);
7249 else
7250 mips_declare_common_object (stream, name, "\n\t.comm\t",
7251 size, align, true);
7254 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7255 extern int size_directive_output;
7257 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7258 definitions except that it uses mips_declare_object to emit the label. */
7260 void
7261 mips_declare_object_name (FILE *stream, const char *name,
7262 tree decl ATTRIBUTE_UNUSED)
7264 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7265 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7266 #endif
7268 size_directive_output = 0;
7269 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7271 HOST_WIDE_INT size;
7273 size_directive_output = 1;
7274 size = int_size_in_bytes (TREE_TYPE (decl));
7275 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7278 mips_declare_object (stream, name, "", ":\n");
7281 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7283 void
7284 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7286 const char *name;
7288 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7289 if (!flag_inhibit_size_directive
7290 && DECL_SIZE (decl) != 0
7291 && !at_end
7292 && top_level
7293 && DECL_INITIAL (decl) == error_mark_node
7294 && !size_directive_output)
7296 HOST_WIDE_INT size;
7298 size_directive_output = 1;
7299 size = int_size_in_bytes (TREE_TYPE (decl));
7300 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7303 #endif
7305 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7306 with the current ABI. */
7308 static const char *
7309 mips_mdebug_abi_name (void)
7311 switch (mips_abi)
7313 case ABI_32:
7314 return "abi32";
7315 case ABI_O64:
7316 return "abiO64";
7317 case ABI_N32:
7318 return "abiN32";
7319 case ABI_64:
7320 return "abiN64";
7321 case ABI_EABI:
7322 return TARGET_64BIT ? "eabi64" : "eabi32";
7323 default:
7324 gcc_unreachable ();
7328 /* Implement TARGET_ASM_FILE_START. */
7330 static void
7331 mips_file_start (void)
7333 default_file_start ();
7335 /* Generate a special section to describe the ABI switches used to
7336 produce the resultant binary. This is unnecessary on IRIX and
7337 causes unwanted warnings from the native linker. */
7338 if (!TARGET_IRIX)
7340 /* Record the ABI itself. Modern versions of binutils encode
7341 this information in the ELF header flags, but GDB needs the
7342 information in order to correctly debug binaries produced by
7343 older binutils. See the function mips_gdbarch_init in
7344 gdb/mips-tdep.c. */
7345 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7346 mips_mdebug_abi_name ());
7348 /* There is no ELF header flag to distinguish long32 forms of the
7349 EABI from long64 forms. Emit a special section to help tools
7350 such as GDB. Do the same for o64, which is sometimes used with
7351 -mlong64. */
7352 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7353 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7354 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7356 #ifdef HAVE_AS_GNU_ATTRIBUTE
7357 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7358 (TARGET_HARD_FLOAT_ABI
7359 ? (TARGET_DOUBLE_FLOAT
7360 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7361 #endif
7364 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7365 if (TARGET_ABICALLS)
7366 fprintf (asm_out_file, "\t.abicalls\n");
7368 if (flag_verbose_asm)
7369 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7370 ASM_COMMENT_START,
7371 mips_small_data_threshold, mips_arch_info->name, mips_isa);
7374 /* Make the last instruction frame-related and note that it performs
7375 the operation described by FRAME_PATTERN. */
7377 static void
7378 mips_set_frame_expr (rtx frame_pattern)
7380 rtx insn;
7382 insn = get_last_insn ();
7383 RTX_FRAME_RELATED_P (insn) = 1;
7384 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7385 frame_pattern,
7386 REG_NOTES (insn));
7389 /* Return a frame-related rtx that stores REG at MEM.
7390 REG must be a single register. */
7392 static rtx
7393 mips_frame_set (rtx mem, rtx reg)
7395 rtx set;
7397 /* If we're saving the return address register and the DWARF return
7398 address column differs from the hard register number, adjust the
7399 note reg to refer to the former. */
7400 if (REGNO (reg) == GP_REG_FIRST + 31
7401 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7402 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7404 set = gen_rtx_SET (VOIDmode, mem, reg);
7405 RTX_FRAME_RELATED_P (set) = 1;
7407 return set;
7410 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7411 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7412 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7413 static const unsigned char mips16e_s2_s8_regs[] = {
7414 30, 23, 22, 21, 20, 19, 18
7416 static const unsigned char mips16e_a0_a3_regs[] = {
7417 4, 5, 6, 7
7420 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7421 ordered from the uppermost in memory to the lowest in memory. */
7422 static const unsigned char mips16e_save_restore_regs[] = {
7423 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7426 /* Return the index of the lowest X in the range [0, SIZE) for which
7427 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7429 static unsigned int
7430 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7431 unsigned int size)
7433 unsigned int i;
7435 for (i = 0; i < size; i++)
7436 if (BITSET_P (mask, regs[i]))
7437 break;
7439 return i;
7442 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7443 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7444 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7445 is true for all indexes in (X, SIZE). */
7447 static void
7448 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7449 unsigned int size, unsigned int *num_regs_ptr)
7451 unsigned int i;
7453 i = mips16e_find_first_register (*mask_ptr, regs, size);
7454 for (i++; i < size; i++)
7455 if (!BITSET_P (*mask_ptr, regs[i]))
7457 *num_regs_ptr += 1;
7458 *mask_ptr |= 1 << regs[i];
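/* A rough, hypothetical illustration of the two helpers above (the values
   are made up for the example):

     unsigned int mask = 1 << 20, nregs = 1;
     mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
			     ARRAY_SIZE (mips16e_s2_s8_regs), &nregs);

   mips16e_find_first_register locates $20 at index 4 of
   mips16e_s2_s8_regs, so the loop adds the later entries $19 and $18,
   leaving mask == ((1 << 20) | (1 << 19) | (1 << 18)) and nregs == 3.  */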
7462 /* Return a simplified form of X using the register values in REG_VALUES.
7463 REG_VALUES[R] is the last value assigned to hard register R, or null
7464 if R has not been modified.
7466 This function is rather limited, but is good enough for our purposes. */
7468 static rtx
7469 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7471 x = avoid_constant_pool_reference (x);
7473 if (UNARY_P (x))
7475 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7476 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7477 x0, GET_MODE (XEXP (x, 0)));
7480 if (ARITHMETIC_P (x))
7482 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7483 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7484 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7487 if (REG_P (x)
7488 && reg_values[REGNO (x)]
7489 && !rtx_unstable_p (reg_values[REGNO (x)]))
7490 return reg_values[REGNO (x)];
7492 return x;
7495 /* Return true if (set DEST SRC) stores an argument register into its
7496 caller-allocated save slot, storing the number of that argument
7497 register in *REGNO_PTR if so. REG_VALUES is as for
7498 mips16e_collect_propagate_value. */
7500 static bool
7501 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7502 unsigned int *regno_ptr)
7504 unsigned int argno, regno;
7505 HOST_WIDE_INT offset, required_offset;
7506 rtx addr, base;
7508 /* Check that this is a word-mode store. */
7509 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7510 return false;
7512 /* Check that the register being saved is an unmodified argument
7513 register. */
7514 regno = REGNO (src);
7515 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7516 return false;
7517 argno = regno - GP_ARG_FIRST;
7519 /* Check whether the address is an appropriate stack-pointer or
7520 frame-pointer access. */
7521 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7522 mips_split_plus (addr, &base, &offset);
7523 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7524 if (base == hard_frame_pointer_rtx)
7525 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7526 else if (base != stack_pointer_rtx)
7527 return false;
7528 if (offset != required_offset)
7529 return false;
7531 *regno_ptr = regno;
7532 return true;
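/* A small worked example of the offset check above, with hypothetical
   numbers: if cfun->machine->frame.total_size is 32 and the insn stores
   $5 (argno == 1) with UNITS_PER_WORD == 4, the only acceptable address
   is $sp + 36, or equivalently the hard frame pointer plus
   36 - frame.hard_frame_pointer_offset.  Any other base register or
   offset makes the function return false.  */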
7535 /* A subroutine of mips_expand_prologue, called only when generating
7536 MIPS16e SAVE instructions. Search the start of the function for any
7537 instructions that save argument registers into their caller-allocated
7538 save slots. Delete such instructions and return a value N such that
7539 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7540 instructions redundant. */
7542 static unsigned int
7543 mips16e_collect_argument_saves (void)
7545 rtx reg_values[FIRST_PSEUDO_REGISTER];
7546 rtx insn, next, set, dest, src;
7547 unsigned int nargs, regno;
7549 push_topmost_sequence ();
7550 nargs = 0;
7551 memset (reg_values, 0, sizeof (reg_values));
7552 for (insn = get_insns (); insn; insn = next)
7554 next = NEXT_INSN (insn);
7555 if (NOTE_P (insn))
7556 continue;
7558 if (!INSN_P (insn))
7559 break;
7561 set = PATTERN (insn);
7562 if (GET_CODE (set) != SET)
7563 break;
7565 dest = SET_DEST (set);
7566 src = SET_SRC (set);
7567 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7569 if (!BITSET_P (cfun->machine->frame.mask, regno))
7571 delete_insn (insn);
7572 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7575 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7576 reg_values[REGNO (dest)]
7577 = mips16e_collect_propagate_value (src, reg_values);
7578 else
7579 break;
7581 pop_topmost_sequence ();
7583 return nargs;
7586 /* Return a move between register REGNO and memory location SP + OFFSET.
7587 Make the move a load if RESTORE_P, otherwise make it a frame-related
7588 store. */
7590 static rtx
7591 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7592 unsigned int regno)
7594 rtx reg, mem;
7596 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7597 reg = gen_rtx_REG (SImode, regno);
7598 return (restore_p
7599 ? gen_rtx_SET (VOIDmode, reg, mem)
7600 : mips_frame_set (mem, reg));
7603 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7604 The instruction must:
7606 - Allocate or deallocate SIZE bytes in total; SIZE is known
7607 to be nonzero.
7609 - Save or restore as many registers in *MASK_PTR as possible.
7610 The instruction saves the first registers at the top of the
7611 allocated area, with the other registers below it.
7613 - Save NARGS argument registers above the allocated area.
7615 (NARGS is always zero if RESTORE_P.)
7617 The SAVE and RESTORE instructions cannot save and restore all general
7618 registers, so there may be some registers left over for the caller to
7619 handle. Destructively modify *MASK_PTR so that it contains the registers
7620 that still need to be saved or restored. The caller can save these
7621 registers in the memory immediately below *OFFSET_PTR, which is a
7622 byte offset from the bottom of the allocated stack area. */
7624 static rtx
7625 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7626 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7627 HOST_WIDE_INT size)
7629 rtx pattern, set;
7630 HOST_WIDE_INT offset, top_offset;
7631 unsigned int i, regno;
7632 int n;
7634 gcc_assert (cfun->machine->frame.num_fp == 0);
7636 /* Calculate the number of elements in the PARALLEL. We need one element
7637 for the stack adjustment, one for each argument register save, and one
7638 for each additional register move. */
7639 n = 1 + nargs;
7640 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7641 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7642 n++;
7644 /* Create the final PARALLEL. */
7645 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7646 n = 0;
7648 /* Add the stack pointer adjustment. */
7649 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7650 plus_constant (stack_pointer_rtx,
7651 restore_p ? size : -size));
7652 RTX_FRAME_RELATED_P (set) = 1;
7653 XVECEXP (pattern, 0, n++) = set;
7655 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7656 top_offset = restore_p ? size : 0;
7658 /* Save the arguments. */
7659 for (i = 0; i < nargs; i++)
7661 offset = top_offset + i * UNITS_PER_WORD;
7662 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7663 XVECEXP (pattern, 0, n++) = set;
7666 /* Then fill in the other register moves. */
7667 offset = top_offset;
7668 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7670 regno = mips16e_save_restore_regs[i];
7671 if (BITSET_P (*mask_ptr, regno))
7673 offset -= UNITS_PER_WORD;
7674 set = mips16e_save_restore_reg (restore_p, offset, regno);
7675 XVECEXP (pattern, 0, n++) = set;
7676 *mask_ptr &= ~(1 << regno);
7680 /* Tell the caller what offset it should use for the remaining registers. */
7681 *offset_ptr = size + (offset - top_offset);
7683 gcc_assert (n == XVECLEN (pattern, 0));
7685 return pattern;
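/* As a rough illustration of the PARALLEL built above (hypothetical
   inputs): for a save with NARGS == 1, SIZE == 32 and two registers left
   in *MASK_PTR, N is 1 + 1 + 2 == 4: the $sp adjustment, one store of $4
   into its caller-allocated slot at the old $sp, and two register stores
   immediately below the old $sp.  *OFFSET_PTR then comes back as
   32 - 8 == 24, the offset below which the caller can place any
   registers that the SAVE instruction itself could not handle.  */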
7688 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7689 pointer. Return true if PATTERN matches the kind of instruction
7690 generated by mips16e_build_save_restore. If INFO is nonnull,
7691 initialize it when returning true. */
7693 bool
7694 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7695 struct mips16e_save_restore_info *info)
7697 unsigned int i, nargs, mask, extra;
7698 HOST_WIDE_INT top_offset, save_offset, offset;
7699 rtx set, reg, mem, base;
7700 int n;
7702 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7703 return false;
7705 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7706 top_offset = adjust > 0 ? adjust : 0;
7708 /* Interpret all other members of the PARALLEL. */
7709 save_offset = top_offset - UNITS_PER_WORD;
7710 mask = 0;
7711 nargs = 0;
7712 i = 0;
7713 for (n = 1; n < XVECLEN (pattern, 0); n++)
7715 /* Check that we have a SET. */
7716 set = XVECEXP (pattern, 0, n);
7717 if (GET_CODE (set) != SET)
7718 return false;
7720 /* Check that the SET is a load (if restoring) or a store
7721 (if saving). */
7722 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7723 if (!MEM_P (mem))
7724 return false;
7726 /* Check that the address is the sum of the stack pointer and a
7727 possibly-zero constant offset. */
7728 mips_split_plus (XEXP (mem, 0), &base, &offset);
7729 if (base != stack_pointer_rtx)
7730 return false;
7732 /* Check that SET's other operand is a register. */
7733 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7734 if (!REG_P (reg))
7735 return false;
7737 /* Check for argument saves. */
7738 if (offset == top_offset + nargs * UNITS_PER_WORD
7739 && REGNO (reg) == GP_ARG_FIRST + nargs)
7740 nargs++;
7741 else if (offset == save_offset)
7743 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7744 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7745 return false;
7747 mask |= 1 << REGNO (reg);
7748 save_offset -= UNITS_PER_WORD;
7750 else
7751 return false;
7754 /* Check that the restrictions on register ranges are met. */
7755 extra = 0;
7756 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7757 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7758 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7759 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7760 if (extra != 0)
7761 return false;
7763 /* Make sure that the topmost argument register is not saved twice.
7764 The checks above ensure that the same is then true for the other
7765 argument registers. */
7766 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7767 return false;
7769 /* Pass back information, if requested. */
7770 if (info)
7772 info->nargs = nargs;
7773 info->mask = mask;
7774 info->size = (adjust > 0 ? adjust : -adjust);
7777 return true;
7780 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7781 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7782 the null terminator. */
7784 static char *
7785 mips16e_add_register_range (char *s, unsigned int min_reg,
7786 unsigned int max_reg)
7788 if (min_reg != max_reg)
7789 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7790 else
7791 s += sprintf (s, ",%s", reg_names[min_reg]);
7792 return s;
7795 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7796 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7798 const char *
7799 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7801 static char buffer[300];
7803 struct mips16e_save_restore_info info;
7804 unsigned int i, end;
7805 char *s;
7807 /* Parse the pattern. */
7808 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7809 gcc_unreachable ();
7811 /* Add the mnemonic. */
7812 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7813 s += strlen (s);
7815 /* Save the arguments. */
7816 if (info.nargs > 1)
7817 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7818 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7819 else if (info.nargs == 1)
7820 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7822 /* Emit the amount of stack space to allocate or deallocate. */
7823 s += sprintf (s, "%d", (int) info.size);
7825 /* Save or restore $16. */
7826 if (BITSET_P (info.mask, 16))
7827 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7829 /* Save or restore $17. */
7830 if (BITSET_P (info.mask, 17))
7831 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7833 /* Save or restore registers in the range $s2...$s8, which
7834 mips16e_s2_s8_regs lists in decreasing order. Note that this
7835 is a software register range; the hardware registers are not
7836 numbered consecutively. */
7837 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7838 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7839 if (i < end)
7840 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7841 mips16e_s2_s8_regs[i]);
7843 /* Save or restore registers in the range $a0...$a3. */
7844 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7845 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7846 if (i < end)
7847 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7848 mips16e_a0_a3_regs[end - 1]);
7850 /* Save or restore $31. */
7851 if (BITSET_P (info.mask, 31))
7852 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7854 return buffer;
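/* For example (hypothetical operands, purely to illustrate the format
   assembled above): with info.nargs == 1, info.size == 32 and info.mask
   containing $16, $17 and $31, the buffer would read

     save	$4,32,$16,$17,$31

   while the matching epilogue pattern, which never includes argument
   saves, would produce

     restore	32,$16,$17,$31  */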
7857 /* Return true if the current function has an insn that implicitly
7858 refers to $gp. */
7860 static bool
7861 mips_function_has_gp_insn (void)
7863 /* Don't bother rechecking if we found one last time. */
7864 if (!cfun->machine->has_gp_insn_p)
7866 rtx insn;
7868 push_topmost_sequence ();
7869 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7870 if (USEFUL_INSN_P (insn)
7871 && (get_attr_got (insn) != GOT_UNSET
7872 || mips_small_data_pattern_p (PATTERN (insn))))
7874 cfun->machine->has_gp_insn_p = true;
7875 break;
7877 pop_topmost_sequence ();
7879 return cfun->machine->has_gp_insn_p;
7882 /* Return the register that should be used as the global pointer
7883 within this function. Return 0 if the function doesn't need
7884 a global pointer. */
7886 static unsigned int
7887 mips_global_pointer (void)
7889 unsigned int regno;
7891 /* $gp is always available unless we're using a GOT. */
7892 if (!TARGET_USE_GOT)
7893 return GLOBAL_POINTER_REGNUM;
7895 /* We must always provide $gp when it is used implicitly. */
7896 if (!TARGET_EXPLICIT_RELOCS)
7897 return GLOBAL_POINTER_REGNUM;
7899 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7900 a valid gp. */
7901 if (crtl->profile)
7902 return GLOBAL_POINTER_REGNUM;
7904 /* If the function has a nonlocal goto, $gp must hold the correct
7905 global pointer for the target function. */
7906 if (crtl->has_nonlocal_goto)
7907 return GLOBAL_POINTER_REGNUM;
7909 /* If the gp is never referenced, there's no need to initialize it.
7910 Note that reload can sometimes introduce constant pool references
7911 into a function that otherwise didn't need them. For example,
7912 suppose we have an instruction like:
7914 (set (reg:DF R1) (float:DF (reg:SI R2)))
7916 If R2 turns out to be a constant such as 1, the instruction may have a
7917 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7918 using this constant if R2 doesn't get allocated to a register.
7920 In cases like these, reload will have added the constant to the pool
7921 but no instruction will yet refer to it. */
7922 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7923 && !crtl->uses_const_pool
7924 && !mips_function_has_gp_insn ())
7925 return 0;
7927 /* We need a global pointer, but perhaps we can use a call-clobbered
7928 register instead of $gp. */
7929 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7930 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7931 if (!df_regs_ever_live_p (regno)
7932 && call_really_used_regs[regno]
7933 && !fixed_regs[regno]
7934 && regno != PIC_FUNCTION_ADDR_REGNUM)
7935 return regno;
7937 return GLOBAL_POINTER_REGNUM;
7940 /* Return true if the current function returns its value in a floating-point
7941 register in MIPS16 mode. */
7943 static bool
7944 mips16_cfun_returns_in_fpr_p (void)
7946 tree return_type = DECL_RESULT (current_function_decl);
7947 return (TARGET_MIPS16
7948 && TARGET_HARD_FLOAT_ABI
7949 && !aggregate_value_p (return_type, current_function_decl)
7950 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7953 /* Return true if the current function must save register REGNO. */
7955 static bool
7956 mips_save_reg_p (unsigned int regno)
7958 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7959 if we have not chosen a call-clobbered substitute. */
7960 if (regno == GLOBAL_POINTER_REGNUM)
7961 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7963 /* Check call-saved registers. */
7964 if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
7965 && !call_really_used_regs[regno])
7966 return true;
7968 /* Save both registers in an FPR pair if either one is used. This is
7969 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7970 register to be used without the even register. */
7971 if (FP_REG_P (regno)
7972 && MAX_FPRS_PER_FMT == 2
7973 && df_regs_ever_live_p (regno + 1)
7974 && !call_really_used_regs[regno + 1])
7975 return true;
7977 /* We need to save the old frame pointer before setting up a new one. */
7978 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7979 return true;
7981 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7982 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7983 return true;
7985 /* We need to save the incoming return address if it is ever clobbered
7986 within the function, if __builtin_eh_return is being used to set a
7987 different return address, or if a stub is being used to return a
7988 value in FPRs. */
7989 if (regno == GP_REG_FIRST + 31
7990 && (df_regs_ever_live_p (regno)
7991 || crtl->calls_eh_return
7992 || mips16_cfun_returns_in_fpr_p ()))
7993 return true;
7995 return false;
7998 /* Populate the current function's mips_frame_info structure.
8000 MIPS stack frames look like:
8002 +-------------------------------+
8004 | incoming stack arguments |
8006 +-------------------------------+
8008 | caller-allocated save area |
8009 A | for register arguments |
8011 +-------------------------------+ <-- incoming stack pointer
8013 | callee-allocated save area |
8014 B | for arguments that are |
8015 | split between registers and |
8016 | the stack |
8018 +-------------------------------+ <-- arg_pointer_rtx
8020 C | callee-allocated save area |
8021 | for register varargs |
8023 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
8024 | | + UNITS_PER_HWFPVALUE
8025 | FPR save area |
8027 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8028 | | + UNITS_PER_WORD
8029 | GPR save area |
8031 +-------------------------------+
8032 | | \
8033 | local variables | | var_size
8034 | | /
8035 +-------------------------------+
8036 | | \
8037 | $gp save area | | cprestore_size
8038 | | /
8039 P +-------------------------------+ <-- hard_frame_pointer_rtx for
8040 | | MIPS16 code
8041 | outgoing stack arguments |
8043 +-------------------------------+
8045 | caller-allocated save area |
8046 | for register arguments |
8048 +-------------------------------+ <-- stack_pointer_rtx
8049 frame_pointer_rtx
8050 hard_frame_pointer_rtx for
8051 non-MIPS16 code.
8053 At least two of A, B and C will be empty.
8055 Dynamic stack allocations such as alloca insert data at point P.
8056 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8057 hard_frame_pointer_rtx unchanged. */
8059 static void
8060 mips_compute_frame_info (void)
8062 struct mips_frame_info *frame;
8063 HOST_WIDE_INT offset, size;
8064 unsigned int regno, i;
8066 frame = &cfun->machine->frame;
8067 memset (frame, 0, sizeof (*frame));
8068 size = get_frame_size ();
8070 cfun->machine->global_pointer = mips_global_pointer ();
8072 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8073 area and the $gp save slot. This area isn't needed in leaf functions,
8074 but if the target-independent frame size is nonzero, we're committed
8075 to allocating it anyway. */
8076 if (size == 0 && current_function_is_leaf)
8078 /* The MIPS 3.0 linker does not like functions that dynamically
8079 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8080 looks as though we are trying to create a second frame pointer to the
8081 function; allocate some stack space to keep it happy. */
8082 if (cfun->calls_alloca)
8083 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8084 else
8085 frame->args_size = 0;
8086 frame->cprestore_size = 0;
8088 else
8090 frame->args_size = crtl->outgoing_args_size;
8091 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8093 offset = frame->args_size + frame->cprestore_size;
8095 /* Move above the local variables. */
8096 frame->var_size = MIPS_STACK_ALIGN (size);
8097 offset += frame->var_size;
8099 /* Find out which GPRs we need to save. */
8100 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8101 if (mips_save_reg_p (regno))
8103 frame->num_gp++;
8104 frame->mask |= 1 << (regno - GP_REG_FIRST);
8107 /* If this function calls eh_return, we must also save and restore the
8108 EH data registers. */
8109 if (crtl->calls_eh_return)
8110 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8112 frame->num_gp++;
8113 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8116 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8117 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8118 save all later registers too. */
8119 if (GENERATE_MIPS16E_SAVE_RESTORE)
8121 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8122 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8123 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8124 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8127 /* Move above the GPR save area. */
8128 if (frame->num_gp > 0)
8130 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8131 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8134 /* Find out which FPRs we need to save. This loop must iterate over
8135 the same space as its companion in mips_for_each_saved_reg. */
8136 if (TARGET_HARD_FLOAT)
8137 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8138 if (mips_save_reg_p (regno))
8140 frame->num_fp += MAX_FPRS_PER_FMT;
8141 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8144 /* Move above the FPR save area. */
8145 if (frame->num_fp > 0)
8147 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8148 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8151 /* Move above the callee-allocated varargs save area. */
8152 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8153 frame->arg_pointer_offset = offset;
8155 /* Move above the callee-allocated area for pretend stack arguments. */
8156 offset += crtl->args.pretend_args_size;
8157 frame->total_size = offset;
8159 /* Work out the offsets of the save areas from the top of the frame. */
8160 if (frame->gp_sp_offset > 0)
8161 frame->gp_save_offset = frame->gp_sp_offset - offset;
8162 if (frame->fp_sp_offset > 0)
8163 frame->fp_save_offset = frame->fp_sp_offset - offset;
8165 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8166 arguments. This tends to increase the chances of using unextended
8167 instructions for local variables and incoming arguments. */
8168 if (TARGET_MIPS16)
8169 frame->hard_frame_pointer_offset = frame->args_size;
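/* A hypothetical o32 example of the computation above (values chosen only
   for illustration): a non-leaf, non-abicalls function with
   crtl->outgoing_args_size == 16, no locals and only $31 to save gets
   args_size == 16, cprestore_size == 0 and var_size == 0.  The one-word
   GPR save area rounds up to 8 bytes, so gp_sp_offset == 20,
   total_size == 24 and gp_save_offset == -4, with mask == 0x80000000 and
   num_gp == 1.  */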
8172 /* Return the style of GP load sequence that is being used for the
8173 current function. */
8175 enum mips_loadgp_style
8176 mips_current_loadgp_style (void)
8178 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8179 return LOADGP_NONE;
8181 if (TARGET_RTP_PIC)
8182 return LOADGP_RTP;
8184 if (TARGET_ABSOLUTE_ABICALLS)
8185 return LOADGP_ABSOLUTE;
8187 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8190 /* Implement FRAME_POINTER_REQUIRED. */
8192 bool
8193 mips_frame_pointer_required (void)
8195 /* If the function contains dynamic stack allocations, we need to
8196 use the frame pointer to access the static parts of the frame. */
8197 if (cfun->calls_alloca)
8198 return true;
8200 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8201 reload may be unable to compute the address of a local variable,
8202 since there is no way to add a large constant to the stack pointer
8203 without using a second temporary register. */
8204 if (TARGET_MIPS16)
8206 mips_compute_frame_info ();
8207 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8208 return true;
8211 return false;
8214 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8215 or argument pointer. TO is either the stack pointer or hard frame
8216 pointer. */
8218 HOST_WIDE_INT
8219 mips_initial_elimination_offset (int from, int to)
8221 HOST_WIDE_INT offset;
8223 mips_compute_frame_info ();
8225 /* Set OFFSET to the offset from the soft frame pointer, which is also
8226 the offset from the end-of-prologue stack pointer. */
8227 switch (from)
8229 case FRAME_POINTER_REGNUM:
8230 offset = 0;
8231 break;
8233 case ARG_POINTER_REGNUM:
8234 offset = cfun->machine->frame.arg_pointer_offset;
8235 break;
8237 default:
8238 gcc_unreachable ();
8241 if (to == HARD_FRAME_POINTER_REGNUM)
8242 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8244 return offset;
8247 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8249 static void
8250 mips_extra_live_on_entry (bitmap regs)
8252 if (TARGET_USE_GOT)
8254 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8255 the global pointer. */
8256 if (!TARGET_ABSOLUTE_ABICALLS)
8257 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8259 /* See the comment above load_call<mode> for details. */
8260 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8264 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8265 previous frame. */
8267 rtx
8268 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8270 if (count != 0)
8271 return const0_rtx;
8273 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8276 /* Emit code to change the current function's return address to
8277 ADDRESS. SCRATCH is available as a scratch register, if needed.
8278 ADDRESS and SCRATCH are both word-mode GPRs. */
8280 void
8281 mips_set_return_address (rtx address, rtx scratch)
8283 rtx slot_address;
8285 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8286 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8287 cfun->machine->frame.gp_sp_offset);
8288 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8291 /* Restore $gp from its save slot. Valid only when using o32 or
8292 o64 abicalls. */
8294 void
8295 mips_restore_gp (void)
8297 rtx base, address;
8299 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8301 base = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
8302 address = mips_add_offset (pic_offset_table_rtx, base,
8303 crtl->outgoing_args_size);
8304 mips_emit_move (pic_offset_table_rtx, gen_frame_mem (Pmode, address));
8305 if (!TARGET_EXPLICIT_RELOCS)
8306 emit_insn (gen_blockage ());
8309 /* A function to save or restore a register. The first argument is the
8310 register and the second is the stack slot. */
8311 typedef void (*mips_save_restore_fn) (rtx, rtx);
8313 /* Use FN to save or restore register REGNO. MODE is the register's
8314 mode and OFFSET is the offset of its save slot from the current
8315 stack pointer. */
8317 static void
8318 mips_save_restore_reg (enum machine_mode mode, int regno,
8319 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8321 rtx mem;
8323 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8324 fn (gen_rtx_REG (mode, regno), mem);
8327 /* Call FN for each register that is saved by the current function.
8328 SP_OFFSET is the offset of the current stack pointer from the start
8329 of the frame. */
8331 static void
8332 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8334 enum machine_mode fpr_mode;
8335 HOST_WIDE_INT offset;
8336 int regno;
8338 /* Save registers starting from high to low. The debuggers prefer that at
8339 least the return register be stored at func+4, and doing so also means we
8340 do not need a nop in the epilogue if at least one register is reloaded in
8341 addition to the return address. */
8342 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8343 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8344 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8346 mips_save_restore_reg (word_mode, regno, offset, fn);
8347 offset -= UNITS_PER_WORD;
8350 /* This loop must iterate over the same space as its companion in
8351 mips_compute_frame_info. */
8352 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8353 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8354 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8355 regno >= FP_REG_FIRST;
8356 regno -= MAX_FPRS_PER_FMT)
8357 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8359 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8360 offset -= GET_MODE_SIZE (fpr_mode);
8364 /* If we're generating n32 or n64 abicalls, and the current function
8365 does not use $28 as its global pointer, emit a cplocal directive.
8366 Use pic_offset_table_rtx as the argument to the directive. */
8368 static void
8369 mips_output_cplocal (void)
8371 if (!TARGET_EXPLICIT_RELOCS
8372 && cfun->machine->global_pointer > 0
8373 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8374 output_asm_insn (".cplocal %+", 0);
8377 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8379 static void
8380 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8382 const char *fnname;
8384 #ifdef SDB_DEBUGGING_INFO
8385 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8386 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8387 #endif
8389 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8390 floating-point arguments. */
8391 if (TARGET_MIPS16
8392 && TARGET_HARD_FLOAT_ABI
8393 && crtl->args.info.fp_code != 0)
8394 mips16_build_function_stub ();
8396 /* Select the MIPS16 mode for this function. */
8397 if (TARGET_MIPS16)
8398 fprintf (file, "\t.set\tmips16\n");
8399 else
8400 fprintf (file, "\t.set\tnomips16\n");
8402 if (!FUNCTION_NAME_ALREADY_DECLARED)
8404 /* Get the function name the same way that toplev.c does before calling
8405 assemble_start_function. This is needed so that the name used here
8406 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8407 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8409 if (!flag_inhibit_size_directive)
8411 fputs ("\t.ent\t", file);
8412 assemble_name (file, fnname);
8413 fputs ("\n", file);
8416 assemble_name (file, fnname);
8417 fputs (":\n", file);
8420 /* Stop mips_file_end from treating this function as external. */
8421 if (TARGET_IRIX && mips_abi == ABI_32)
8422 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8424 /* Output MIPS-specific frame information. */
8425 if (!flag_inhibit_size_directive)
8427 const struct mips_frame_info *frame;
8429 frame = &cfun->machine->frame;
8431 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8432 fprintf (file,
8433 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8434 "# vars= " HOST_WIDE_INT_PRINT_DEC
8435 ", regs= %d/%d"
8436 ", args= " HOST_WIDE_INT_PRINT_DEC
8437 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8438 reg_names[frame_pointer_needed
8439 ? HARD_FRAME_POINTER_REGNUM
8440 : STACK_POINTER_REGNUM],
8441 (frame_pointer_needed
8442 ? frame->total_size - frame->hard_frame_pointer_offset
8443 : frame->total_size),
8444 reg_names[GP_REG_FIRST + 31],
8445 frame->var_size,
8446 frame->num_gp, frame->num_fp,
8447 frame->args_size,
8448 frame->cprestore_size);
8450 /* .mask MASK, OFFSET. */
8451 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8452 frame->mask, frame->gp_save_offset);
8454 /* .fmask MASK, OFFSET. */
8455 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8456 frame->fmask, frame->fp_save_offset);
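/* Continuing the hypothetical o32 example from mips_compute_frame_info
   (illustrative values only), the directives above would come out
   roughly as:

	.frame	$sp,24,$31		# vars= 0, regs= 1/0, args= 16, gp= 0
	.mask	0x80000000,-4
	.fmask	0x00000000,0  */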
8459 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8460 Also emit the ".set noreorder; .set nomacro" sequence for functions
8461 that need it. */
8462 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8464 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
8465 if (!cfun->machine->all_noreorder_p)
8466 output_asm_insn ("%(.cpload\t%^%)", 0);
8467 else
8468 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8470 else if (cfun->machine->all_noreorder_p)
8471 output_asm_insn ("%(%<", 0);
8473 /* Tell the assembler which register we're using as the global
8474 pointer. This is needed for thunks, since they can use either
8475 explicit relocs or assembler macros. */
8476 mips_output_cplocal ();
8479 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
8481 static void
8482 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8483 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8485 /* Reinstate the normal $gp. */
8486 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8487 mips_output_cplocal ();
8489 if (cfun->machine->all_noreorder_p)
8491 /* Avoid using %>%) since it adds excess whitespace. */
8492 output_asm_insn (".set\tmacro", 0);
8493 output_asm_insn (".set\treorder", 0);
8494 set_noreorder = set_nomacro = 0;
8497 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8499 const char *fnname;
8501 /* Get the function name the same way that toplev.c does before calling
8502 assemble_start_function. This is needed so that the name used here
8503 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8504 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8505 fputs ("\t.end\t", file);
8506 assemble_name (file, fnname);
8507 fputs ("\n", file);
8511 /* Save register REG to MEM. Make the instruction frame-related. */
8513 static void
8514 mips_save_reg (rtx reg, rtx mem)
8516 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8518 rtx x1, x2;
8520 if (mips_split_64bit_move_p (mem, reg))
8521 mips_split_doubleword_move (mem, reg);
8522 else
8523 mips_emit_move (mem, reg);
8525 x1 = mips_frame_set (mips_subword (mem, false),
8526 mips_subword (reg, false));
8527 x2 = mips_frame_set (mips_subword (mem, true),
8528 mips_subword (reg, true));
8529 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8531 else
8533 if (TARGET_MIPS16
8534 && REGNO (reg) != GP_REG_FIRST + 31
8535 && !M16_REG_P (REGNO (reg)))
8537 /* Save a non-MIPS16 register by moving it through a temporary.
8538 We don't need to do this for $31 since there's a special
8539 instruction for it. */
8540 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8541 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8543 else
8544 mips_emit_move (mem, reg);
8546 mips_set_frame_expr (mips_frame_set (mem, reg));
8550 /* The __gnu_local_gp symbol. */
8552 static GTY(()) rtx mips_gnu_local_gp;
8554 /* If we're generating n32 or n64 abicalls, emit instructions
8555 to set up the global pointer. */
8557 static void
8558 mips_emit_loadgp (void)
8560 rtx addr, offset, incoming_address, base, index, pic_reg;
8562 pic_reg = pic_offset_table_rtx;
8563 switch (mips_current_loadgp_style ())
8565 case LOADGP_ABSOLUTE:
8566 if (mips_gnu_local_gp == NULL)
8568 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8569 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8571 emit_insn (Pmode == SImode
8572 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
8573 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
8574 break;
8576 case LOADGP_NEWABI:
8577 addr = XEXP (DECL_RTL (current_function_decl), 0);
8578 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8579 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8580 emit_insn (Pmode == SImode
8581 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
8582 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
8583 break;
8585 case LOADGP_RTP:
8586 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8587 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8588 emit_insn (Pmode == SImode
8589 ? gen_loadgp_rtp_si (pic_reg, base, index)
8590 : gen_loadgp_rtp_di (pic_reg, base, index));
8591 break;
8593 default:
8594 return;
8596 /* Emit a blockage if there are implicit uses of the GP register.
8597 This includes profiled functions, because FUNCTION_PROFILER uses
8598 a jal macro. */
8599 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
8600 emit_insn (gen_loadgp_blockage ());
8603 /* Expand the "prologue" pattern. */
8605 void
8606 mips_expand_prologue (void)
8608 const struct mips_frame_info *frame;
8609 HOST_WIDE_INT size;
8610 unsigned int nargs;
8611 rtx insn;
8613 if (cfun->machine->global_pointer > 0)
8614 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8616 frame = &cfun->machine->frame;
8617 size = frame->total_size;
8619 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8620 bytes beforehand; this is enough to cover the register save area
8621 without going out of range. */
8622 if ((frame->mask | frame->fmask) != 0)
8624 HOST_WIDE_INT step1;
8626 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8627 if (GENERATE_MIPS16E_SAVE_RESTORE)
8629 HOST_WIDE_INT offset;
8630 unsigned int mask, regno;
8632 /* Try to merge argument stores into the save instruction. */
8633 nargs = mips16e_collect_argument_saves ();
8635 /* Build the save instruction. */
8636 mask = frame->mask;
8637 insn = mips16e_build_save_restore (false, &mask, &offset,
8638 nargs, step1);
8639 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8640 size -= step1;
8642 /* Check if we need to save other registers. */
8643 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8644 if (BITSET_P (mask, regno - GP_REG_FIRST))
8646 offset -= UNITS_PER_WORD;
8647 mips_save_restore_reg (word_mode, regno,
8648 offset, mips_save_reg);
8651 else
8653 insn = gen_add3_insn (stack_pointer_rtx,
8654 stack_pointer_rtx,
8655 GEN_INT (-step1));
8656 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8657 size -= step1;
8658 mips_for_each_saved_reg (size, mips_save_reg);
8662 /* Allocate the rest of the frame. */
8663 if (size > 0)
8665 if (SMALL_OPERAND (-size))
8666 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8667 stack_pointer_rtx,
8668 GEN_INT (-size)))) = 1;
8669 else
8671 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8672 if (TARGET_MIPS16)
8674 /* There are no instructions to add or subtract registers
8675 from the stack pointer, so use the frame pointer as a
8676 temporary. We should always be using a frame pointer
8677 in this case anyway. */
8678 gcc_assert (frame_pointer_needed);
8679 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8680 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8681 hard_frame_pointer_rtx,
8682 MIPS_PROLOGUE_TEMP (Pmode)));
8683 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8685 else
8686 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8687 stack_pointer_rtx,
8688 MIPS_PROLOGUE_TEMP (Pmode)));
8690 /* Describe the combined effect of the previous instructions. */
8691 mips_set_frame_expr
8692 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8693 plus_constant (stack_pointer_rtx, -size)));
8697 /* Set up the frame pointer, if we're using one. */
8698 if (frame_pointer_needed)
8700 HOST_WIDE_INT offset;
8702 offset = frame->hard_frame_pointer_offset;
8703 if (offset == 0)
8705 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8706 RTX_FRAME_RELATED_P (insn) = 1;
8708 else if (SMALL_OPERAND (offset))
8710 insn = gen_add3_insn (hard_frame_pointer_rtx,
8711 stack_pointer_rtx, GEN_INT (offset));
8712 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8714 else
8716 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8717 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8718 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8719 hard_frame_pointer_rtx,
8720 MIPS_PROLOGUE_TEMP (Pmode)));
8721 mips_set_frame_expr
8722 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8723 plus_constant (stack_pointer_rtx, offset)));
8727 mips_emit_loadgp ();
8729 /* Initialize the $gp save slot. */
8730 if (frame->cprestore_size > 0)
8731 emit_insn (gen_cprestore (GEN_INT (crtl->outgoing_args_size)));
8733 /* If we are profiling, make sure no instructions are scheduled before
8734 the call to mcount. */
8735 if (crtl->profile)
8736 emit_insn (gen_blockage ());
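/* A rough illustration of the two-step allocation above, assuming the
   non-MIPS16 MIPS_MAX_FIRST_STACK_STEP of 0x7ff0 (32752) and a
   hypothetical 40000-byte frame with registers to save: step1 is 32752,
   so the prologue first drops $sp by that amount and stores the save
   area within it; the remaining 7248 bytes are a SMALL_OPERAND, so a
   single further $sp adjustment finishes the job.  Only when the
   remainder is too large for an immediate does the MIPS_PROLOGUE_TEMP
   (and, for MIPS16, frame-pointer) sequence above come into play.  */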
8739 /* Emit instructions to restore register REG from slot MEM. */
8741 static void
8742 mips_restore_reg (rtx reg, rtx mem)
8744 /* There's no MIPS16 instruction to load $31 directly. Load into
8745 $7 instead and adjust the return insn appropriately. */
8746 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8747 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
8749 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8751 /* Can't restore directly; move through a temporary. */
8752 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8753 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8755 else
8756 mips_emit_move (reg, mem);
8759 /* Emit any instructions needed before a return. */
8761 void
8762 mips_expand_before_return (void)
8764 /* When using a call-clobbered gp, we start out with unified call
8765 insns that include instructions to restore the gp. We then split
8766 these unified calls after reload. These split calls explicitly
8767 clobber gp, so there is no need to define
8768 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
8770 For consistency, we should also insert an explicit clobber of $28
8771 before return insns, so that the post-reload optimizers know that
8772 the register is not live on exit. */
8773 if (TARGET_CALL_CLOBBERED_GP)
8774 emit_clobber (pic_offset_table_rtx);
8777 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
8778 says which. */
8780 void
8781 mips_expand_epilogue (bool sibcall_p)
8783 const struct mips_frame_info *frame;
8784 HOST_WIDE_INT step1, step2;
8785 rtx base, target;
8787 if (!sibcall_p && mips_can_use_return_insn ())
8789 emit_jump_insn (gen_return ());
8790 return;
8793 /* In MIPS16 mode, if the return value should go into a floating-point
8794 register, we need to call a helper routine to copy it over. */
8795 if (mips16_cfun_returns_in_fpr_p ())
8796 mips16_copy_fpr_return_value ();
8798 /* Split the frame into two. STEP1 is the amount of stack we should
8799 deallocate before restoring the registers. STEP2 is the amount we
8800 should deallocate afterwards.
8802 Start off by assuming that no registers need to be restored. */
8803 frame = &cfun->machine->frame;
8804 step1 = frame->total_size;
8805 step2 = 0;
8807 /* Work out which register holds the frame address. */
8808 if (!frame_pointer_needed)
8809 base = stack_pointer_rtx;
8810 else
8812 base = hard_frame_pointer_rtx;
8813 step1 -= frame->hard_frame_pointer_offset;
8816 /* If we need to restore registers, deallocate as much stack as
8817 possible in the second step without going out of range. */
8818 if ((frame->mask | frame->fmask) != 0)
8820 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8821 step1 -= step2;
8824 /* Set TARGET to BASE + STEP1. */
8825 target = base;
8826 if (step1 > 0)
8828 rtx adjust;
8830 /* Get an rtx for STEP1 that we can add to BASE. */
8831 adjust = GEN_INT (step1);
8832 if (!SMALL_OPERAND (step1))
8834 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8835 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8838 /* Normal mode code can copy the result straight into $sp. */
8839 if (!TARGET_MIPS16)
8840 target = stack_pointer_rtx;
8842 emit_insn (gen_add3_insn (target, base, adjust));
8845 /* Copy TARGET into the stack pointer. */
8846 if (target != stack_pointer_rtx)
8847 mips_emit_move (stack_pointer_rtx, target);
8849 /* If we're using addressing macros, $gp is implicitly used by all
8850 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8851 from the stack. */
8852 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8853 emit_insn (gen_blockage ());
8855 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
8857 unsigned int regno, mask;
8858 HOST_WIDE_INT offset;
8859 rtx restore;
8861 /* Generate the restore instruction. */
8862 mask = frame->mask;
8863 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8865 /* Restore any other registers manually. */
8866 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8867 if (BITSET_P (mask, regno - GP_REG_FIRST))
8869 offset -= UNITS_PER_WORD;
8870 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
8873 /* Restore the remaining registers and deallocate the final bit
8874 of the frame. */
8875 emit_insn (restore);
8877 else
8879 /* Restore the registers. */
8880 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
8882 /* Deallocate the final bit of the frame. */
8883 if (step2 > 0)
8884 emit_insn (gen_add3_insn (stack_pointer_rtx,
8885 stack_pointer_rtx,
8886 GEN_INT (step2)));
8889 /* Add in the __builtin_eh_return stack adjustment. We need to
8890 use a temporary in MIPS16 code. */
8891 if (crtl->calls_eh_return)
8893 if (TARGET_MIPS16)
8895 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8896 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8897 MIPS_EPILOGUE_TEMP (Pmode),
8898 EH_RETURN_STACKADJ_RTX));
8899 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8901 else
8902 emit_insn (gen_add3_insn (stack_pointer_rtx,
8903 stack_pointer_rtx,
8904 EH_RETURN_STACKADJ_RTX));
8907 if (!sibcall_p)
8909 unsigned int regno;
8911 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8912 path will restore the return address into $7 rather than $31. */
8913 if (TARGET_MIPS16
8914 && !GENERATE_MIPS16E_SAVE_RESTORE
8915 && BITSET_P (frame->mask, 31))
8916 regno = GP_REG_FIRST + 7;
8917 else
8918 regno = GP_REG_FIRST + 31;
8919 mips_expand_before_return ();
8920 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
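/* Mirroring the prologue example (hypothetical sizes again): for a
   40000-byte frame with saved registers and no frame pointer, step2 is
   MIN (40000, MIPS_MAX_FIRST_STACK_STEP) and step1 the remainder, so the
   epilogue first adds step1 to $sp (via MIPS_EPILOGUE_TEMP if it is not
   a SMALL_OPERAND), then restores the registers, and finally adds step2
   back before emitting the return.  */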
8924 /* Return nonzero if this function is known to have a null epilogue.
8925 This allows the optimizer to omit jumps to jumps if no stack
8926 was created. */
8928 bool
8929 mips_can_use_return_insn (void)
8931 if (!reload_completed)
8932 return false;
8934 if (crtl->profile)
8935 return false;
8937 /* In MIPS16 mode, a function that returns a floating-point value
8938 needs to arrange to copy the return value into the floating-point
8939 registers. */
8940 if (mips16_cfun_returns_in_fpr_p ())
8941 return false;
8943 return cfun->machine->frame.total_size == 0;
8946 /* Return true if register REGNO can store a value of mode MODE.
8947 The result of this function is cached in mips_hard_regno_mode_ok. */
8949 static bool
8950 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
8952 unsigned int size;
8953 enum mode_class class;
8955 if (mode == CCV2mode)
8956 return (ISA_HAS_8CC
8957 && ST_REG_P (regno)
8958 && (regno - ST_REG_FIRST) % 2 == 0);
8960 if (mode == CCV4mode)
8961 return (ISA_HAS_8CC
8962 && ST_REG_P (regno)
8963 && (regno - ST_REG_FIRST) % 4 == 0);
8965 if (mode == CCmode)
8967 if (!ISA_HAS_8CC)
8968 return regno == FPSW_REGNUM;
8970 return (ST_REG_P (regno)
8971 || GP_REG_P (regno)
8972 || FP_REG_P (regno));
8975 size = GET_MODE_SIZE (mode);
8976 class = GET_MODE_CLASS (mode);
8978 if (GP_REG_P (regno))
8979 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
8981 if (FP_REG_P (regno)
8982 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
8983 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
8985 /* Allow TFmode for CCmode reloads. */
8986 if (mode == TFmode && ISA_HAS_8CC)
8987 return true;
8989 /* Allow 64-bit vector modes for Loongson-2E/2F. */
8990 if (TARGET_LOONGSON_VECTORS
8991 && (mode == V2SImode
8992 || mode == V4HImode
8993 || mode == V8QImode
8994 || mode == DImode))
8995 return true;
8997 if (class == MODE_FLOAT
8998 || class == MODE_COMPLEX_FLOAT
8999 || class == MODE_VECTOR_FLOAT)
9000 return size <= UNITS_PER_FPVALUE;
9002 /* Allow integer modes that fit into a single register. We need
9003 to put integers into FPRs when using instructions like CVT
9004 and TRUNC. There's no point allowing sizes smaller than a word,
9005 because the FPU has no appropriate load/store instructions. */
9006 if (class == MODE_INT)
9007 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
9010 if (ACC_REG_P (regno)
9011 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
9013 if (MD_REG_P (regno))
9015 /* After a multiplication or division, clobbering HI makes
9016 the value of LO unpredictable, and vice versa. This means
9017 that, for all interesting cases, HI and LO are effectively
9018 a single register.
9020 We model this by requiring that any value that uses HI
9021 also uses LO. */
9022 if (size <= UNITS_PER_WORD * 2)
9023 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
9025 else
9027 /* DSP accumulators do not have the same restrictions as
9028 HI and LO, so we can treat them as normal doubleword
9029 registers. */
9030 if (size <= UNITS_PER_WORD)
9031 return true;
9033 if (size <= UNITS_PER_WORD * 2
9034 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
9035 return true;
9039 if (ALL_COP_REG_P (regno))
9040 return class == MODE_INT && size <= UNITS_PER_WORD;
9042 if (regno == GOT_VERSION_REGNUM)
9043 return mode == SImode;
9045 return false;
9048 /* Implement HARD_REGNO_NREGS. */
9050 unsigned int
9051 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9053 if (ST_REG_P (regno))
9054 /* The size of FP status registers is always 4, because they only hold
9055 CCmode values, and CCmode is always considered to be 4 bytes wide. */
9056 return (GET_MODE_SIZE (mode) + 3) / 4;
9058 if (FP_REG_P (regno))
9059 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9061 /* All other registers are word-sized. */
9062 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9065 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
9066 in mips_hard_regno_nregs. */
9068 int
9069 mips_class_max_nregs (enum reg_class class, enum machine_mode mode)
9071 int size;
9072 HARD_REG_SET left;
9074 size = 0x8000;
9075 COPY_HARD_REG_SET (left, reg_class_contents[(int) class]);
9076 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
9078 size = MIN (size, 4);
9079 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
9081 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
9083 size = MIN (size, UNITS_PER_FPREG);
9084 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
9086 if (!hard_reg_set_empty_p (left))
9087 size = MIN (size, UNITS_PER_WORD);
9088 return (GET_MODE_SIZE (mode) + size - 1) / size;
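/* A few worked cases for the two sizing routines above (illustrative
   only, since the numbers depend on the FPR width): CCmode in an ST
   register always takes one register, because its size is 4; DFmode in
   FP_REGS takes one 64-bit FPR (UNITS_PER_FPREG == 8) but two 32-bit
   FPRs (UNITS_PER_FPREG == 4); and DImode in GR_REGS takes one register
   on a 64-bit target and two on a 32-bit one.  */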
9091 /* Implement CANNOT_CHANGE_MODE_CLASS. */
9093 bool
9094 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
9095 enum machine_mode to ATTRIBUTE_UNUSED,
9096 enum reg_class class)
9098 /* There are several problems with changing the modes of values
9099 in floating-point registers:
9101 - When a multi-word value is stored in paired floating-point
9102 registers, the first register always holds the low word.
9103 We therefore can't allow FPRs to change between single-word
9104 and multi-word modes on big-endian targets.
9106 - GCC assumes that each word of a multiword register can be accessed
9107 individually using SUBREGs. This is not true for floating-point
9108 registers if they are bigger than a word.
9110 - Loading a 32-bit value into a 64-bit floating-point register
9111 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9112 We can't allow FPRs to change from SImode to a wider mode on
9113 64-bit targets.
9115 - If the FPU has already interpreted a value in one format, we must
9116 not ask it to treat the value as having a different format.
9118 We therefore disallow all mode changes involving FPRs. */
9119 return reg_classes_intersect_p (FP_REGS, class);
9122 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
9124 static bool
9125 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
9127 switch (mode)
9129 case SFmode:
9130 return TARGET_HARD_FLOAT;
9132 case DFmode:
9133 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
9135 case V2SFmode:
9136 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
9138 default:
9139 return false;
9143 /* Implement MODES_TIEABLE_P. */
9145 bool
9146 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9148 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
9149 prefer to put one of them in FPRs. */
9150 return (mode1 == mode2
9151 || (!mips_mode_ok_for_mov_fmt_p (mode1)
9152 && !mips_mode_ok_for_mov_fmt_p (mode2)));
9155 /* Implement PREFERRED_RELOAD_CLASS. */
9157 enum reg_class
9158 mips_preferred_reload_class (rtx x, enum reg_class class)
9160 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9161 return LEA_REGS;
9163 if (reg_class_subset_p (FP_REGS, class)
9164 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
9165 return FP_REGS;
9167 if (reg_class_subset_p (GR_REGS, class))
9168 class = GR_REGS;
9170 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9171 class = M16_REGS;
9173 return class;
9176 /* Implement REGISTER_MOVE_COST. */
9178 int
9179 mips_register_move_cost (enum machine_mode mode,
9180 enum reg_class to, enum reg_class from)
9182 if (TARGET_MIPS16)
9184 /* ??? We cannot move general registers into HI and LO because
9185 MIPS16 has no MTHI and MTLO instructions. Make the cost of
9186 moves in the opposite direction just as high, which stops the
9187 register allocators from using HI and LO for pseudos. */
9188 if (reg_class_subset_p (from, GENERAL_REGS)
9189 && reg_class_subset_p (to, GENERAL_REGS))
9191 if (reg_class_subset_p (from, M16_REGS)
9192 || reg_class_subset_p (to, M16_REGS))
9193 return 2;
9194 /* Two MOVEs. */
9195 return 4;
9198 else if (reg_class_subset_p (from, GENERAL_REGS))
9200 if (reg_class_subset_p (to, GENERAL_REGS))
9201 return 2;
9202 if (reg_class_subset_p (to, FP_REGS))
9203 return 4;
9204 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
9205 return 5;
9206 if (reg_class_subset_p (to, ACC_REGS))
9207 return 6;
9209 else if (reg_class_subset_p (to, GENERAL_REGS))
9211 if (reg_class_subset_p (from, FP_REGS))
9212 return 4;
9213 if (reg_class_subset_p (from, ST_REGS))
9214 /* LUI followed by MOVF. */
9215 return 4;
9216 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
9217 return 5;
9218 if (reg_class_subset_p (from, ACC_REGS))
9219 return 6;
9221 else if (reg_class_subset_p (from, FP_REGS))
9223 if (reg_class_subset_p (to, FP_REGS)
9224 && mips_mode_ok_for_mov_fmt_p (mode))
9225 return 4;
9226 if (reg_class_subset_p (to, ST_REGS))
9227 /* An expensive sequence. */
9228 return 8;
9231 return 12;
9234 /* Return the register class required for a secondary register when
9235 copying between one of the registers in CLASS and value X, which
9236 has mode MODE. X is the source of the move if IN_P, otherwise it
9237 is the destination. Return NO_REGS if no secondary register is
9238 needed. */
9240 enum reg_class
9241 mips_secondary_reload_class (enum reg_class class,
9242 enum machine_mode mode, rtx x, bool in_p)
9244 int regno;
9246 /* If X is a constant that cannot be loaded into $25, it must be loaded
9247 into some other GPR. No other register class allows a direct move. */
9248 if (mips_dangerous_for_la25_p (x))
9249 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
9251 regno = true_regnum (x);
9252 if (TARGET_MIPS16)
9254 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9255 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
9256 return M16_REGS;
9258 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9259 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9260 return M16_REGS;
9262 return NO_REGS;
9265 /* Copying from accumulator registers to anywhere other than a general
9266 register requires a temporary general register. */
9267 if (reg_class_subset_p (class, ACC_REGS))
9268 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9269 if (ACC_REG_P (regno))
9270 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9272 /* We can only copy a value to a condition code register from a
9273 floating-point register, and even then we require a scratch
9274 floating-point register. We can only copy a value out of a
9275 condition-code register into a general register. */
9276 if (reg_class_subset_p (class, ST_REGS))
9278 if (in_p)
9279 return FP_REGS;
9280 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9282 if (ST_REG_P (regno))
9284 if (!in_p)
9285 return FP_REGS;
9286 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9289 if (reg_class_subset_p (class, FP_REGS))
9291 if (MEM_P (x)
9292 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9293 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9294 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9295 return NO_REGS;
9297 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9298 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9299 return NO_REGS;
9301 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9302 /* We can force the constant to memory and use lwc1
9303 and ldc1. As above, we will use pairs of lwc1s if
9304 ldc1 is not supported. */
9305 return NO_REGS;
9307 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9308 /* In this case we can use mov.fmt. */
9309 return NO_REGS;
9311 /* Otherwise, we need to reload through an integer register. */
9312 return GR_REGS;
9314 if (FP_REG_P (regno))
9315 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9317 return NO_REGS;
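/* Two concrete consequences of the rules above, for illustration only:
   copying an accumulator value to or from memory must go through a
   general register (a MEM has no register number, so we fall back to
   GR_REGS), and loading a value into a condition-code register needs a
   scratch FP_REGS register, while copying one out of an ST_REG can only
   target a general register.  */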
9320 /* Implement TARGET_MODE_REP_EXTENDED. */
9322 static int
9323 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9325 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9326 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9327 return SIGN_EXTEND;
9329 return UNKNOWN;
9332 /* Implement TARGET_VALID_POINTER_MODE. */
9334 static bool
9335 mips_valid_pointer_mode (enum machine_mode mode)
9337 return mode == SImode || (TARGET_64BIT && mode == DImode);
9340 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
9342 static bool
9343 mips_vector_mode_supported_p (enum machine_mode mode)
9345 switch (mode)
9347 case V2SFmode:
9348 return TARGET_PAIRED_SINGLE_FLOAT;
9350 case V2HImode:
9351 case V4QImode:
9352 case V2HQmode:
9353 case V2UHQmode:
9354 case V2HAmode:
9355 case V2UHAmode:
9356 case V4QQmode:
9357 case V4UQQmode:
9358 return TARGET_DSP;
9360 case V2SImode:
9361 case V4HImode:
9362 case V8QImode:
9363 return TARGET_LOONGSON_VECTORS;
9365 default:
9366 return false;
9370 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9372 static bool
9373 mips_scalar_mode_supported_p (enum machine_mode mode)
9375 if (ALL_FIXED_POINT_MODE_P (mode)
9376 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9377 return true;
9379 return default_scalar_mode_supported_p (mode);
9382 /* Implement TARGET_INIT_LIBFUNCS. */
9384 #include "config/gofast.h"
9386 static void
9387 mips_init_libfuncs (void)
9389 if (TARGET_FIX_VR4120)
9391 /* Register the special divsi3 and modsi3 functions needed to work
9392 around VR4120 division errata. */
9393 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9394 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9397 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9399 /* Register the MIPS16 -mhard-float stubs. */
9400 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9401 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9402 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9403 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9405 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9406 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9407 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9408 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9409 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9410 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9411 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9413 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9414 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9415 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9417 if (TARGET_DOUBLE_FLOAT)
9419 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9420 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9421 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9422 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9424 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9425 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9426 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9427 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9428 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9429 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9430 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9432 set_conv_libfunc (sext_optab, DFmode, SFmode,
9433 "__mips16_extendsfdf2");
9434 set_conv_libfunc (trunc_optab, SFmode, DFmode,
9435 "__mips16_truncdfsf2");
9436 set_conv_libfunc (sfix_optab, SImode, DFmode,
9437 "__mips16_fix_truncdfsi");
9438 set_conv_libfunc (sfloat_optab, DFmode, SImode,
9439 "__mips16_floatsidf");
9440 set_conv_libfunc (ufloat_optab, DFmode, SImode,
9441 "__mips16_floatunsidf");
9444 else
9445 /* Register the gofast functions if selected using --enable-gofast. */
9446 gofast_maybe_init_libfuncs ();
9448 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
9449 on an external non-MIPS16 routine to implement __sync_synchronize. */
9450 if (TARGET_MIPS16)
9451 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
9454 /* Return the length of INSN. LENGTH is the initial length computed by
9455 attributes in the machine-description file. */
9457 int
9458 mips_adjust_insn_length (rtx insn, int length)
9460 /* An unconditional jump has an unfilled delay slot if it is not part
9461 of a sequence. A conditional jump normally has a delay slot, but
9462 does not on MIPS16. */
9463 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9464 length += 4;
9466 /* See how many nops might be needed to avoid hardware hazards. */
9467 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9468 switch (get_attr_hazard (insn))
9470 case HAZARD_NONE:
9471 break;
9473 case HAZARD_DELAY:
9474 length += 4;
9475 break;
9477 case HAZARD_HILO:
9478 length += 8;
9479 break;
9482 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
9483 the .md file length attributes are 4-based for both modes.
9484 Adjust the MIPS16 ones here. */
9485 if (TARGET_MIPS16)
9486 length /= 2;
9488 return length;
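/* Worked example (illustrative): a non-MIPS16 conditional branch whose
   .md length is 4 and whose hazard attribute is HAZARD_HILO comes out
   as 4 + 4 (unfilled delay slot) + 8 (two hazard nops) = 16 bytes.  */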
9491 /* Return an asm sequence to start a noat block and load the address
9492 of a label into $1. */
9494 const char *
9495 mips_output_load_label (void)
9497 if (TARGET_EXPLICIT_RELOCS)
9498 switch (mips_abi)
9500 case ABI_N32:
9501 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9503 case ABI_64:
9504 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9506 default:
9507 if (ISA_HAS_LOAD_DELAY)
9508 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9509 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9511 else
9513 if (Pmode == DImode)
9514 return "%[dla\t%@,%0";
9515 else
9516 return "%[la\t%@,%0";
9520 /* Return the assembly code for INSN, which has the operands given by
9521 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9522 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9523 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9524 version of BRANCH_IF_TRUE. */
9526 const char *
9527 mips_output_conditional_branch (rtx insn, rtx *operands,
9528 const char *branch_if_true,
9529 const char *branch_if_false)
9531 unsigned int length;
9532 rtx taken, not_taken;
9534 length = get_attr_length (insn);
9535 if (length <= 8)
9537 /* Just a simple conditional branch. */
9538 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9539 return branch_if_true;
9542 /* Generate a reversed branch around a direct jump. This fallback does
9543 not use branch-likely instructions. */
9544 mips_branch_likely = false;
9545 not_taken = gen_label_rtx ();
9546 taken = operands[1];
9548 /* Generate the reversed branch to NOT_TAKEN. */
9549 operands[1] = not_taken;
9550 output_asm_insn (branch_if_false, operands);
9552 /* If INSN has a delay slot, we must provide delay slots for both the
9553 branch to NOT_TAKEN and the conditional jump. We must also ensure
9554 that INSN's delay slot is executed in the appropriate cases. */
9555 if (final_sequence)
9557 /* This first delay slot will always be executed, so use INSN's
9558 delay slot if it is not annulled. */
9559 if (!INSN_ANNULLED_BRANCH_P (insn))
9561 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9562 asm_out_file, optimize, 1, NULL);
9563 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9565 else
9566 output_asm_insn ("nop", 0);
9567 fprintf (asm_out_file, "\n");
9570 /* Output the unconditional branch to TAKEN. */
9571 if (length <= 16)
9572 output_asm_insn ("j\t%0%/", &taken);
9573 else
9575 output_asm_insn (mips_output_load_label (), &taken);
9576 output_asm_insn ("jr\t%@%]%/", 0);
9579 /* Now deal with its delay slot; see above. */
9580 if (final_sequence)
9582 /* This delay slot will only be executed if the branch is taken.
9583 Use INSN's delay slot if it is annulled. */
9584 if (INSN_ANNULLED_BRANCH_P (insn))
9586 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9587 asm_out_file, optimize, 1, NULL);
9588 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9590 else
9591 output_asm_insn ("nop", 0);
9592 fprintf (asm_out_file, "\n");
9595 /* Output NOT_TAKEN. */
9596 targetm.asm_out.internal_label (asm_out_file, "L",
9597 CODE_LABEL_NUMBER (not_taken));
9598 return "";
9601 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9602 if some ordering condition is true. The condition is given by
9603 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9604 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9605 its second is always zero. */
9607 const char *
9608 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9610 const char *branch[2];
9612 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9613 Make BRANCH[0] branch on the inverse condition. */
9614 switch (GET_CODE (operands[0]))
9616 /* These cases are equivalent to comparisons against zero. */
9617 case LEU:
9618 inverted_p = !inverted_p;
9619 /* Fall through. */
9620 case GTU:
9621 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9622 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9623 break;
9625 /* These cases are always true or always false. */
9626 case LTU:
9627 inverted_p = !inverted_p;
9628 /* Fall through. */
9629 case GEU:
9630 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9631 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9632 break;
9634 default:
9635 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9636 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9637 break;
9639 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
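/* For instance, given (gtu %2 0) the taken-branch template above is
   "bne %2,$0,%1", since an unsigned value is greater than zero exactly
   when it is nonzero; GEU comparisons against zero are always true and
   so use an unconditional "beq $0,$0,%1".  */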
9642 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
9643 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
9645 When working around R4000 and R4400 errata, we need to make sure that
9646 the division is not immediately followed by a shift[1][2]. We also
9647 need to stop the division from being put into a branch delay slot[3].
9648 The easiest way to avoid both problems is to add a nop after the
9649 division. When a divide-by-zero check is needed, this nop can be
9650 used to fill the branch delay slot.
9652 [1] If a double-word or a variable shift executes immediately
9653 after starting an integer division, the shift may give an
9654 incorrect result. See quotations of errata #16 and #28 from
9655 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9656 in mips.md for details.
9658 [2] A similar bug to [1] exists for all revisions of the
9659 R4000 and the R4400 when run in an MC configuration.
9660 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9662 "19. In this following sequence:
9664 ddiv (or ddivu or div or divu)
9665 dsll32 (or dsrl32, dsra32)
9667 if an MPT stall occurs, while the divide is slipping the cpu
9668 pipeline, then the following double shift would end up with an
9669 incorrect result.
9671 Workaround: The compiler needs to avoid generating any
9672 sequence with divide followed by extended double shift."
9674 This erratum is also present in "MIPS R4400MC Errata, Processor
9675 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9676 & 3.0" as errata #10 and #4, respectively.
9678 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9679 (also valid for MIPS R4000MC processors):
9681 "52. R4000SC: This bug does not apply for the R4000PC.
9683 There are two flavors of this bug:
9685 1) If the instruction just after divide takes an RF exception
9686 (tlb-refill, tlb-invalid) and gets an instruction cache
9687 miss (both primary and secondary) and the line which is
9688 currently in secondary cache at this index had the first
9689 data word, where the bits 5..2 are set, then R4000 would
9690 get a wrong result for the div.
9694 div r8, r9
9695 ------------------- # end-of page. -tlb-refill
9699 div r8, r9
9700 ------------------- # end-of page. -tlb-invalid
9703 2) If the divide is in the taken branch delay slot, where the
9704 target takes RF exception and gets an I-cache miss for the
9705 exception vector or where I-cache miss occurs for the
9706 target address, under the above mentioned scenarios, the
9707 div would get wrong results.
9710 j r2 # to next page mapped or unmapped
9711 div r8,r9 # this bug would be there as long
9712 # as there is an ICache miss and
9713 nop # the "data pattern" is present
9716 beq r0, r0, NextPage # to Next page
9717 div r8,r9
9720 This bug is present for div, divu, ddiv, and ddivu
9721 instructions.
9723 Workaround: For item 1), OS could make sure that the next page
9724 after the divide instruction is also mapped. For item 2), the
9725 compiler could make sure that the divide instruction is not in
9726 the branch delay slot."
9728 These processors have PRId values of 0x00004220 and 0x00004300 for
9729 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9731 const char *
9732 mips_output_division (const char *division, rtx *operands)
9734 const char *s;
9736 s = division;
9737 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9739 output_asm_insn (s, operands);
9740 s = "nop";
9742 if (TARGET_CHECK_ZERO_DIV)
9744 if (TARGET_MIPS16)
9746 output_asm_insn (s, operands);
9747 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9749 else if (GENERATE_DIVIDE_TRAPS)
9751 output_asm_insn (s, operands);
9752 s = "teq\t%2,%.,7";
9754 else
9756 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9757 output_asm_insn (s, operands);
9758 s = "break\t7%)\n1:";
9761 return s;
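/* Illustrative examples of the sequences built above (register numbers
   invented).  With -mcheck-zero-division on a target that supports
   conditional traps the emitted sequence is:

	div	$0,$4,$5
	teq	$5,$0,7

   while the generic branch-based check produces:

	bne	$5,$0,1f
	div	$0,$4,$5
	break	7
   1:  */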
9764 /* Return true if IN_INSN is a multiply-add or multiply-subtract
9765 instruction and if OUT_INSN assigns to the accumulator operand. */
9767 bool
9768 mips_linked_madd_p (rtx out_insn, rtx in_insn)
9770 rtx x;
9772 x = single_set (in_insn);
9773 if (x == 0)
9774 return false;
9776 x = SET_SRC (x);
9778 if (GET_CODE (x) == PLUS
9779 && GET_CODE (XEXP (x, 0)) == MULT
9780 && reg_set_p (XEXP (x, 1), out_insn))
9781 return true;
9783 if (GET_CODE (x) == MINUS
9784 && GET_CODE (XEXP (x, 1)) == MULT
9785 && reg_set_p (XEXP (x, 0), out_insn))
9786 return true;
9788 return false;
9791 /* True if the dependency between OUT_INSN and IN_INSN is on the store
9792 data rather than the address. We need this because the cprestore
9793 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
9794 which causes the default routine to abort. We just return false
9795 for that case. */
9797 bool
9798 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9800 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9801 return false;
9803 return !store_data_bypass_p (out_insn, in_insn);
9807 /* Variables and flags used in scheduler hooks when tuning for
9808 Loongson 2E/2F. */
9809 static struct
9811 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
9812 strategy. */
9814 /* If true, then the next ALU1/2 instruction will go to ALU1. */
9815 bool alu1_turn_p;
9817 /* If true, then the next FALU1/2 instruction will go to FALU1. */
9818 bool falu1_turn_p;
9820 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
9821 int alu1_core_unit_code;
9822 int alu2_core_unit_code;
9823 int falu1_core_unit_code;
9824 int falu2_core_unit_code;
9826 /* True if current cycle has a multi instruction.
9827 This flag is used in mips_ls2_dfa_post_advance_cycle. */
9828 bool cycle_has_multi_p;
9830 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
9831 These are used in mips_ls2_dfa_post_advance_cycle to initialize
9832 DFA state.
9833 E.g., when alu1_turn_enabled_insn is issued, it makes the next
9834 ALU1/2 instruction go to ALU1. */
9835 rtx alu1_turn_enabled_insn;
9836 rtx alu2_turn_enabled_insn;
9837 rtx falu1_turn_enabled_insn;
9838 rtx falu2_turn_enabled_insn;
9839 } mips_ls2;
9841 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9842 dependencies have no cost, except on the 20Kc where output-dependence
9843 is treated like input-dependence. */
9845 static int
9846 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9847 rtx dep ATTRIBUTE_UNUSED, int cost)
9849 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9850 && TUNE_20KC)
9851 return cost;
9852 if (REG_NOTE_KIND (link) != 0)
9853 return 0;
9854 return cost;
9857 /* Return the number of instructions that can be issued per cycle. */
9859 static int
9860 mips_issue_rate (void)
9862 switch (mips_tune)
9864 case PROCESSOR_74KC:
9865 case PROCESSOR_74KF2_1:
9866 case PROCESSOR_74KF1_1:
9867 case PROCESSOR_74KF3_2:
9868 /* The 74k is not strictly a quad-issue CPU, but it can be seen as one
9869 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9870 but in reality only a maximum of 3 insns can be issued as
9871 floating-point loads and stores also require a slot in the
9872 AGEN pipe. */
9873 return 4;
9875 case PROCESSOR_20KC:
9876 case PROCESSOR_R4130:
9877 case PROCESSOR_R5400:
9878 case PROCESSOR_R5500:
9879 case PROCESSOR_R7000:
9880 case PROCESSOR_R9000:
9881 return 2;
9883 case PROCESSOR_SB1:
9884 case PROCESSOR_SB1A:
9885 /* This is actually 4, but we get better performance if we claim 3.
9886 This is partly because of unwanted speculative code motion with the
9887 larger number, and partly because in most common cases we can't
9888 reach the theoretical max of 4. */
9889 return 3;
9891 case PROCESSOR_LOONGSON_2E:
9892 case PROCESSOR_LOONGSON_2F:
9893 return 4;
9895 default:
9896 return 1;
9900 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
9902 static void
9903 mips_ls2_init_dfa_post_cycle_insn (void)
9905 start_sequence ();
9906 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
9907 mips_ls2.alu1_turn_enabled_insn = get_insns ();
9908 end_sequence ();
9910 start_sequence ();
9911 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
9912 mips_ls2.alu2_turn_enabled_insn = get_insns ();
9913 end_sequence ();
9915 start_sequence ();
9916 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
9917 mips_ls2.falu1_turn_enabled_insn = get_insns ();
9918 end_sequence ();
9920 start_sequence ();
9921 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
9922 mips_ls2.falu2_turn_enabled_insn = get_insns ();
9923 end_sequence ();
9925 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
9926 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
9927 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
9928 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
9931 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
9932 Init data used in mips_dfa_post_advance_cycle. */
9934 static void
9935 mips_init_dfa_post_cycle_insn (void)
9937 if (TUNE_LOONGSON_2EF)
9938 mips_ls2_init_dfa_post_cycle_insn ();
9941 /* Initialize STATE when scheduling for Loongson 2E/2F.
9942 Support round-robin dispatch scheme by enabling only one of
9943 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
9944 respectively. */
9946 static void
9947 mips_ls2_dfa_post_advance_cycle (state_t state)
9949 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
9951 /* Though there are no non-pipelined ALU1 insns,
9952 we can get an instruction of type 'multi' before reload. */
9953 gcc_assert (mips_ls2.cycle_has_multi_p);
9954 mips_ls2.alu1_turn_p = false;
9957 mips_ls2.cycle_has_multi_p = false;
9959 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
9960 /* We have a non-pipelined alu instruction in the core,
9961 adjust round-robin counter. */
9962 mips_ls2.alu1_turn_p = true;
9964 if (mips_ls2.alu1_turn_p)
9966 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
9967 gcc_unreachable ();
9969 else
9971 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
9972 gcc_unreachable ();
9975 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
9977 /* There are no non-pipelined FALU1 insns. */
9978 gcc_unreachable ();
9979 mips_ls2.falu1_turn_p = false;
9982 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
9983 /* We have a non-pipelined falu instruction in the core,
9984 adjust round-robin counter. */
9985 mips_ls2.falu1_turn_p = true;
9987 if (mips_ls2.falu1_turn_p)
9989 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
9990 gcc_unreachable ();
9992 else
9994 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
9995 gcc_unreachable ();
9999 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
10000 This hook is being called at the start of each cycle. */
10002 static void
10003 mips_dfa_post_advance_cycle (void)
10005 if (TUNE_LOONGSON_2EF)
10006 mips_ls2_dfa_post_advance_cycle (curr_state);
10009 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10010 be as wide as the scheduling freedom in the DFA. */
10012 static int
10013 mips_multipass_dfa_lookahead (void)
10015 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10016 if (TUNE_SB1)
10017 return 4;
10019 if (TUNE_LOONGSON_2EF)
10020 return 4;
10022 return 0;
10025 /* Remove the instruction at index LOWER from ready queue READY and
10026 reinsert it in front of the instruction at index HIGHER. LOWER must
10027 be <= HIGHER. */
10029 static void
10030 mips_promote_ready (rtx *ready, int lower, int higher)
10032 rtx new_head;
10033 int i;
10035 new_head = ready[lower];
10036 for (i = lower; i < higher; i++)
10037 ready[i] = ready[i + 1];
10038 ready[i] = new_head;
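/* Worked example (indices invented): with READY = {A, B, C, D},
   mips_promote_ready (ready, 1, 3) yields {A, C, D, B}; element B is
   removed from index 1 and reinserted at index 3, with C and D each
   shifting down one slot to fill the gap.  */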
10041 /* If the priority of the instruction at POS2 in the ready queue READY
10042 is within LIMIT units of that of the instruction at POS1, swap the
10043 instructions if POS2 is not already less than POS1. */
10045 static void
10046 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
10048 if (pos1 < pos2
10049 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
10051 rtx temp;
10053 temp = ready[pos1];
10054 ready[pos1] = ready[pos2];
10055 ready[pos2] = temp;
10059 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10060 that may clobber hi or lo. */
10061 static rtx mips_macc_chains_last_hilo;
10063 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10064 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10066 static void
10067 mips_macc_chains_record (rtx insn)
10069 if (get_attr_may_clobber_hilo (insn))
10070 mips_macc_chains_last_hilo = insn;
10073 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10074 has NREADY elements, looking for a multiply-add or multiply-subtract
10075 instruction that is cumulative with mips_macc_chains_last_hilo.
10076 If there is one, promote it ahead of anything else that might
10077 clobber hi or lo. */
10079 static void
10080 mips_macc_chains_reorder (rtx *ready, int nready)
10082 int i, j;
10084 if (mips_macc_chains_last_hilo != 0)
10085 for (i = nready - 1; i >= 0; i--)
10086 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10088 for (j = nready - 1; j > i; j--)
10089 if (recog_memoized (ready[j]) >= 0
10090 && get_attr_may_clobber_hilo (ready[j]))
10092 mips_promote_ready (ready, i, j);
10093 break;
10095 break;
10099 /* The last instruction to be scheduled. */
10100 static rtx vr4130_last_insn;
10102 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10103 points to an rtx that is initially an instruction. Nullify the rtx
10104 if the instruction uses the value of register X. */
10106 static void
10107 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
10108 void *data)
10110 rtx *insn_ptr;
10112 insn_ptr = (rtx *) data;
10113 if (REG_P (x)
10114 && *insn_ptr != 0
10115 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10116 *insn_ptr = 0;
10119 /* Return true if there is true register dependence between vr4130_last_insn
10120 and INSN. */
10122 static bool
10123 vr4130_true_reg_dependence_p (rtx insn)
10125 note_stores (PATTERN (vr4130_last_insn),
10126 vr4130_true_reg_dependence_p_1, &insn);
10127 return insn == 0;
10130 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10131 the ready queue and that INSN2 is the instruction after it, return
10132 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10133 in which INSN1 and INSN2 can probably issue in parallel, but for
10134 which (INSN2, INSN1) should be less sensitive to instruction
10135 alignment than (INSN1, INSN2). See 4130.md for more details. */
10137 static bool
10138 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10140 sd_iterator_def sd_it;
10141 dep_t dep;
10143 /* Check for the following case:
10145 1) there is some other instruction X with an anti dependence on INSN1;
10146 2) X has a higher priority than INSN2; and
10147 3) X is an arithmetic instruction (and thus has no unit restrictions).
10149 If INSN1 is the last instruction blocking X, it would be better to
10150 choose (INSN1, X) over (INSN2, INSN1). */
10151 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10152 if (DEP_TYPE (dep) == REG_DEP_ANTI
10153 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10154 && recog_memoized (DEP_CON (dep)) >= 0
10155 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10156 return false;
10158 if (vr4130_last_insn != 0
10159 && recog_memoized (insn1) >= 0
10160 && recog_memoized (insn2) >= 0)
10162 /* See whether INSN1 and INSN2 use different execution units,
10163 or if they are both ALU-type instructions. If so, they can
10164 probably execute in parallel. */
10165 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10166 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10167 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10169 /* If only one of the instructions has a dependence on
10170 vr4130_last_insn, prefer to schedule the other one first. */
10171 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
10172 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
10173 if (dep1_p != dep2_p)
10174 return dep1_p;
10176 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10177 is not an ALU-type instruction and if INSN1 uses the same
10178 execution unit. (Note that if this condition holds, we already
10179 know that INSN2 uses a different execution unit.) */
10180 if (class1 != VR4130_CLASS_ALU
10181 && recog_memoized (vr4130_last_insn) >= 0
10182 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10183 return true;
10186 return false;
10189 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10190 queue with at least two instructions. Swap the first two if
10191 vr4130_swap_insns_p says that it could be worthwhile. */
10193 static void
10194 vr4130_reorder (rtx *ready, int nready)
10196 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10197 mips_promote_ready (ready, nready - 2, nready - 1);
10200 /* Record whether last 74k AGEN instruction was a load or store. */
10201 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
10203 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10204 resets to TYPE_UNKNOWN state. */
10206 static void
10207 mips_74k_agen_init (rtx insn)
10209 if (!insn || !NONJUMP_INSN_P (insn))
10210 mips_last_74k_agen_insn = TYPE_UNKNOWN;
10211 else
10213 enum attr_type type = get_attr_type (insn);
10214 if (type == TYPE_LOAD || type == TYPE_STORE)
10215 mips_last_74k_agen_insn = type;
10219 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10220 loads to be grouped together, and multiple stores to be grouped
10221 together. Swap things around in the ready queue to make this happen. */
10223 static void
10224 mips_74k_agen_reorder (rtx *ready, int nready)
10226 int i;
10227 int store_pos, load_pos;
10229 store_pos = -1;
10230 load_pos = -1;
10232 for (i = nready - 1; i >= 0; i--)
10234 rtx insn = ready[i];
10235 if (USEFUL_INSN_P (insn))
10236 switch (get_attr_type (insn))
10238 case TYPE_STORE:
10239 if (store_pos == -1)
10240 store_pos = i;
10241 break;
10243 case TYPE_LOAD:
10244 if (load_pos == -1)
10245 load_pos = i;
10246 break;
10248 default:
10249 break;
10253 if (load_pos == -1 || store_pos == -1)
10254 return;
10256 switch (mips_last_74k_agen_insn)
10258 case TYPE_UNKNOWN:
10259 /* Prefer to schedule loads since they have a higher latency. */
10260 case TYPE_LOAD:
10261 /* Swap loads to the front of the queue. */
10262 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
10263 break;
10264 case TYPE_STORE:
10265 /* Swap stores to the front of the queue. */
10266 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
10267 break;
10268 default:
10269 break;
10273 /* Implement TARGET_SCHED_INIT. */
10275 static void
10276 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10277 int max_ready ATTRIBUTE_UNUSED)
10279 mips_macc_chains_last_hilo = 0;
10280 vr4130_last_insn = 0;
10281 mips_74k_agen_init (NULL_RTX);
10283 /* When scheduling for Loongson2, branch instructions go to ALU1,
10284 so a basic block is most likely to start with the round-robin
10285 counter pointing to ALU2. */
10286 mips_ls2.alu1_turn_p = false;
10287 mips_ls2.falu1_turn_p = true;
10290 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10292 static int
10293 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10294 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
10296 if (!reload_completed
10297 && TUNE_MACC_CHAINS
10298 && *nreadyp > 0)
10299 mips_macc_chains_reorder (ready, *nreadyp);
10301 if (reload_completed
10302 && TUNE_MIPS4130
10303 && !TARGET_VR4130_ALIGN
10304 && *nreadyp > 1)
10305 vr4130_reorder (ready, *nreadyp);
10307 if (TUNE_74K)
10308 mips_74k_agen_reorder (ready, *nreadyp);
10310 return mips_issue_rate ();
10313 /* Update round-robin counters for ALU1/2 and FALU1/2. */
10315 static void
10316 mips_ls2_variable_issue (rtx insn)
10318 if (mips_ls2.alu1_turn_p)
10320 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
10321 mips_ls2.alu1_turn_p = false;
10323 else
10325 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
10326 mips_ls2.alu1_turn_p = true;
10329 if (mips_ls2.falu1_turn_p)
10331 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
10332 mips_ls2.falu1_turn_p = false;
10334 else
10336 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
10337 mips_ls2.falu1_turn_p = true;
10340 if (recog_memoized (insn) >= 0)
10341 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
10344 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10346 static int
10347 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10348 rtx insn, int more)
10350 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
10351 if (USEFUL_INSN_P (insn))
10353 more--;
10354 if (!reload_completed && TUNE_MACC_CHAINS)
10355 mips_macc_chains_record (insn);
10356 vr4130_last_insn = insn;
10357 if (TUNE_74K)
10358 mips_74k_agen_init (insn);
10359 else if (TUNE_LOONGSON_2EF)
10360 mips_ls2_variable_issue (insn);
10363 /* Instructions of type 'multi' should all be split before
10364 the second scheduling pass. */
10365 gcc_assert (!reload_completed
10366 || recog_memoized (insn) < 0
10367 || get_attr_type (insn) != TYPE_MULTI);
10369 return more;
10372 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10373 return the first operand of the associated PREF or PREFX insn. */
10375 rtx
10376 mips_prefetch_cookie (rtx write, rtx locality)
10378 /* store_streamed / load_streamed. */
10379 if (INTVAL (locality) <= 0)
10380 return GEN_INT (INTVAL (write) + 4);
10382 /* store / load. */
10383 if (INTVAL (locality) <= 2)
10384 return write;
10386 /* store_retained / load_retained. */
10387 return GEN_INT (INTVAL (write) + 6);
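/* For reference, the PREF hint values this produces (WRITE is 0 for
   loads and 1 for stores):

	LOCALITY <= 0:   4 (load_streamed)  or 5 (store_streamed)
	LOCALITY 1 or 2: 0 (load)           or 1 (store)
	LOCALITY >= 3:   6 (load_retained)  or 7 (store_retained)  */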
10390 /* Flags that indicate when a built-in function is available.
10392 BUILTIN_AVAIL_NON_MIPS16
10393 The function is available on the current target, but only
10394 in non-MIPS16 mode. */
10395 #define BUILTIN_AVAIL_NON_MIPS16 1
10397 /* Declare an availability predicate for built-in functions that
10398 require non-MIPS16 mode and also require COND to be true.
10399 NAME is the main part of the predicate's name. */
10400 #define AVAIL_NON_MIPS16(NAME, COND) \
10401 static unsigned int \
10402 mips_builtin_avail_##NAME (void) \
10404 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
10407 /* This structure describes a single built-in function. */
10408 struct mips_builtin_description {
10409 /* The code of the main .md file instruction. See mips_builtin_type
10410 for more information. */
10411 enum insn_code icode;
10413 /* The floating-point comparison code to use with ICODE, if any. */
10414 enum mips_fp_condition cond;
10416 /* The name of the built-in function. */
10417 const char *name;
10419 /* Specifies how the function should be expanded. */
10420 enum mips_builtin_type builtin_type;
10422 /* The function's prototype. */
10423 enum mips_function_type function_type;
10425 /* Whether the function is available. */
10426 unsigned int (*avail) (void);
10429 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
10430 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
10431 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
10432 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
10433 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
10434 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
10435 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
10436 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
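/* As an illustration, AVAIL_NON_MIPS16 (dsp, TARGET_DSP) above expands to:

     static unsigned int
     mips_builtin_avail_dsp (void)
     {
       return (TARGET_DSP) ? BUILTIN_AVAIL_NON_MIPS16 : 0;
     }

   i.e. the DSP built-ins are flagged as available, and only in
   non-MIPS16 mode, exactly when TARGET_DSP is set.  */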
10438 /* Construct a mips_builtin_description from the given arguments.
10440 INSN is the name of the associated instruction pattern, without the
10441 leading CODE_FOR_mips_.
10443 COND is the floating-point condition code associated with the
10444 function. It can be 'f' if the field is not applicable.
10446 NAME is the name of the function itself, without the leading
10447 "__builtin_mips_".
10449 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
10451 AVAIL is the name of the availability predicate, without the leading
10452 mips_builtin_avail_. */
10453 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
10454 FUNCTION_TYPE, AVAIL) \
10455 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
10456 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
10457 mips_builtin_avail_ ## AVAIL }
10459 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
10460 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
10461 are as for MIPS_BUILTIN. */
10462 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10463 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
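/* For example (illustrative expansion), the table entry

     DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp)

   used below becomes:

     { CODE_FOR_mips_addq_ph, MIPS_FP_COND_f, "__builtin_mips_addq_ph",
       MIPS_BUILTIN_DIRECT, MIPS_V2HI_FTYPE_V2HI_V2HI,
       mips_builtin_avail_dsp }

   so that, with -mdsp, user code such as

     typedef short v2hi __attribute__ ((vector_size (4)));
     v2hi f (v2hi a, v2hi b) { return __builtin_mips_addq_ph (a, b); }

   maps directly onto the addq.ph instruction.  */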
10465 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
10466 are subject to mips_builtin_avail_<AVAIL>. */
10467 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
10468 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
10469 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
10470 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
10471 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
10473 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10474 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
10475 while the any and all forms are subject to mips_builtin_avail_mips3d. */
10476 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
10477 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
10478 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
10479 mips3d), \
10480 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
10481 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
10482 mips3d), \
10483 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
10484 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
10485 AVAIL), \
10486 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
10487 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
10488 AVAIL)
10490 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10491 are subject to mips_builtin_avail_mips3d. */
10492 #define CMP_4S_BUILTINS(INSN, COND) \
10493 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
10494 MIPS_BUILTIN_CMP_ANY, \
10495 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
10496 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
10497 MIPS_BUILTIN_CMP_ALL, \
10498 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
10500 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10501 instruction requires mips_builtin_avail_<AVAIL>. */
10502 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
10503 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
10504 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10505 AVAIL), \
10506 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
10507 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10508 AVAIL)
10510 /* Define all the built-in functions related to C.cond.fmt condition COND. */
10511 #define CMP_BUILTINS(COND) \
10512 MOVTF_BUILTINS (c, COND, paired_single), \
10513 MOVTF_BUILTINS (cabs, COND, mips3d), \
10514 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
10515 CMP_PS_BUILTINS (c, COND, paired_single), \
10516 CMP_PS_BUILTINS (cabs, COND, mips3d), \
10517 CMP_4S_BUILTINS (c, COND), \
10518 CMP_4S_BUILTINS (cabs, COND)
10520 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
10521 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
10522 and AVAIL are as for MIPS_BUILTIN. */
10523 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10524 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
10525 FUNCTION_TYPE, AVAIL)
10527 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10528 branch instruction. AVAIL is as for MIPS_BUILTIN. */
10529 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
10530 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
10531 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
10533 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
10534 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10535 builtin_description field. */
10536 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
10537 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
10538 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
10540 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
10541 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10542 builtin_description field. */
10543 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
10544 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
10546 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
10547 We use functions of this form when the same insn can be usefully applied
10548 to more than one datatype. */
10549 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
10550 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
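/* For example (illustrative expansion), LOONGSON_BUILTIN_SUFFIX (paddw, u,
   MIPS_UV2SI_FTYPE_UV2SI_UV2SI) below defines __builtin_loongson_paddw_u
   and maps it to CODE_FOR_loongson_paddw, which the #defines that follow
   alias to the generic addv2si3 vector-add pattern.  */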
10552 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10553 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10554 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10555 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10556 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10557 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10559 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
10560 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
10561 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
10562 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
10563 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
10564 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
10565 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
10566 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
10567 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
10568 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
10569 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
10570 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
10571 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
10572 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
10573 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
10574 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
10575 #define CODE_FOR_loongson_biadd CODE_FOR_reduc_uplus_v8qi
10576 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
10577 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
10578 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
10579 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
10580 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
10581 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
10582 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
10583 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
10584 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
10585 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
10586 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
10587 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
10588 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
10590 static const struct mips_builtin_description mips_builtins[] = {
10591 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10592 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10593 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10594 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
10595 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
10596 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
10597 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
10598 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
10600 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
10601 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10602 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10603 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10604 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
10606 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
10607 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
10608 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10609 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10610 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10611 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10613 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
10614 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
10615 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
10616 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
10617 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
10618 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
10620 MIPS_FP_CONDITIONS (CMP_BUILTINS),
10622 /* Built-in functions for the SB-1 processor. */
10623 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
10625 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
10626 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10627 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10628 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10629 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10630 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10631 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10632 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10633 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10634 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10635 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10636 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
10637 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
10638 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
10639 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
10640 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
10641 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
10642 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10643 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10644 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
10645 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
10646 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
10647 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
10648 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10649 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10650 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10651 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10652 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
10653 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
10654 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
10655 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
10656 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10657 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10658 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10659 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
10660 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
10661 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10662 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
10663 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
10664 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10665 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
10666 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10667 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10668 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
10669 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
10670 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
10671 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
10672 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
10673 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10674 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10675 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
10676 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10677 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10678 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
10679 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10680 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10681 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
10682 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
10683 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10684 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
10685 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
10686 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
10687 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
10688 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10689 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
10690 BPOSGE_BUILTIN (32, dsp),
10692 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
10693 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
10694 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10695 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10696 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10697 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10698 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10699 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10700 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10701 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10702 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
10703 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10704 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10705 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10706 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10707 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10708 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
10709 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10710 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
10711 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
10712 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10713 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
10714 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
10715 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10716 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10717 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10718 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
10719 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10720 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10721 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10722 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10723 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10724 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
10725 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10726 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
10728 /* Built-in functions for the DSP ASE (32-bit only). */
10729 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10730 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10731 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10732 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
10733 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10734 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10735 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10736 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10737 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
10738 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10739 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10740 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10741 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
10742 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10743 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10744 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
10745 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
10746 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10747 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
10748 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
10749 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
10751 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
10752 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10753 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10754 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10755 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10756 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
10757 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
10758 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10759 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
10760 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
10761 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10762 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10763 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10764 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10765 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10766 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
10768 /* Built-in functions for the ST Microelectronics Loongson-2E/2F cores. */
10769 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
10770 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
10771 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
10772 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10773 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10774 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10775 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10776 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10777 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10778 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
10779 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
10780 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10781 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10782 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10783 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10784 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
10785 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10786 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10787 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10788 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
10789 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
10790 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10791 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10792 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10793 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10794 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10795 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10796 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10797 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10798 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10799 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10800 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10801 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10802 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10803 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10804 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10805 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10806 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
10807 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
10808 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10809 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10810 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10811 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10812 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10813 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10814 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10815 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10816 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
10817 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10818 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10819 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10820 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10821 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
10822 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
10823 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10824 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10825 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10826 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
10827 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10828 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
10829 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
10830 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
10831 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
10832 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10833 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10834 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10835 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10836 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10837 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10838 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10839 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10840 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
10841 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
10842 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
10843 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
10844 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10845 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10846 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10847 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10848 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10849 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10850 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
10851 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
10852 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
10853 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
10854 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10855 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10856 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10857 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10858 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10859 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10860 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10861 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
10862 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
10863 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
10864 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
10865 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
10866 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
10867 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI)
10870 /* MODE is a vector mode whose elements have type TYPE. Return the type
10871 of the vector itself. */
10873 static tree
10874 mips_builtin_vector_type (tree type, enum machine_mode mode)
10876 static tree types[2 * (int) MAX_MACHINE_MODE];
10877 int mode_index;
10879 mode_index = (int) mode;
10881 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
10882 mode_index += MAX_MACHINE_MODE;
10884 if (types[mode_index] == NULL_TREE)
10885 types[mode_index] = build_vector_type_for_mode (type, mode);
10886 return types[mode_index];
10889 /* Source-level argument types. */
10890 #define MIPS_ATYPE_VOID void_type_node
10891 #define MIPS_ATYPE_INT integer_type_node
10892 #define MIPS_ATYPE_POINTER ptr_type_node
10894 /* Standard mode-based argument types. */
10895 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
10896 #define MIPS_ATYPE_SI intSI_type_node
10897 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10898 #define MIPS_ATYPE_DI intDI_type_node
10899 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
10900 #define MIPS_ATYPE_SF float_type_node
10901 #define MIPS_ATYPE_DF double_type_node
10903 /* Vector argument types. */
10904 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10905 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10906 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
10907 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10908 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
10909 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
10910 #define MIPS_ATYPE_UV2SI \
10911 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
10912 #define MIPS_ATYPE_UV4HI \
10913 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
10914 #define MIPS_ATYPE_UV8QI \
10915 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
10917 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10918 their associated MIPS_ATYPEs. */
10919 #define MIPS_FTYPE_ATYPES1(A, B) \
10920 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10922 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10923 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10925 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10926 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10928 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10929 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10930 MIPS_ATYPE_##E
10932 /* Return the function type associated with function prototype TYPE. */
10934 static tree
10935 mips_build_function_type (enum mips_function_type type)
10937 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10939 if (types[(int) type] == NULL_TREE)
10940 switch (type)
10942 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10943 case MIPS_FTYPE_NAME##NUM ARGS: \
10944 types[(int) type] \
10945 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10946 NULL_TREE); \
10947 break;
10948 #include "config/mips/mips-ftypes.def"
10949 #undef DEF_MIPS_FTYPE
10950 default:
10951 gcc_unreachable ();
10954 return types[(int) type];
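/* Added illustration (not part of the original source): assuming that
   mips-ftypes.def contains an entry such as DEF_MIPS_FTYPE (2, (SI, SI, SI))
   and that MIPS_FTYPE_NAME2 pastes its arguments into an enumerator like
   MIPS_SI_FTYPE_SI_SI, the case above would expand to roughly:

     case MIPS_SI_FTYPE_SI_SI:
       types[(int) type]
         = build_function_type_list (intSI_type_node, intSI_type_node,
                                     intSI_type_node, NULL_TREE);
       break;

   That is, the first code names the return type and the remaining codes
   name the argument types, with the MIPS_ATYPE_* macros above supplying
   the corresponding tree nodes.  */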
10957 /* Implement TARGET_INIT_BUILTINS. */
10959 static void
10960 mips_init_builtins (void)
10962 const struct mips_builtin_description *d;
10963 unsigned int i;
10965 /* Iterate through all of the bdesc arrays, initializing all of the
10966 builtin functions. */
10967 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
10969 d = &mips_builtins[i];
10970 if (d->avail ())
10971 add_builtin_function (d->name,
10972 mips_build_function_type (d->function_type),
10973 i, BUILT_IN_MD, NULL, NULL);
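/* Usage sketch (added; not from the original file): once registered here,
   each entry becomes callable from C under the name stored in the table's
   name field.  Assuming the usual "__builtin_mips_" prefix, the DSP "mult"
   entry with MIPS_DI_FTYPE_SI_SI would be used roughly as:

     long long
     mult64 (int a, int b)
     {
       return __builtin_mips_mult (a, b);
     }
*/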
10977 /* Take argument ARGNO from EXP's argument list and convert it into a
10978 form suitable for input operand OPNO of instruction ICODE. Return the
10979 value. */
10981 static rtx
10982 mips_prepare_builtin_arg (enum insn_code icode,
10983 unsigned int opno, tree exp, unsigned int argno)
10985 rtx value;
10986 enum machine_mode mode;
10988 value = expand_normal (CALL_EXPR_ARG (exp, argno));
10989 mode = insn_data[icode].operand[opno].mode;
10990 if (!insn_data[icode].operand[opno].predicate (value, mode))
10992 value = copy_to_mode_reg (mode, value);
10993 /* Check the predicate again. */
10994 if (!insn_data[icode].operand[opno].predicate (value, mode))
10996 error ("invalid argument to built-in function");
10997 return const0_rtx;
11001 return value;
11004 /* Return an rtx suitable for output operand OP of instruction ICODE.
11005 If TARGET is non-null, try to use it where possible. */
11007 static rtx
11008 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11010 enum machine_mode mode;
11012 mode = insn_data[icode].operand[op].mode;
11013 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11014 target = gen_reg_rtx (mode);
11016 return target;
11019 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
11020 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
11021 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
11022 suggests a good place to put the result. */
11024 static rtx
11025 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11026 bool has_target_p)
11028 rtx ops[MAX_RECOG_OPERANDS];
11029 int opno, argno;
11031 /* Map any target to operand 0. */
11032 opno = 0;
11033 if (has_target_p)
11035 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
11036 opno++;
11039 /* Map the arguments to the other operands. The n_operands value
11040 for an expander includes match_dups and match_scratches as well as
11041 match_operands, so n_operands is only an upper bound on the number
11042 of arguments to the expander function. */
11043 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
11044 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
11045 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
11047 switch (opno)
11049 case 2:
11050 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11051 break;
11053 case 3:
11054 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11055 break;
11057 case 4:
11058 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11059 break;
11061 default:
11062 gcc_unreachable ();
11064 return target;
11067 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
11068 function; TYPE says which. EXP is the CALL_EXPR that calls the
11069 function, ICODE is the instruction that should be used to compare
11070 the first two arguments, and COND is the condition it should test.
11071 TARGET, if nonnull, suggests a good place to put the result. */
11073 static rtx
11074 mips_expand_builtin_movtf (enum mips_builtin_type type,
11075 enum insn_code icode, enum mips_fp_condition cond,
11076 rtx target, tree exp)
11078 rtx cmp_result, op0, op1;
11080 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11081 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11082 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11083 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11085 icode = CODE_FOR_mips_cond_move_tf_ps;
11086 target = mips_prepare_builtin_target (icode, 0, target);
11087 if (type == MIPS_BUILTIN_MOVT)
11089 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11090 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11092 else
11094 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11095 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11097 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11098 return target;
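/* Usage sketch (added; built-in name and types assumed rather than taken
   from this file): a MOVT built-in of the form handled above compares its
   first two paired-single arguments and then selects from the last two,
   e.g.:

     typedef float v2sf __attribute__ ((vector_size (8)));

     v2sf
     select_on_eq (v2sf a, v2sf b, v2sf c, v2sf d)
     {
       return __builtin_mips_movt_c_eq_ps (a, b, c, d);
     }
*/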
11101 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11102 into TARGET otherwise. Return TARGET. */
11104 static rtx
11105 mips_builtin_branch_and_move (rtx condition, rtx target,
11106 rtx value_if_true, rtx value_if_false)
11108 rtx true_label, done_label;
11110 true_label = gen_label_rtx ();
11111 done_label = gen_label_rtx ();
11113 /* First assume that CONDITION is false. */
11114 mips_emit_move (target, value_if_false);
11116 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11117 emit_jump_insn (gen_condjump (condition, true_label));
11118 emit_jump_insn (gen_jump (done_label));
11119 emit_barrier ();
11121 /* Fix TARGET if CONDITION is true. */
11122 emit_label (true_label);
11123 mips_emit_move (target, value_if_true);
11125 emit_label (done_label);
11126 return target;
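/* Rough shape of the sequence emitted above (added illustration only):

     target = value_if_false;
     if (condition)
       goto true_label;
     goto done_label;
   true_label:
     target = value_if_true;
   done_label:
*/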
11129 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
11130 the CALL_EXPR that calls the function, ICODE is the code of the
11131 comparison instruction, and COND is the condition it should test.
11132 TARGET, if nonnull, suggests a good place to put the boolean result. */
11134 static rtx
11135 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11136 enum insn_code icode, enum mips_fp_condition cond,
11137 rtx target, tree exp)
11139 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
11140 int argno;
11142 if (target == 0 || GET_MODE (target) != SImode)
11143 target = gen_reg_rtx (SImode);
11145 /* The instruction should have a target operand, an operand for each
11146 argument, and an operand for COND. */
11147 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
11149 /* Prepare the operands to the comparison. */
11150 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11151 for (argno = 0; argno < call_expr_nargs (exp); argno++)
11152 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
11154 switch (insn_data[icode].n_operands)
11156 case 4:
11157 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11158 GEN_INT (cond)));
11159 break;
11161 case 6:
11162 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11163 args[2], args[3], GEN_INT (cond)));
11164 break;
11166 default:
11167 gcc_unreachable ();
11170 /* If the comparison sets more than one register, we define the result
11171 to be 0 if all registers are false and -1 if all registers are true.
11172 The value of the complete result is indeterminate otherwise. */
11173 switch (builtin_type)
11175 case MIPS_BUILTIN_CMP_ALL:
11176 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11177 return mips_builtin_branch_and_move (condition, target,
11178 const0_rtx, const1_rtx);
11180 case MIPS_BUILTIN_CMP_UPPER:
11181 case MIPS_BUILTIN_CMP_LOWER:
11182 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11183 condition = gen_single_cc (cmp_result, offset);
11184 return mips_builtin_branch_and_move (condition, target,
11185 const1_rtx, const0_rtx);
11187 default:
11188 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11189 return mips_builtin_branch_and_move (condition, target,
11190 const1_rtx, const0_rtx);
11194 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
11195 if nonnull, suggests a good place to put the boolean result. */
11197 static rtx
11198 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11200 rtx condition, cmp_result;
11201 int cmp_value;
11203 if (target == 0 || GET_MODE (target) != SImode)
11204 target = gen_reg_rtx (SImode);
11206 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11208 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11209 cmp_value = 32;
11210 else
11211 gcc_assert (0);
11213 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11214 return mips_builtin_branch_and_move (condition, target,
11215 const1_rtx, const0_rtx);
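/* Usage sketch (added; built-in name assumed and handle_rest() is a
   hypothetical helper): the BPOSGE32 built-in yields an SImode boolean,
   so code such as

     if (__builtin_mips_bposge32 ())
       handle_rest ();

   is expected to compile into a BPOSGE32 branch plus the move sequence
   produced by mips_builtin_branch_and_move.  */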
11218 /* Implement TARGET_EXPAND_BUILTIN. */
11220 static rtx
11221 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11222 enum machine_mode mode ATTRIBUTE_UNUSED,
11223 int ignore ATTRIBUTE_UNUSED)
11225 tree fndecl;
11226 unsigned int fcode, avail;
11227 const struct mips_builtin_description *d;
11229 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11230 fcode = DECL_FUNCTION_CODE (fndecl);
11231 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
11232 d = &mips_builtins[fcode];
11233 avail = d->avail ();
11234 gcc_assert (avail != 0);
11235 if (TARGET_MIPS16)
11237 error ("built-in function %qs not supported for MIPS16",
11238 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
11239 return const0_rtx;
11241 switch (d->builtin_type)
11243 case MIPS_BUILTIN_DIRECT:
11244 return mips_expand_builtin_direct (d->icode, target, exp, true);
11246 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11247 return mips_expand_builtin_direct (d->icode, target, exp, false);
11249 case MIPS_BUILTIN_MOVT:
11250 case MIPS_BUILTIN_MOVF:
11251 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
11252 d->cond, target, exp);
11254 case MIPS_BUILTIN_CMP_ANY:
11255 case MIPS_BUILTIN_CMP_ALL:
11256 case MIPS_BUILTIN_CMP_UPPER:
11257 case MIPS_BUILTIN_CMP_LOWER:
11258 case MIPS_BUILTIN_CMP_SINGLE:
11259 return mips_expand_builtin_compare (d->builtin_type, d->icode,
11260 d->cond, target, exp);
11262 case MIPS_BUILTIN_BPOSGE32:
11263 return mips_expand_builtin_bposge (d->builtin_type, target);
11265 gcc_unreachable ();
11268 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
11269 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
11270 struct mips16_constant {
11271 struct mips16_constant *next;
11272 rtx value;
11273 rtx label;
11274 enum machine_mode mode;
11277 /* Information about an incomplete MIPS16 constant pool. FIRST is the
11278 first constant, HIGHEST_ADDRESS is the highest address that the first
11279 byte of the pool can have, and INSN_ADDRESS is the current instruction
11280 address. */
11281 struct mips16_constant_pool {
11282 struct mips16_constant *first;
11283 int highest_address;
11284 int insn_address;
11287 /* Add constant VALUE to POOL and return its label. MODE is the
11288 value's mode (used for CONST_INTs, etc.). */
11290 static rtx
11291 mips16_add_constant (struct mips16_constant_pool *pool,
11292 rtx value, enum machine_mode mode)
11294 struct mips16_constant **p, *c;
11295 bool first_of_size_p;
11297 /* See whether the constant is already in the pool. If so, return the
11298 existing label, otherwise leave P pointing to the place where the
11299 constant should be added.
11301 Keep the pool sorted in increasing order of mode size so that we can
11302 reduce the number of alignments needed. */
11303 first_of_size_p = true;
11304 for (p = &pool->first; *p != 0; p = &(*p)->next)
11306 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
11307 return (*p)->label;
11308 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
11309 break;
11310 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
11311 first_of_size_p = false;
11314 /* In the worst case, the constant needed by the earliest instruction
11315 will end up at the end of the pool. The entire pool must then be
11316 accessible from that instruction.
11318 When adding the first constant, set the pool's highest address to
11319 the address of the first out-of-range byte. Adjust this address
11320 downwards each time a new constant is added. */
11321 if (pool->first == 0)
11322 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
11323 of the instruction with the lowest two bits clear. The base PC
11324 value for LDPC has the lowest three bits clear. Assume the worst
11325 case here; namely that the PC-relative instruction occupies the
11326 last 2 bytes in an aligned word. */
11327 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
11328 pool->highest_address -= GET_MODE_SIZE (mode);
11329 if (first_of_size_p)
11330 /* Take into account the worst possible padding due to alignment. */
11331 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
11333 /* Create a new entry. */
11334 c = XNEW (struct mips16_constant);
11335 c->value = value;
11336 c->mode = mode;
11337 c->label = gen_label_rtx ();
11338 c->next = *p;
11339 *p = c;
11341 return c->label;
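/* Worked example (added for illustration, assuming UNITS_PER_WORD == 4):
   if the first constant added is an SImode value and insn_address is
   0x100, the initial limit is 0x100 - 2 + 0x8000 = 0x80fe; it then drops
   by 4 for the constant itself and by a further 3 of worst-case alignment
   padding, leaving highest_address == 0x80f7.  Each subsequent constant
   lowers the limit in the same way.  */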
11344 /* Output constant VALUE after instruction INSN and return the last
11345 instruction emitted. MODE is the mode of the constant. */
11347 static rtx
11348 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
11350 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
11352 rtx size = GEN_INT (GET_MODE_SIZE (mode));
11353 return emit_insn_after (gen_consttable_int (value, size), insn);
11356 if (SCALAR_FLOAT_MODE_P (mode))
11357 return emit_insn_after (gen_consttable_float (value), insn);
11359 if (VECTOR_MODE_P (mode))
11361 int i;
11363 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
11364 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
11365 CONST_VECTOR_ELT (value, i), insn);
11366 return insn;
11369 gcc_unreachable ();
11372 /* Dump out the constants in CONSTANTS after INSN. */
11374 static void
11375 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
11377 struct mips16_constant *c, *next;
11378 int align;
11380 align = 0;
11381 for (c = constants; c != NULL; c = next)
11383 /* If necessary, increase the alignment of PC. */
11384 if (align < GET_MODE_SIZE (c->mode))
11386 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
11387 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
11389 align = GET_MODE_SIZE (c->mode);
11391 insn = emit_label_after (c->label, insn);
11392 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
11394 next = c->next;
11395 free (c);
11398 emit_barrier_after (insn);
11401 /* Return the length of instruction INSN. */
11403 static int
11404 mips16_insn_length (rtx insn)
11406 if (JUMP_P (insn))
11408 rtx body = PATTERN (insn);
11409 if (GET_CODE (body) == ADDR_VEC)
11410 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
11411 if (GET_CODE (body) == ADDR_DIFF_VEC)
11412 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
11414 return get_attr_length (insn);
11417 /* If *X is a symbolic constant that refers to the constant pool, add
11418 the constant to POOL and rewrite *X to use the constant's label. */
11420 static void
11421 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
11423 rtx base, offset, label;
11425 split_const (*x, &base, &offset);
11426 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
11428 label = mips16_add_constant (pool, get_pool_constant (base),
11429 get_pool_mode (base));
11430 base = gen_rtx_LABEL_REF (Pmode, label);
11431 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
11435 /* This structure is used to communicate with mips16_rewrite_pool_refs.
11436 INSN is the instruction we're rewriting and POOL points to the current
11437 constant pool. */
11438 struct mips16_rewrite_pool_refs_info {
11439 rtx insn;
11440 struct mips16_constant_pool *pool;
11443 /* Rewrite *X so that constant pool references refer to the constant's
11444 label instead. DATA points to a mips16_rewrite_pool_refs_info
11445 structure. */
11447 static int
11448 mips16_rewrite_pool_refs (rtx *x, void *data)
11450 struct mips16_rewrite_pool_refs_info *info =
11451 (struct mips16_rewrite_pool_refs_info *) data;
11453 if (force_to_mem_operand (*x, Pmode))
11455 rtx mem = force_const_mem (GET_MODE (*x), *x);
11456 validate_change (info->insn, x, mem, false);
11459 if (MEM_P (*x))
11461 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
11462 return -1;
11465 if (TARGET_MIPS16_TEXT_LOADS)
11466 mips16_rewrite_pool_constant (info->pool, x);
11468 return GET_CODE (*x) == CONST ? -1 : 0;
11471 /* Build MIPS16 constant pools. */
11473 static void
11474 mips16_lay_out_constants (void)
11476 struct mips16_constant_pool pool;
11477 struct mips16_rewrite_pool_refs_info info;
11478 rtx insn, barrier;
11480 if (!TARGET_MIPS16_PCREL_LOADS)
11481 return;
11483 barrier = 0;
11484 memset (&pool, 0, sizeof (pool));
11485 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11487 /* Rewrite constant pool references in INSN. */
11488 if (INSN_P (insn))
11490 info.insn = insn;
11491 info.pool = &pool;
11492 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
11495 pool.insn_address += mips16_insn_length (insn);
11497 if (pool.first != NULL)
11499 /* If there are no natural barriers between the first user of
11500 the pool and the highest acceptable address, we'll need to
11501 create a new instruction to jump around the constant pool.
11502 In the worst case, this instruction will be 4 bytes long.
11504 If it's too late to do this transformation after INSN,
11505 do it immediately before INSN. */
11506 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
11508 rtx label, jump;
11510 label = gen_label_rtx ();
11512 jump = emit_jump_insn_before (gen_jump (label), insn);
11513 JUMP_LABEL (jump) = label;
11514 LABEL_NUSES (label) = 1;
11515 barrier = emit_barrier_after (jump);
11517 emit_label_after (label, barrier);
11518 pool.insn_address += 4;
11521 /* See whether the constant pool is now out of range of the first
11522 user. If so, output the constants after the previous barrier.
11523 Note that any instructions between BARRIER and INSN (inclusive)
11524 will use negative offsets to refer to the pool. */
11525 if (pool.insn_address > pool.highest_address)
11527 mips16_emit_constants (pool.first, barrier);
11528 pool.first = NULL;
11529 barrier = 0;
11531 else if (BARRIER_P (insn))
11532 barrier = insn;
11535 mips16_emit_constants (pool.first, get_last_insn ());
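/* Illustrative output (added; assembly is only a rough sketch): after this
   pass, a MIPS16 function that needs an out-of-range constant ends up with
   a PC-relative load, an optional jump around the pool, and the pool
   itself, along the lines of:

	lw	$2, 1f
	...
	b	2f
	.align	2
   1:	.word	0x12345678
   2:	...
*/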
11538 /* A temporary variable used by for_each_rtx callbacks, etc. */
11539 static rtx mips_sim_insn;
11541 /* A structure representing the state of the processor pipeline.
11542 Used by the mips_sim_* family of functions. */
11543 struct mips_sim {
11544 /* The maximum number of instructions that can be issued in a cycle.
11545 (Caches mips_issue_rate.) */
11546 unsigned int issue_rate;
11548 /* The current simulation time. */
11549 unsigned int time;
11551 /* How many more instructions can be issued in the current cycle. */
11552 unsigned int insns_left;
11554 /* LAST_SET[X].INSN is the last instruction to set register X.
11555 LAST_SET[X].TIME is the time at which that instruction was issued.
11556 INSN is null if no instruction has yet set register X. */
11557 struct {
11558 rtx insn;
11559 unsigned int time;
11560 } last_set[FIRST_PSEUDO_REGISTER];
11562 /* The pipeline's current DFA state. */
11563 state_t dfa_state;
11566 /* Reset STATE to the initial simulation state. */
11568 static void
11569 mips_sim_reset (struct mips_sim *state)
11571 state->time = 0;
11572 state->insns_left = state->issue_rate;
11573 memset (&state->last_set, 0, sizeof (state->last_set));
11574 state_reset (state->dfa_state);
11577 /* Initialize STATE before its first use. DFA_STATE points to an
11578 allocated but uninitialized DFA state. */
11580 static void
11581 mips_sim_init (struct mips_sim *state, state_t dfa_state)
11583 state->issue_rate = mips_issue_rate ();
11584 state->dfa_state = dfa_state;
11585 mips_sim_reset (state);
11588 /* Advance STATE by one clock cycle. */
11590 static void
11591 mips_sim_next_cycle (struct mips_sim *state)
11593 state->time++;
11594 state->insns_left = state->issue_rate;
11595 state_transition (state->dfa_state, 0);
11598 /* Advance simulation state STATE until instruction INSN can read
11599 register REG. */
11601 static void
11602 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
11604 unsigned int regno, end_regno;
11606 end_regno = END_REGNO (reg);
11607 for (regno = REGNO (reg); regno < end_regno; regno++)
11608 if (state->last_set[regno].insn != 0)
11610 unsigned int t;
11612 t = (state->last_set[regno].time
11613 + insn_latency (state->last_set[regno].insn, insn));
11614 while (state->time < t)
11615 mips_sim_next_cycle (state);
11619 /* A for_each_rtx callback. If *X is a register, advance simulation state
11620 DATA until mips_sim_insn can read the register's value. */
11622 static int
11623 mips_sim_wait_regs_2 (rtx *x, void *data)
11625 if (REG_P (*x))
11626 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
11627 return 0;
11630 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11632 static void
11633 mips_sim_wait_regs_1 (rtx *x, void *data)
11635 for_each_rtx (x, mips_sim_wait_regs_2, data);
11638 /* Advance simulation state STATE until all of INSN's register
11639 dependencies are satisfied. */
11641 static void
11642 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11644 mips_sim_insn = insn;
11645 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11648 /* Advance simulation state STATE until the units required by
11649 instruction INSN are available. */
11651 static void
11652 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11654 state_t tmp_state;
11656 tmp_state = alloca (state_size ());
11657 while (state->insns_left == 0
11658 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11659 state_transition (tmp_state, insn) >= 0))
11660 mips_sim_next_cycle (state);
11663 /* Advance simulation state STATE until INSN is ready to issue. */
11665 static void
11666 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11668 mips_sim_wait_regs (state, insn);
11669 mips_sim_wait_units (state, insn);
11672 /* mips_sim_insn has just set X. Update the LAST_SET array
11673 in simulation state DATA. */
11675 static void
11676 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11678 struct mips_sim *state;
11680 state = (struct mips_sim *) data;
11681 if (REG_P (x))
11683 unsigned int regno, end_regno;
11685 end_regno = END_REGNO (x);
11686 for (regno = REGNO (x); regno < end_regno; regno++)
11688 state->last_set[regno].insn = mips_sim_insn;
11689 state->last_set[regno].time = state->time;
11694 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11695 can issue immediately (i.e., that mips_sim_wait_insn has already
11696 been called). */
11698 static void
11699 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11701 state_transition (state->dfa_state, insn);
11702 state->insns_left--;
11704 mips_sim_insn = insn;
11705 note_stores (PATTERN (insn), mips_sim_record_set, state);
11708 /* Simulate issuing a NOP in state STATE. */
11710 static void
11711 mips_sim_issue_nop (struct mips_sim *state)
11713 if (state->insns_left == 0)
11714 mips_sim_next_cycle (state);
11715 state->insns_left--;
11718 /* Update simulation state STATE so that it's ready to accept the instruction
11719 after INSN. INSN should be part of the main rtl chain, not a member of a
11720 SEQUENCE. */
11722 static void
11723 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11725 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11726 if (JUMP_P (insn))
11727 mips_sim_issue_nop (state);
11729 switch (GET_CODE (SEQ_BEGIN (insn)))
11731 case CODE_LABEL:
11732 case CALL_INSN:
11733 /* We can't predict the processor state after a call or label. */
11734 mips_sim_reset (state);
11735 break;
11737 case JUMP_INSN:
11738 /* The delay slots of branch likely instructions are only executed
11739 when the branch is taken. Therefore, if the caller has simulated
11740 the delay slot instruction, STATE does not really reflect the state
11741 of the pipeline for the instruction after the delay slot. Also,
11742 branch likely instructions tend to incur a penalty when not taken,
11743 so there will probably be an extra delay between the branch and
11744 the instruction after the delay slot. */
11745 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11746 mips_sim_reset (state);
11747 break;
11749 default:
11750 break;
11754 /* The VR4130 pipeline issues aligned pairs of instructions together,
11755 but it stalls the second instruction if it depends on the first.
11756 In order to cut down the amount of logic required, this dependence
11757 check is not based on a full instruction decode. Instead, any non-SPECIAL
11758 instruction is assumed to modify the register specified by bits 20-16
11759 (which is usually the "rt" field).
11761 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
11762 input, so we can end up with a false dependence between the branch
11763 and its delay slot. If this situation occurs in instruction INSN,
11764 try to avoid it by swapping rs and rt. */
11766 static void
11767 vr4130_avoid_branch_rt_conflict (rtx insn)
11769 rtx first, second;
11771 first = SEQ_BEGIN (insn);
11772 second = SEQ_END (insn);
11773 if (JUMP_P (first)
11774 && NONJUMP_INSN_P (second)
11775 && GET_CODE (PATTERN (first)) == SET
11776 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11777 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11779 /* Check for the right kind of condition. */
11780 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11781 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11782 && REG_P (XEXP (cond, 0))
11783 && REG_P (XEXP (cond, 1))
11784 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11785 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11787 /* SECOND mentions the rt register but not the rs register. */
11788 rtx tmp = XEXP (cond, 0);
11789 XEXP (cond, 0) = XEXP (cond, 1);
11790 XEXP (cond, 1) = tmp;
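/* Example (added illustration): given

	beq	$4, $5, target
	 addu	$6, $5, $7	# delay slot reads $5, the branch's rt field

   the VR4130 check described above would see a false dependence on $5,
   so the code above swaps the operands to produce "beq $5, $4, target",
   making $4 the rt field instead.  */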
11795 /* Implement -mvr4130-align. Go through each basic block and simulate the
11796 processor pipeline. If we find that a pair of instructions could execute
11797 in parallel, and the first of those instructions is not 8-byte aligned,
11798 insert a nop to make it aligned. */
11800 static void
11801 vr4130_align_insns (void)
11803 struct mips_sim state;
11804 rtx insn, subinsn, last, last2, next;
11805 bool aligned_p;
11807 dfa_start ();
11809 /* LAST is the last instruction before INSN to have a nonzero length.
11810 LAST2 is the last such instruction before LAST. */
11811 last = 0;
11812 last2 = 0;
11814 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11815 aligned_p = true;
11817 mips_sim_init (&state, alloca (state_size ()));
11818 for (insn = get_insns (); insn != 0; insn = next)
11820 unsigned int length;
11822 next = NEXT_INSN (insn);
11824 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11825 This isn't really related to the alignment pass, but we do it on
11826 the fly to avoid a separate instruction walk. */
11827 vr4130_avoid_branch_rt_conflict (insn);
11829 if (USEFUL_INSN_P (insn))
11830 FOR_EACH_SUBINSN (subinsn, insn)
11832 mips_sim_wait_insn (&state, subinsn);
11834 /* If we want this instruction to issue in parallel with the
11835 previous one, make sure that the previous instruction is
11836 aligned. There are several reasons why this isn't worthwhile
11837 when the second instruction is a call:
11839 - Calls are less likely to be performance critical,
11840 - There's a good chance that the delay slot can execute
11841 in parallel with the call.
11842 - The return address would then be unaligned.
11844 In general, if we're going to insert a nop between instructions
11845 X and Y, it's better to insert it immediately after X. That
11846 way, if the nop makes Y aligned, it will also align any labels
11847 between X and Y. */
11848 if (state.insns_left != state.issue_rate
11849 && !CALL_P (subinsn))
11851 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11853 /* SUBINSN is the first instruction in INSN and INSN is
11854 aligned. We want to align the previous instruction
11855 instead, so insert a nop between LAST2 and LAST.
11857 Note that LAST could be either a single instruction
11858 or a branch with a delay slot. In the latter case,
11859 LAST, like INSN, is already aligned, but the delay
11860 slot must have some extra delay that stops it from
11861 issuing at the same time as the branch. We therefore
11862 insert a nop before the branch in order to align its
11863 delay slot. */
11864 emit_insn_after (gen_nop (), last2);
11865 aligned_p = false;
11867 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11869 /* SUBINSN is the delay slot of INSN, but INSN is
11870 currently unaligned. Insert a nop between
11871 LAST and INSN to align it. */
11872 emit_insn_after (gen_nop (), last);
11873 aligned_p = true;
11876 mips_sim_issue_insn (&state, subinsn);
11878 mips_sim_finish_insn (&state, insn);
11880 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11881 length = get_attr_length (insn);
11882 if (length > 0)
11884 /* If the instruction is an asm statement or multi-instruction
11885 mips.md pattern, the length is only an estimate. Insert an
11886 8 byte alignment after it so that the following instructions
11887 can be handled correctly. */
11888 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11889 && (recog_memoized (insn) < 0 || length >= 8))
11891 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11892 next = NEXT_INSN (next);
11893 mips_sim_next_cycle (&state);
11894 aligned_p = true;
11896 else if (length & 4)
11897 aligned_p = !aligned_p;
11898 last2 = last;
11899 last = insn;
11902 /* See whether INSN is an aligned label. */
11903 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11904 aligned_p = true;
11906 dfa_finish ();
11909 /* This structure records that the current function has a LO_SUM
11910 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
11911 the largest offset applied to BASE by all such LO_SUMs. */
11912 struct mips_lo_sum_offset {
11913 rtx base;
11914 HOST_WIDE_INT offset;
11917 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
11919 static hashval_t
11920 mips_hash_base (rtx base)
11922 int do_not_record_p;
11924 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
11927 /* Hash-table callbacks for mips_lo_sum_offsets. */
11929 static hashval_t
11930 mips_lo_sum_offset_hash (const void *entry)
11932 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
11935 static int
11936 mips_lo_sum_offset_eq (const void *entry, const void *value)
11938 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
11939 (const_rtx) value);
11942 /* Look up symbolic constant X in HTAB, which is a hash table of
11943 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
11944 paired with a recorded LO_SUM, otherwise record X in the table. */
11946 static bool
11947 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
11949 rtx base, offset;
11950 void **slot;
11951 struct mips_lo_sum_offset *entry;
11953 /* Split X into a base and offset. */
11954 split_const (x, &base, &offset);
11955 if (UNSPEC_ADDRESS_P (base))
11956 base = UNSPEC_ADDRESS (base);
11958 /* Look up the base in the hash table. */
11959 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
11960 if (slot == NULL)
11961 return false;
11963 entry = (struct mips_lo_sum_offset *) *slot;
11964 if (option == INSERT)
11966 if (entry == NULL)
11968 entry = XNEW (struct mips_lo_sum_offset);
11969 entry->base = base;
11970 entry->offset = INTVAL (offset);
11971 *slot = entry;
11973 else
11975 if (INTVAL (offset) > entry->offset)
11976 entry->offset = INTVAL (offset);
11979 return INTVAL (offset) <= entry->offset;
11982 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
11983 Record every LO_SUM in *LOC. */
11985 static int
11986 mips_record_lo_sum (rtx *loc, void *data)
11988 if (GET_CODE (*loc) == LO_SUM)
11989 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
11990 return 0;
11993 /* Return true if INSN is a SET of an orphaned high-part relocation.
11994 HTAB is a hash table of mips_lo_sum_offsets that describes all the
11995 LO_SUMs in the current function. */
11997 static bool
11998 mips_orphaned_high_part_p (htab_t htab, rtx insn)
12000 enum mips_symbol_type type;
12001 rtx x, set;
12003 set = single_set (insn);
12004 if (set)
12006 /* Check for %his. */
12007 x = SET_SRC (set);
12008 if (GET_CODE (x) == HIGH
12009 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
12010 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
12012 /* Check for local %gots (and %got_pages, which is redundant but OK). */
12013 if (GET_CODE (x) == UNSPEC
12014 && XINT (x, 1) == UNSPEC_LOAD_GOT
12015 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
12016 SYMBOL_CONTEXT_LEA, &type)
12017 && type == SYMBOL_GOTOFF_PAGE)
12018 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
12020 return false;
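/* Example (added illustration): if optimization deletes every user of a
   %lo(symbol), the remaining high-part setup

	lui	$2, %hi(symbol)

   is "orphaned"; this predicate lets mips_reorg_process_insns delete such
   an instruction, or turn it into a nop when it sits in a delay slot.  */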
12023 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
12024 INSN and a previous instruction, avoid it by inserting nops after
12025 instruction AFTER.
12027 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
12028 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
12029 before using the value of that register. *HILO_DELAY counts the
12030 number of instructions since the last hilo hazard (that is,
12031 the number of instructions since the last MFLO or MFHI).
12033 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
12034 for the next instruction.
12036 LO_REG is an rtx for the LO register, used in dependence checking. */
12038 static void
12039 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
12040 rtx *delayed_reg, rtx lo_reg)
12042 rtx pattern, set;
12043 int nops, ninsns;
12045 pattern = PATTERN (insn);
12047 /* Do not put the whole function in .set noreorder if it contains
12048 an asm statement. We don't know whether there will be hazards
12049 between the asm statement and the gcc-generated code. */
12050 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
12051 cfun->machine->all_noreorder_p = false;
12053 /* Ignore zero-length instructions (barriers and the like). */
12054 ninsns = get_attr_length (insn) / 4;
12055 if (ninsns == 0)
12056 return;
12058 /* Work out how many nops are needed. Note that we only care about
12059 registers that are explicitly mentioned in the instruction's pattern.
12060 It doesn't matter that calls use the argument registers or that they
12061 clobber hi and lo. */
12062 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
12063 nops = 2 - *hilo_delay;
12064 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
12065 nops = 1;
12066 else
12067 nops = 0;
12069 /* Insert the nops between this instruction and the previous one.
12070 Each new nop takes us further from the last hilo hazard. */
12071 *hilo_delay += nops;
12072 while (nops-- > 0)
12073 emit_insn_after (gen_hazard_nop (), after);
12075 /* Set up the state for the next instruction. */
12076 *hilo_delay += ninsns;
12077 *delayed_reg = 0;
12078 if (INSN_CODE (insn) >= 0)
12079 switch (get_attr_hazard (insn))
12081 case HAZARD_NONE:
12082 break;
12084 case HAZARD_HILO:
12085 *hilo_delay = 0;
12086 break;
12088 case HAZARD_DELAY:
12089 set = single_set (insn);
12090 gcc_assert (set);
12091 *delayed_reg = SET_DEST (set);
12092 break;
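/* Example (added illustration): with the HI/LO hazard, a multiply must not
   start within two instructions of a preceding MFLO/MFHI, so for

	mflo	$2
	mult	$6, $7

   the loop above inserts two hazard nops between the MFLO and the MULT
   (one if another unrelated instruction already separates them).  */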
12096 /* Go through the instruction stream and insert nops where necessary.
12097 Also delete any high-part relocations whose partnering low parts
12098 are now all dead. See if the whole function can then be put into
12099 .set noreorder and .set nomacro. */
12101 static void
12102 mips_reorg_process_insns (void)
12104 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
12105 int hilo_delay;
12106 htab_t htab;
12108 /* Force all instructions to be split into their final form. */
12109 split_all_insns_noflow ();
12111 /* Recalculate instruction lengths without taking nops into account. */
12112 cfun->machine->ignore_hazard_length_p = true;
12113 shorten_branches (get_insns ());
12115 cfun->machine->all_noreorder_p = true;
12117 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
12118 if (!TARGET_EXPLICIT_RELOCS)
12119 cfun->machine->all_noreorder_p = false;
12121 /* Profiled functions can't be all noreorder because the profiler
12122 support uses assembler macros. */
12123 if (crtl->profile)
12124 cfun->machine->all_noreorder_p = false;
12126 /* Code compiled with -mfix-vr4120 can't be all noreorder because
12127 we rely on the assembler to work around some errata. */
12128 if (TARGET_FIX_VR4120)
12129 cfun->machine->all_noreorder_p = false;
12131 /* The same is true for -mfix-vr4130 if we might generate MFLO or
12132 MFHI instructions. Note that we avoid using MFLO and MFHI if
12133 the VR4130 MACC and DMACC instructions are available instead;
12134 see the *mfhilo_{si,di}_macc patterns. */
12135 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
12136 cfun->machine->all_noreorder_p = false;
12138 htab = htab_create (37, mips_lo_sum_offset_hash,
12139 mips_lo_sum_offset_eq, free);
12141 /* Make a first pass over the instructions, recording all the LO_SUMs. */
12142 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
12143 FOR_EACH_SUBINSN (subinsn, insn)
12144 if (INSN_P (subinsn))
12145 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
12147 last_insn = 0;
12148 hilo_delay = 2;
12149 delayed_reg = 0;
12150 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
12152 /* Make a second pass over the instructions. Delete orphaned
12153 high-part relocations or turn them into NOPs. Avoid hazards
12154 by inserting NOPs. */
12155 for (insn = get_insns (); insn != 0; insn = next_insn)
12157 next_insn = NEXT_INSN (insn);
12158 if (INSN_P (insn))
12160 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
12162 /* If we find an orphaned high-part relocation in a delay
12163 slot, it's easier to turn that instruction into a NOP than
12164 to delete it. The delay slot will be a NOP either way. */
12165 FOR_EACH_SUBINSN (subinsn, insn)
12166 if (INSN_P (subinsn))
12168 if (mips_orphaned_high_part_p (htab, subinsn))
12170 PATTERN (subinsn) = gen_nop ();
12171 INSN_CODE (subinsn) = CODE_FOR_nop;
12173 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
12174 &delayed_reg, lo_reg);
12176 last_insn = insn;
12178 else
12180 /* INSN is a single instruction. Delete it if it's an
12181 orphaned high-part relocation. */
12182 if (mips_orphaned_high_part_p (htab, insn))
12183 delete_insn (insn);
12184 else
12186 mips_avoid_hazard (last_insn, insn, &hilo_delay,
12187 &delayed_reg, lo_reg);
12188 last_insn = insn;
12194 htab_delete (htab);
12197 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
12199 static void
12200 mips_reorg (void)
12202 mips16_lay_out_constants ();
12203 if (mips_base_delayed_branch)
12204 dbr_schedule (get_insns ());
12205 mips_reorg_process_insns ();
12206 if (TARGET_EXPLICIT_RELOCS && TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
12207 vr4130_align_insns ();
12210 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
12211 in order to avoid duplicating too much logic from elsewhere. */
12213 static void
12214 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12215 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12216 tree function)
12218 rtx this, temp1, temp2, insn, fnaddr;
12219 bool use_sibcall_p;
12221 /* Pretend to be a post-reload pass while generating rtl. */
12222 reload_completed = 1;
12224 /* Mark the end of the (empty) prologue. */
12225 emit_note (NOTE_INSN_PROLOGUE_END);
12227 /* Determine if we can use a sibcall to call FUNCTION directly. */
12228 fnaddr = XEXP (DECL_RTL (function), 0);
12229 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
12230 && const_call_insn_operand (fnaddr, Pmode));
12232 /* Determine if we need to load FNADDR from the GOT. */
12233 if (!use_sibcall_p)
12234 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
12236 case SYMBOL_GOT_PAGE_OFST:
12237 case SYMBOL_GOT_DISP:
12238 /* Pick a global pointer. Use a call-clobbered register if
12239 TARGET_CALL_SAVED_GP. */
12240 cfun->machine->global_pointer =
12241 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
12242 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
12244 /* Set up the global pointer for n32 or n64 abicalls. */
12245 mips_emit_loadgp ();
12246 break;
12248 default:
12249 break;
12252 /* We need two temporary registers in some cases. */
12253 temp1 = gen_rtx_REG (Pmode, 2);
12254 temp2 = gen_rtx_REG (Pmode, 3);
12256 /* Find out which register contains the "this" pointer. */
12257 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12258 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
12259 else
12260 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
12262 /* Add DELTA to THIS. */
12263 if (delta != 0)
12265 rtx offset = GEN_INT (delta);
12266 if (!SMALL_OPERAND (delta))
12268 mips_emit_move (temp1, offset);
12269 offset = temp1;
12271 emit_insn (gen_add3_insn (this, this, offset));
12274 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
12275 if (vcall_offset != 0)
12277 rtx addr;
12279 /* Set TEMP1 to *THIS. */
12280 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
12282 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
12283 addr = mips_add_offset (temp2, temp1, vcall_offset);
12285 /* Load the offset and add it to THIS. */
12286 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
12287 emit_insn (gen_add3_insn (this, this, temp1));
12290 /* Jump to the target function. Use a sibcall if direct jumps are
12291 allowed, otherwise load the address into a register first. */
12292 if (use_sibcall_p)
12294 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
12295 SIBLING_CALL_P (insn) = 1;
12297 else
12299 /* This is messy. GAS treats "la $25,foo" as part of a call
12300 sequence and may allow a global "foo" to be lazily bound.
12301 The general move patterns therefore reject this combination.
12303 In this context, lazy binding would actually be OK
12304 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
12305 TARGET_CALL_SAVED_GP; see mips_load_call_address.
12306 We must therefore load the address via a temporary
12307 register if mips_dangerous_for_la25_p.
12309 If we jump to the temporary register rather than $25, the assembler
12310 can use the move insn to fill the jump's delay slot. */
12311 if (TARGET_USE_PIC_FN_ADDR_REG
12312 && !mips_dangerous_for_la25_p (fnaddr))
12313 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
12314 mips_load_call_address (temp1, fnaddr, true);
12316 if (TARGET_USE_PIC_FN_ADDR_REG
12317 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
12318 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
12319 emit_jump_insn (gen_indirect_jump (temp1));
12322 /* Run just enough of rest_of_compilation. This sequence was
12323 "borrowed" from alpha.c. */
12324 insn = get_insns ();
12325 insn_locators_alloc ();
12326 split_all_insns_noflow ();
12327 mips16_lay_out_constants ();
12328 shorten_branches (insn);
12329 final_start_function (insn, file, 1);
12330 final (insn, file, 1);
12331 final_end_function ();
12332 free_after_compilation (cfun);
12334 /* Clean up the vars set above. Note that final_end_function resets
12335 the global pointer for us. */
12336 reload_completed = 0;
12339 /* The last argument passed to mips_set_mips16_mode, or negative if the
12340 function hasn't been called yet.
12342 There are two copies of this information. One is saved and restored
12343 by the PCH process while the other is specific to this compiler
12344 invocation. The information calculated by mips_set_mips16_mode
12345 is invalid unless the two variables are the same. */
12346 static int was_mips16_p = -1;
12347 static GTY(()) int was_mips16_pch_p = -1;
12349 /* Set up the target-dependent global state so that it matches the
12350 current function's ISA mode. */
12352 static void
12353 mips_set_mips16_mode (int mips16_p)
12355 if (mips16_p == was_mips16_p
12356 && mips16_p == was_mips16_pch_p)
12357 return;
12359 /* Restore base settings of various flags. */
12360 target_flags = mips_base_target_flags;
12361 flag_schedule_insns = mips_base_schedule_insns;
12362 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
12363 flag_move_loop_invariants = mips_base_move_loop_invariants;
12364 align_loops = mips_base_align_loops;
12365 align_jumps = mips_base_align_jumps;
12366 align_functions = mips_base_align_functions;
12368 if (mips16_p)
12370 /* Switch to MIPS16 mode. */
12371 target_flags |= MASK_MIPS16;
12373 /* Don't run the scheduler before reload, since it tends to
12374 increase register pressure. */
12375 flag_schedule_insns = 0;
12377 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
12378 the whole function to be in a single section. */
12379 flag_reorder_blocks_and_partition = 0;
12381 /* Don't move loop invariants, because it tends to increase
12382 register pressure. It also introduces an extra move in cases
12383 where the constant is the first operand in a two-operand binary
12384 instruction, or when it forms a register argument to a function
12385 call. */
12386 flag_move_loop_invariants = 0;
12388 /* Silently disable -mexplicit-relocs since it doesn't apply
12389 to MIPS16 code. Even so, it would be overly pedantic to warn
12390 about "-mips16 -mexplicit-relocs", especially given that
12391 we use a %gprel() operator. */
12392 target_flags &= ~MASK_EXPLICIT_RELOCS;
12394 /* Experiments suggest we get the best overall section-anchor
12395 results from using the range of an unextended LW or SW. Code
12396 that makes heavy use of byte or short accesses can do better
12397 with ranges of 0...31 and 0...63 respectively, but most code is
12398 sensitive to the range of LW and SW instead. */
12399 targetm.min_anchor_offset = 0;
12400 targetm.max_anchor_offset = 127;
12402 if (flag_pic || TARGET_ABICALLS)
12403 sorry ("MIPS16 PIC");
12405 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
12406 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
12408 else
12410 /* Switch to normal (non-MIPS16) mode. */
12411 target_flags &= ~MASK_MIPS16;
12413 /* Provide default values for align_* for 64-bit targets. */
12414 if (TARGET_64BIT)
12416 if (align_loops == 0)
12417 align_loops = 8;
12418 if (align_jumps == 0)
12419 align_jumps = 8;
12420 if (align_functions == 0)
12421 align_functions = 8;
12424 targetm.min_anchor_offset = -32768;
12425 targetm.max_anchor_offset = 32767;
12428 /* (Re)initialize MIPS target internals for new ISA. */
12429 mips_init_relocs ();
12431 if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
12432 /* Reinitialize target-dependent state. */
12433 target_reinit ();
12435 was_mips16_p = mips16_p;
12436 was_mips16_pch_p = mips16_p;
12439 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
12440 function should use the MIPS16 ISA and switch modes accordingly. */
12442 static void
12443 mips_set_current_function (tree fndecl)
12445 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
12448 /* Allocate a chunk of memory for per-function machine-dependent data. */
12450 static struct machine_function *
12451 mips_init_machine_status (void)
12453 return ((struct machine_function *)
12454 ggc_alloc_cleared (sizeof (struct machine_function)));
12457 /* Return the processor associated with the given ISA level, or null
12458 if the ISA isn't valid. */
12460 static const struct mips_cpu_info *
12461 mips_cpu_info_from_isa (int isa)
12463 unsigned int i;
12465 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12466 if (mips_cpu_info_table[i].isa == isa)
12467 return mips_cpu_info_table + i;
12469 return NULL;
12472 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
12473 with a final "000" replaced by "k". Ignore case.
12475 Note: this function is shared between GCC and GAS. */
12477 static bool
12478 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
12480 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
12481 given++, canonical++;
12483 return ((*given == 0 && *canonical == 0)
12484 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
12487 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
12488 CPU name. We've traditionally allowed a lot of variation here.
12490 Note: this function is shared between GCC and GAS. */
12492 static bool
12493 mips_matching_cpu_name_p (const char *canonical, const char *given)
12495 /* First see if the name matches exactly, or with a final "000"
12496 turned into "k". */
12497 if (mips_strict_matching_cpu_name_p (canonical, given))
12498 return true;
12500 /* If not, try comparing based on numerical designation alone.
12501 See if GIVEN is an unadorned number, or 'r' followed by a number. */
12502 if (TOLOWER (*given) == 'r')
12503 given++;
12504 if (!ISDIGIT (*given))
12505 return false;
12507 /* Skip over some well-known prefixes in the canonical name,
12508 hoping to find a number there too. */
12509 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
12510 canonical += 2;
12511 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
12512 canonical += 2;
12513 else if (TOLOWER (canonical[0]) == 'r')
12514 canonical += 1;
12516 return mips_strict_matching_cpu_name_p (canonical, given);
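/* Examples (added illustration), following the two rules above:
   mips_matching_cpu_name_p ("vr4300", "r4300") matches after the "vr" and
   "r" prefixes are stripped, and mips_matching_cpu_name_p ("r4000", "4k")
   matches via the final-"000"-to-"k" rule, so both return true.  */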
12519 /* Return the mips_cpu_info entry for the processor or ISA given
12520 by CPU_STRING. Return null if the string isn't recognized.
12522 A similar function exists in GAS. */
12524 static const struct mips_cpu_info *
12525 mips_parse_cpu (const char *cpu_string)
12526 {
12527 unsigned int i;
12528 const char *s;
12530 /* In the past, we allowed upper-case CPU names, but it doesn't
12531 work well with the multilib machinery. */
12532 for (s = cpu_string; *s != 0; s++)
12533 if (ISUPPER (*s))
12534 {
12535 warning (0, "CPU names must be lower case");
12536 break;
12537 }
12539 /* 'from-abi' selects the most compatible architecture for the given
12540 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
12541 EABIs, we have to decide whether we're using the 32-bit or 64-bit
12542 version. */
12543 if (strcasecmp (cpu_string, "from-abi") == 0)
12544 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
12545 : ABI_NEEDS_64BIT_REGS ? 3
12546 : (TARGET_64BIT ? 3 : 1));
12548 /* 'default' has traditionally been a no-op. Probably not very useful. */
12549 if (strcasecmp (cpu_string, "default") == 0)
12550 return NULL;
12552 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12553 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
12554 return mips_cpu_info_table + i;
12556 return NULL;
12557 }
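/* Illustrative inputs (a sketch, assuming the usual entries in
   mips_cpu_info_table):

     mips_parse_cpu ("from-abi")  -> MIPS I for o32, MIPS III for n64
     mips_parse_cpu ("mips32r2")  -> the ISA-level entry of that name
     mips_parse_cpu ("r4k")       -> matched against "r4000" as above
     mips_parse_cpu ("default")   -> NULL, kept only for compatibility

   Upper-case spellings such as "R4000" still match, but draw the
   warning above because they sit badly with the multilib machinery.  */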
12559 /* Set up globals to generate code for the ISA or processor
12560 described by INFO. */
12562 static void
12563 mips_set_architecture (const struct mips_cpu_info *info)
12564 {
12565 if (info != 0)
12566 {
12567 mips_arch_info = info;
12568 mips_arch = info->cpu;
12569 mips_isa = info->isa;
12570 }
12571 }
12573 /* Likewise for tuning. */
12575 static void
12576 mips_set_tune (const struct mips_cpu_info *info)
12577 {
12578 if (info != 0)
12579 {
12580 mips_tune_info = info;
12581 mips_tune = info->cpu;
12582 }
12583 }
12585 /* Implement TARGET_HANDLE_OPTION. */
12587 static bool
12588 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
12589 {
12590 switch (code)
12591 {
12592 case OPT_mabi_:
12593 if (strcmp (arg, "32") == 0)
12594 mips_abi = ABI_32;
12595 else if (strcmp (arg, "o64") == 0)
12596 mips_abi = ABI_O64;
12597 else if (strcmp (arg, "n32") == 0)
12598 mips_abi = ABI_N32;
12599 else if (strcmp (arg, "64") == 0)
12600 mips_abi = ABI_64;
12601 else if (strcmp (arg, "eabi") == 0)
12602 mips_abi = ABI_EABI;
12603 else
12604 return false;
12605 return true;
12607 case OPT_march_:
12608 case OPT_mtune_:
12609 return mips_parse_cpu (arg) != 0;
12611 case OPT_mips:
12612 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
12613 return mips_isa_option_info != 0;
12615 case OPT_mno_flush_func:
12616 mips_cache_flush_func = NULL;
12617 return true;
12619 case OPT_mcode_readable_:
12620 if (strcmp (arg, "yes") == 0)
12621 mips_code_readable = CODE_READABLE_YES;
12622 else if (strcmp (arg, "pcrel") == 0)
12623 mips_code_readable = CODE_READABLE_PCREL;
12624 else if (strcmp (arg, "no") == 0)
12625 mips_code_readable = CODE_READABLE_NO;
12626 else
12627 return false;
12628 return true;
12630 default:
12631 return true;
12632 }
12633 }
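/* Illustrative mapping (sketch): "-mabi=n32" reaches this hook as
   OPT_mabi_ with ARG "n32" and selects ABI_N32, while a bare
   "-mips32r2" arrives as OPT_mips with ARG "32r2" and becomes a lookup
   of "mips32r2" via the ACONCAT call above.  A value that no case
   recognizes makes the hook return false, which the option machinery
   reports as an error for that switch.  */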
12635 /* Implement OVERRIDE_OPTIONS. */
12637 void
12638 mips_override_options (void)
12639 {
12640 int i, start, regno, mode;
12642 /* Process flags as though we were generating non-MIPS16 code. */
12643 mips_base_mips16 = TARGET_MIPS16;
12644 target_flags &= ~MASK_MIPS16;
12646 #ifdef SUBTARGET_OVERRIDE_OPTIONS
12647 SUBTARGET_OVERRIDE_OPTIONS;
12648 #endif
12650 /* Set the small data limit. */
12651 mips_small_data_threshold = (g_switch_set
12652 ? g_switch_value
12653 : MIPS_DEFAULT_GVALUE);
12655 /* The following code determines the architecture and register size.
12656 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
12657 The GAS and GCC code should be kept in sync as much as possible. */
12659 if (mips_arch_string != 0)
12660 mips_set_architecture (mips_parse_cpu (mips_arch_string));
12662 if (mips_isa_option_info != 0)
12663 {
12664 if (mips_arch_info == 0)
12665 mips_set_architecture (mips_isa_option_info);
12666 else if (mips_arch_info->isa != mips_isa_option_info->isa)
12667 error ("%<-%s%> conflicts with the other architecture options, "
12668 "which specify a %s processor",
12669 mips_isa_option_info->name,
12670 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
12671 }
12673 if (mips_arch_info == 0)
12674 {
12675 #ifdef MIPS_CPU_STRING_DEFAULT
12676 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
12677 #else
12678 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
12679 #endif
12680 }
12682 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
12683 error ("%<-march=%s%> is not compatible with the selected ABI",
12684 mips_arch_info->name);
12686 /* Optimize for mips_arch, unless -mtune selects a different processor. */
12687 if (mips_tune_string != 0)
12688 mips_set_tune (mips_parse_cpu (mips_tune_string));
12690 if (mips_tune_info == 0)
12691 mips_set_tune (mips_arch_info);
12693 if ((target_flags_explicit & MASK_64BIT) != 0)
12694 {
12695 /* The user specified the size of the integer registers. Make sure
12696 it agrees with the ABI and ISA. */
12697 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
12698 error ("%<-mgp64%> used with a 32-bit processor");
12699 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
12700 error ("%<-mgp32%> used with a 64-bit ABI");
12701 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
12702 error ("%<-mgp64%> used with a 32-bit ABI");
12703 }
12704 else
12705 {
12706 /* Infer the integer register size from the ABI and processor.
12707 Restrict ourselves to 32-bit registers if that's all the
12708 processor has, or if the ABI cannot handle 64-bit registers. */
12709 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
12710 target_flags &= ~MASK_64BIT;
12711 else
12712 target_flags |= MASK_64BIT;
12713 }
12715 if ((target_flags_explicit & MASK_FLOAT64) != 0)
12716 {
12717 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
12718 error ("unsupported combination: %s", "-mfp64 -msingle-float");
12719 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
12720 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
12721 else if (!TARGET_64BIT && TARGET_FLOAT64)
12722 {
12723 if (!ISA_HAS_MXHC1)
12724 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
12725 " the target supports the mfhc1 and mthc1 instructions");
12726 else if (mips_abi != ABI_32)
12727 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
12728 " the o32 ABI");
12729 }
12730 }
12731 else
12732 {
12733 /* -msingle-float selects 32-bit float registers. Otherwise the
12734 float registers should be the same size as the integer ones. */
12735 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12736 target_flags |= MASK_FLOAT64;
12737 else
12738 target_flags &= ~MASK_FLOAT64;
12739 }
12741 /* End of code shared with GAS. */
12743 /* If no -mlong* option was given, infer it from the other options. */
12744 if ((target_flags_explicit & MASK_LONG64) == 0)
12745 {
12746 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12747 target_flags |= MASK_LONG64;
12748 else
12749 target_flags &= ~MASK_LONG64;
12750 }
12752 if (!TARGET_OLDABI)
12753 flag_pcc_struct_return = 0;
12755 /* Decide which rtx_costs structure to use. */
12756 if (optimize_size)
12757 mips_cost = &mips_rtx_cost_optimize_size;
12758 else
12759 mips_cost = &mips_rtx_cost_data[mips_tune];
12761 /* If the user hasn't specified a branch cost, use the processor's
12762 default. */
12763 if (mips_branch_cost == 0)
12764 mips_branch_cost = mips_cost->branch_cost;
12766 /* If neither -mbranch-likely nor -mno-branch-likely was given
12767 on the command line, set MASK_BRANCHLIKELY based on the target
12768 architecture and tuning flags. Annulled delay slots are a
12769 size win, so we only consider the processor-specific tuning
12770 for !optimize_size. */
12771 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12772 {
12773 if (ISA_HAS_BRANCHLIKELY
12774 && (optimize_size
12775 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12776 target_flags |= MASK_BRANCHLIKELY;
12777 else
12778 target_flags &= ~MASK_BRANCHLIKELY;
12779 }
12780 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12781 warning (0, "the %qs architecture does not support branch-likely"
12782 " instructions", mips_arch_info->name);
12784 /* The effect of -mabicalls isn't defined for the EABI. */
12785 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12786 {
12787 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12788 target_flags &= ~MASK_ABICALLS;
12789 }
12791 if (TARGET_ABICALLS)
12792 /* We need to set flag_pic for executables as well as DSOs
12793 because we may reference symbols that are not defined in
12794 the final executable. (MIPS does not use things like
12795 copy relocs, for example.)
12797 Also, there is a body of code that uses __PIC__ to distinguish
12798 between -mabicalls and -mno-abicalls code. */
12799 flag_pic = 1;
12801 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12802 faster code, but at the expense of more nops. Enable it at -O3 and
12803 above. */
12804 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12805 target_flags |= MASK_VR4130_ALIGN;
12807 /* Prefer a call to memcpy over inline code when optimizing for size,
12808 though see MOVE_RATIO in mips.h. */
12809 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12810 target_flags |= MASK_MEMCPY;
12812 /* If we have a nonzero small-data limit, check that the -mgpopt
12813 setting is consistent with the other target flags. */
12814 if (mips_small_data_threshold > 0)
12815 {
12816 if (!TARGET_GPOPT)
12817 {
12818 if (!TARGET_EXPLICIT_RELOCS)
12819 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12821 TARGET_LOCAL_SDATA = false;
12822 TARGET_EXTERN_SDATA = false;
12823 }
12824 else
12825 {
12826 if (TARGET_VXWORKS_RTP)
12827 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12829 if (TARGET_ABICALLS)
12830 warning (0, "cannot use small-data accesses for %qs",
12831 "-mabicalls");
12832 }
12833 }
12835 #ifdef MIPS_TFMODE_FORMAT
12836 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12837 #endif
12839 /* Make sure that the user didn't turn off paired single support when
12840 MIPS-3D support is requested. */
12841 if (TARGET_MIPS3D
12842 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12843 && !TARGET_PAIRED_SINGLE_FLOAT)
12844 error ("%<-mips3d%> requires %<-mpaired-single%>");
12846 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12847 if (TARGET_MIPS3D)
12848 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12850 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12851 and TARGET_HARD_FLOAT_ABI are both true. */
12852 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12853 error ("%qs must be used with %qs",
12854 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
12855 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
12857 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12858 enabled. */
12859 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
12860 warning (0, "the %qs architecture does not support paired-single"
12861 " instructions", mips_arch_info->name);
12863 /* If TARGET_DSPR2, enable MASK_DSP. */
12864 if (TARGET_DSPR2)
12865 target_flags |= MASK_DSP;
12867 mips_init_print_operand_punct ();
12869 /* Set up array to map GCC register number to debug register number.
12870 Ignore the special purpose register numbers. */
12872 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12873 {
12874 mips_dbx_regno[i] = INVALID_REGNUM;
12875 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12876 mips_dwarf_regno[i] = i;
12877 else
12878 mips_dwarf_regno[i] = INVALID_REGNUM;
12879 }
12881 start = GP_DBX_FIRST - GP_REG_FIRST;
12882 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12883 mips_dbx_regno[i] = i + start;
12885 start = FP_DBX_FIRST - FP_REG_FIRST;
12886 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12887 mips_dbx_regno[i] = i + start;
12889 /* Accumulator debug registers use big-endian ordering. */
12890 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12891 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12892 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12893 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12894 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12895 {
12896 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12897 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12898 }
12900 /* Set up mips_hard_regno_mode_ok. */
12901 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
12902 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12903 mips_hard_regno_mode_ok[(int)mode][regno]
12904 = mips_hard_regno_mode_ok_p (regno, mode);
12906 /* Function to allocate machine-dependent function status. */
12907 init_machine_status = &mips_init_machine_status;
12909 /* Default to working around R4000 errata only if the processor
12910 was selected explicitly. */
12911 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12912 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12913 target_flags |= MASK_FIX_R4000;
12915 /* Default to working around R4400 errata only if the processor
12916 was selected explicitly. */
12917 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12918 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12919 target_flags |= MASK_FIX_R4400;
12921 /* Save base state of options. */
12922 mips_base_target_flags = target_flags;
12923 mips_base_delayed_branch = flag_delayed_branch;
12924 mips_base_schedule_insns = flag_schedule_insns;
12925 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12926 mips_base_move_loop_invariants = flag_move_loop_invariants;
12927 mips_base_align_loops = align_loops;
12928 mips_base_align_jumps = align_jumps;
12929 mips_base_align_functions = align_functions;
12931 /* Now select the ISA mode.
12933 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
12934 MIPS16 mode afterwards if need be. */
12935 mips_set_mips16_mode (false);
12937 /* We call dbr_schedule from within mips_reorg. */
12938 flag_delayed_branch = 0;
12939 }
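/* Worked example of the register-size inference above (a sketch,
   assuming no explicit -mgp32, -mgp64, -mfp32, -mfp64 or
   -msingle-float options): "-mabi=32 -march=mips32" leaves MASK_64BIT
   and MASK_FLOAT64 clear, giving 32-bit integer and float registers,
   whereas "-mabi=64" with a 64-bit-capable -march sets both, since the
   ABI needs 64-bit GPRs and doubles then default to 64-bit FPRs.  */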
12941 /* Swap the register information for registers I and I + 1, which
12942 currently have the wrong endianness. Note that the registers'
12943 fixedness and call-clobberedness might have been set on the
12944 command line. */
12946 static void
12947 mips_swap_registers (unsigned int i)
12948 {
12949 int tmpi;
12950 const char *tmps;
12952 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12953 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12955 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12956 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12957 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12958 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12960 #undef SWAP_STRING
12961 #undef SWAP_INT
12962 }
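/* Illustrative note (sketch): mips_conditional_register_usage below
   calls this on little-endian targets for the HI/LO pair and for each
   DSP accumulator pair; the swap covers reg_names as well as the
   fixed/call-used arrays, so any settings already made for those
   registers (possibly from the command line) are exchanged along with
   the names.  */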
12964 /* Implement CONDITIONAL_REGISTER_USAGE. */
12966 void
12967 mips_conditional_register_usage (void)
12968 {
12969 if (!ISA_HAS_DSP)
12970 {
12971 int regno;
12973 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12974 fixed_regs[regno] = call_used_regs[regno] = 1;
12975 }
12976 if (!TARGET_HARD_FLOAT)
12977 {
12978 int regno;
12980 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12981 fixed_regs[regno] = call_used_regs[regno] = 1;
12982 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12983 fixed_regs[regno] = call_used_regs[regno] = 1;
12984 }
12985 else if (! ISA_HAS_8CC)
12986 {
12987 int regno;
12989 /* We only have a single condition-code register. We implement
12990 this by fixing all the condition-code registers and generating
12991 RTL that refers directly to ST_REG_FIRST. */
12992 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12993 fixed_regs[regno] = call_used_regs[regno] = 1;
12994 }
12995 /* In MIPS16 mode, we permit the $t temporary registers to be used
12996 for reload. We prohibit the unused $s registers, since they
12997 are call-saved, and saving them via a MIPS16 register would
12998 probably waste more time than just reloading the value. */
12999 if (TARGET_MIPS16)
13000 {
13001 fixed_regs[18] = call_used_regs[18] = 1;
13002 fixed_regs[19] = call_used_regs[19] = 1;
13003 fixed_regs[20] = call_used_regs[20] = 1;
13004 fixed_regs[21] = call_used_regs[21] = 1;
13005 fixed_regs[22] = call_used_regs[22] = 1;
13006 fixed_regs[23] = call_used_regs[23] = 1;
13007 fixed_regs[26] = call_used_regs[26] = 1;
13008 fixed_regs[27] = call_used_regs[27] = 1;
13009 fixed_regs[30] = call_used_regs[30] = 1;
13010 }
13011 /* $f20-$f23 are call-clobbered for n64. */
13012 if (mips_abi == ABI_64)
13013 {
13014 int regno;
13015 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
13016 call_really_used_regs[regno] = call_used_regs[regno] = 1;
13017 }
13018 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
13019 for n32. */
13020 if (mips_abi == ABI_N32)
13021 {
13022 int regno;
13023 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
13024 call_really_used_regs[regno] = call_used_regs[regno] = 1;
13025 }
13026 /* Make sure that double-register accumulator values are correctly
13027 ordered for the current endianness. */
13028 if (TARGET_LITTLE_ENDIAN)
13029 {
13030 unsigned int regno;
13032 mips_swap_registers (MD_REG_FIRST);
13033 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
13034 mips_swap_registers (regno);
13035 }
13036 }
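/* Illustrative outcomes (sketch): with -msoft-float every FP register
   and FP condition-code register becomes fixed and call-used, so the
   allocator never touches them; with -mips16, registers 18-23, 26, 27
   and 30 are withdrawn as described above; and for -mabi=n32 the
   odd-numbered $f21 ... $f31 are marked call-clobbered.  */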
13038 /* Initialize vector TARGET to VALS. */
13040 void
13041 mips_expand_vector_init (rtx target, rtx vals)
13042 {
13043 enum machine_mode mode;
13044 enum machine_mode inner;
13045 unsigned int i, n_elts;
13046 rtx mem;
13048 mode = GET_MODE (target);
13049 inner = GET_MODE_INNER (mode);
13050 n_elts = GET_MODE_NUNITS (mode);
13052 gcc_assert (VECTOR_MODE_P (mode));
13054 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
13055 for (i = 0; i < n_elts; i++)
13056 emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
13057 XVECEXP (vals, 0, i));
13059 emit_move_insn (target, mem);
13060 }
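/* Illustrative expansion (sketch): for a two-element V2SF constructor,
   say a paired-single value built from floats A and B, the code above
   stores A and B into consecutive SFmode slots of a stack temporary
   and then copies the whole temporary into TARGET with a single
   vector-mode move.  */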
13062 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
13063 other registers for instructions for which it is possible. This
13064 encourages the compiler to use CMP in cases where an XOR would
13065 require some register shuffling. */
13067 void
13068 mips_order_regs_for_local_alloc (void)
13069 {
13070 int i;
13072 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
13073 reg_alloc_order[i] = i;
13075 if (TARGET_MIPS16)
13076 {
13077 /* It really doesn't matter where we put register 0, since it is
13078 a fixed register anyhow. */
13079 reg_alloc_order[0] = 24;
13080 reg_alloc_order[24] = 0;
13081 }
13082 }
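/* Illustrative motivation (sketch): the MIPS16 CMP instruction
   implicitly writes its result to $24, so a comparison can be a single
   CMP when its result ends up in $24; listing $24 first makes that the
   allocator's default choice instead of an XOR into some other
   register plus extra moves.  */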
13084 /* Initialize the GCC target structure. */
13085 #undef TARGET_ASM_ALIGNED_HI_OP
13086 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
13087 #undef TARGET_ASM_ALIGNED_SI_OP
13088 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
13089 #undef TARGET_ASM_ALIGNED_DI_OP
13090 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
13092 #undef TARGET_ASM_FUNCTION_PROLOGUE
13093 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
13094 #undef TARGET_ASM_FUNCTION_EPILOGUE
13095 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
13096 #undef TARGET_ASM_SELECT_RTX_SECTION
13097 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
13098 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
13099 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
13101 #undef TARGET_SCHED_INIT
13102 #define TARGET_SCHED_INIT mips_sched_init
13103 #undef TARGET_SCHED_REORDER
13104 #define TARGET_SCHED_REORDER mips_sched_reorder
13105 #undef TARGET_SCHED_REORDER2
13106 #define TARGET_SCHED_REORDER2 mips_sched_reorder
13107 #undef TARGET_SCHED_VARIABLE_ISSUE
13108 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
13109 #undef TARGET_SCHED_ADJUST_COST
13110 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
13111 #undef TARGET_SCHED_ISSUE_RATE
13112 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
13113 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
13114 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
13115 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
13116 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
13117 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
13118 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
13119 mips_multipass_dfa_lookahead
13121 #undef TARGET_DEFAULT_TARGET_FLAGS
13122 #define TARGET_DEFAULT_TARGET_FLAGS \
13123 (TARGET_DEFAULT \
13124 | TARGET_CPU_DEFAULT \
13125 | TARGET_ENDIAN_DEFAULT \
13126 | TARGET_FP_EXCEPTIONS_DEFAULT \
13127 | MASK_CHECK_ZERO_DIV \
13128 | MASK_FUSED_MADD)
13129 #undef TARGET_HANDLE_OPTION
13130 #define TARGET_HANDLE_OPTION mips_handle_option
13132 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
13133 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
13135 #undef TARGET_INSERT_ATTRIBUTES
13136 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
13137 #undef TARGET_MERGE_DECL_ATTRIBUTES
13138 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
13139 #undef TARGET_SET_CURRENT_FUNCTION
13140 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
13142 #undef TARGET_VALID_POINTER_MODE
13143 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
13144 #undef TARGET_RTX_COSTS
13145 #define TARGET_RTX_COSTS mips_rtx_costs
13146 #undef TARGET_ADDRESS_COST
13147 #define TARGET_ADDRESS_COST mips_address_cost
13149 #undef TARGET_IN_SMALL_DATA_P
13150 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
13152 #undef TARGET_MACHINE_DEPENDENT_REORG
13153 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
13155 #undef TARGET_ASM_FILE_START
13156 #define TARGET_ASM_FILE_START mips_file_start
13157 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
13158 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
13160 #undef TARGET_INIT_LIBFUNCS
13161 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
13163 #undef TARGET_BUILD_BUILTIN_VA_LIST
13164 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
13165 #undef TARGET_EXPAND_BUILTIN_VA_START
13166 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
13167 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
13168 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
13170 #undef TARGET_PROMOTE_FUNCTION_ARGS
13171 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
13172 #undef TARGET_PROMOTE_FUNCTION_RETURN
13173 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
13174 #undef TARGET_PROMOTE_PROTOTYPES
13175 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
13177 #undef TARGET_RETURN_IN_MEMORY
13178 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
13179 #undef TARGET_RETURN_IN_MSB
13180 #define TARGET_RETURN_IN_MSB mips_return_in_msb
13182 #undef TARGET_ASM_OUTPUT_MI_THUNK
13183 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
13184 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
13185 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
13187 #undef TARGET_SETUP_INCOMING_VARARGS
13188 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
13189 #undef TARGET_STRICT_ARGUMENT_NAMING
13190 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
13191 #undef TARGET_MUST_PASS_IN_STACK
13192 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
13193 #undef TARGET_PASS_BY_REFERENCE
13194 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
13195 #undef TARGET_CALLEE_COPIES
13196 #define TARGET_CALLEE_COPIES mips_callee_copies
13197 #undef TARGET_ARG_PARTIAL_BYTES
13198 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
13200 #undef TARGET_MODE_REP_EXTENDED
13201 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
13203 #undef TARGET_VECTOR_MODE_SUPPORTED_P
13204 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
13206 #undef TARGET_SCALAR_MODE_SUPPORTED_P
13207 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
13209 #undef TARGET_INIT_BUILTINS
13210 #define TARGET_INIT_BUILTINS mips_init_builtins
13211 #undef TARGET_EXPAND_BUILTIN
13212 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
13214 #undef TARGET_HAVE_TLS
13215 #define TARGET_HAVE_TLS HAVE_AS_TLS
13217 #undef TARGET_CANNOT_FORCE_CONST_MEM
13218 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
13220 #undef TARGET_ENCODE_SECTION_INFO
13221 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
13223 #undef TARGET_ATTRIBUTE_TABLE
13224 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
13225 /* All our function attributes are related to how out-of-line copies should
13226 be compiled or called. They don't in themselves prevent inlining. */
13227 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
13228 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
13230 #undef TARGET_EXTRA_LIVE_ON_ENTRY
13231 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
13233 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
13234 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
13235 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
13236 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
13238 #undef TARGET_COMP_TYPE_ATTRIBUTES
13239 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
13241 #ifdef HAVE_AS_DTPRELWORD
13242 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
13243 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
13244 #endif
13245 #undef TARGET_DWARF_REGISTER_SPAN
13246 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
13248 struct gcc_target targetm = TARGET_INITIALIZER;
13250 #include "gt-mips.h"