/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include <signal.h>
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "output.h"
#include "tree.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "ggc.h"
#include "gstab.h"
#include "hashtab.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "gimple.h"
#include "bitmap.h"
#include "diagnostic.h"

/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X. */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X. */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment. We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore. (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available. We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame. */
#define MIPS_MAX_FIRST_STACK_STEP \
  (!TARGET_MIPS16 ? 0x7ff0 \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
   : TARGET_64BIT ? 0x100 : 0x400)

/* True if INSN is a mips.md pattern or asm statement. */
#define USEFUL_INSN_P(INSN) \
  (INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER \
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself. */
#define SEQ_BEGIN(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, 0) \
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence. */
#define SEQ_END(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
  for ((SUBINSN) = SEQ_BEGIN (INSN); \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
       (SUBINSN) = NEXT_INSN (SUBINSN))
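
/* For example, a caller that wants to visit every instruction, whether or
   not it is wrapped in a delayed-branch SEQUENCE, might write:

       rtx subinsn;
       FOR_EACH_SUBINSN (subinsn, insn)
	 if (USEFUL_INSN_P (subinsn))
	   handle (subinsn);

   where handle () stands for whatever per-instruction processing the
   caller needs. */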

/* True if bit BIT is set in VALUE. */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address. The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx. The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address. */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Enumerates the setting of the -mr10k-cache-barrier option. */
enum mips_r10k_cache_barrier_setting {
  R10K_CACHE_BARRIER_NONE,
  R10K_CACHE_BARRIER_STORE,
  R10K_CACHE_BARRIER_LOAD_STORE
};

/* Macros to create an enumeration identifier for a function prototype. */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
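
/* For example, an entry such as DEF_MIPS_FTYPE (2, (SI, SI, SI)) in
   mips-ftypes.def would expand via MIPS_FTYPE_NAME2 (SI, SI, SI) to the
   enumerator MIPS_SI_FTYPE_SI_SI in mips_function_type below. */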

/* Classifies the prototype of a built-in function. */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};

/* Specifies how a built-in function should be converted into rtl. */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern. The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above. */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern. There is no return
     value and the arguments are mapped to operands 0 and above. */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern. The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order). */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction. Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4. The function arguments are mapped to operands 1 and
     above. The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true. */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register. */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
  MIPS_BUILTIN_BPOSGE32
};

/* Invoke MACRO (COND) for each C.cond.fmt condition. */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f), \
  MACRO (un), \
  MACRO (eq), \
  MACRO (ueq), \
  MACRO (olt), \
  MACRO (ult), \
  MACRO (ole), \
  MACRO (ule), \
  MACRO (sf), \
  MACRO (ngle), \
  MACRO (seq), \
  MACRO (ngl), \
  MACRO (lt), \
  MACRO (nge), \
  MACRO (le), \
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>. */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>. */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
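
/* Because both uses of MIPS_FP_CONDITIONS expand the list in the same
   order, mips_fp_conditions[MIPS_FP_COND_ueq] is "ueq", for example,
   which a caller might splice into a mnemonic such as "c.ueq.s" when
   printing a comparison. */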

/* Information about a function's frame layout. */
struct mips_frame_info GTY(()) {
  /* The size of the frame in bytes. */
  HOST_WIDE_INT total_size;

  /* The number of bytes allocated to variables. */
  HOST_WIDE_INT var_size;

  /* The number of bytes allocated to outgoing function arguments. */
  HOST_WIDE_INT args_size;

  /* The number of bytes allocated to the .cprestore slot, or 0 if there
     is no such slot. */
  HOST_WIDE_INT cprestore_size;

  /* Bit X is set if the function saves or restores GPR X. */
  unsigned int mask;

  /* Likewise FPR X. */
  unsigned int fmask;

  /* The number of GPRs and FPRs saved. */
  unsigned int num_gp;
  unsigned int num_fp;

  /* The offset of the topmost GPR and FPR save slots from the top of
     the frame, or zero if no such slots are needed. */
  HOST_WIDE_INT gp_save_offset;
  HOST_WIDE_INT fp_save_offset;

  /* Likewise, but giving offsets from the bottom of the frame. */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
  HOST_WIDE_INT arg_pointer_offset;

  /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
  HOST_WIDE_INT hard_frame_pointer_offset;
};

struct machine_function GTY(()) {
  /* The register returned by mips16_gp_pseudo_reg; see there for details. */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame. */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info. */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer, or INVALID_REGNUM
     if the function doesn't need one. */
  unsigned int global_pointer;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute. */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro. */
  bool all_noreorder_p;

  /* True if the function is known to have an instruction that needs $gp. */
  bool has_gp_insn_p;

  /* True if we have emitted an instruction to initialize
     mips16_gp_pseudo_rtx. */
  bool initialized_mips16_gp_pseudo_p;
};

/* Information about a single argument. */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers. */
  bool fpr_p;

  /* The number of words passed in registers, rounded up. */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST. For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack. */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up. */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word. Only meaningful when STACK_WORDS is nonzero. */
  unsigned int stack_offset;
};

/* Information about an address described by mips_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references. */
struct mips_address_info {
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};

/* One stage in a constant building sequence. These sequences have
   the form:

       A = VALUE[0]
       A = A CODE[1] VALUE[1]
       A = A CODE[2] VALUE[2]

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer. CODE[0] is undefined. */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end. */
#define MIPS_MAX_INTEGER_OPS 7
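
/* For example, the 32-bit constant 0x12348765 cannot be loaded with a
   single instruction, so it would be described by a two-entry sequence:

       A = 0x12340000		(loaded with LUI)
       A = A IOR 0x8765		(an ORI)

   which mips_build_integer below records as codes[0].value = 0x12340000,
   codes[1].code = IOR and codes[1].value = 0x8765. */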

/* Information about a MIPS16e SAVE or RESTORE instruction. */
struct mips16e_save_restore_info {
  /* The number of argument registers saved by a SAVE instruction.
     0 for RESTORE instructions. */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X. */
  unsigned int mask;

  /* The total number of bytes to allocate. */
  HOST_WIDE_INT size;
};

/* Global variables for machine-dependent things. */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given. */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename. */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet. */
const char *current_function_file = "";

/* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
int sdb_label_count;

/* Arrays that map GCC register numbers to debugger register numbers. */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
int set_noreorder;
int set_nomacro;
static int set_noat;

/* True if we're writing out a branch-likely instruction rather than a
   normal branch. */
static bool mips_branch_likely;

/* The operands passed to the last cmpMM expander. */
rtx cmp_operands[2];

/* The current instruction-set architecture. */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The processor that we should tune the code for. */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* The ISA level associated with mips_arch. */
int mips_isa;

/* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
static const struct mips_cpu_info *mips_isa_option_info;

/* Which ABI to use. */
int mips_abi = MIPS_ABI_DEFAULT;

/* Which cost information to use. */
const struct mips_rtx_cost_data *mips_cost;

/* The ambient target flags, excluding MASK_MIPS16. */
static int mips_base_target_flags;

/* True if MIPS16 is the default mode. */
bool mips_base_mips16;

/* The ambient values of other global variables. */
static int mips_base_delayed_branch; /* flag_delayed_branch */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */

/* The -mcode-readable setting. */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;

/* The -mr10k-cache-barrier setting. */
static enum mips_r10k_cache_barrier_setting mips_r10k_cache_barrier;

/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character. */
bool mips_print_operand_punct[256];

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol. */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_split_hi_p[X] is true if the high parts of symbols of type X
   can be split by mips_split_symbol. */
bool mips_split_hi_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern. */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs. */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];

/* Index R is the smallest register class that contains register R. */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
  M16_REGS, M16_REGS, M16_REGS, M16_REGS,
  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
  M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
  T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  MD0_REG, MD1_REG, NO_REGS, ST_REGS,
  ST_REGS, ST_REGS, ST_REGS, ST_REGS,
  ST_REGS, ST_REGS, ST_REGS, NO_REGS,
  NO_REGS, FRAME_REGS, FRAME_REGS, NO_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
  ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
};

/* The value of TARGET_ATTRIBUTE_TABLE. */
const struct attribute_spec mips_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "long_call", 0, 0, false, true, true, NULL },
  { "far", 0, 0, false, true, true, NULL },
  { "near", 0, 0, false, true, true, NULL },
  /* We would really like to treat "mips16" and "nomips16" as type
     attributes, but GCC doesn't provide the hooks we need to support
     the right conversion rules. As declaration attributes, they affect
     code generation but don't carry other semantics. */
  { "mips16", 0, 0, true, false, false, NULL },
  { "nomips16", 0, 0, true, false, false, NULL },
  { NULL, 0, 0, false, false, false, NULL }
};

/* A table describing all the processors GCC knows about. Names are
   matched in the order listed. The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order
   as GAS's mips_cpu_info_table. Please also make sure that
   MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
   options correctly. */
static const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs. */
  { "mips1", PROCESSOR_R3000, 1, 0 },
  { "mips2", PROCESSOR_R6000, 2, 0 },
  { "mips3", PROCESSOR_R4000, 3, 0 },
  { "mips4", PROCESSOR_R8000, 4, 0 },
  /* Prefer not to use branch-likely instructions for generic MIPS32rX
     and MIPS64rX code. The instructions were officially deprecated
     in revisions 2 and earlier, but revision 3 is likely to downgrade
     that to a recommendation to avoid the instructions in code that
     isn't tuned to a specific processor. */
  { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
  { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
  { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
  /* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
  { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },

  /* MIPS I processors. */
  { "r3000", PROCESSOR_R3000, 1, 0 },
  { "r2000", PROCESSOR_R3000, 1, 0 },
  { "r3900", PROCESSOR_R3900, 1, 0 },

  /* MIPS II processors. */
  { "r6000", PROCESSOR_R6000, 2, 0 },

  /* MIPS III processors. */
  { "r4000", PROCESSOR_R4000, 3, 0 },
  { "vr4100", PROCESSOR_R4100, 3, 0 },
  { "vr4111", PROCESSOR_R4111, 3, 0 },
  { "vr4120", PROCESSOR_R4120, 3, 0 },
  { "vr4130", PROCESSOR_R4130, 3, 0 },
  { "vr4300", PROCESSOR_R4300, 3, 0 },
  { "r4400", PROCESSOR_R4000, 3, 0 },
  { "r4600", PROCESSOR_R4600, 3, 0 },
  { "orion", PROCESSOR_R4600, 3, 0 },
  { "r4650", PROCESSOR_R4650, 3, 0 },
  /* ST Loongson 2E/2F processors. */
  { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
  { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },

  /* MIPS IV processors. */
  { "r8000", PROCESSOR_R8000, 4, 0 },
  { "r10000", PROCESSOR_R10000, 4, 0 },
  { "r12000", PROCESSOR_R10000, 4, 0 },
  { "r14000", PROCESSOR_R10000, 4, 0 },
  { "r16000", PROCESSOR_R10000, 4, 0 },
  { "vr5000", PROCESSOR_R5000, 4, 0 },
  { "vr5400", PROCESSOR_R5400, 4, 0 },
  { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
  { "rm7000", PROCESSOR_R7000, 4, 0 },
  { "rm9000", PROCESSOR_R9000, 4, 0 },

  /* MIPS32 processors. */
  { "4kc", PROCESSOR_4KC, 32, 0 },
  { "4km", PROCESSOR_4KC, 32, 0 },
  { "4kp", PROCESSOR_4KP, 32, 0 },
  { "4ksc", PROCESSOR_4KC, 32, 0 },

  /* MIPS32 Release 2 processors. */
  { "m4k", PROCESSOR_M4K, 33, 0 },
  { "4kec", PROCESSOR_4KC, 33, 0 },
  { "4kem", PROCESSOR_4KC, 33, 0 },
  { "4kep", PROCESSOR_4KP, 33, 0 },
  { "4ksd", PROCESSOR_4KC, 33, 0 },

  { "24kc", PROCESSOR_24KC, 33, 0 },
  { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kx", PROCESSOR_24KF1_1, 33, 0 },

  { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
  { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kex", PROCESSOR_24KF1_1, 33, 0 },

  { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
  { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "34kx", PROCESSOR_24KF1_1, 33, 0 },

  { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
  { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
  { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },

  /* MIPS64 processors. */
  { "5kc", PROCESSOR_5KC, 64, 0 },
  { "5kf", PROCESSOR_5KF, 64, 0 },
  { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
  { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
  { "xlr", PROCESSOR_XLR, 64, 0 },

  /* MIPS64 Release 2 processors. */
  { "octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY }
};

/* Default costs. If these are used for a processor we should look
   up the actual costs. */
#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
  COSTS_N_INSNS (7), /* fp_mult_sf */ \
  COSTS_N_INSNS (8), /* fp_mult_df */ \
  COSTS_N_INSNS (23), /* fp_div_sf */ \
  COSTS_N_INSNS (36), /* fp_div_df */ \
  COSTS_N_INSNS (10), /* int_mult_si */ \
  COSTS_N_INSNS (10), /* int_mult_di */ \
  COSTS_N_INSNS (69), /* int_div_si */ \
  COSTS_N_INSNS (69), /* int_div_di */ \
  2, /* branch_cost */ \
  4 /* memory_latency */

/* Floating-point costs for processors without an FPU. Just assume that
   all floating-point libcalls are very expensive. */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
  COSTS_N_INSNS (256), /* fp_mult_sf */ \
  COSTS_N_INSNS (256), /* fp_mult_df */ \
  COSTS_N_INSNS (256), /* fp_div_sf */ \
  COSTS_N_INSNS (256) /* fp_div_df */

/* Costs to use when optimizing for size. */
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
  COSTS_N_INSNS (1), /* fp_add */
  COSTS_N_INSNS (1), /* fp_mult_sf */
  COSTS_N_INSNS (1), /* fp_mult_df */
  COSTS_N_INSNS (1), /* fp_div_sf */
  COSTS_N_INSNS (1), /* fp_div_df */
  COSTS_N_INSNS (1), /* int_mult_si */
  COSTS_N_INSNS (1), /* int_mult_di */
  COSTS_N_INSNS (1), /* int_div_si */
  COSTS_N_INSNS (1), /* int_div_di */
  2, /* branch_cost */
  4 /* memory_latency */
};

/* Costs to use when optimizing for speed, indexed by processor. */
static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
  { /* R3000 */
    COSTS_N_INSNS (2), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (12), /* int_mult_si */
    COSTS_N_INSNS (12), /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 4KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (6), /* int_mult_si */
    COSTS_N_INSNS (6), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 4KP */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (36), /* int_mult_si */
    COSTS_N_INSNS (36), /* int_mult_di */
    COSTS_N_INSNS (37), /* int_div_si */
    COSTS_N_INSNS (37), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 5KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4), /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 5KF */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4), /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 20KC */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4), /* int_mult_si */
    COSTS_N_INSNS (7), /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 24KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 24KF2_1 */
    COSTS_N_INSNS (8), /* fp_add */
    COSTS_N_INSNS (8), /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 24KF1_1 */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 74KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 74KF2_1 */
    COSTS_N_INSNS (8), /* fp_add */
    COSTS_N_INSNS (8), /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 74KF1_1 */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* 74KF3_2 */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (6), /* fp_mult_sf */
    COSTS_N_INSNS (7), /* fp_mult_df */
    COSTS_N_INSNS (25), /* fp_div_sf */
    COSTS_N_INSNS (48), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* Loongson-2E */
    DEFAULT_COSTS
  },
  { /* Loongson-2F */
    DEFAULT_COSTS
  },
  { /* M4k */
    DEFAULT_COSTS
  },
  /* Octeon */
  {
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (72), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R3900 */
    COSTS_N_INSNS (2), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (2), /* int_mult_si */
    COSTS_N_INSNS (2), /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R6000 */
    COSTS_N_INSNS (3), /* fp_add */
    COSTS_N_INSNS (5), /* fp_mult_sf */
    COSTS_N_INSNS (6), /* fp_mult_df */
    COSTS_N_INSNS (15), /* fp_div_sf */
    COSTS_N_INSNS (16), /* fp_div_df */
    COSTS_N_INSNS (17), /* int_mult_si */
    COSTS_N_INSNS (17), /* int_mult_di */
    COSTS_N_INSNS (38), /* int_div_si */
    COSTS_N_INSNS (38), /* int_div_di */
    2, /* branch_cost */
    6 /* memory_latency */
  },
  { /* R4000 */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (7), /* fp_mult_sf */
    COSTS_N_INSNS (8), /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (10), /* int_mult_si */
    COSTS_N_INSNS (10), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    2, /* branch_cost */
    6 /* memory_latency */
  },
  { /* R4100 */
    DEFAULT_COSTS
  },
  { /* R4111 */
    DEFAULT_COSTS
  },
  { /* R4120 */
    DEFAULT_COSTS
  },
  { /* R4130 */
    /* The only costs that appear to be updated here are
       integer multiplication. */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4), /* int_mult_si */
    COSTS_N_INSNS (6), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R4300 */
    DEFAULT_COSTS
  },
  { /* R4600 */
    DEFAULT_COSTS
  },
  { /* R4650 */
    DEFAULT_COSTS
  },
  { /* R5000 */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (5), /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (5), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R5400 */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (5), /* fp_mult_sf */
    COSTS_N_INSNS (6), /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (3), /* int_mult_si */
    COSTS_N_INSNS (4), /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R5500 */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (5), /* fp_mult_sf */
    COSTS_N_INSNS (6), /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (9), /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R7000 */
    /* The only costs that are changed here are
       integer multiplication. */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (7), /* fp_mult_sf */
    COSTS_N_INSNS (8), /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (9), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R8000 */
    DEFAULT_COSTS
  },
  { /* R9000 */
    /* The only costs that are changed here are
       integer multiplication. */
    COSTS_N_INSNS (6), /* fp_add */
    COSTS_N_INSNS (7), /* fp_mult_sf */
    COSTS_N_INSNS (8), /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (3), /* int_mult_si */
    COSTS_N_INSNS (8), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* R1x000 */
    COSTS_N_INSNS (2), /* fp_add */
    COSTS_N_INSNS (2), /* fp_mult_sf */
    COSTS_N_INSNS (2), /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (5), /* int_mult_si */
    COSTS_N_INSNS (9), /* int_mult_di */
    COSTS_N_INSNS (34), /* int_div_si */
    COSTS_N_INSNS (66), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* SB1 */
    /* These costs are the same as the SB-1A below. */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (4), /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3), /* int_mult_si */
    COSTS_N_INSNS (4), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* SB1-A */
    /* These costs are the same as the SB-1 above. */
    COSTS_N_INSNS (4), /* fp_add */
    COSTS_N_INSNS (4), /* fp_mult_sf */
    COSTS_N_INSNS (4), /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3), /* int_mult_si */
    COSTS_N_INSNS (4), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  },
  { /* SR71000 */
    DEFAULT_COSTS
  },
  { /* XLR */
    /* Need to replace first five with the costs of calling the appropriate
       libgcc routine. */
    COSTS_N_INSNS (256), /* fp_add */
    COSTS_N_INSNS (256), /* fp_mult_sf */
    COSTS_N_INSNS (256), /* fp_mult_df */
    COSTS_N_INSNS (256), /* fp_div_sf */
    COSTS_N_INSNS (256), /* fp_div_df */
    COSTS_N_INSNS (8), /* int_mult_si */
    COSTS_N_INSNS (8), /* int_mult_di */
    COSTS_N_INSNS (72), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1, /* branch_cost */
    4 /* memory_latency */
  }
};

/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
   for -mflip_mips16. It maps decl names onto a boolean mode setting. */
struct mflip_mips16_entry GTY (()) {
  const char *name;
  bool mips16_p;
};
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;

/* Hash table callbacks for mflip_mips16_htab. */

static hashval_t
mflip_mips16_htab_hash (const void *entry)
{
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
}

static int
mflip_mips16_htab_eq (const void *entry, const void *name)
{
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
                 (const char *) name) == 0;
}

/* True if -mflip-mips16 should next add an attribute for the default MIPS16
   mode, false if it should next add an attribute for the opposite mode. */
static GTY(()) bool mips16_flipper;

/* DECL is a function that needs a default "mips16" or "nomips16" attribute
   for -mflip-mips16. Return true if it should use "mips16" and false if
   it should use "nomips16". */

static bool
mflip_mips16_use_mips16_p (tree decl)
{
  struct mflip_mips16_entry *entry;
  const char *name;
  hashval_t hash;
  void **slot;

  /* Use the opposite of the command-line setting for anonymous decls. */
  if (!DECL_NAME (decl))
    return !mips_base_mips16;

  if (!mflip_mips16_htab)
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
                                         mflip_mips16_htab_eq, NULL);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));
  hash = htab_hash_string (name);
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
  entry = (struct mflip_mips16_entry *) *slot;
  if (!entry)
    {
      mips16_flipper = !mips16_flipper;
      entry = GGC_NEW (struct mflip_mips16_entry);
      entry->name = name;
      entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
      *slot = entry;
    }
  return entry->mips16_p;
}
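
/* For example, if the ambient setting is nomips16, the first distinct
   function name seen here is assigned "mips16", the second "nomips16",
   the third "mips16", and so on; a name that is already in the hash
   table keeps the assignment it was given the first time. */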

/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE. */

static bool
mips_near_type_p (const_tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (const_tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
          || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}

/* Similar predicates for "mips16"/"nomips16" function attributes. */

static bool
mips_mips16_decl_p (const_tree decl)
{
  return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
}

static bool
mips_nomips16_decl_p (const_tree decl)
{
  return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
}

/* Return true if function DECL is a MIPS16 function. Return the ambient
   setting if DECL is null. */

static bool
mips_use_mips16_mode_p (tree decl)
{
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
         parent and must therefore use the same ISA mode. */
      tree parent = decl_function_context (decl);
      if (parent)
        decl = parent;
      if (mips_mips16_decl_p (decl))
        return true;
      if (mips_nomips16_decl_p (decl))
        return false;
    }
  return mips_base_mips16;
}

/* Implement TARGET_COMP_TYPE_ATTRIBUTES. */

static int
mips_comp_type_attributes (const_tree type1, const_tree type2)
{
  /* Disallow mixed near/far attributes. */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;
  return 1;
}

/* Implement TARGET_INSERT_ATTRIBUTES. */

static void
mips_insert_attributes (tree decl, tree *attributes)
{
  const char *name;
  bool mips16_p, nomips16_p;

  /* Check for "mips16" and "nomips16" attributes. */
  mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
  nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      if (mips16_p)
        error ("%qs attribute only applies to functions", "mips16");
      if (nomips16_p)
        error ("%qs attribute only applies to functions", "nomips16");
    }
  else
    {
      mips16_p |= mips_mips16_decl_p (decl);
      nomips16_p |= mips_nomips16_decl_p (decl);
      if (mips16_p || nomips16_p)
        {
          /* DECL cannot be simultaneously "mips16" and "nomips16". */
          if (mips16_p && nomips16_p)
            error ("%qs cannot have both %<mips16%> and "
                   "%<nomips16%> attributes",
                   IDENTIFIER_POINTER (DECL_NAME (decl)));
        }
      else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
        {
          /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
             "mips16" attribute, arbitrarily pick one. We must pick the same
             setting for duplicate declarations of a function. */
          name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
          *attributes = tree_cons (get_identifier (name), NULL, *attributes);
        }
    }
}

/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */

static tree
mips_merge_decl_attributes (tree olddecl, tree newdecl)
{
  /* The decls' "mips16" and "nomips16" attributes must match exactly. */
  if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
           IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
  if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
    error ("%qs redeclared with conflicting %qs attributes",
           IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");

  return merge_attributes (DECL_ATTRIBUTES (olddecl),
                           DECL_ATTRIBUTES (newdecl));
}

/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */

static void
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      *base_ptr = x;
      *offset_ptr = 0;
    }
}
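
/* For example, passing (plus (reg $4) (const_int 16)) sets *BASE_PTR to
   (reg $4) and *OFFSET_PTR to 16, while passing a bare (reg $4) sets
   *BASE_PTR to the register itself and *OFFSET_PTR to 0. */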

static unsigned int mips_build_integer (struct mips_integer_op *,
                                        unsigned HOST_WIDE_INT);

/* A subroutine of mips_build_integer, with the same interface.
   Assume that the final action in the sequence should be a left shift. */

static unsigned int
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
{
  unsigned int i, shift;

  /* Shift VALUE right until its lowest bit is set. Shift arithmetically
     since signed numbers are easier to load than unsigned ones. */
  shift = 0;
  while ((value & 1) == 0)
    value /= 2, shift++;

  i = mips_build_integer (codes, value);
  codes[i].code = ASHIFT;
  codes[i].value = shift;
  return i + 1;
}

/* As for mips_build_shift, but assume that the final action will be
   an IOR or PLUS operation. */

static unsigned int
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
{
  unsigned HOST_WIDE_INT high;
  unsigned int i;

  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
    {
      /* The constant is too complex to load with a simple LUI/ORI pair,
         so we want to give the recursive call as many trailing zeros as
         possible. In this case, we know bit 16 is set and that the
         low 16 bits form a negative number. If we subtract that number
         from VALUE, we will clear at least the lowest 17 bits, maybe more. */
      i = mips_build_integer (codes, CONST_HIGH_PART (value));
      codes[i].code = PLUS;
      codes[i].value = CONST_LOW_PART (value);
    }
  else
    {
      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
         bits gives a value with at least 17 trailing zeros. */
      i = mips_build_integer (codes, high);
      codes[i].code = IOR;
      codes[i].value = value & 0xffff;
    }
  return i + 1;
}

/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed. */

static unsigned int
mips_build_integer (struct mips_integer_op *codes,
                    unsigned HOST_WIDE_INT value)
{
  if (SMALL_OPERAND (value)
      || SMALL_OPERAND_UNSIGNED (value)
      || LUI_OPERAND (value))
    {
      /* The value can be loaded with a single instruction. */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
    {
      /* Either the constant is a simple LUI/ORI combination or its
         lowest bit is set. We don't want to shift in this case. */
      return mips_build_lower (codes, value);
    }
  else if ((value & 0xffff) == 0)
    {
      /* The constant will need at least three actions. The lowest
         16 bits are clear, so the final action will be a shift. */
      return mips_build_shift (codes, value);
    }
  else
    {
      /* The final action could be a shift, add or inclusive OR.
         Rather than use a complex condition to select the best
         approach, try both mips_build_shift and mips_build_lower
         and pick the one that gives the shortest sequence.
         Note that this case is only used once per constant. */
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
      unsigned int cost, alt_cost;

      cost = mips_build_shift (codes, value);
      alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
        {
          memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
          cost = alt_cost;
        }
      return cost;
    }
}
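
/* As an illustration, on a 64-bit target the constant 0x123450000 has its
   low 16 bits clear, so the shift form would be chosen:

       A = 0x10000		(LUI)
       A = A IOR 0x2345		(ORI, giving 0x12345)
       A = A ASHIFT 16		(SLL, giving 0x123450000)

   i.e. three CODES entries rather than the six-operation worst case
   described above MIPS_MAX_INTEGER_OPS. */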
1390 /* Return true if symbols of type TYPE require a GOT access. */
1392 static bool
1393 mips_got_symbol_type_p (enum mips_symbol_type type)
1395 switch (type)
1397 case SYMBOL_GOT_PAGE_OFST:
1398 case SYMBOL_GOT_DISP:
1399 return true;
1401 default:
1402 return false;
1406 /* Return true if X is a thread-local symbol. */
1408 static bool
1409 mips_tls_symbol_p (rtx x)
1411 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1414 /* Return true if SYMBOL_REF X is associated with a global symbol
1415 (in the STB_GLOBAL sense). */
1417 static bool
1418 mips_global_symbol_p (const_rtx x)
1420 const_tree decl = SYMBOL_REF_DECL (x);
1422 if (!decl)
1423 return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1425 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1426 or weak symbols. Relocations in the object file will be against
1427 the target symbol, so it's that symbol's binding that matters here. */
1428 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1431 /* Return true if function X is a libgcc MIPS16 stub function. */
1433 static bool
1434 mips16_stub_function_p (const_rtx x)
1436 return (GET_CODE (x) == SYMBOL_REF
1437 && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1440 /* Return true if function X is a locally-defined and locally-binding
1441 MIPS16 function. */
1443 static bool
1444 mips16_local_function_p (const_rtx x)
1446 return (GET_CODE (x) == SYMBOL_REF
1447 && SYMBOL_REF_LOCAL_P (x)
1448 && !SYMBOL_REF_EXTERNAL_P (x)
1449 && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
1452 /* Return true if SYMBOL_REF X binds locally. */
1454 static bool
1455 mips_symbol_binds_local_p (const_rtx x)
1457 return (SYMBOL_REF_DECL (x)
1458 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1459 : SYMBOL_REF_LOCAL_P (x));
1462 /* Return true if rtx constants of mode MODE should be put into a small
1463 data section. */
1465 static bool
1466 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1468 return (!TARGET_EMBEDDED_DATA
1469 && TARGET_LOCAL_SDATA
1470 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1473 /* Return true if X should not be moved directly into register $25.
1474 We need this because many versions of GAS will treat "la $25,foo" as
1475 part of a call sequence and so allow a global "foo" to be lazily bound. */
1477 bool
1478 mips_dangerous_for_la25_p (rtx x)
1480 return (!TARGET_EXPLICIT_RELOCS
1481 && TARGET_USE_GOT
1482 && GET_CODE (x) == SYMBOL_REF
1483 && mips_global_symbol_p (x));
1486 /* Return true if calls to X might need $25 to be valid on entry. */
1488 bool
1489 mips_use_pic_fn_addr_reg_p (const_rtx x)
1491 if (!TARGET_USE_PIC_FN_ADDR_REG)
1492 return false;
1494 /* MIPS16 stub functions are guaranteed not to use $25. */
1495 if (mips16_stub_function_p (x))
1496 return false;
1498 if (GET_CODE (x) == SYMBOL_REF)
1500 /* If PLTs and copy relocations are available, the static linker
1501 will make sure that $25 is valid on entry to the target function. */
1502 if (TARGET_ABICALLS_PIC0)
1503 return false;
1505 /* Locally-defined functions use absolute accesses to set up
1506 the global pointer. */
1507 if (TARGET_ABSOLUTE_ABICALLS
1508 && mips_symbol_binds_local_p (x)
1509 && !SYMBOL_REF_EXTERNAL_P (x))
1510 return false;
1513 return true;
1516 /* Return the method that should be used to access SYMBOL_REF or
1517 LABEL_REF X in context CONTEXT. */
1519 static enum mips_symbol_type
1520 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1522 if (TARGET_RTP_PIC)
1523 return SYMBOL_GOT_DISP;
1525 if (GET_CODE (x) == LABEL_REF)
1527 /* LABEL_REFs are used for jump tables as well as text labels.
1528 Only return SYMBOL_PC_RELATIVE if we know the label is in
1529 the text section. */
1530 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1531 return SYMBOL_PC_RELATIVE;
1533 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1534 return SYMBOL_GOT_PAGE_OFST;
1536 return SYMBOL_ABSOLUTE;
1539 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1541 if (SYMBOL_REF_TLS_MODEL (x))
1542 return SYMBOL_TLS;
1544 if (CONSTANT_POOL_ADDRESS_P (x))
1546 if (TARGET_MIPS16_TEXT_LOADS)
1547 return SYMBOL_PC_RELATIVE;
1549 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1550 return SYMBOL_PC_RELATIVE;
1552 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1553 return SYMBOL_GP_RELATIVE;
1556 /* Do not use small-data accesses for weak symbols; they may end up
1557 being zero. */
1558 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1559 return SYMBOL_GP_RELATIVE;
1561 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1562 is in effect. */
1563 if (TARGET_ABICALLS_PIC2
1564 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1566 /* There are three cases to consider:
1568 - o32 PIC (either with or without explicit relocs)
1569 - n32/n64 PIC without explicit relocs
1570 - n32/n64 PIC with explicit relocs
1572 In the first case, both local and global accesses will use an
1573 R_MIPS_GOT16 relocation. We must correctly predict which of
1574 the two semantics (local or global) the assembler and linker
1575 will apply. The choice depends on the symbol's binding rather
1576 than its visibility.
1578 In the second case, the assembler will not use R_MIPS_GOT16
1579 relocations, but it chooses between local and global accesses
1580 in the same way as for o32 PIC.
1582 In the third case we have more freedom since both forms of
1583 access will work for any kind of symbol. However, there seems
1584 little point in doing things differently. */
1585 if (mips_global_symbol_p (x))
1586 return SYMBOL_GOT_DISP;
1588 return SYMBOL_GOT_PAGE_OFST;
1591 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1592 return SYMBOL_FORCE_TO_MEM;
1594 return SYMBOL_ABSOLUTE;
1597 /* Classify the base of symbolic expression X, given that X appears in
1598 context CONTEXT. */
1600 static enum mips_symbol_type
1601 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1603 rtx offset;
1605 split_const (x, &x, &offset);
1606 if (UNSPEC_ADDRESS_P (x))
1607 return UNSPEC_ADDRESS_TYPE (x);
1609 return mips_classify_symbol (x, context);
1612 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1613 is the alignment in bytes of SYMBOL_REF X. */
1615 static bool
1616 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1618 HOST_WIDE_INT align;
1620 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1621 return IN_RANGE (offset, 0, align - 1);
1624 /* Return true if X is a symbolic constant that can be used in context
1625 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1627 bool
1628 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1629 enum mips_symbol_type *symbol_type)
1631 rtx offset;
1633 split_const (x, &x, &offset);
1634 if (UNSPEC_ADDRESS_P (x))
1636 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1637 x = UNSPEC_ADDRESS (x);
1639 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1641 *symbol_type = mips_classify_symbol (x, context);
1642 if (*symbol_type == SYMBOL_TLS)
1643 return false;
1645 else
1646 return false;
1648 if (offset == const0_rtx)
1649 return true;
1651 /* Check whether a nonzero offset is valid for the underlying
1652 relocations. */
1653 switch (*symbol_type)
1655 case SYMBOL_ABSOLUTE:
1656 case SYMBOL_FORCE_TO_MEM:
1657 case SYMBOL_32_HIGH:
1658 case SYMBOL_64_HIGH:
1659 case SYMBOL_64_MID:
1660 case SYMBOL_64_LOW:
1661 /* If the target has 64-bit pointers and the object file only
1662 supports 32-bit symbols, the values of those symbols will be
1663 sign-extended. In this case we can't allow an arbitrary offset
1664 in case the 32-bit value X + OFFSET has a different sign from X. */
1665 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1666 return offset_within_block_p (x, INTVAL (offset));
1668 /* In other cases the relocations can handle any offset. */
1669 return true;
1671 case SYMBOL_PC_RELATIVE:
1672 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1673 In this case, we no longer have access to the underlying constant,
1674 but the original symbol-based access was known to be valid. */
1675 if (GET_CODE (x) == LABEL_REF)
1676 return true;
1678 /* Fall through. */
1680 case SYMBOL_GP_RELATIVE:
1681 /* Make sure that the offset refers to something within the
1682 same object block. This should guarantee that the final
1683 PC- or GP-relative offset is within the 16-bit limit. */
1684 return offset_within_block_p (x, INTVAL (offset));
1686 case SYMBOL_GOT_PAGE_OFST:
1687 case SYMBOL_GOTOFF_PAGE:
1688 /* If the symbol is global, the GOT entry will contain the symbol's
1689 address, and we will apply a 16-bit offset after loading it.
1690 If the symbol is local, the linker should provide enough local
1691 GOT entries for a 16-bit offset, but larger offsets may lead
1692 to GOT overflow. */
1693 return SMALL_INT (offset);
1695 case SYMBOL_TPREL:
1696 case SYMBOL_DTPREL:
1697 /* There is no carry between the HI and LO REL relocations, so the
1698 offset is only valid if we know it won't lead to such a carry. */
1699 return mips_offset_within_alignment_p (x, INTVAL (offset));
1701 case SYMBOL_GOT_DISP:
1702 case SYMBOL_GOTOFF_DISP:
1703 case SYMBOL_GOTOFF_CALL:
1704 case SYMBOL_GOTOFF_LOADGP:
1705 case SYMBOL_TLSGD:
1706 case SYMBOL_TLSLDM:
1707 case SYMBOL_GOTTPREL:
1708 case SYMBOL_TLS:
1709 case SYMBOL_HALF:
1710 return false;
1712 gcc_unreachable ();
1715 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1716 single instruction. We rely on the fact that, in the worst case,
1717 all instructions involved in a MIPS16 address calculation are
1718 extended ones. */
1720 static int
1721 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1723 switch (type)
1725 case SYMBOL_ABSOLUTE:
1726 /* When using 64-bit symbols, we need 5 preparatory instructions,
1727 such as:
1729 lui $at,%highest(symbol)
1730 daddiu $at,$at,%higher(symbol)
1731 dsll $at,$at,16
1732 daddiu $at,$at,%hi(symbol)
1733 dsll $at,$at,16
1735 The final address is then $at + %lo(symbol). With 32-bit
1736 symbols we just need a preparatory LUI for normal mode and
1737 a preparatory LI and SLL for MIPS16. */
1738 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1740 case SYMBOL_GP_RELATIVE:
1741 /* Treat GP-relative accesses as taking a single instruction on
1742 MIPS16 too; the copy of $gp can often be shared. */
1743 return 1;
1745 case SYMBOL_PC_RELATIVE:
1746 /* PC-relative constants can only be used with ADDIUPC,
1747 DADDIUPC, LWPC and LDPC. */
1748 if (mode == MAX_MACHINE_MODE
1749 || GET_MODE_SIZE (mode) == 4
1750 || GET_MODE_SIZE (mode) == 8)
1751 return 1;
1753 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1754 return 0;
1756 case SYMBOL_FORCE_TO_MEM:
1757 /* LEAs will be converted into constant-pool references by
1758 mips_reorg. */
1759 if (mode == MAX_MACHINE_MODE)
1760 return 1;
1762 /* The constant must be loaded and then dereferenced. */
1763 return 0;
1765 case SYMBOL_GOT_DISP:
1766 /* The constant will have to be loaded from the GOT before it
1767 is used in an address. */
1768 if (mode != MAX_MACHINE_MODE)
1769 return 0;
1771 /* Fall through. */
1773 case SYMBOL_GOT_PAGE_OFST:
1774 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1775 local/global classification is accurate. The worst cases are:
1777 (1) For local symbols when generating o32 or o64 code. The assembler
1778 will use:
1780 lw $at,%got(symbol)
1783 ...and the final address will be $at + %lo(symbol).
1785 (2) For global symbols when -mxgot. The assembler will use:
1787 lui $at,%got_hi(symbol)
1788 (d)addu $at,$at,$gp
1790 ...and the final address will be $at + %got_lo(symbol). */
1791 return 3;
1793 case SYMBOL_GOTOFF_PAGE:
1794 case SYMBOL_GOTOFF_DISP:
1795 case SYMBOL_GOTOFF_CALL:
1796 case SYMBOL_GOTOFF_LOADGP:
1797 case SYMBOL_32_HIGH:
1798 case SYMBOL_64_HIGH:
1799 case SYMBOL_64_MID:
1800 case SYMBOL_64_LOW:
1801 case SYMBOL_TLSGD:
1802 case SYMBOL_TLSLDM:
1803 case SYMBOL_DTPREL:
1804 case SYMBOL_GOTTPREL:
1805 case SYMBOL_TPREL:
1806 case SYMBOL_HALF:
1807 /* A 16-bit constant formed by a single relocation, or a 32-bit
1808 constant formed from a high 16-bit relocation and a low 16-bit
1809 relocation. Use mips_split_p to determine which. 32-bit
1810 constants need an "lui; addiu" sequence for normal mode and
1811 an "li; sll; addiu" sequence for MIPS16 mode. */
1812 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1814 case SYMBOL_TLS:
1815 /* We don't treat a bare TLS symbol as a constant. */
1816 return 0;
1818 gcc_unreachable ();
1821 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1822 to load symbols of type TYPE into a register. Return 0 if the given
1823 type of symbol cannot be used as an immediate operand.
1825 Otherwise, return the number of instructions needed to load or store
1826 values of mode MODE to or from addresses of type TYPE. Return 0 if
1827 the given type of symbol is not valid in addresses.
1829 In both cases, treat extended MIPS16 instructions as two instructions. */
1831 static int
1832 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1834 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
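/* Illustrative worked example (assuming 32-bit symbols): loading a
   SYMBOL_ABSOLUTE constant costs

     mips_symbol_insns (SYMBOL_ABSOLUTE, MAX_MACHINE_MODE) == 2

   in normal mode (an LUI plus the instruction that applies %lo), and 6
   in MIPS16 mode, where the LI/SLL/ADDIU sequence is counted as three
   extended instructions.  */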
1837 /* A for_each_rtx callback. Stop the search if *X references a
1838 thread-local symbol. */
1840 static int
1841 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1843 return mips_tls_symbol_p (*x);
1846 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1848 static bool
1849 mips_cannot_force_const_mem (rtx x)
1851 enum mips_symbol_type type;
1852 rtx base, offset;
1854 /* There is no assembler syntax for expressing an address-sized
1855 high part. */
1856 if (GET_CODE (x) == HIGH)
1857 return true;
1859 /* As an optimization, reject constants that mips_legitimize_move
1860 can expand inline.
1862 Suppose we have a multi-instruction sequence that loads constant C
1863 into register R. If R does not get allocated a hard register, and
1864 R is used in an operand that allows both registers and memory
1865 references, reload will consider forcing C into memory and using
1866 one of the instruction's memory alternatives. Returning false
1867 here will force it to use an input reload instead. */
1868 if (GET_CODE (x) == CONST_INT && LEGITIMATE_CONSTANT_P (x))
1869 return true;
1871 split_const (x, &base, &offset);
1872 if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
1873 && type != SYMBOL_FORCE_TO_MEM)
1875 /* The same optimization as for CONST_INT. */
1876 if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
1877 return true;
1879 /* If MIPS16 constant pools live in the text section, they should
1880 not refer to anything that might need run-time relocation. */
1881 if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
1882 return true;
1885 /* TLS symbols must be computed by mips_legitimize_move. */
1886 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1887 return true;
1889 return false;
1892 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1893 constants when we're using a per-function constant pool. */
1895 static bool
1896 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1897 const_rtx x ATTRIBUTE_UNUSED)
1899 return !TARGET_MIPS16_PCREL_LOADS;
1902 /* Return true if register REGNO is a valid base register for mode MODE.
1903 STRICT_P is true if REG_OK_STRICT is in effect. */
1906 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1907 bool strict_p)
1909 if (!HARD_REGISTER_NUM_P (regno))
1911 if (!strict_p)
1912 return true;
1913 regno = reg_renumber[regno];
1916 /* These fake registers will be eliminated to either the stack or
1917 hard frame pointer, both of which are usually valid base registers.
1918 Reload deals with the cases where the eliminated form isn't valid. */
1919 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1920 return true;
1922 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1923 values, nothing smaller. There are two problems here:
1925 (a) Instantiating virtual registers can introduce new uses of the
1926 stack pointer. If these virtual registers are valid addresses,
1927 the stack pointer should be too.
1929 (b) Most uses of the stack pointer are not made explicit until
1930 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1931 We don't know until that stage whether we'll be eliminating to the
1932 stack pointer (which needs the restriction) or the hard frame
1933 pointer (which doesn't).
1935 All in all, it seems more consistent to only enforce this restriction
1936 during and after reload. */
1937 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1938 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1940 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1943 /* Return true if X is a valid base register for mode MODE.
1944 STRICT_P is true if REG_OK_STRICT is in effect. */
1946 static bool
1947 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1949 if (!strict_p && GET_CODE (x) == SUBREG)
1950 x = SUBREG_REG (x);
1952 return (REG_P (x)
1953 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1956 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1957 can address a value of mode MODE. */
1959 static bool
1960 mips_valid_offset_p (rtx x, enum machine_mode mode)
1962 /* Check that X is a signed 16-bit number. */
1963 if (!const_arith_operand (x, Pmode))
1964 return false;
1966 /* We may need to split multiword moves, so make sure that every word
1967 is accessible. */
1968 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1969 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1970 return false;
1972 return true;
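/* For example (illustrative, 32-bit target): an offset of 0x7ff8 is
   valid for a DImode access, since the second word uses offset 0x7ffc,
   but 0x7ffc is not, because the second word would need 0x8000, which
   is not a SMALL_OPERAND.  */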
1975 /* Return true if a LO_SUM can address a value of mode MODE when the
1976 LO_SUM symbol has type SYMBOL_TYPE. */
1978 static bool
1979 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
1981 /* Check that symbols of type SYMBOL_TYPE can be used to access values
1982 of mode MODE. */
1983 if (mips_symbol_insns (symbol_type, mode) == 0)
1984 return false;
1986 /* Check that there is a known low-part relocation. */
1987 if (mips_lo_relocs[symbol_type] == NULL)
1988 return false;
1990 /* We may need to split multiword moves, so make sure that each word
1991 can be accessed without inducing a carry. This is mainly needed
1992 for o64, which has historically only guaranteed 64-bit alignment
1993 for 128-bit types. */
1994 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1995 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1996 return false;
1998 return true;
2001 /* Return true if X is a valid address for machine mode MODE. If it is,
2002 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2003 effect. */
2005 static bool
2006 mips_classify_address (struct mips_address_info *info, rtx x,
2007 enum machine_mode mode, bool strict_p)
2009 switch (GET_CODE (x))
2011 case REG:
2012 case SUBREG:
2013 info->type = ADDRESS_REG;
2014 info->reg = x;
2015 info->offset = const0_rtx;
2016 return mips_valid_base_register_p (info->reg, mode, strict_p);
2018 case PLUS:
2019 info->type = ADDRESS_REG;
2020 info->reg = XEXP (x, 0);
2021 info->offset = XEXP (x, 1);
2022 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2023 && mips_valid_offset_p (info->offset, mode));
2025 case LO_SUM:
2026 info->type = ADDRESS_LO_SUM;
2027 info->reg = XEXP (x, 0);
2028 info->offset = XEXP (x, 1);
2029 /* We have to trust the creator of the LO_SUM to do something vaguely
2030 sane. Target-independent code that creates a LO_SUM should also
2031 create and verify the matching HIGH. Target-independent code that
2032 adds an offset to a LO_SUM must prove that the offset will not
2033 induce a carry. Failure to do either of these things would be
2034 a bug, and we are not required to check for it here. The MIPS
2035 backend itself should only create LO_SUMs for valid symbolic
2036 constants, with the high part being either a HIGH or a copy
2037 of _gp. */
2038 info->symbol_type
2039 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
2040 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2041 && mips_valid_lo_sum_p (info->symbol_type, mode));
2043 case CONST_INT:
2044 /* Small-integer addresses don't occur very often, but they
2045 are legitimate if $0 is a valid base register. */
2046 info->type = ADDRESS_CONST_INT;
2047 return !TARGET_MIPS16 && SMALL_INT (x);
2049 case CONST:
2050 case LABEL_REF:
2051 case SYMBOL_REF:
2052 info->type = ADDRESS_SYMBOLIC;
2053 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2054 &info->symbol_type)
2055 && mips_symbol_insns (info->symbol_type, mode) > 0
2056 && !mips_split_p[info->symbol_type]);
2058 default:
2059 return false;
2063 /* Return true if X is a legitimate address for a memory operand of mode
2064 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
2066 bool
2067 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2069 struct mips_address_info addr;
2071 return mips_classify_address (&addr, x, mode, strict_p);
2074 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2076 bool
2077 mips_stack_address_p (rtx x, enum machine_mode mode)
2079 struct mips_address_info addr;
2081 return (mips_classify_address (&addr, x, mode, false)
2082 && addr.type == ADDRESS_REG
2083 && addr.reg == stack_pointer_rtx);
2086 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2087 address instruction. Note that such addresses are not considered
2088 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
2089 is so restricted. */
2091 static bool
2092 mips_lwxs_address_p (rtx addr)
2094 if (ISA_HAS_LWXS
2095 && GET_CODE (addr) == PLUS
2096 && REG_P (XEXP (addr, 1)))
2098 rtx offset = XEXP (addr, 0);
2099 if (GET_CODE (offset) == MULT
2100 && REG_P (XEXP (offset, 0))
2101 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2102 && INTVAL (XEXP (offset, 1)) == 4)
2103 return true;
2105 return false;
2108 /* Return true if a value at OFFSET bytes from base register BASE can be
2109 accessed using an unextended MIPS16 instruction. MODE is the mode of
2110 the value.
2112 Usually the offset in an unextended instruction is a 5-bit field.
2113 The offset is unsigned and shifted left once for LH and SH, twice
2114 for LW and SW, and so on. The exceptions are LWSP and SWSP, which have
2115 an 8-bit immediate field that's shifted left twice. */
2117 static bool
2118 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2119 unsigned HOST_WIDE_INT offset)
2121 if (offset % GET_MODE_SIZE (mode) == 0)
2123 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2124 return offset < 256U * GET_MODE_SIZE (mode);
2125 return offset < 32U * GET_MODE_SIZE (mode);
2127 return false;
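/* For example (illustrative): an LW or SW from a base other than $sp
   is unextended for offsets 0, 4, ..., 124; LWSP and SWSP accept
   offsets up to 1020; LH and SH are unextended for even offsets up
   to 62.  */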
2130 /* Return the number of instructions needed to load or store a value
2131 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2132 Assume that multiword moves may need to be split into word moves
2133 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2134 enough.
2136 For MIPS16 code, count extended instructions as two instructions. */
2139 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2141 struct mips_address_info addr;
2142 int factor;
2144 /* BLKmode is used for single unaligned loads and stores and should
2145 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2146 meaningless, so we have to single it out as a special case one way
2147 or the other.) */
2148 if (mode != BLKmode && might_split_p)
2149 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2150 else
2151 factor = 1;
2153 if (mips_classify_address (&addr, x, mode, false))
2154 switch (addr.type)
2156 case ADDRESS_REG:
2157 if (TARGET_MIPS16
2158 && !mips16_unextended_reference_p (mode, addr.reg,
2159 UINTVAL (addr.offset)))
2160 return factor * 2;
2161 return factor;
2163 case ADDRESS_LO_SUM:
2164 return TARGET_MIPS16 ? factor * 2 : factor;
2166 case ADDRESS_CONST_INT:
2167 return factor;
2169 case ADDRESS_SYMBOLIC:
2170 return factor * mips_symbol_insns (addr.symbol_type, mode);
2172 return 0;
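/* As an illustrative sketch of the interface (hypothetical caller):

     int cost = mips_address_insns (XEXP (mem, 0), GET_MODE (mem), true);

   counts the instructions needed to access MEM, on the assumption that
   a multiword access may have to be split into word accesses.  */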
2175 /* Return the number of instructions needed to load constant X.
2176 Return 0 if X isn't a valid constant. */
2179 mips_const_insns (rtx x)
2181 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2182 enum mips_symbol_type symbol_type;
2183 rtx offset;
2185 switch (GET_CODE (x))
2187 case HIGH:
2188 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2189 &symbol_type)
2190 || !mips_split_p[symbol_type])
2191 return 0;
2193 /* This is simply an LUI for normal mode. It is an extended
2194 LI followed by an extended SLL for MIPS16. */
2195 return TARGET_MIPS16 ? 4 : 1;
2197 case CONST_INT:
2198 if (TARGET_MIPS16)
2199 /* Unsigned 8-bit constants can be loaded using an unextended
2200 LI instruction. Unsigned 16-bit constants can be loaded
2201 using an extended LI. Negative constants must be loaded
2202 using LI and then negated. */
2203 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2204 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2205 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2206 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2207 : 0);
2209 return mips_build_integer (codes, INTVAL (x));
2211 case CONST_DOUBLE:
2212 case CONST_VECTOR:
2213 /* Allow zeros for normal mode, where we can use $0. */
2214 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2216 case CONST:
2217 if (CONST_GP_P (x))
2218 return 1;
2220 /* See if we can refer to X directly. */
2221 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2222 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2224 /* Otherwise try splitting the constant into a base and offset.
2225 If the offset is a 16-bit value, we can load the base address
2226 into a register and then use (D)ADDIU to add in the offset.
2227 If the offset is larger, we can load the base and offset
2228 into separate registers and add them together with (D)ADDU.
2229 However, the latter is only possible before reload; during
2230 and after reload, we must have the option of forcing the
2231 constant into the pool instead. */
2232 split_const (x, &x, &offset);
2233 if (offset != 0)
2235 int n = mips_const_insns (x);
2236 if (n != 0)
2238 if (SMALL_INT (offset))
2239 return n + 1;
2240 else if (!targetm.cannot_force_const_mem (x))
2241 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2244 return 0;
2246 case SYMBOL_REF:
2247 case LABEL_REF:
2248 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2249 MAX_MACHINE_MODE);
2251 default:
2252 return 0;
2256 /* X is a doubleword constant that can be handled by splitting it into
2257 two words and loading each word separately. Return the number of
2258 instructions required to do this. */
2261 mips_split_const_insns (rtx x)
2263 unsigned int low, high;
2265 low = mips_const_insns (mips_subword (x, false));
2266 high = mips_const_insns (mips_subword (x, true));
2267 gcc_assert (low > 0 && high > 0);
2268 return low + high;
2271 /* Return the number of instructions needed to implement INSN,
2272 given that it loads from or stores to MEM. Count extended
2273 MIPS16 instructions as two instructions. */
2276 mips_load_store_insns (rtx mem, rtx insn)
2278 enum machine_mode mode;
2279 bool might_split_p;
2280 rtx set;
2282 gcc_assert (MEM_P (mem));
2283 mode = GET_MODE (mem);
2285 /* Try to prove that INSN does not need to be split. */
2286 might_split_p = true;
2287 if (GET_MODE_BITSIZE (mode) == 64)
2289 set = single_set (insn);
2290 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2291 might_split_p = false;
2294 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2297 /* Return the number of instructions needed for an integer division. */
2300 mips_idiv_insns (void)
2302 int count;
2304 count = 1;
2305 if (TARGET_CHECK_ZERO_DIV)
2307 if (GENERATE_DIVIDE_TRAPS)
2308 count++;
2309 else
2310 count += 2;
2313 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2314 count++;
2315 return count;
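/* For example (illustrative): with the zero-division check enabled on
   a target that cannot use divide traps, the count is 3 (the DIV plus
   two instructions for the explicit check), and the R4000/R4400
   errata workarounds add one more, giving 4.  */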
2318 /* Emit a move from SRC to DEST. Assume that the move expanders can
2319 handle all moves if !can_create_pseudo_p (). The distinction is
2320 important because, unlike emit_move_insn, the move expanders know
2321 how to force Pmode objects into the constant pool even when the
2322 constant pool address is not itself legitimate. */
2325 mips_emit_move (rtx dest, rtx src)
2327 return (can_create_pseudo_p ()
2328 ? emit_move_insn (dest, src)
2329 : emit_move_insn_1 (dest, src));
2332 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2334 static void
2335 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2337 emit_insn (gen_rtx_SET (VOIDmode, target,
2338 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2341 /* Compute (CODE OP0 OP1) and store the result in a new register
2342 of mode MODE. Return that new register. */
2344 static rtx
2345 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2347 rtx reg;
2349 reg = gen_reg_rtx (mode);
2350 mips_emit_binary (code, reg, op0, op1);
2351 return reg;
2354 /* Copy VALUE to a register and return that register. If new pseudos
2355 are allowed, copy it into a new register, otherwise use DEST. */
2357 static rtx
2358 mips_force_temporary (rtx dest, rtx value)
2360 if (can_create_pseudo_p ())
2361 return force_reg (Pmode, value);
2362 else
2364 mips_emit_move (dest, value);
2365 return dest;
2369 /* Emit a call sequence with call pattern PATTERN and return the call
2370 instruction itself (which is not necessarily the last instruction
2371 emitted). ORIG_ADDR is the original, unlegitimized address,
2372 ADDR is the legitimized form, and LAZY_P is true if the call
2373 address is lazily-bound. */
2375 static rtx
2376 mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
2378 rtx insn, reg;
2380 insn = emit_call_insn (pattern);
2382 if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
2384 /* MIPS16 JALRs only take MIPS16 registers. If the target
2385 function requires $25 to be valid on entry, we must copy it
2386 there separately. The move instruction can be put in the
2387 call's delay slot. */
2388 reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
2389 emit_insn_before (gen_move_insn (reg, addr), insn);
2390 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
2393 if (lazy_p)
2394 /* Lazy-binding stubs require $gp to be valid on entry. */
2395 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2397 if (TARGET_USE_GOT)
2399 /* See the comment above load_call<mode> for details. */
2400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2401 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2402 emit_insn (gen_update_got_version ());
2404 return insn;
2407 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2408 then add CONST_INT OFFSET to the result. */
2410 static rtx
2411 mips_unspec_address_offset (rtx base, rtx offset,
2412 enum mips_symbol_type symbol_type)
2414 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2415 UNSPEC_ADDRESS_FIRST + symbol_type);
2416 if (offset != const0_rtx)
2417 base = gen_rtx_PLUS (Pmode, base, offset);
2418 return gen_rtx_CONST (Pmode, base);
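/* Schematically (illustrative), wrapping symbol "foo" with type
   SYMBOL_GP_RELATIVE and a nonzero offset of 4 produces RTL of the
   form:

     (const (plus (unspec [(symbol_ref "foo")]
                          UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)
                  (const_int 4)))

   which split_const, UNSPEC_ADDRESS and UNSPEC_ADDRESS_TYPE can later
   unpack.  */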
2421 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2422 type SYMBOL_TYPE. */
2425 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2427 rtx base, offset;
2429 split_const (address, &base, &offset);
2430 return mips_unspec_address_offset (base, offset, symbol_type);
2433 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2434 high part to BASE and return the result. Just return BASE otherwise.
2435 TEMP is as for mips_force_temporary.
2437 The returned expression can be used as the first operand to a LO_SUM. */
2439 static rtx
2440 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2441 enum mips_symbol_type symbol_type)
2443 if (mips_split_p[symbol_type])
2445 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2446 addr = mips_force_temporary (temp, addr);
2447 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2449 return base;
2452 /* Return an instruction that copies $gp into register REG. We want
2453 GCC to treat the register's value as constant, so that its value
2454 can be rematerialized on demand. */
2456 static rtx
2457 gen_load_const_gp (rtx reg)
2459 return (Pmode == SImode
2460 ? gen_load_const_gp_si (reg)
2461 : gen_load_const_gp_di (reg));
2464 /* Return a pseudo register that contains the value of $gp throughout
2465 the current function. Such registers are needed by MIPS16 functions,
2466 for which $gp itself is not a valid base register or addition operand. */
2468 static rtx
2469 mips16_gp_pseudo_reg (void)
2471 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2472 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2474 /* Don't emit an instruction to initialize the pseudo register if
2475 we are being called from the tree optimizers' cost-calculation
2476 routines. */
2477 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2478 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2480 rtx insn, scan;
2482 push_topmost_sequence ();
2484 scan = get_insns ();
2485 while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
2486 scan = NEXT_INSN (scan);
2488 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2489 emit_insn_after (insn, scan);
2491 pop_topmost_sequence ();
2493 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2496 return cfun->machine->mips16_gp_pseudo_rtx;
2499 /* Return a base register that holds pic_offset_table_rtx.
2500 TEMP, if nonnull, is a scratch Pmode base register. */
2503 mips_pic_base_register (rtx temp)
2505 if (!TARGET_MIPS16)
2506 return pic_offset_table_rtx;
2508 if (can_create_pseudo_p ())
2509 return mips16_gp_pseudo_reg ();
2511 if (TARGET_USE_GOT)
2512 /* The first post-reload split exposes all references to $gp
2513 (both uses and definitions). All references must remain
2514 explicit after that point.
2516 It is safe to introduce uses of $gp at any time, so for
2517 simplicity, we do that before the split too. */
2518 mips_emit_move (temp, pic_offset_table_rtx);
2519 else
2520 emit_insn (gen_load_const_gp (temp));
2521 return temp;
2524 /* Create and return a GOT reference of type TYPE for address ADDR.
2525 TEMP, if nonnull, is a scratch Pmode base register. */
2528 mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
2530 rtx base, high, lo_sum_symbol;
2532 base = mips_pic_base_register (temp);
2534 /* If we used the temporary register to load $gp, we can't use
2535 it for the high part as well. */
2536 if (temp != NULL && reg_overlap_mentioned_p (base, temp))
2537 temp = NULL;
2539 high = mips_unspec_offset_high (temp, base, addr, type);
2540 lo_sum_symbol = mips_unspec_address (addr, type);
2542 if (type == SYMBOL_GOTOFF_CALL)
2543 return (Pmode == SImode
2544 ? gen_unspec_callsi (high, lo_sum_symbol)
2545 : gen_unspec_calldi (high, lo_sum_symbol));
2546 else
2547 return (Pmode == SImode
2548 ? gen_unspec_gotsi (high, lo_sum_symbol)
2549 : gen_unspec_gotdi (high, lo_sum_symbol));
2552 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2553 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2554 constant in that context and can be split into high and low parts.
2555 If so, and if LOW_OUT is nonnull, emit the high part and store the
2556 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2558 TEMP is as for mips_force_temporary and is used to load the high
2559 part into a register.
2561 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2562 a legitimate SET_SRC for an .md pattern, otherwise the low part
2563 is guaranteed to be a legitimate address for mode MODE. */
2565 bool
2566 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2568 enum mips_symbol_context context;
2569 enum mips_symbol_type symbol_type;
2570 rtx high;
2572 context = (mode == MAX_MACHINE_MODE
2573 ? SYMBOL_CONTEXT_LEA
2574 : SYMBOL_CONTEXT_MEM);
2575 if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
2577 addr = XEXP (addr, 0);
2578 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2579 && mips_symbol_insns (symbol_type, mode) > 0
2580 && mips_split_hi_p[symbol_type])
2582 if (low_out)
2583 switch (symbol_type)
2585 case SYMBOL_GOT_PAGE_OFST:
2586 /* The high part of a page/ofst pair is loaded from the GOT. */
2587 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
2588 break;
2590 default:
2591 gcc_unreachable ();
2593 return true;
2596 else
2598 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2599 && mips_symbol_insns (symbol_type, mode) > 0
2600 && mips_split_p[symbol_type])
2602 if (low_out)
2603 switch (symbol_type)
2605 case SYMBOL_GOT_DISP:
2606 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
2607 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
2608 break;
2610 case SYMBOL_GP_RELATIVE:
2611 high = mips_pic_base_register (temp);
2612 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2613 break;
2615 default:
2616 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2617 high = mips_force_temporary (temp, high);
2618 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2619 break;
2621 return true;
2624 return false;
2627 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2628 mips_force_temporary; it is only needed when OFFSET is not a
2629 SMALL_OPERAND. */
2631 static rtx
2632 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2634 if (!SMALL_OPERAND (offset))
2636 rtx high;
2638 if (TARGET_MIPS16)
2640 /* Load the full offset into a register so that we can use
2641 an unextended instruction for the address itself. */
2642 high = GEN_INT (offset);
2643 offset = 0;
2645 else
2647 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2648 high = GEN_INT (CONST_HIGH_PART (offset));
2649 offset = CONST_LOW_PART (offset);
2651 high = mips_force_temporary (temp, high);
2652 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2654 return plus_constant (reg, offset);
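/* For example (illustrative, non-MIPS16): for REG + 0x12340, the
   constant splits into CONST_HIGH_PART 0x10000 and CONST_LOW_PART
   0x2340, so TEMP is loaded with 0x10000 and added to REG, and the
   returned address is (plus TEMP 0x2340).  */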
2657 /* The __tls_get_addr symbol. */
2658 static GTY(()) rtx mips_tls_symbol;
2660 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2661 the TLS symbol we are referencing and TYPE is the symbol type to use
2662 (either global dynamic or local dynamic). V0 is an RTX for the
2663 return value location. */
2665 static rtx
2666 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2668 rtx insn, loc, a0;
2670 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2672 if (!mips_tls_symbol)
2673 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2675 loc = mips_unspec_address (sym, type);
2677 start_sequence ();
2679 emit_insn (gen_rtx_SET (Pmode, a0,
2680 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2681 insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
2682 const0_rtx, NULL_RTX, false);
2683 RTL_CONST_CALL_P (insn) = 1;
2684 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2685 insn = get_insns ();
2687 end_sequence ();
2689 return insn;
2692 /* Return a pseudo register that contains the current thread pointer. */
2694 static rtx
2695 mips_get_tp (void)
2697 rtx tp;
2699 tp = gen_reg_rtx (Pmode);
2700 if (Pmode == DImode)
2701 emit_insn (gen_tls_get_tp_di (tp));
2702 else
2703 emit_insn (gen_tls_get_tp_si (tp));
2704 return tp;
2707 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2708 its address. The return value will be both a valid address and a valid
2709 SET_SRC (either a REG or a LO_SUM). */
2711 static rtx
2712 mips_legitimize_tls_address (rtx loc)
2714 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2715 enum tls_model model;
2717 if (TARGET_MIPS16)
2719 sorry ("MIPS16 TLS");
2720 return gen_reg_rtx (Pmode);
2723 model = SYMBOL_REF_TLS_MODEL (loc);
2724 /* Only TARGET_ABICALLS code can have more than one module; other
2725 code must be static and should not use a GOT. All TLS models
2726 reduce to local exec in this situation. */
2727 if (!TARGET_ABICALLS)
2728 model = TLS_MODEL_LOCAL_EXEC;
2730 switch (model)
2732 case TLS_MODEL_GLOBAL_DYNAMIC:
2733 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2734 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2735 dest = gen_reg_rtx (Pmode);
2736 emit_libcall_block (insn, dest, v0, loc);
2737 break;
2739 case TLS_MODEL_LOCAL_DYNAMIC:
2740 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2741 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2742 tmp1 = gen_reg_rtx (Pmode);
2744 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2745 share the LDM result with other LD model accesses. */
2746 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2747 UNSPEC_TLS_LDM);
2748 emit_libcall_block (insn, tmp1, v0, eqv);
2750 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2751 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2752 mips_unspec_address (loc, SYMBOL_DTPREL));
2753 break;
2755 case TLS_MODEL_INITIAL_EXEC:
2756 tp = mips_get_tp ();
2757 tmp1 = gen_reg_rtx (Pmode);
2758 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2759 if (Pmode == DImode)
2760 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2761 else
2762 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2763 dest = gen_reg_rtx (Pmode);
2764 emit_insn (gen_add3_insn (dest, tmp1, tp));
2765 break;
2767 case TLS_MODEL_LOCAL_EXEC:
2768 tp = mips_get_tp ();
2769 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2770 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2771 mips_unspec_address (loc, SYMBOL_TPREL));
2772 break;
2774 default:
2775 gcc_unreachable ();
2777 return dest;
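/* As an illustrative sketch only (the exact sequence depends on the
   ISA and ABI), a local-exec access to a thread-local variable "x"
   corresponds to the LO_SUM built above and might assemble to:

     rdhwr   $3,$29                  # read the thread pointer
     lui     $at,%tprel_hi(x)
     addu    $at,$at,$3
     lw      $2,%tprel_lo(x)($at)  */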
2780 /* If X is not a valid address for mode MODE, force it into a register. */
2782 static rtx
2783 mips_force_address (rtx x, enum machine_mode mode)
2785 if (!mips_legitimate_address_p (mode, x, false))
2786 x = force_reg (Pmode, x);
2787 return x;
2790 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2791 be legitimized in a way that the generic machinery might not expect,
2792 put the new address in *XLOC and return true. MODE is the mode of
2793 the memory being accessed. */
2795 bool
2796 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2798 rtx base, addr;
2799 HOST_WIDE_INT offset;
2801 if (mips_tls_symbol_p (*xloc))
2803 *xloc = mips_legitimize_tls_address (*xloc);
2804 return true;
2807 /* See if the address can split into a high part and a LO_SUM. */
2808 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2810 *xloc = mips_force_address (addr, mode);
2811 return true;
2814 /* Handle BASE + OFFSET using mips_add_offset. */
2815 mips_split_plus (*xloc, &base, &offset);
2816 if (offset != 0)
2818 if (!mips_valid_base_register_p (base, mode, false))
2819 base = copy_to_mode_reg (Pmode, base);
2820 addr = mips_add_offset (NULL, base, offset);
2821 *xloc = mips_force_address (addr, mode);
2822 return true;
2824 return false;
2827 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2829 void
2830 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2832 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2833 enum machine_mode mode;
2834 unsigned int i, num_ops;
2835 rtx x;
2837 mode = GET_MODE (dest);
2838 num_ops = mips_build_integer (codes, value);
2840 /* Apply each binary operation to X. Invariant: X is a legitimate
2841 source operand for a SET pattern. */
2842 x = GEN_INT (codes[0].value);
2843 for (i = 1; i < num_ops; i++)
2845 if (!can_create_pseudo_p ())
2847 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2848 x = temp;
2850 else
2851 x = force_reg (mode, x);
2852 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2855 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
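/* For example (illustrative, non-MIPS16): VALUE 0x12345678 is built in
   two operations, roughly

     lui     $7,0x1234
     ori     $7,$7,0x5678

   with the destination register chosen by the caller.  */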
2858 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2859 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2860 move_operand. */
2862 static void
2863 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2865 rtx base, offset;
2867 /* Split moves of big integers into smaller pieces. */
2868 if (splittable_const_int_operand (src, mode))
2870 mips_move_integer (dest, dest, INTVAL (src));
2871 return;
2874 /* Split moves of symbolic constants into high/low pairs. */
2875 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2877 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2878 return;
2881 /* Generate the appropriate access sequences for TLS symbols. */
2882 if (mips_tls_symbol_p (src))
2884 mips_emit_move (dest, mips_legitimize_tls_address (src));
2885 return;
2888 /* If we have (const (plus symbol offset)), and that expression cannot
2889 be forced into memory, load the symbol first and add in the offset.
2890 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2891 forced into memory, as it usually produces better code. */
2892 split_const (src, &base, &offset);
2893 if (offset != const0_rtx
2894 && (targetm.cannot_force_const_mem (src)
2895 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2897 base = mips_force_temporary (dest, base);
2898 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2899 return;
2902 src = force_const_mem (mode, src);
2904 /* When using explicit relocs, constant pool references are sometimes
2905 not legitimate addresses. */
2906 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2907 mips_emit_move (dest, src);
2910 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2911 sequence that is valid. */
2913 bool
2914 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2916 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2918 mips_emit_move (dest, force_reg (mode, src));
2919 return true;
2922 /* We need to deal with constants that would be legitimate
2923 immediate_operands but aren't legitimate move_operands. */
2924 if (CONSTANT_P (src) && !move_operand (src, mode))
2926 mips_legitimize_const_move (mode, dest, src);
2927 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2928 return true;
2930 return false;
2933 /* Return true if value X in context CONTEXT is a small-data address
2934 that can be rewritten as a LO_SUM. */
2936 static bool
2937 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2939 enum mips_symbol_type symbol_type;
2941 return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
2942 && !mips_split_p[SYMBOL_GP_RELATIVE]
2943 && mips_symbolic_constant_p (x, context, &symbol_type)
2944 && symbol_type == SYMBOL_GP_RELATIVE);
2947 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2948 containing MEM, or null if none. */
2950 static int
2951 mips_small_data_pattern_1 (rtx *loc, void *data)
2953 enum mips_symbol_context context;
2955 if (GET_CODE (*loc) == LO_SUM)
2956 return -1;
2958 if (MEM_P (*loc))
2960 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2961 return 1;
2962 return -1;
2965 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2966 return mips_rewrite_small_data_p (*loc, context);
2969 /* Return true if OP refers to small data symbols directly, not through
2970 a LO_SUM. */
2972 bool
2973 mips_small_data_pattern_p (rtx op)
2975 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2978 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2979 DATA is the containing MEM, or null if none. */
2981 static int
2982 mips_rewrite_small_data_1 (rtx *loc, void *data)
2984 enum mips_symbol_context context;
2986 if (MEM_P (*loc))
2988 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2989 return -1;
2992 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2993 if (mips_rewrite_small_data_p (*loc, context))
2994 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2996 if (GET_CODE (*loc) == LO_SUM)
2997 return -1;
2999 return 0;
3002 /* Rewrite instruction pattern PATTERN so that it refers to small data
3003 using explicit relocations. */
3006 mips_rewrite_small_data (rtx pattern)
3008 pattern = copy_insn (pattern);
3009 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
3010 return pattern;
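/* For example (illustrative): a reference such as (mem (symbol_ref "x"))
   to a small-data object can be rewritten as
   (mem (lo_sum $gp (symbol_ref "x"))), which with explicit relocations
   assembles to something like:

     lw      $2,%gp_rel(x)($28)  */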
3013 /* We need a lot of little routines to check the range of MIPS16 immediate
3014 operands. */
3016 static int
3017 m16_check_op (rtx op, int low, int high, int mask)
3019 return (GET_CODE (op) == CONST_INT
3020 && IN_RANGE (INTVAL (op), low, high)
3021 && (INTVAL (op) & mask) == 0);
3025 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3027 return m16_check_op (op, 0x1, 0x8, 0);
3031 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3033 return m16_check_op (op, -0x8, 0x7, 0);
3037 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3039 return m16_check_op (op, -0x7, 0x8, 0);
3043 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3045 return m16_check_op (op, -0x10, 0xf, 0);
3049 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3051 return m16_check_op (op, -0xf, 0x10, 0);
3055 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3057 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
3061 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3063 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
3067 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3069 return m16_check_op (op, -0x80, 0x7f, 0);
3073 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3075 return m16_check_op (op, -0x7f, 0x80, 0);
3079 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3081 return m16_check_op (op, 0x0, 0xff, 0);
3085 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3087 return m16_check_op (op, -0xff, 0x0, 0);
3091 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3093 return m16_check_op (op, -0x1, 0xfe, 0);
3097 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3099 return m16_check_op (op, 0x0, 0xff << 2, 3);
3103 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3105 return m16_check_op (op, -0xff << 2, 0x0, 3);
3109 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3111 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
3115 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3117 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
3120 /* The cost of loading values from the constant pool. It should be
3121 larger than the cost of any constant we want to synthesize inline. */
3122 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3124 /* Return the cost of X when used as an operand to the MIPS16 instruction
3125 that implements CODE. Return -1 if there is no such instruction, or if
3126 X is not a valid immediate operand for it. */
3128 static int
3129 mips16_constant_cost (int code, HOST_WIDE_INT x)
3131 switch (code)
3133 case ASHIFT:
3134 case ASHIFTRT:
3135 case LSHIFTRT:
3136 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3137 other shifts are extended. The shift patterns truncate the shift
3138 count to the right size, so there are no out-of-range values. */
3139 if (IN_RANGE (x, 1, 8))
3140 return 0;
3141 return COSTS_N_INSNS (1);
3143 case PLUS:
3144 if (IN_RANGE (x, -128, 127))
3145 return 0;
3146 if (SMALL_OPERAND (x))
3147 return COSTS_N_INSNS (1);
3148 return -1;
3150 case LEU:
3151 /* Like LE, but reject the always-true case. */
3152 if (x == -1)
3153 return -1;
3154 case LE:
3155 /* We add 1 to the immediate and use SLT. */
3156 x += 1;
3157 case XOR:
3158 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3159 case LT:
3160 case LTU:
3161 if (IN_RANGE (x, 0, 255))
3162 return 0;
3163 if (SMALL_OPERAND_UNSIGNED (x))
3164 return COSTS_N_INSNS (1);
3165 return -1;
3167 case EQ:
3168 case NE:
3169 /* Equality comparisons with 0 are cheap. */
3170 if (x == 0)
3171 return 0;
3172 return -1;
3174 default:
3175 return -1;
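/* Worked example (illustrative): for an outer PLUS, an immediate of
   100 costs 0 (it fits in an unextended ADDIU), 1000 costs
   COSTS_N_INSNS (1) (an extended ADDIU), and 0x10000 is rejected with
   -1 because it is not a SMALL_OPERAND.  */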
3179 /* Return true if there is a non-MIPS16 instruction that implements CODE
3180 and if that instruction accepts X as an immediate operand. */
3182 static int
3183 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
3185 switch (code)
3187 case ASHIFT:
3188 case ASHIFTRT:
3189 case LSHIFTRT:
3190 /* All shift counts are truncated to a valid constant. */
3191 return true;
3193 case ROTATE:
3194 case ROTATERT:
3195 /* Likewise rotates, if the target supports rotates at all. */
3196 return ISA_HAS_ROR;
3198 case AND:
3199 case IOR:
3200 case XOR:
3201 /* These instructions take 16-bit unsigned immediates. */
3202 return SMALL_OPERAND_UNSIGNED (x);
3204 case PLUS:
3205 case LT:
3206 case LTU:
3207 /* These instructions take 16-bit signed immediates. */
3208 return SMALL_OPERAND (x);
3210 case EQ:
3211 case NE:
3212 case GT:
3213 case GTU:
3214 /* The "immediate" forms of these instructions are really
3215 implemented as comparisons with register 0. */
3216 return x == 0;
3218 case GE:
3219 case GEU:
3220 /* Likewise, meaning that the only valid immediate operand is 1. */
3221 return x == 1;
3223 case LE:
3224 /* We add 1 to the immediate and use SLT. */
3225 return SMALL_OPERAND (x + 1);
3227 case LEU:
3228 /* Likewise SLTU, but reject the always-true case. */
3229 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3231 case SIGN_EXTRACT:
3232 case ZERO_EXTRACT:
3233 /* The bit position and size are immediate operands. */
3234 return ISA_HAS_EXT_INS;
3236 default:
3237 /* By default assume that $0 can be used for 0. */
3238 return x == 0;
3242 /* Return the cost of binary operation X, given that the instruction
3243 sequence for a word-sized or smaller operation has cost SINGLE_COST
3244 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3246 static int
3247 mips_binary_cost (rtx x, int single_cost, int double_cost)
3249 int cost;
3251 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3252 cost = double_cost;
3253 else
3254 cost = single_cost;
3255 return (cost
3256 + rtx_cost (XEXP (x, 0), 0, !optimize_size)
3257 + rtx_cost (XEXP (x, 1), GET_CODE (x), !optimize_size));
3260 /* Return the cost of floating-point multiplications of mode MODE. */
3262 static int
3263 mips_fp_mult_cost (enum machine_mode mode)
3265 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3268 /* Return the cost of floating-point divisions of mode MODE. */
3270 static int
3271 mips_fp_div_cost (enum machine_mode mode)
3273 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3276 /* Return the cost of sign-extending OP to mode MODE, not including the
3277 cost of OP itself. */
3279 static int
3280 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3282 if (MEM_P (op))
3283 /* Extended loads are as cheap as unextended ones. */
3284 return 0;
3286 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3287 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3288 return 0;
3290 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3291 /* We can use SEB or SEH. */
3292 return COSTS_N_INSNS (1);
3294 /* We need to use a shift left and a shift right. */
3295 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3298 /* Return the cost of zero-extending OP to mode MODE, not including the
3299 cost of OP itself. */
3301 static int
3302 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3304 if (MEM_P (op))
3305 /* Extended loads are as cheap as unextended ones. */
3306 return 0;
3308 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3309 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3310 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3312 if (GENERATE_MIPS16E)
3313 /* We can use ZEB or ZEH. */
3314 return COSTS_N_INSNS (1);
3316 if (TARGET_MIPS16)
3317 /* We need to load 0xff or 0xffff into a register and use AND. */
3318 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3320 /* We can use ANDI. */
3321 return COSTS_N_INSNS (1);
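/* Summary example (illustrative): zero-extending a QImode register is
   a single ANDI on normal MIPS and a single ZEB on MIPS16e, but plain
   MIPS16 needs two instructions (an LI of 0xff and an AND); extending
   SImode to DImode on a 64-bit target needs the two-shift sequence.  */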
3324 /* Implement TARGET_RTX_COSTS. */
3326 static bool
3327 mips_rtx_costs (rtx x, int code, int outer_code, int *total,
3328 bool speed)
3330 enum machine_mode mode = GET_MODE (x);
3331 bool float_mode_p = FLOAT_MODE_P (mode);
3332 int cost;
3333 rtx addr;
3335 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3336 appear in the instruction stream, and the cost of a comparison is
3337 really the cost of the branch or scc condition. At the time of
3338 writing, GCC only uses an explicit outer COMPARE code when optabs
3339 is testing whether a constant is expensive enough to force into a
3340 register. We want optabs to pass such constants through the MIPS
3341 expanders instead, so make all constants very cheap here. */
3342 if (outer_code == COMPARE)
3344 gcc_assert (CONSTANT_P (x));
3345 *total = 0;
3346 return true;
3349 switch (code)
3351 case CONST_INT:
3352 /* Treat *clear_upper32-style ANDs as having zero cost in the
3353 second operand. The cost is entirely in the first operand.
3355 ??? This is needed because we would otherwise try to CSE
3356 the constant operand. Although that's the right thing for
3357 instructions that continue to be a register operation throughout
3358 compilation, it is disastrous for instructions that could
3359 later be converted into a memory operation. */
3360 if (TARGET_64BIT
3361 && outer_code == AND
3362 && UINTVAL (x) == 0xffffffff)
3364 *total = 0;
3365 return true;
3368 if (TARGET_MIPS16)
3370 cost = mips16_constant_cost (outer_code, INTVAL (x));
3371 if (cost >= 0)
3373 *total = cost;
3374 return true;
3377 else
3379 /* When not optimizing for size, we care more about the cost
3380 of hot code, and hot code is often in a loop. If a constant
3381 operand needs to be forced into a register, we will often be
3382 able to hoist the constant load out of the loop, so the load
3383 should not contribute to the cost. */
3384 if (!optimize_size
3385 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3387 *total = 0;
3388 return true;
3391 /* Fall through. */
3393 case CONST:
3394 case SYMBOL_REF:
3395 case LABEL_REF:
3396 case CONST_DOUBLE:
3397 if (force_to_mem_operand (x, VOIDmode))
3399 *total = COSTS_N_INSNS (1);
3400 return true;
3402 cost = mips_const_insns (x);
3403 if (cost > 0)
3405 /* If the constant is likely to be stored in a GPR, SETs of
3406 single-insn constants are as cheap as register sets; we
3407 never want to CSE them.
3409 Don't reduce the cost of storing a floating-point zero in
3410 FPRs. If we have a zero in an FPR for other reasons, we
3411 can get better cfg-cleanup and delayed-branch results by
3412 using it consistently, rather than using $0 sometimes and
3413 an FPR at other times. Also, moves between floating-point
3414 registers are sometimes cheaper than (D)MTC1 $0. */
3415 if (cost == 1
3416 && outer_code == SET
3417 && !(float_mode_p && TARGET_HARD_FLOAT))
3418 cost = 0;
3419 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3420 want to CSE the constant itself. It is usually better to
3421 have N copies of the last operation in the sequence and one
3422 shared copy of the other operations. (Note that this is
3423 not true for MIPS16 code, where the final operation in the
3424 sequence is often an extended instruction.)
3426 Also, if we have a CONST_INT, we don't know whether it is
3427 for a word or doubleword operation, so we cannot rely on
3428 the result of mips_build_integer. */
3429 else if (!TARGET_MIPS16
3430 && (outer_code == SET || mode == VOIDmode))
3431 cost = 1;
3432 *total = COSTS_N_INSNS (cost);
3433 return true;
3435 /* The value will need to be fetched from the constant pool. */
3436 *total = CONSTANT_POOL_COST;
3437 return true;
3439 case MEM:
3440 /* If the address is legitimate, return the number of
3441 instructions it needs. */
3442 addr = XEXP (x, 0);
3443 cost = mips_address_insns (addr, mode, true);
3444 if (cost > 0)
3446 *total = COSTS_N_INSNS (cost + 1);
3447 return true;
3449 /* Check for a scaled indexed address. */
3450 if (mips_lwxs_address_p (addr))
3452 *total = COSTS_N_INSNS (2);
3453 return true;
3455 /* Otherwise use the default handling. */
3456 return false;
3458 case FFS:
3459 *total = COSTS_N_INSNS (6);
3460 return false;
3462 case NOT:
3463 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3464 return false;
3466 case AND:
3467 /* Check for a *clear_upper32 pattern and treat it like a zero
3468 extension. See the pattern's comment for details. */
3469 if (TARGET_64BIT
3470 && mode == DImode
3471 && CONST_INT_P (XEXP (x, 1))
3472 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3474 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3475 + rtx_cost (XEXP (x, 0), 0, speed));
3476 return true;
3478 /* Fall through. */
3480 case IOR:
3481 case XOR:
3482 /* Double-word operations use two single-word operations. */
3483 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3484 return true;
3486 case ASHIFT:
3487 case ASHIFTRT:
3488 case LSHIFTRT:
3489 case ROTATE:
3490 case ROTATERT:
3491 if (CONSTANT_P (XEXP (x, 1)))
3492 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3493 else
3494 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3495 return true;
3497 case ABS:
3498 if (float_mode_p)
3499 *total = mips_cost->fp_add;
3500 else
3501 *total = COSTS_N_INSNS (4);
3502 return false;
3504 case LO_SUM:
3505 /* Low-part immediates need an extended MIPS16 instruction. */
3506 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3507 + rtx_cost (XEXP (x, 0), 0, speed));
3508 return true;
3510 case LT:
3511 case LTU:
3512 case LE:
3513 case LEU:
3514 case GT:
3515 case GTU:
3516 case GE:
3517 case GEU:
3518 case EQ:
3519 case NE:
3520 case UNORDERED:
3521 case LTGT:
3522 /* Branch comparisons have VOIDmode, so use the first operand's
3523 mode instead. */
3524 mode = GET_MODE (XEXP (x, 0));
3525 if (FLOAT_MODE_P (mode))
3527 *total = mips_cost->fp_add;
3528 return false;
3530 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3531 return true;
3533 case MINUS:
3534 if (float_mode_p
3535 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3536 && TARGET_FUSED_MADD
3537 && !HONOR_NANS (mode)
3538 && !HONOR_SIGNED_ZEROS (mode))
3540 /* See if we can use NMADD or NMSUB. See mips.md for the
3541 associated patterns. */
3542 rtx op0 = XEXP (x, 0);
3543 rtx op1 = XEXP (x, 1);
3544 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3546 *total = (mips_fp_mult_cost (mode)
3547 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0, speed)
3548 + rtx_cost (XEXP (op0, 1), 0, speed)
3549 + rtx_cost (op1, 0, speed));
3550 return true;
3552 if (GET_CODE (op1) == MULT)
3554 *total = (mips_fp_mult_cost (mode)
3555 + rtx_cost (op0, 0, speed)
3556 + rtx_cost (XEXP (op1, 0), 0, speed)
3557 + rtx_cost (XEXP (op1, 1), 0, speed));
3558 return true;
3561 /* Fall through. */
3563 case PLUS:
3564 if (float_mode_p)
3566 /* If this is part of a MADD or MSUB, treat the PLUS as
3567 being free. */
3568 if (ISA_HAS_FP4
3569 && TARGET_FUSED_MADD
3570 && GET_CODE (XEXP (x, 0)) == MULT)
3571 *total = 0;
3572 else
3573 *total = mips_cost->fp_add;
3574 return false;
3577 /* Double-word operations require three single-word operations and
3578 an SLTU. The MIPS16 version then needs to move the result of
3579 the SLTU from $24 to a MIPS16 register. */
3580 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3581 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3582 return true;
3584 case NEG:
3585 if (float_mode_p
3586 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3587 && TARGET_FUSED_MADD
3588 && !HONOR_NANS (mode)
3589 && HONOR_SIGNED_ZEROS (mode))
3591 /* See if we can use NMADD or NMSUB. See mips.md for the
3592 associated patterns. */
3593 rtx op = XEXP (x, 0);
3594 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3595 && GET_CODE (XEXP (op, 0)) == MULT)
3597 *total = (mips_fp_mult_cost (mode)
3598 + rtx_cost (XEXP (XEXP (op, 0), 0), 0, speed)
3599 + rtx_cost (XEXP (XEXP (op, 0), 1), 0, speed)
3600 + rtx_cost (XEXP (op, 1), 0, speed));
3601 return true;
3605 if (float_mode_p)
3606 *total = mips_cost->fp_add;
3607 else
3608 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3609 return false;
3611 case MULT:
3612 if (float_mode_p)
3613 *total = mips_fp_mult_cost (mode);
3614 else if (mode == DImode && !TARGET_64BIT)
3615 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3616 where the mulsidi3 always includes an MFHI and an MFLO. */
3617 *total = (optimize_size
3618 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3619 : mips_cost->int_mult_si * 3 + 6);
3620 else if (optimize_size)
3621 *total = (ISA_HAS_MUL3 ? 1 : 2);
3622 else if (mode == DImode)
3623 *total = mips_cost->int_mult_di;
3624 else
3625 *total = mips_cost->int_mult_si;
3626 return false;
3628 case DIV:
3629 /* Check for a reciprocal. */
3630 if (float_mode_p
3631 && ISA_HAS_FP4
3632 && flag_unsafe_math_optimizations
3633 && XEXP (x, 0) == CONST1_RTX (mode))
3635 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3636 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3637 division as being free. */
3638 *total = rtx_cost (XEXP (x, 1), 0, speed);
3639 else
3640 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0, speed);
3641 return true;
3643 /* Fall through. */
3645 case SQRT:
3646 case MOD:
3647 if (float_mode_p)
3649 *total = mips_fp_div_cost (mode);
3650 return false;
3652 /* Fall through. */
3654 case UDIV:
3655 case UMOD:
3656 if (optimize_size)
3658 /* It is our responsibility to make division by a power of 2
3659 as cheap as 2 register additions if we want the division
3660 expanders to be used for such operations; see the setting
3661 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3662 should always produce shorter code than using
3663 expand_sdiv2_pow2. */
3664 if (TARGET_MIPS16
3665 && CONST_INT_P (XEXP (x, 1))
3666 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3668 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0, speed);
3669 return true;
3671 *total = COSTS_N_INSNS (mips_idiv_insns ());
3673 else if (mode == DImode)
3674 *total = mips_cost->int_div_di;
3675 else
3676 *total = mips_cost->int_div_si;
3677 return false;
3679 case SIGN_EXTEND:
3680 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3681 return false;
3683 case ZERO_EXTEND:
3684 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3685 return false;
3687 case FLOAT:
3688 case UNSIGNED_FLOAT:
3689 case FIX:
3690 case FLOAT_EXTEND:
3691 case FLOAT_TRUNCATE:
3692 *total = mips_cost->fp_add;
3693 return false;
3695 default:
3696 return false;
3700 /* Implement TARGET_ADDRESS_COST. */
3702 static int
3703 mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
3705 return mips_address_insns (addr, SImode, false);
3708 /* Return one word of double-word value OP, taking into account the fixed
3709 endianness of certain registers. HIGH_P is true to select the high part,
3710 false to select the low part. */
3712 rtx
3713 mips_subword (rtx op, bool high_p)
3715 unsigned int byte, offset;
3716 enum machine_mode mode;
3718 mode = GET_MODE (op);
3719 if (mode == VOIDmode)
3720 mode = TARGET_64BIT ? TImode : DImode;
3722 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3723 byte = UNITS_PER_WORD;
3724 else
3725 byte = 0;
3727 if (FP_REG_RTX_P (op))
3729 /* Paired FPRs are always ordered little-endian. */
3730 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3731 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3734 if (MEM_P (op))
3735 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3737 return simplify_gen_subreg (word_mode, op, mode, byte);
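/* For illustration only (register names chosen for the example): on a
   32-bit big-endian target a DImode value in the pair $4/$5 keeps its
   high word in $4 (byte offset 0) and its low word in $5; on a
   little-endian target the roles are swapped.  Paired FPRs are the
   exception handled above: the low word always lives in the
   even-numbered register, whatever the target's endianness.  */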
3740 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3742 bool
3743 mips_split_64bit_move_p (rtx dest, rtx src)
3745 if (TARGET_64BIT)
3746 return false;
3748 /* FPR-to-FPR moves can be done in a single instruction, if they're
3749 allowed at all. */
3750 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3751 return false;
3753 /* Check for floating-point loads and stores. */
3754 if (ISA_HAS_LDC1_SDC1)
3756 if (FP_REG_RTX_P (dest) && MEM_P (src))
3757 return false;
3758 if (FP_REG_RTX_P (src) && MEM_P (dest))
3759 return false;
3761 return true;
3764 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3765 this function handles 64-bit moves for which mips_split_64bit_move_p
3766 holds. For 64-bit targets, this function handles 128-bit moves. */
3768 void
3769 mips_split_doubleword_move (rtx dest, rtx src)
3771 rtx low_dest;
3773 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3775 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3776 emit_insn (gen_move_doubleword_fprdi (dest, src));
3777 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3778 emit_insn (gen_move_doubleword_fprdf (dest, src));
3779 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3780 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3781 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
3782 emit_insn (gen_move_doubleword_fprv2si (dest, src));
3783 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
3784 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
3785 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
3786 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
3787 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3788 emit_insn (gen_move_doubleword_fprtf (dest, src));
3789 else
3790 gcc_unreachable ();
3792 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3794 low_dest = mips_subword (dest, false);
3795 mips_emit_move (low_dest, mips_subword (src, false));
3796 if (TARGET_64BIT)
3797 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3798 else
3799 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3801 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3803 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3804 if (TARGET_64BIT)
3805 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3806 else
3807 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3809 else
3811 /* The operation can be split into two normal moves. Decide in
3812 which order to do them. */
3813 low_dest = mips_subword (dest, false);
3814 if (REG_P (low_dest)
3815 && reg_overlap_mentioned_p (low_dest, src))
3817 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3818 mips_emit_move (low_dest, mips_subword (src, false));
3820 else
3822 mips_emit_move (low_dest, mips_subword (src, false));
3823 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
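/* For illustration only (register names chosen for the example): when
   loading a doubleword from memory addressed by $4 into the pair $4/$5,
   moving the half that lands in $4 first would corrupt the address used
   by the second load, so the code above checks for the overlap and
   orders the two word moves accordingly.  */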
3828 /* Return the appropriate instructions to move SRC into DEST. Assume
3829 that SRC is operand 1 and DEST is operand 0. */
3831 const char *
3832 mips_output_move (rtx dest, rtx src)
3834 enum rtx_code dest_code, src_code;
3835 enum machine_mode mode;
3836 enum mips_symbol_type symbol_type;
3837 bool dbl_p;
3839 dest_code = GET_CODE (dest);
3840 src_code = GET_CODE (src);
3841 mode = GET_MODE (dest);
3842 dbl_p = (GET_MODE_SIZE (mode) == 8);
3844 if (dbl_p && mips_split_64bit_move_p (dest, src))
3845 return "#";
3847 if ((src_code == REG && GP_REG_P (REGNO (src)))
3848 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3850 if (dest_code == REG)
3852 if (GP_REG_P (REGNO (dest)))
3853 return "move\t%0,%z1";
3855 /* Moves to HI are handled by special .md insns. */
3856 if (REGNO (dest) == LO_REGNUM)
3857 return "mtlo\t%z1";
3859 if (DSP_ACC_REG_P (REGNO (dest)))
3861 static char retval[] = "mt__\t%z1,%q0";
3863 retval[2] = reg_names[REGNO (dest)][4];
3864 retval[3] = reg_names[REGNO (dest)][5];
3865 return retval;
3868 if (FP_REG_P (REGNO (dest)))
3869 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3871 if (ALL_COP_REG_P (REGNO (dest)))
3873 static char retval[] = "dmtc_\t%z1,%0";
3875 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3876 return dbl_p ? retval : retval + 1;
3879 if (dest_code == MEM)
3880 switch (GET_MODE_SIZE (mode))
3882 case 1: return "sb\t%z1,%0";
3883 case 2: return "sh\t%z1,%0";
3884 case 4: return "sw\t%z1,%0";
3885 case 8: return "sd\t%z1,%0";
3888 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3890 if (src_code == REG)
3892 /* Moves from HI are handled by special .md insns. */
3893 if (REGNO (src) == LO_REGNUM)
3895 /* When generating VR4120 or VR4130 code, we use MACC and
3896 DMACC instead of MFLO. This avoids both the normal
3897 MIPS III HI/LO hazards and the errata related to
3898 -mfix-vr4130. */
3899 if (ISA_HAS_MACCHI)
3900 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3901 return "mflo\t%0";
3904 if (DSP_ACC_REG_P (REGNO (src)))
3906 static char retval[] = "mf__\t%0,%q1";
3908 retval[2] = reg_names[REGNO (src)][4];
3909 retval[3] = reg_names[REGNO (src)][5];
3910 return retval;
3913 if (FP_REG_P (REGNO (src)))
3914 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3916 if (ALL_COP_REG_P (REGNO (src)))
3918 static char retval[] = "dmfc_\t%0,%1";
3920 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3921 return dbl_p ? retval : retval + 1;
3924 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3925 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3928 if (src_code == MEM)
3929 switch (GET_MODE_SIZE (mode))
3931 case 1: return "lbu\t%0,%1";
3932 case 2: return "lhu\t%0,%1";
3933 case 4: return "lw\t%0,%1";
3934 case 8: return "ld\t%0,%1";
3937 if (src_code == CONST_INT)
3939 /* Don't use the X format for the operand itself, because that
3940 will give out-of-range numbers for 64-bit hosts and 32-bit
3941 targets. */
3942 if (!TARGET_MIPS16)
3943 return "li\t%0,%1\t\t\t# %X1";
3945 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3946 return "li\t%0,%1";
3948 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3949 return "#";
3952 if (src_code == HIGH)
3953 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3955 if (CONST_GP_P (src))
3956 return "move\t%0,%1";
3958 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3959 && mips_lo_relocs[symbol_type] != 0)
3961 /* A signed 16-bit constant formed by applying a relocation
3962 operator to a symbolic address. */
3963 gcc_assert (!mips_split_p[symbol_type]);
3964 return "li\t%0,%R1";
3967 if (symbolic_operand (src, VOIDmode))
3969 gcc_assert (TARGET_MIPS16
3970 ? TARGET_MIPS16_TEXT_LOADS
3971 : !TARGET_EXPLICIT_RELOCS);
3972 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3975 if (src_code == REG && FP_REG_P (REGNO (src)))
3977 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3979 if (GET_MODE (dest) == V2SFmode)
3980 return "mov.ps\t%0,%1";
3981 else
3982 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3985 if (dest_code == MEM)
3986 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3988 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3990 if (src_code == MEM)
3991 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3993 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3995 static char retval[] = "l_c_\t%0,%1";
3997 retval[1] = (dbl_p ? 'd' : 'w');
3998 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3999 return retval;
4001 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
4003 static char retval[] = "s_c_\t%1,%0";
4005 retval[1] = (dbl_p ? 'd' : 'w');
4006 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4007 return retval;
4009 gcc_unreachable ();
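/* For illustration only, a few of the templates returned above, using
   the operand syntax that appears throughout this function:
	GPR <- GPR or zero	"move\t%0,%z1"
	SImode GPR <- memory	"lw\t%0,%1"
	DFmode FPR <- FPR	"mov.d\t%0,%1"
	DFmode FPR -> memory	"sdc1\t%1,%0"
   "#" tells the output machinery that the move must be split into
   simpler instructions before it can be output.  */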
4012 /* Return true if CMP1 is a suitable second operand for integer ordering
4013 test CODE. See also the *sCC patterns in mips.md. */
4015 static bool
4016 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
4018 switch (code)
4020 case GT:
4021 case GTU:
4022 return reg_or_0_operand (cmp1, VOIDmode);
4024 case GE:
4025 case GEU:
4026 return !TARGET_MIPS16 && cmp1 == const1_rtx;
4028 case LT:
4029 case LTU:
4030 return arith_operand (cmp1, VOIDmode);
4032 case LE:
4033 return sle_operand (cmp1, VOIDmode);
4035 case LEU:
4036 return sleu_operand (cmp1, VOIDmode);
4038 default:
4039 gcc_unreachable ();
4043 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4044 integer ordering test *CODE, or if an equivalent combination can
4045 be formed by adjusting *CODE and *CMP1. When returning true, update
4046 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4047 them alone. */
4049 static bool
4050 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4051 enum machine_mode mode)
4053 HOST_WIDE_INT plus_one;
4055 if (mips_int_order_operand_ok_p (*code, *cmp1))
4056 return true;
4058 if (GET_CODE (*cmp1) == CONST_INT)
4059 switch (*code)
4061 case LE:
4062 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4063 if (INTVAL (*cmp1) < plus_one)
4065 *code = LT;
4066 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4067 return true;
4069 break;
4071 case LEU:
4072 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4073 if (plus_one != 0)
4075 *code = LTU;
4076 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4077 return true;
4079 break;
4081 default:
4082 break;
4084 return false;
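/* For illustration only: with a 32-bit MODE, "x <= 100" is rewritten
   above as "x < 101" so that SLT can be used directly, while
   "x <=u 0xffffffff" cannot be rewritten because adding 1 wraps to
   zero; in that case the function returns false and the caller falls
   back to the inverted test or to forcing the operand into a
   register.  */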
4087 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4088 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4089 is nonnull, it's OK to set TARGET to the inverse of the result and
4090 flip *INVERT_PTR instead. */
4092 static void
4093 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4094 rtx target, rtx cmp0, rtx cmp1)
4096 enum machine_mode mode;
4098 /* First see if there is a MIPS instruction that can do this operation.
4099 If not, try doing the same for the inverse operation. If that also
4100 fails, force CMP1 into a register and try again. */
4101 mode = GET_MODE (cmp0);
4102 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4103 mips_emit_binary (code, target, cmp0, cmp1);
4104 else
4106 enum rtx_code inv_code = reverse_condition (code);
4107 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4109 cmp1 = force_reg (mode, cmp1);
4110 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4112 else if (invert_ptr == 0)
4114 rtx inv_target;
4116 inv_target = mips_force_binary (GET_MODE (target),
4117 inv_code, cmp0, cmp1);
4118 mips_emit_binary (XOR, target, inv_target, const1_rtx);
4120 else
4122 *invert_ptr = !*invert_ptr;
4123 mips_emit_binary (inv_code, target, cmp0, cmp1);
4128 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4129 The register will have the same mode as CMP0. */
4131 static rtx
4132 mips_zero_if_equal (rtx cmp0, rtx cmp1)
4134 if (cmp1 == const0_rtx)
4135 return cmp0;
4137 if (uns_arith_operand (cmp1, VOIDmode))
4138 return expand_binop (GET_MODE (cmp0), xor_optab,
4139 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4141 return expand_binop (GET_MODE (cmp0), sub_optab,
4142 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4145 /* Convert *CODE into a code that can be used in a floating-point
4146 scc instruction (C.cond.fmt). Return true if the values of
4147 the condition code registers will be inverted, with 0 indicating
4148 that the condition holds. */
4150 static bool
4151 mips_reversed_fp_cond (enum rtx_code *code)
4153 switch (*code)
4155 case NE:
4156 case LTGT:
4157 case ORDERED:
4158 *code = reverse_condition_maybe_unordered (*code);
4159 return true;
4161 default:
4162 return false;
4166 /* Convert a comparison into something that can be used in a branch or
4167 conditional move. cmp_operands[0] and cmp_operands[1] are the values
4168 being compared and *CODE is the code used to compare them.
4170 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4171 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4172 otherwise any standard branch condition can be used. The standard branch
4173 conditions are:
4175 - EQ or NE between two registers.
4176 - any comparison between a register and zero. */
4178 static void
4179 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
4181 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
4183 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
4185 *op0 = cmp_operands[0];
4186 *op1 = cmp_operands[1];
4188 else if (*code == EQ || *code == NE)
4190 if (need_eq_ne_p)
4192 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4193 *op1 = const0_rtx;
4195 else
4197 *op0 = cmp_operands[0];
4198 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
4201 else
4203 /* The comparison needs a separate scc instruction. Store the
4204 result of the scc in *OP0 and compare it against zero. */
4205 bool invert = false;
4206 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
4207 mips_emit_int_order_test (*code, &invert, *op0,
4208 cmp_operands[0], cmp_operands[1]);
4209 *code = (invert ? EQ : NE);
4210 *op1 = const0_rtx;
4213 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
4215 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
4216 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
4217 *code = NE;
4218 *op1 = const0_rtx;
4220 else
4222 enum rtx_code cmp_code;
4224 /* Floating-point tests use a separate C.cond.fmt comparison to
4225 set a condition code register. The branch or conditional move
4226 will then compare that register against zero.
4228 Set CMP_CODE to the code of the comparison instruction and
4229 *CODE to the code that the branch or move should use. */
4230 cmp_code = *code;
4231 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4232 *op0 = (ISA_HAS_8CC
4233 ? gen_reg_rtx (CCmode)
4234 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4235 *op1 = const0_rtx;
4236 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
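/* For illustration only: a double-precision "a != b" branch is handled
   above by emitting C.EQ.D into a condition-code register and then
   branching on that register being false; the reversal via
   mips_reversed_fp_cond is what turns the unsupported NE comparison
   into the supported EQ one.  */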
4240 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4241 Store the result in TARGET and return true if successful.
4243 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
4245 bool
4246 mips_expand_scc (enum rtx_code code, rtx target)
4248 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4249 return false;
4251 if (code == EQ || code == NE)
4253 if (ISA_HAS_SEQ_SNE
4254 && reg_imm10_operand (cmp_operands[1], GET_MODE (cmp_operands[1])))
4255 mips_emit_binary (code, target, cmp_operands[0], cmp_operands[1]);
4256 else
4258 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4259 mips_emit_binary (code, target, zie, const0_rtx);
4262 else
4263 mips_emit_int_order_test (code, 0, target,
4264 cmp_operands[0], cmp_operands[1]);
4265 return true;
4268 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4269 CODE and jump to OPERANDS[0] if the condition holds. */
4271 void
4272 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4274 rtx op0, op1, condition;
4276 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4277 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4278 emit_jump_insn (gen_condjump (condition, operands[0]));
4281 /* Implement:
4283 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4284 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4286 void
4287 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4288 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4290 rtx cmp_result;
4291 bool reversed_p;
4293 reversed_p = mips_reversed_fp_cond (&cond);
4294 cmp_result = gen_reg_rtx (CCV2mode);
4295 emit_insn (gen_scc_ps (cmp_result,
4296 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4297 if (reversed_p)
4298 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4299 cmp_result));
4300 else
4301 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4302 cmp_result));
4305 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4306 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4307 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4309 void
4310 mips_expand_conditional_move (rtx *operands)
4312 enum rtx_code code;
4313 rtx cond, op0, op1;
4315 code = GET_CODE (operands[1]);
4316 mips_emit_compare (&code, &op0, &op1, true);
4317 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
4318 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4319 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4320 operands[2], operands[3])));
4323 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4324 then trap if the condition holds. */
4326 void
4327 mips_expand_conditional_trap (enum rtx_code code)
4329 rtx op0, op1;
4330 enum machine_mode mode;
4332 /* MIPS conditional trap instructions don't have GT or LE flavors,
4333 so we must swap the operands and convert to LT and GE respectively. */
4334 switch (code)
4336 case GT:
4337 case LE:
4338 case GTU:
4339 case LEU:
4340 code = swap_condition (code);
4341 op0 = cmp_operands[1];
4342 op1 = cmp_operands[0];
4343 break;
4345 default:
4346 op0 = cmp_operands[0];
4347 op1 = cmp_operands[1];
4348 break;
4351 mode = GET_MODE (cmp_operands[0]);
4352 op0 = force_reg (mode, op0);
4353 if (!arith_operand (op1, mode))
4354 op1 = force_reg (mode, op1);
4356 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4357 gen_rtx_fmt_ee (code, mode, op0, op1),
4358 const0_rtx));
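/* For illustration only (register names chosen for the example):
   "trap if a > b" has no direct encoding, so the code above swaps the
   operands and uses the LT form instead; with a in $4 and b in $5 the
   result is "tlt\t$5,$4", which traps exactly when $5 < $4, that is,
   when a > b.  */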
4361 /* Initialize *CUM for a call to a function of type FNTYPE. */
4363 void
4364 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4366 memset (cum, 0, sizeof (*cum));
4367 cum->prototype = (fntype && prototype_p (fntype));
4368 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4371 /* Fill INFO with information about a single argument. CUM is the
4372 cumulative state for earlier arguments. MODE is the mode of this
4373 argument and TYPE is its type (if known). NAMED is true if this
4374 is a named (fixed) argument rather than a variable one. */
4376 static void
4377 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4378 enum machine_mode mode, tree type, int named)
4380 bool doubleword_aligned_p;
4381 unsigned int num_bytes, num_words, max_regs;
4383 /* Work out the size of the argument. */
4384 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4385 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4387 /* Decide whether it should go in a floating-point register, assuming
4388 one is free. Later code checks for availability.
4390 The checks against UNITS_PER_FPVALUE handle the soft-float and
4391 single-float cases. */
4392 switch (mips_abi)
4394 case ABI_EABI:
4395 /* The EABI conventions have traditionally been defined in terms
4396 of TYPE_MODE, regardless of the actual type. */
4397 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4398 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4399 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4400 break;
4402 case ABI_32:
4403 case ABI_O64:
4404 /* Only leading floating-point scalars are passed in
4405 floating-point registers. We also handle vector floats the same
4406 way, which is OK because they are not covered by the standard ABI. */
4407 info->fpr_p = (!cum->gp_reg_found
4408 && cum->arg_number < 2
4409 && (type == 0
4410 || SCALAR_FLOAT_TYPE_P (type)
4411 || VECTOR_FLOAT_TYPE_P (type))
4412 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4413 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4414 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4415 break;
4417 case ABI_N32:
4418 case ABI_64:
4419 /* Scalar, complex and vector floating-point types are passed in
4420 floating-point registers, as long as this is a named rather
4421 than a variable argument. */
4422 info->fpr_p = (named
4423 && (type == 0 || FLOAT_TYPE_P (type))
4424 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4425 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4426 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4427 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4429 /* ??? According to the ABI documentation, the real and imaginary
4430 parts of complex floats should be passed in individual registers.
4431 The real and imaginary parts of stack arguments are supposed
4432 to be contiguous and there should be an extra word of padding
4433 at the end.
4435 This has two problems. First, it makes it impossible to use a
4436 single "void *" va_list type, since register and stack arguments
4437 are passed differently. (At the time of writing, MIPSpro cannot
4438 handle complex float varargs correctly.) Second, it's unclear
4439 what should happen when there is only one register free.
4441 For now, we assume that named complex floats should go into FPRs
4442 if there are two FPRs free, otherwise they should be passed in the
4443 same way as a struct containing two floats. */
4444 if (info->fpr_p
4445 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4446 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4448 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4449 info->fpr_p = false;
4450 else
4451 num_words = 2;
4453 break;
4455 default:
4456 gcc_unreachable ();
4459 /* See whether the argument has doubleword alignment. */
4460 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4462 /* Set REG_OFFSET to the register count we're interested in.
4463 The EABI allocates the floating-point registers separately,
4464 but the other ABIs allocate them like integer registers. */
4465 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4466 ? cum->num_fprs
4467 : cum->num_gprs);
4469 /* Advance to an even register if the argument is doubleword-aligned. */
4470 if (doubleword_aligned_p)
4471 info->reg_offset += info->reg_offset & 1;
4473 /* Work out the offset of a stack argument. */
4474 info->stack_offset = cum->stack_words;
4475 if (doubleword_aligned_p)
4476 info->stack_offset += info->stack_offset & 1;
4478 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4480 /* Partition the argument between registers and stack. */
4481 info->reg_words = MIN (num_words, max_regs);
4482 info->stack_words = num_words - info->reg_words;
4485 /* INFO describes a register argument that has the normal format for the
4486 argument's mode. Return the register it uses, assuming that FPRs are
4487 available if HARD_FLOAT_P. */
4489 static unsigned int
4490 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4492 if (!info->fpr_p || !hard_float_p)
4493 return GP_ARG_FIRST + info->reg_offset;
4494 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4495 /* In o32, the second argument is always passed in $f14
4496 for TARGET_DOUBLE_FLOAT, regardless of whether the
4497 first argument was a word or doubleword. */
4498 return FP_ARG_FIRST + 2;
4499 else
4500 return FP_ARG_FIRST + info->reg_offset;
4503 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4505 static bool
4506 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4508 return !TARGET_OLDABI;
4511 /* Implement FUNCTION_ARG. */
4513 rtx
4514 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4515 tree type, int named)
4517 struct mips_arg_info info;
4519 /* We will be called with a mode of VOIDmode after the last argument
4520 has been seen. Whatever we return will be passed to the call expander.
4521 If we need a MIPS16 fp_code, return a REG with the code stored as
4522 the mode. */
4523 if (mode == VOIDmode)
4525 if (TARGET_MIPS16 && cum->fp_code != 0)
4526 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4527 else
4528 return NULL;
4531 mips_get_arg_info (&info, cum, mode, type, named);
4533 /* Return straight away if the whole argument is passed on the stack. */
4534 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4535 return NULL;
4537 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4538 contains a double in its entirety, then that 64-bit chunk is passed
4539 in a floating-point register. */
4540 if (TARGET_NEWABI
4541 && TARGET_HARD_FLOAT
4542 && named
4543 && type != 0
4544 && TREE_CODE (type) == RECORD_TYPE
4545 && TYPE_SIZE_UNIT (type)
4546 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4548 tree field;
4550 /* First check to see if there is any such field. */
4551 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4552 if (TREE_CODE (field) == FIELD_DECL
4553 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4554 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4555 && host_integerp (bit_position (field), 0)
4556 && int_bit_position (field) % BITS_PER_WORD == 0)
4557 break;
4559 if (field != 0)
4561 /* Now handle the special case by returning a PARALLEL
4562 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4563 chunks are passed in registers. */
4564 unsigned int i;
4565 HOST_WIDE_INT bitpos;
4566 rtx ret;
4568 /* assign_parms checks the mode of ENTRY_PARM, so we must
4569 use the actual mode here. */
4570 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4572 bitpos = 0;
4573 field = TYPE_FIELDS (type);
4574 for (i = 0; i < info.reg_words; i++)
4576 rtx reg;
4578 for (; field; field = TREE_CHAIN (field))
4579 if (TREE_CODE (field) == FIELD_DECL
4580 && int_bit_position (field) >= bitpos)
4581 break;
4583 if (field
4584 && int_bit_position (field) == bitpos
4585 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4586 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4587 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4588 else
4589 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4591 XVECEXP (ret, 0, i)
4592 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4593 GEN_INT (bitpos / BITS_PER_UNIT));
4595 bitpos += BITS_PER_WORD;
4597 return ret;
4601 /* Handle the n32/n64 conventions for passing complex floating-point
4602 arguments in FPR pairs. The real part goes in the lower register
4603 and the imaginary part goes in the upper register. */
4604 if (TARGET_NEWABI
4605 && info.fpr_p
4606 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4608 rtx real, imag;
4609 enum machine_mode inner;
4610 unsigned int regno;
4612 inner = GET_MODE_INNER (mode);
4613 regno = FP_ARG_FIRST + info.reg_offset;
4614 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4616 /* Real part in registers, imaginary part on stack. */
4617 gcc_assert (info.stack_words == info.reg_words);
4618 return gen_rtx_REG (inner, regno);
4620 else
4622 gcc_assert (info.stack_words == 0);
4623 real = gen_rtx_EXPR_LIST (VOIDmode,
4624 gen_rtx_REG (inner, regno),
4625 const0_rtx);
4626 imag = gen_rtx_EXPR_LIST (VOIDmode,
4627 gen_rtx_REG (inner,
4628 regno + info.reg_words / 2),
4629 GEN_INT (GET_MODE_SIZE (inner)));
4630 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4634 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
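/* For illustration only: under the n32/n64 handling above, a leading
   named _Complex double argument ends up with its real part in the
   first available FPR and its imaginary part in the next FPR, wrapped
   in the two-element PARALLEL built just above.  */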
4637 /* Implement FUNCTION_ARG_ADVANCE. */
4639 void
4640 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4641 tree type, int named)
4643 struct mips_arg_info info;
4645 mips_get_arg_info (&info, cum, mode, type, named);
4647 if (!info.fpr_p)
4648 cum->gp_reg_found = true;
4650 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4651 an explanation of what this code does. It assumes that we're using
4652 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4653 in FPRs. */
4654 if (cum->arg_number < 2 && info.fpr_p)
4655 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4657 /* Advance the register count. This has the effect of setting
4658 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4659 argument required us to skip the final GPR and pass the whole
4660 argument on the stack. */
4661 if (mips_abi != ABI_EABI || !info.fpr_p)
4662 cum->num_gprs = info.reg_offset + info.reg_words;
4663 else if (info.reg_words > 0)
4664 cum->num_fprs += MAX_FPRS_PER_FMT;
4666 /* Advance the stack word count. */
4667 if (info.stack_words > 0)
4668 cum->stack_words = info.stack_offset + info.stack_words;
4670 cum->arg_number++;
4673 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4675 static int
4676 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4677 enum machine_mode mode, tree type, bool named)
4679 struct mips_arg_info info;
4681 mips_get_arg_info (&info, cum, mode, type, named);
4682 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4685 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4686 PARM_BOUNDARY bits of alignment, but will be given anything up
4687 to STACK_BOUNDARY bits if the type requires it. */
4689 int
4690 mips_function_arg_boundary (enum machine_mode mode, tree type)
4692 unsigned int alignment;
4694 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4695 if (alignment < PARM_BOUNDARY)
4696 alignment = PARM_BOUNDARY;
4697 if (alignment > STACK_BOUNDARY)
4698 alignment = STACK_BOUNDARY;
4699 return alignment;
4702 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4703 upward rather than downward. In other words, return true if the
4704 first byte of the stack slot has useful data, false if the last
4705 byte does. */
4707 bool
4708 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4710 /* On little-endian targets, the first byte of every stack argument
4711 is passed in the first byte of the stack slot. */
4712 if (!BYTES_BIG_ENDIAN)
4713 return true;
4715 /* Otherwise, integral types are padded downward: the last byte of a
4716 stack argument is passed in the last byte of the stack slot. */
4717 if (type != 0
4718 ? (INTEGRAL_TYPE_P (type)
4719 || POINTER_TYPE_P (type)
4720 || FIXED_POINT_TYPE_P (type))
4721 : (SCALAR_INT_MODE_P (mode)
4722 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4723 return false;
4725 /* Big-endian o64 pads floating-point arguments downward. */
4726 if (mips_abi == ABI_O64)
4727 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4728 return false;
4730 /* Other types are padded upward for o32, o64, n32 and n64. */
4731 if (mips_abi != ABI_EABI)
4732 return true;
4734 /* Arguments smaller than a stack slot are padded downward. */
4735 if (mode != BLKmode)
4736 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4737 else
4738 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
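/* For illustration only: under the o32/n32/n64 rules above, a "char"
   argument that ends up on the stack of a big-endian target occupies
   the last byte of its slot (padded downward), whereas a small
   structure is padded upward and starts at the first byte of its
   slot.  */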
4741 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4742 if the least significant byte of the register has useful data. Return
4743 the opposite if the most significant byte does. */
4745 bool
4746 mips_pad_reg_upward (enum machine_mode mode, tree type)
4748 /* No shifting is required for floating-point arguments. */
4749 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4750 return !BYTES_BIG_ENDIAN;
4752 /* Otherwise, apply the same padding to register arguments as we do
4753 to stack arguments. */
4754 return mips_pad_arg_upward (mode, type);
4757 /* Return nonzero when an argument must be passed by reference. */
4759 static bool
4760 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4761 enum machine_mode mode, const_tree type,
4762 bool named ATTRIBUTE_UNUSED)
4764 if (mips_abi == ABI_EABI)
4766 int size;
4768 /* ??? How should SCmode be handled? */
4769 if (mode == DImode || mode == DFmode
4770 || mode == DQmode || mode == UDQmode
4771 || mode == DAmode || mode == UDAmode)
4772 return 0;
4774 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4775 return size == -1 || size > UNITS_PER_WORD;
4777 else
4779 /* If we have a variable-sized parameter, we have no choice. */
4780 return targetm.calls.must_pass_in_stack (mode, type);
4784 /* Implement TARGET_CALLEE_COPIES. */
4786 static bool
4787 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4788 enum machine_mode mode ATTRIBUTE_UNUSED,
4789 const_tree type ATTRIBUTE_UNUSED, bool named)
4791 return mips_abi == ABI_EABI && named;
4794 /* See whether VALTYPE is a record whose fields should be returned in
4795 floating-point registers. If so, return the number of fields and
4796 list them in FIELDS (which should have two elements). Return 0
4797 otherwise.
4799 For n32 & n64, a structure with one or two fields is returned in
4800 floating-point registers as long as every field has a floating-point
4801 type. */
4803 static int
4804 mips_fpr_return_fields (const_tree valtype, tree *fields)
4806 tree field;
4807 int i;
4809 if (!TARGET_NEWABI)
4810 return 0;
4812 if (TREE_CODE (valtype) != RECORD_TYPE)
4813 return 0;
4815 i = 0;
4816 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4818 if (TREE_CODE (field) != FIELD_DECL)
4819 continue;
4821 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4822 return 0;
4824 if (i == 2)
4825 return 0;
4827 fields[i++] = field;
4829 return i;
4832 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4833 a value in the most significant part of $2/$3 if:
4835 - the target is big-endian;
4837 - the value has a structure or union type (we generalize this to
4838 cover aggregates from other languages too); and
4840 - the structure is not returned in floating-point registers. */
4842 static bool
4843 mips_return_in_msb (const_tree valtype)
4845 tree fields[2];
4847 return (TARGET_NEWABI
4848 && TARGET_BIG_ENDIAN
4849 && AGGREGATE_TYPE_P (valtype)
4850 && mips_fpr_return_fields (valtype, fields) == 0);
4853 /* Return true if the function return value MODE will get returned in a
4854 floating-point register. */
4856 static bool
4857 mips_return_mode_in_fpr_p (enum machine_mode mode)
4859 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4860 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4861 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4862 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4865 /* Return the representation of an FPR return register when the
4866 value being returned in FP_RETURN has mode VALUE_MODE and the
4867 return type itself has mode TYPE_MODE. On NewABI targets,
4868 the two modes may be different for structures like:
4870 struct __attribute__((packed)) foo { float f; }
4872 where we return the SFmode value of "f" in FP_RETURN, but where
4873 the structure itself has mode BLKmode. */
4875 static rtx
4876 mips_return_fpr_single (enum machine_mode type_mode,
4877 enum machine_mode value_mode)
4879 rtx x;
4881 x = gen_rtx_REG (value_mode, FP_RETURN);
4882 if (type_mode != value_mode)
4884 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4885 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4887 return x;
4890 /* Return a composite value in a pair of floating-point registers.
4891 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4892 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4893 complete value.
4895 For n32 & n64, $f0 always holds the first value and $f2 the second.
4896 Otherwise the values are packed together as closely as possible. */
4898 static rtx
4899 mips_return_fpr_pair (enum machine_mode mode,
4900 enum machine_mode mode1, HOST_WIDE_INT offset1,
4901 enum machine_mode mode2, HOST_WIDE_INT offset2)
4903 int inc;
4905 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4906 return gen_rtx_PARALLEL
4907 (mode,
4908 gen_rtvec (2,
4909 gen_rtx_EXPR_LIST (VOIDmode,
4910 gen_rtx_REG (mode1, FP_RETURN),
4911 GEN_INT (offset1)),
4912 gen_rtx_EXPR_LIST (VOIDmode,
4913 gen_rtx_REG (mode2, FP_RETURN + inc),
4914 GEN_INT (offset2))));
4918 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4919 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4920 VALTYPE is null and MODE is the mode of the return value. */
4922 rtx
4923 mips_function_value (const_tree valtype, enum machine_mode mode)
4925 if (valtype)
4927 tree fields[2];
4928 int unsigned_p;
4930 mode = TYPE_MODE (valtype);
4931 unsigned_p = TYPE_UNSIGNED (valtype);
4933 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4934 we must promote the mode just as PROMOTE_MODE does. */
4935 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4937 /* Handle structures whose fields are returned in $f0/$f2. */
4938 switch (mips_fpr_return_fields (valtype, fields))
4940 case 1:
4941 return mips_return_fpr_single (mode,
4942 TYPE_MODE (TREE_TYPE (fields[0])));
4944 case 2:
4945 return mips_return_fpr_pair (mode,
4946 TYPE_MODE (TREE_TYPE (fields[0])),
4947 int_byte_position (fields[0]),
4948 TYPE_MODE (TREE_TYPE (fields[1])),
4949 int_byte_position (fields[1]));
4952 /* If a value is passed in the most significant part of a register, see
4953 whether we have to round the mode up to a whole number of words. */
4954 if (mips_return_in_msb (valtype))
4956 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4957 if (size % UNITS_PER_WORD != 0)
4959 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4960 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4964 /* For EABI, the class of return register depends entirely on MODE.
4965 For example, "struct { some_type x; }" and "union { some_type x; }"
4966 are returned in the same way as a bare "some_type" would be.
4967 Other ABIs only use FPRs for scalar, complex or vector types. */
4968 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4969 return gen_rtx_REG (mode, GP_RETURN);
4972 if (!TARGET_MIPS16)
4974 /* Handle long doubles for n32 & n64. */
4975 if (mode == TFmode)
4976 return mips_return_fpr_pair (mode,
4977 DImode, 0,
4978 DImode, GET_MODE_SIZE (mode) / 2);
4980 if (mips_return_mode_in_fpr_p (mode))
4982 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4983 return mips_return_fpr_pair (mode,
4984 GET_MODE_INNER (mode), 0,
4985 GET_MODE_INNER (mode),
4986 GET_MODE_SIZE (mode) / 2);
4987 else
4988 return gen_rtx_REG (mode, FP_RETURN);
4992 return gen_rtx_REG (mode, GP_RETURN);
4995 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4996 all BLKmode objects are returned in memory. Under the n32, n64
4997 and embedded ABIs, small structures are returned in a register.
4998 Objects with varying size must still be returned in memory, of
4999 course. */
5001 static bool
5002 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5004 return (TARGET_OLDABI
5005 ? TYPE_MODE (type) == BLKmode
5006 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
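/* For illustration only: with n64's 8-byte words, a 16-byte structure
   is returned in registers while a 17-byte one is returned in memory;
   under o32, any aggregate whose mode is BLKmode goes to memory
   regardless of its size.  */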
5009 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
5011 static void
5012 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5013 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5014 int no_rtl)
5016 CUMULATIVE_ARGS local_cum;
5017 int gp_saved, fp_saved;
5019 /* The caller has advanced CUM up to, but not beyond, the last named
5020 argument. Advance a local copy of CUM past the last "real" named
5021 argument, to find out how many registers are left over. */
5022 local_cum = *cum;
5023 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
5025 /* Find out how many registers we need to save. */
5026 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
5027 fp_saved = (EABI_FLOAT_VARARGS_P
5028 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
5029 : 0);
5031 if (!no_rtl)
5033 if (gp_saved > 0)
5035 rtx ptr, mem;
5037 ptr = plus_constant (virtual_incoming_args_rtx,
5038 REG_PARM_STACK_SPACE (cfun->decl)
5039 - gp_saved * UNITS_PER_WORD);
5040 mem = gen_frame_mem (BLKmode, ptr);
5041 set_mem_alias_set (mem, get_varargs_alias_set ());
5043 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
5044 mem, gp_saved);
5046 if (fp_saved > 0)
5048 /* We can't use move_block_from_reg, because it will use
5049 the wrong mode. */
5050 enum machine_mode mode;
5051 int off, i;
5053 /* Set OFF to the offset from virtual_incoming_args_rtx of
5054 the first float register. The FP save area lies below
5055 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
5056 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
5057 off -= fp_saved * UNITS_PER_FPREG;
5059 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
5061 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
5062 i += MAX_FPRS_PER_FMT)
5064 rtx ptr, mem;
5066 ptr = plus_constant (virtual_incoming_args_rtx, off);
5067 mem = gen_frame_mem (mode, ptr);
5068 set_mem_alias_set (mem, get_varargs_alias_set ());
5069 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
5070 off += UNITS_PER_HWFPVALUE;
5074 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
5075 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
5076 + fp_saved * UNITS_PER_FPREG);
5079 /* Implement TARGET_BUILTIN_VA_LIST. */
5081 static tree
5082 mips_build_builtin_va_list (void)
5084 if (EABI_FLOAT_VARARGS_P)
5086 /* We keep three pointers and two offsets.
5088 Two pointers are to the overflow area, which starts at the CFA.
5089 One of these is constant, for addressing into the GPR save area
5090 below it. The other is advanced up the stack through the
5091 overflow region.
5093 The third pointer is to the bottom of the GPR save area.
5094 Since the FPR save area is just below it, we can address
5095 FPR slots off this pointer.
5097 We also keep two one-byte offsets, which are to be subtracted
5098 from the constant pointers to yield addresses in the GPR and
5099 FPR save areas. These are downcounted as float or non-float
5100 arguments are used, and when they get to zero, the argument
5101 must be obtained from the overflow region. */
5102 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5103 tree array, index;
5105 record = lang_hooks.types.make_type (RECORD_TYPE);
5107 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
5108 ptr_type_node);
5109 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5110 ptr_type_node);
5111 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5112 ptr_type_node);
5113 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5114 unsigned_char_type_node);
5115 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5116 unsigned_char_type_node);
5117 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5118 warn on every user file. */
5119 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5120 array = build_array_type (unsigned_char_type_node,
5121 build_index_type (index));
5122 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5124 DECL_FIELD_CONTEXT (f_ovfl) = record;
5125 DECL_FIELD_CONTEXT (f_gtop) = record;
5126 DECL_FIELD_CONTEXT (f_ftop) = record;
5127 DECL_FIELD_CONTEXT (f_goff) = record;
5128 DECL_FIELD_CONTEXT (f_foff) = record;
5129 DECL_FIELD_CONTEXT (f_res) = record;
5131 TYPE_FIELDS (record) = f_ovfl;
5132 TREE_CHAIN (f_ovfl) = f_gtop;
5133 TREE_CHAIN (f_gtop) = f_ftop;
5134 TREE_CHAIN (f_ftop) = f_goff;
5135 TREE_CHAIN (f_goff) = f_foff;
5136 TREE_CHAIN (f_foff) = f_res;
5138 layout_type (record);
5139 return record;
5141 else if (TARGET_IRIX && TARGET_IRIX6)
5142 /* On IRIX 6, this type is 'char *'. */
5143 return build_pointer_type (char_type_node);
5144 else
5145 /* Otherwise, we use 'void *'. */
5146 return ptr_type_node;
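/* For illustration only, the EABI record built above corresponds
   roughly to the following C type (the tag name is invented for the
   example; the reserved array is sized here for a 32-bit ptr_mode):
	struct mips_eabi_va_list {
	  void *__overflow_argptr;
	  void *__gpr_top;
	  void *__fpr_top;
	  unsigned char __gpr_offset;
	  unsigned char __fpr_offset;
	  unsigned char __reserved[2];
	};  */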
5149 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5151 static void
5152 mips_va_start (tree valist, rtx nextarg)
5154 if (EABI_FLOAT_VARARGS_P)
5156 const CUMULATIVE_ARGS *cum;
5157 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5158 tree ovfl, gtop, ftop, goff, foff;
5159 tree t;
5160 int gpr_save_area_size;
5161 int fpr_save_area_size;
5162 int fpr_offset;
5164 cum = &crtl->args.info;
5165 gpr_save_area_size
5166 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5167 fpr_save_area_size
5168 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5170 f_ovfl = TYPE_FIELDS (va_list_type_node);
5171 f_gtop = TREE_CHAIN (f_ovfl);
5172 f_ftop = TREE_CHAIN (f_gtop);
5173 f_goff = TREE_CHAIN (f_ftop);
5174 f_foff = TREE_CHAIN (f_goff);
5176 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5177 NULL_TREE);
5178 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5179 NULL_TREE);
5180 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5181 NULL_TREE);
5182 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5183 NULL_TREE);
5184 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5185 NULL_TREE);
5187 /* Emit code to initialize OVFL, which points to the next varargs
5188 stack argument. CUM->STACK_WORDS gives the number of stack
5189 words used by named arguments. */
5190 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5191 if (cum->stack_words > 0)
5192 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5193 size_int (cum->stack_words * UNITS_PER_WORD));
5194 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5195 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5197 /* Emit code to initialize GTOP, the top of the GPR save area. */
5198 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5199 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
5200 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5202 /* Emit code to initialize FTOP, the top of the FPR save area.
5203 This address is gpr_save_area_bytes below GTOP, rounded
5204 down to the next fp-aligned boundary. */
5205 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5206 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5207 fpr_offset &= -UNITS_PER_FPVALUE;
5208 if (fpr_offset)
5209 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5210 size_int (-fpr_offset));
5211 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
5212 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5214 /* Emit code to initialize GOFF, the offset from GTOP of the
5215 next GPR argument. */
5216 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
5217 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
5218 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5220 /* Likewise emit code to initialize FOFF, the offset from FTOP
5221 of the next FPR argument. */
5222 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
5223 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
5224 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5226 else
5228 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5229 std_expand_builtin_va_start (valist, nextarg);
5233 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5235 static tree
5236 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5237 gimple_seq *post_p)
5239 tree addr;
5240 bool indirect_p;
5242 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5243 if (indirect_p)
5244 type = build_pointer_type (type);
5246 if (!EABI_FLOAT_VARARGS_P)
5247 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5248 else
5250 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5251 tree ovfl, top, off, align;
5252 HOST_WIDE_INT size, rsize, osize;
5253 tree t, u;
5255 f_ovfl = TYPE_FIELDS (va_list_type_node);
5256 f_gtop = TREE_CHAIN (f_ovfl);
5257 f_ftop = TREE_CHAIN (f_gtop);
5258 f_goff = TREE_CHAIN (f_ftop);
5259 f_foff = TREE_CHAIN (f_goff);
5261 /* Let:
5263 TOP be the top of the GPR or FPR save area;
5264 OFF be the offset from TOP of the next register;
5265 ADDR_RTX be the address of the argument;
5266 SIZE be the number of bytes in the argument type;
5267 RSIZE be the number of bytes used to store the argument
5268 when it's in the register save area; and
5269 OSIZE be the number of bytes used to store it when it's
5270 in the stack overflow area.
5272 The code we want is:
5274 1: off &= -rsize; // round down
5275 2: if (off != 0)
5276 3: {
5277 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5278 5: off -= rsize;
5279 6: }
5280 7: else
5281 8: {
5282 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5283 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5284 11: ovfl += osize;
5285 12: }
5287 [1] and [9] can sometimes be optimized away. */
5289 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5290 NULL_TREE);
5291 size = int_size_in_bytes (type);
5293 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5294 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5296 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5297 NULL_TREE);
5298 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5299 NULL_TREE);
5301 /* When va_start saves FPR arguments to the stack, each slot
5302 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5303 argument's precision. */
5304 rsize = UNITS_PER_HWFPVALUE;
5306 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5307 (= PARM_BOUNDARY bits). This can be different from RSIZE
5308 in two cases:
5310 (1) On 32-bit targets when TYPE is a structure such as:
5312 struct s { float f; };
5314 Such structures are passed in paired FPRs, so RSIZE
5315 will be 8 bytes. However, the structure only takes
5316 up 4 bytes of memory, so OSIZE will only be 4.
5318 (2) In combinations such as -mgp64 -msingle-float
5319 -fshort-double. Doubles passed in registers will then take
5320 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5321 stack take up UNITS_PER_WORD bytes. */
5322 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5324 else
5326 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5327 NULL_TREE);
5328 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5329 NULL_TREE);
5330 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5331 if (rsize > UNITS_PER_WORD)
5333 /* [1] Emit code for: off &= -rsize. */
5334 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5335 build_int_cst (TREE_TYPE (off), -rsize));
5336 gimplify_assign (off, t, pre_p);
5338 osize = rsize;
5341 /* [2] Emit code to branch if off == 0. */
5342 t = build2 (NE_EXPR, boolean_type_node, off,
5343 build_int_cst (TREE_TYPE (off), 0));
5344 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5346 /* [5] Emit code for: off -= rsize. We do this as a form of
5347 post-decrement not available to C. */
5348 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5349 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5351 /* [4] Emit code for:
5352 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5353 t = fold_convert (sizetype, t);
5354 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5355 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5356 if (BYTES_BIG_ENDIAN && rsize > size)
5358 u = size_int (rsize - size);
5359 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5361 COND_EXPR_THEN (addr) = t;
5363 if (osize > UNITS_PER_WORD)
5365 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5366 u = size_int (osize - 1);
5367 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5368 t = fold_convert (sizetype, t);
5369 u = size_int (-osize);
5370 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5371 t = fold_convert (TREE_TYPE (ovfl), t);
5372 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5374 else
5375 align = NULL;
5377 /* [10, 11] Emit code for:
5378 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5379 ovfl += osize. */
5380 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5381 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5382 if (BYTES_BIG_ENDIAN && osize > size)
5384 u = size_int (osize - size);
5385 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5388 /* String [9] and [10, 11] together. */
5389 if (align)
5390 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5391 COND_EXPR_ELSE (addr) = t;
5393 addr = fold_convert (build_pointer_type (type), addr);
5394 addr = build_va_arg_indirect_ref (addr);
5397 if (indirect_p)
5398 addr = build_va_arg_indirect_ref (addr);
5400 return addr;
5403 /* Start a definition of function NAME. MIPS16_P indicates whether the
5404 function contains MIPS16 code. */
5406 static void
5407 mips_start_function_definition (const char *name, bool mips16_p)
5409 if (mips16_p)
5410 fprintf (asm_out_file, "\t.set\tmips16\n");
5411 else
5412 fprintf (asm_out_file, "\t.set\tnomips16\n");
5414 if (!flag_inhibit_size_directive)
5416 fputs ("\t.ent\t", asm_out_file);
5417 assemble_name (asm_out_file, name);
5418 fputs ("\n", asm_out_file);
5421 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
5423 /* Start the definition proper. */
5424 assemble_name (asm_out_file, name);
5425 fputs (":\n", asm_out_file);
5428 /* End a function definition started by mips_start_function_definition. */
5430 static void
5431 mips_end_function_definition (const char *name)
5433 if (!flag_inhibit_size_directive)
5435 fputs ("\t.end\t", asm_out_file);
5436 assemble_name (asm_out_file, name);
5437 fputs ("\n", asm_out_file);
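/* For illustration only, the directives produced by the two routines
   above for a non-MIPS16 function "foo" (when
   flag_inhibit_size_directive is clear) look like:
	.set	nomips16
	.ent	foo
	.type	foo, @function
   foo:
	...
	.end	foo
   The MIPS16 case differs only in using ".set mips16".  */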
5441 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5443 static bool
5444 mips_ok_for_lazy_binding_p (rtx x)
5446 return (TARGET_USE_GOT
5447 && GET_CODE (x) == SYMBOL_REF
5448 && !SYMBOL_REF_BIND_NOW_P (x)
5449 && !mips_symbol_binds_local_p (x));
5452 /* Load function address ADDR into register DEST. TYPE is as for
5453 mips_expand_call. Return true if we used an explicit lazy-binding
5454 sequence. */
5456 static bool
5457 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
5459 /* If we're generating PIC, and this call is to a global function,
5460 try to allow its address to be resolved lazily. This isn't
5461 possible for sibcalls when $gp is call-saved because the value
5462 of $gp on entry to the stub would be our caller's gp, not ours. */
5463 if (TARGET_EXPLICIT_RELOCS
5464 && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
5465 && mips_ok_for_lazy_binding_p (addr))
5467 addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
5468 emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
5469 return true;
5471 else
5473 mips_emit_move (dest, addr);
5474 return false;
5478 /* Each locally-defined hard-float MIPS16 function has a local symbol
5479 associated with it. This hash table maps the function symbol (FUNC)
5480 to the local symbol (LOCAL). */
5481 struct mips16_local_alias GTY(()) {
5482 rtx func;
5483 rtx local;
5485 static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
5487 /* Hash table callbacks for mips16_local_aliases. */
5489 static hashval_t
5490 mips16_local_aliases_hash (const void *entry)
5492 const struct mips16_local_alias *alias;
5494 alias = (const struct mips16_local_alias *) entry;
5495 return htab_hash_string (XSTR (alias->func, 0));
5498 static int
5499 mips16_local_aliases_eq (const void *entry1, const void *entry2)
5501 const struct mips16_local_alias *alias1, *alias2;
5503 alias1 = (const struct mips16_local_alias *) entry1;
5504 alias2 = (const struct mips16_local_alias *) entry2;
5505 return rtx_equal_p (alias1->func, alias2->func);
5508 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5509 Return a local alias for it, creating a new one if necessary. */
5511 static rtx
5512 mips16_local_alias (rtx func)
5514 struct mips16_local_alias *alias, tmp_alias;
5515 void **slot;
5517 /* Create the hash table if this is the first call. */
5518 if (mips16_local_aliases == NULL)
5519 mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
5520 mips16_local_aliases_eq, NULL);
5522 /* Look up the function symbol, creating a new entry if need be. */
5523 tmp_alias.func = func;
5524 slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
5525 gcc_assert (slot != NULL);
5527 alias = (struct mips16_local_alias *) *slot;
5528 if (alias == NULL)
5530 const char *func_name, *local_name;
5531 rtx local;
5533 /* Create a new SYMBOL_REF for the local symbol. The choice of
5534 __fn_local_* is based on the __fn_stub_* names that we've
5535 traditionally used for the non-MIPS16 stub. */
5536 func_name = targetm.strip_name_encoding (XSTR (func, 0));
5537 local_name = ACONCAT (("__fn_local_", func_name, NULL));
5538 local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
5539 SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
5541 /* Create a new structure to represent the mapping. */
5542 alias = GGC_NEW (struct mips16_local_alias);
5543 alias->func = func;
5544 alias->local = local;
5545 *slot = alias;
5547 return alias->local;
5550 /* A chained list of functions for which mips16_build_call_stub has already
5551 generated a stub. NAME is the name of the function and FP_RET_P is true
5552 if the function returns a value in floating-point registers. */
5553 struct mips16_stub {
5554 struct mips16_stub *next;
5555 char *name;
5556 bool fp_ret_p;
5558 static struct mips16_stub *mips16_stubs;
5560 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
5562 static rtx
5563 mips16_stub_function (const char *name)
5565 rtx x;
5567 x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5568 SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
5569 return x;
5572 /* Return the two-character string that identifies floating-point
5573 return mode MODE in the name of a MIPS16 function stub. */
5575 static const char *
5576 mips16_call_stub_mode_suffix (enum machine_mode mode)
5578 if (mode == SFmode)
5579 return "sf";
5580 else if (mode == DFmode)
5581 return "df";
5582 else if (mode == SCmode)
5583 return "sc";
5584 else if (mode == DCmode)
5585 return "dc";
5586 else if (mode == V2SFmode)
5587 return "df";
5588 else
5589 gcc_unreachable ();
5592 /* Write instructions to move a 32-bit value between general register
5593 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5594 from GPREG to FPREG and 'f' to move in the opposite direction. */
5596 static void
5597 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5599 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5600 reg_names[gpreg], reg_names[fpreg]);
5603 /* Likewise for 64-bit values. */
5605 static void
5606 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5608 if (TARGET_64BIT)
5609 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5610 reg_names[gpreg], reg_names[fpreg]);
5611 else if (TARGET_FLOAT64)
5613 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5614 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5615 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5616 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5618 else
5620 /* Move the least-significant word. */
5621 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5622 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5623 /* ...then the most significant word. */
5624 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5625 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
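/* Illustration (editorial sketch, not actual compiler output): assuming a
   little-endian target and a move from $4/$5 into $f12 (DIRECTION = 't'),
   the three cases above print roughly:

     64-bit GPRs:            dmtc1  $4,$f12
     32-bit GPRs, FR=1:      mtc1   $4,$f12
                             mthc1  $5,$f12
     32-bit GPRs, FR=0:      mtc1   $4,$f12
                             mtc1   $5,$f13

   The register numbers here are only examples; the real ones come from
   the GPREG and FPREG arguments.  */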
5629 /* Write out code to move floating-point arguments into or out of
5630 general registers. FP_CODE is the code describing which arguments
5631 are present (see the comment above the definition of CUMULATIVE_ARGS
5632 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5634 static void
5635 mips_output_args_xfer (int fp_code, char direction)
5637 unsigned int gparg, fparg, f;
5638 CUMULATIVE_ARGS cum;
5640 /* This code only works for o32 and o64. */
5641 gcc_assert (TARGET_OLDABI);
5643 mips_init_cumulative_args (&cum, NULL);
5645 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5647 enum machine_mode mode;
5648 struct mips_arg_info info;
5650 if ((f & 3) == 1)
5651 mode = SFmode;
5652 else if ((f & 3) == 2)
5653 mode = DFmode;
5654 else
5655 gcc_unreachable ();
5657 mips_get_arg_info (&info, &cum, mode, NULL, true);
5658 gparg = mips_arg_regno (&info, false);
5659 fparg = mips_arg_regno (&info, true);
5661 if (mode == SFmode)
5662 mips_output_32bit_xfer (direction, gparg, fparg);
5663 else
5664 mips_output_64bit_xfer (direction, gparg, fparg);
5666 mips_function_arg_advance (&cum, mode, NULL, true);
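/* Worked example (editorial note): FP_CODE is consumed two bits at a
   time, least-significant pair first, with 1 meaning SFmode and 2
   meaning DFmode.  A function such as f (float, double) is therefore
   described by

     fp_code = 1 | (2 << 2) = 9

   and the loop above emits one 32-bit transfer for the float followed
   by one 64-bit transfer for the double.  The encoding itself is
   defined alongside CUMULATIVE_ARGS in mips.h.  */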
5670 /* Write a MIPS16 stub for the current function. This stub is used
5671 for functions which take arguments in the floating-point registers.
5672 It is normal-mode code that moves the floating-point arguments
5673 into the general registers and then jumps to the MIPS16 code. */
5675 static void
5676 mips16_build_function_stub (void)
5678 const char *fnname, *alias_name, *separator;
5679 char *secname, *stubname;
5680 tree stubdecl;
5681 unsigned int f;
5682 rtx symbol, alias;
5684 /* Create the name of the stub, and its unique section. */
5685 symbol = XEXP (DECL_RTL (current_function_decl), 0);
5686 alias = mips16_local_alias (symbol);
5688 fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
5689 alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
5690 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5691 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5693 /* Build a decl for the stub. */
5694 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5695 build_function_type (void_type_node, NULL_TREE));
5696 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5697 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5699 /* Output a comment. */
5700 fprintf (asm_out_file, "\t# Stub function for %s (",
5701 current_function_name ());
5702 separator = "";
5703 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5705 fprintf (asm_out_file, "%s%s", separator,
5706 (f & 3) == 1 ? "float" : "double");
5707 separator = ", ";
5709 fprintf (asm_out_file, ")\n");
5711 /* Start the function definition. */
5712 assemble_start_function (stubdecl, stubname);
5713 mips_start_function_definition (stubname, false);
5715 /* If generating pic2 code, either set up the global pointer or
5716 switch to pic0. */
5717 if (TARGET_ABICALLS_PIC2)
5719 if (TARGET_ABSOLUTE_ABICALLS)
5720 fprintf (asm_out_file, "\t.option\tpic0\n");
5721 else
5723 output_asm_insn ("%(.cpload\t%^%)", NULL);
5724 /* Emit an R_MIPS_NONE relocation to tell the linker what the
5725 target function is. Use a local GOT access when loading the
5726 symbol, to cut down on the number of unnecessary GOT entries
5727 for stubs that aren't needed. */
5728 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
5729 symbol = alias;
5733 /* Load the address of the MIPS16 function into $25. Do this first so
5734 that targets with coprocessor interlocks can use an MFC1 to fill the
5735 delay slot. */
5736 output_asm_insn ("la\t%^,%0", &symbol);
5738 /* Move the arguments from floating-point registers to general registers. */
5739 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5741 /* Jump to the MIPS16 function. */
5742 output_asm_insn ("jr\t%^", NULL);
5744 if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
5745 fprintf (asm_out_file, "\t.option\tpic2\n");
5747 mips_end_function_definition (stubname);
5749 /* If the linker needs to create a dynamic symbol for the target
5750 function, it will associate the symbol with the stub (which,
5751 unlike the target function, follows the proper calling conventions).
5752 It is therefore useful to have a local alias for the target function,
5753 so that it can still be identified as MIPS16 code. As an optimization,
5754 this symbol can also be used for indirect MIPS16 references from
5755 within this file. */
5756 ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
5758 switch_to_section (function_section (current_function_decl));
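/* Rough illustration (editorial sketch, not actual compiler output):
   for a MIPS16 function "foo (float, double)" on a little-endian,
   non-PIC o32 target, the stub built above has roughly this shape:

     # Stub function for foo (float, double)
     .section .mips16.fn.foo
     .ent    __fn_stub_foo
     __fn_stub_foo:
     la      $25,foo
     mfc1    $4,$f12
     mfc1    $6,$f14
     mfc1    $7,$f15
     jr      $25
     .end    __fn_stub_foo

   followed by a local alias that equates __fn_local_foo with foo.
   The exact argument registers come from mips_output_args_xfer and
   depend on the ABI and endianness.  */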
5761 /* The current function is a MIPS16 function that returns a value in an FPR.
5762 Copy the return value from its soft-float to its hard-float location.
5763 libgcc2 has special non-MIPS16 helper functions for each case. */
5765 static void
5766 mips16_copy_fpr_return_value (void)
5768 rtx fn, insn, retval;
5769 tree return_type;
5770 enum machine_mode return_mode;
5771 const char *name;
5773 return_type = DECL_RESULT (current_function_decl);
5774 return_mode = DECL_MODE (return_type);
5776 name = ACONCAT (("__mips16_ret_",
5777 mips16_call_stub_mode_suffix (return_mode),
5778 NULL));
5779 fn = mips16_stub_function (name);
5781 /* The function takes arguments in $2 (and possibly $3), so calls
5782 to it cannot be lazily bound. */
5783 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
5785 /* Model the call as something that takes the GPR return value as
5786 argument and returns an "updated" value. */
5787 retval = gen_rtx_REG (return_mode, GP_RETURN);
5788 insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
5789 const0_rtx, NULL_RTX, false);
5790 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
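/* Illustration (editorial note): a MIPS16 function returning a double
   would call the libgcc helper "__mips16_ret_df" here, with the
   soft-float return value already in $2/$3; the helper copies it into
   the hard-float return registers ($f0/$f1 under o32) and returns.
   The "df" suffix comes from mips16_call_stub_mode_suffix above.  */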
5793 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
5794 RETVAL is the location of the return value, or null if this is
5795 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5796 arguments and FP_CODE is the code built by mips_function_arg;
5797 see the comment above CUMULATIVE_ARGS for details.
5799 There are three alternatives:
5801 - If a stub was needed, emit the call and return the call insn itself.
5803 - If we can avoid using a stub by redirecting the call, set *FN_PTR
5804 to the new target and return null.
5806 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
5807 unmodified.
5809 A stub is needed for calls to functions that, in normal mode,
5810 receive arguments in FPRs or return values in FPRs. The stub
5811 copies the arguments from their soft-float positions to their
5812 hard-float positions, calls the real function, then copies the
5813 return value from its hard-float position to its soft-float
5814 position.
5816 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
5817 If the call turns out to be to a non-MIPS16 function, the linker
5818 automatically redirects the JAL to the stub; otherwise the JAL
5819 continues to call *FN_PTR directly. */
5821 static rtx
5822 mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
5824 const char *fnname;
5825 bool fp_ret_p;
5826 struct mips16_stub *l;
5827 rtx insn, fn;
5829 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5830 we were invoked with the -msoft-float option. */
5831 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5832 return NULL_RTX;
5834 /* Figure out whether the value might come back in a floating-point
5835 register. */
5836 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5838 /* We don't need to do anything if there were no floating-point
5839 arguments and the value will not be returned in a floating-point
5840 register. */
5841 if (fp_code == 0 && !fp_ret_p)
5842 return NULL_RTX;
5844 /* We don't need to do anything if this is a call to a special
5845 MIPS16 support function. */
5846 fn = *fn_ptr;
5847 if (mips16_stub_function_p (fn))
5848 return NULL_RTX;
5850 /* This code will only work for the o32 and o64 ABIs. The other ABIs
5851 require more sophisticated support. */
5852 gcc_assert (TARGET_OLDABI);
5854 /* If we're calling via a function pointer, use one of the magic
5855 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5856 Each stub expects the function address to arrive in register $2. */
5857 if (GET_CODE (fn) != SYMBOL_REF
5858 || !call_insn_operand (fn, VOIDmode))
5860 char buf[30];
5861 rtx stub_fn, insn, addr;
5862 bool lazy_p;
5864 /* If this is a locally-defined and locally-binding function,
5865 avoid the stub by calling the local alias directly. */
5866 if (mips16_local_function_p (fn))
5868 *fn_ptr = mips16_local_alias (fn);
5869 return NULL_RTX;
5872 /* Create a SYMBOL_REF for the libgcc.a function. */
5873 if (fp_ret_p)
5874 sprintf (buf, "__mips16_call_stub_%s_%d",
5875 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5876 fp_code);
5877 else
5878 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5879 stub_fn = mips16_stub_function (buf);
5881 /* The function uses $2 as an argument, so calls to it
5882 cannot be lazily bound. */
5883 SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
5885 /* Load the target function into $2. */
5886 addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
5887 lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
5889 /* Emit the call. */
5890 insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
5891 args_size, NULL_RTX, lazy_p);
5893 /* Tell GCC that this call does indeed use the value of $2. */
5894 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
5896 /* If we are handling a floating-point return value, we need to
5897 save $18 in the function prologue. Putting a note on the
5898 call will mean that df_regs_ever_live_p ($18) will be true if the
5899 call is not eliminated, and we can check that in the prologue
5900 code. */
5901 if (fp_ret_p)
5902 CALL_INSN_FUNCTION_USAGE (insn) =
5903 gen_rtx_EXPR_LIST (VOIDmode,
5904 gen_rtx_CLOBBER (VOIDmode,
5905 gen_rtx_REG (word_mode, 18)),
5906 CALL_INSN_FUNCTION_USAGE (insn));
5908 return insn;
5911 /* We know the function we are going to call. If we have already
5912 built a stub, we don't need to do anything further. */
5913 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5914 for (l = mips16_stubs; l != NULL; l = l->next)
5915 if (strcmp (l->name, fnname) == 0)
5916 break;
5918 if (l == NULL)
5920 const char *separator;
5921 char *secname, *stubname;
5922 tree stubid, stubdecl;
5923 unsigned int f;
5925 /* If the function does not return in FPRs, the special stub
5926 section is named
5927 .mips16.call.FNNAME
5929 If the function does return in FPRs, the stub section is named
5930 .mips16.call.fp.FNNAME
5932 Build a decl for the stub. */
5933 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5934 fnname, NULL));
5935 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5936 fnname, NULL));
5937 stubid = get_identifier (stubname);
5938 stubdecl = build_decl (FUNCTION_DECL, stubid,
5939 build_function_type (void_type_node, NULL_TREE));
5940 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5941 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5942 void_type_node);
5944 /* Output a comment. */
5945 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5946 (fp_ret_p
5947 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5948 : ""),
5949 fnname);
5950 separator = "";
5951 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5953 fprintf (asm_out_file, "%s%s", separator,
5954 (f & 3) == 1 ? "float" : "double");
5955 separator = ", ";
5957 fprintf (asm_out_file, ")\n");
5959 /* Start the function definition. */
5960 assemble_start_function (stubdecl, stubname);
5961 mips_start_function_definition (stubname, false);
5963 if (!fp_ret_p)
5965 /* Load the address of the MIPS16 function into $25. Do this
5966 first so that targets with coprocessor interlocks can use
5967 an MFC1 to fill the delay slot. */
5968 if (TARGET_EXPLICIT_RELOCS)
5970 output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
5971 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
5973 else
5974 output_asm_insn ("la\t%^,%0", &fn);
5977 /* Move the arguments from general registers to floating-point
5978 registers. */
5979 mips_output_args_xfer (fp_code, 't');
5981 if (!fp_ret_p)
5983 /* Jump to the previously-loaded address. */
5984 output_asm_insn ("jr\t%^", NULL);
5986 else
5988 /* Save the return address in $18 and call the non-MIPS16 function.
5989 The stub's caller knows that $18 might be clobbered, even though
5990 $18 is usually a call-saved register. */
5991 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5992 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5993 output_asm_insn (MIPS_CALL ("jal", &fn, 0), &fn);
5995 /* Move the result from floating-point registers to
5996 general registers. */
5997 switch (GET_MODE (retval))
5999 case SCmode:
6000 mips_output_32bit_xfer ('f', GP_RETURN + 1,
6001 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6002 /* Fall through. */
6003 case SFmode:
6004 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6005 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
6007 /* On 64-bit targets, complex floats are returned in
6008 a single GPR, such that an "sd" to a suitably-aligned
6009 address would store the value correctly. */
6010 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6011 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
6012 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
6013 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
6014 reg_names[GP_RETURN],
6015 reg_names[GP_RETURN],
6016 reg_names[GP_RETURN + 1]);
6018 break;
6020 case DCmode:
6021 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
6022 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6023 /* Fall through. */
6024 case DFmode:
6025 case V2SFmode:
6026 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6027 break;
6029 default:
6030 gcc_unreachable ();
6032 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
6035 #ifdef ASM_DECLARE_FUNCTION_SIZE
6036 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
6037 #endif
6039 mips_end_function_definition (stubname);
6041 /* Record this stub. */
6042 l = XNEW (struct mips16_stub);
6043 l->name = xstrdup (fnname);
6044 l->fp_ret_p = fp_ret_p;
6045 l->next = mips16_stubs;
6046 mips16_stubs = l;
6049 /* If we expect a floating-point return value, but we've built a
6050 stub which does not expect one, then we're in trouble. We can't
6051 use the existing stub, because it won't handle the floating-point
6052 value. We can't build a new stub, because the linker won't know
6053 which stub to use for the various calls in this object file.
6054 Fortunately, this case is illegal, since it means that a function
6055 was declared in two different ways in a single compilation. */
6056 if (fp_ret_p && !l->fp_ret_p)
6057 error ("cannot handle inconsistent calls to %qs", fnname);
6059 if (retval == NULL_RTX)
6060 insn = gen_call_internal_direct (fn, args_size);
6061 else
6062 insn = gen_call_value_internal_direct (retval, fn, args_size);
6063 insn = mips_emit_call_insn (insn, fn, fn, false);
6065 /* If we are calling a stub which handles a floating-point return
6066 value, we need to arrange to save $18 in the prologue. We do this
6067 by marking the function call as using the register. The prologue
6068 will later see that it is used, and emit code to save it. */
6069 if (fp_ret_p)
6070 CALL_INSN_FUNCTION_USAGE (insn) =
6071 gen_rtx_EXPR_LIST (VOIDmode,
6072 gen_rtx_CLOBBER (VOIDmode,
6073 gen_rtx_REG (word_mode, 18)),
6074 CALL_INSN_FUNCTION_USAGE (insn));
6076 return insn;
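/* Rough illustration (editorial sketch, not actual compiler output):
   for a MIPS16 caller of "double sqrt (double)" on a little-endian,
   non-PIC o32 target, the stub built above has roughly this shape:

     # Stub function to call double sqrt (double)
     .section .mips16.call.fp.sqrt
     .ent    __call_stub_fp_sqrt
     __call_stub_fp_sqrt:
     mtc1    $4,$f12
     mtc1    $5,$f13
     move    $18,$31
     jal     sqrt
     mfc1    $2,$f0
     mfc1    $3,$f1
     jr      $18
     .end    __call_stub_fp_sqrt

   The caller simply emits "jal sqrt"; if sqrt turns out to be a
   non-MIPS16 function, the linker redirects that JAL to the stub.
   The exact transfer instructions depend on the ABI and endianness.  */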
6079 /* Expand a call of type TYPE. RESULT is where the result will go (null
6080 for "call"s and "sibcall"s), ADDR is the address of the function,
6081 ARGS_SIZE is the size of the arguments and AUX is the value passed
6082 to us by mips_function_arg. LAZY_P is true if this call already
6083 involves a lazily-bound function address (such as when calling
6084 functions through a MIPS16 hard-float stub).
6086 Return the call itself. */
6089 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6090 rtx args_size, rtx aux, bool lazy_p)
6092 rtx orig_addr, pattern, insn;
6093 int fp_code;
6095 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6096 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6097 if (insn)
6099 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6100 return insn;
6103 orig_addr = addr;
6104 if (!call_insn_operand (addr, VOIDmode))
6106 if (type == MIPS_CALL_EPILOGUE)
6107 addr = MIPS_EPILOGUE_TEMP (Pmode);
6108 else
6109 addr = gen_reg_rtx (Pmode);
6110 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6113 if (result == 0)
6115 rtx (*fn) (rtx, rtx);
6117 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6118 fn = gen_call_split;
6119 else if (type == MIPS_CALL_SIBCALL)
6120 fn = gen_sibcall_internal;
6121 else
6122 fn = gen_call_internal;
6124 pattern = fn (addr, args_size);
6126 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6128 /* Handle return values created by mips_return_fpr_pair. */
6129 rtx (*fn) (rtx, rtx, rtx, rtx);
6130 rtx reg1, reg2;
6132 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6133 fn = gen_call_value_multiple_split;
6134 else if (type == MIPS_CALL_SIBCALL)
6135 fn = gen_sibcall_value_multiple_internal;
6136 else
6137 fn = gen_call_value_multiple_internal;
6139 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6140 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6141 pattern = fn (reg1, addr, args_size, reg2);
6143 else
6145 rtx (*fn) (rtx, rtx, rtx);
6147 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6148 fn = gen_call_value_split;
6149 else if (type == MIPS_CALL_SIBCALL)
6150 fn = gen_sibcall_value_internal;
6151 else
6152 fn = gen_call_value_internal;
6154 /* Handle return values created by mips_return_fpr_single. */
6155 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6156 result = XEXP (XVECEXP (result, 0, 0), 0);
6157 pattern = fn (result, addr, args_size);
6160 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6163 /* Split call instruction INSN into a $gp-clobbering call and
6164 (where necessary) an instruction to restore $gp from its save slot.
6165 CALL_PATTERN is the pattern of the new call. */
6167 void
6168 mips_split_call (rtx insn, rtx call_pattern)
6170 rtx new_insn;
6172 new_insn = emit_call_insn (call_pattern);
6173 CALL_INSN_FUNCTION_USAGE (new_insn)
6174 = copy_rtx (CALL_INSN_FUNCTION_USAGE (insn));
6175 if (!find_reg_note (insn, REG_NORETURN, 0))
6176 /* Pick a temporary register that is suitable for both MIPS16 and
6177 non-MIPS16 code. $4 and $5 are used for returning complex double
6178 values in soft-float code, so $6 is the first suitable candidate. */
6179 mips_restore_gp (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6182 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
6184 static bool
6185 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6187 if (!TARGET_SIBCALLS)
6188 return false;
6190 /* We can't do a sibcall if the called function is a MIPS16 function
6191 because there is no direct "jx" instruction equivalent to "jalx" to
6192 switch the ISA mode. We only care about cases where the sibling
6193 and normal calls would both be direct. */
6194 if (decl
6195 && mips_use_mips16_mode_p (decl)
6196 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6197 return false;
6199 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6200 functions could be MIPS16 ones unless an attribute explicitly tells
6201 us otherwise. */
6202 if (TARGET_INTERLINK_MIPS16
6203 && decl
6204 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6205 && !mips_nomips16_decl_p (decl)
6206 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6207 return false;
6209 /* Otherwise OK. */
6210 return true;
6213 /* Emit code to move general operand SRC into condition-code
6214 register DEST given that SCRATCH is a scratch TFmode FPR.
6215 The sequence is:
6217 FP1 = SRC
6218 FP2 = 0.0f
6219 DEST = FP2 < FP1
6221 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
6223 void
6224 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
6226 rtx fp1, fp2;
6228 /* Change the source to SFmode. */
6229 if (MEM_P (src))
6230 src = adjust_address (src, SFmode, 0);
6231 else if (REG_P (src) || GET_CODE (src) == SUBREG)
6232 src = gen_rtx_REG (SFmode, true_regnum (src));
6234 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
6235 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
6237 mips_emit_move (copy_rtx (fp1), src);
6238 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
6239 emit_insn (gen_slt_sf (dest, fp2, fp1));
6242 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6243 Assume that the areas do not overlap. */
6245 static void
6246 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
6248 HOST_WIDE_INT offset, delta;
6249 unsigned HOST_WIDE_INT bits;
6250 int i;
6251 enum machine_mode mode;
6252 rtx *regs;
6254 /* Work out how many bits to move at a time. If both operands have
6255 half-word alignment, it is usually better to move in half words.
6256 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
6257 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
6258 Otherwise move word-sized chunks. */
6259 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
6260 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
6261 bits = BITS_PER_WORD / 2;
6262 else
6263 bits = BITS_PER_WORD;
6265 mode = mode_for_size (bits, MODE_INT, 0);
6266 delta = bits / BITS_PER_UNIT;
6268 /* Allocate a buffer for the temporary registers. */
6269 regs = XALLOCAVEC (rtx, length / delta);
6271 /* Load as many BITS-sized chunks as possible. Use a normal load if
6272 the source has enough alignment, otherwise use left/right pairs. */
6273 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6275 regs[i] = gen_reg_rtx (mode);
6276 if (MEM_ALIGN (src) >= bits)
6277 mips_emit_move (regs[i], adjust_address (src, mode, offset));
6278 else
6280 rtx part = adjust_address (src, BLKmode, offset);
6281 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
6282 gcc_unreachable ();
6286 /* Copy the chunks to the destination. */
6287 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6288 if (MEM_ALIGN (dest) >= bits)
6289 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
6290 else
6292 rtx part = adjust_address (dest, BLKmode, offset);
6293 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
6294 gcc_unreachable ();
6297 /* Mop up any left-over bytes. */
6298 if (offset < length)
6300 src = adjust_address (src, BLKmode, offset);
6301 dest = adjust_address (dest, BLKmode, offset);
6302 move_by_pieces (dest, src, length - offset,
6303 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
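/* Worked example (editorial note): copying 11 bytes between word-aligned
   buffers on a 32-bit target uses BITS = 32, so the loops above emit two
   lw/sw pairs for bytes 0-7 and leave the remaining 3 bytes to
   move_by_pieces.  With only half-word alignment on both sides,
   BITS = 16 and the same copy becomes five lh/sh pairs plus one
   left-over byte.  */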
6307 /* Helper function for doing a loop-based block operation on memory
6308 reference MEM. Each iteration of the loop will operate on LENGTH
6309 bytes of MEM.
6311 Create a new base register for use within the loop and point it to
6312 the start of MEM. Create a new memory reference that uses this
6313 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
6315 static void
6316 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
6317 rtx *loop_reg, rtx *loop_mem)
6319 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
6321 /* Although the new mem does not refer to a known location,
6322 it does keep up to LENGTH bytes of alignment. */
6323 *loop_mem = change_address (mem, BLKmode, *loop_reg);
6324 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
6327 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6328 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
6329 the memory regions do not overlap. */
6331 static void
6332 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
6333 HOST_WIDE_INT bytes_per_iter)
6335 rtx label, src_reg, dest_reg, final_src;
6336 HOST_WIDE_INT leftover;
6338 leftover = length % bytes_per_iter;
6339 length -= leftover;
6341 /* Create registers and memory references for use within the loop. */
6342 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
6343 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
6345 /* Calculate the value that SRC_REG should have after the last iteration
6346 of the loop. */
6347 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
6348 0, 0, OPTAB_WIDEN);
6350 /* Emit the start of the loop. */
6351 label = gen_label_rtx ();
6352 emit_label (label);
6354 /* Emit the loop body. */
6355 mips_block_move_straight (dest, src, bytes_per_iter);
6357 /* Move on to the next block. */
6358 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
6359 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
6361 /* Emit the loop condition. */
6362 if (Pmode == DImode)
6363 emit_insn (gen_cmpdi (src_reg, final_src));
6364 else
6365 emit_insn (gen_cmpsi (src_reg, final_src));
6366 emit_jump_insn (gen_bne (label));
6368 /* Mop up any left-over bytes. */
6369 if (leftover)
6370 mips_block_move_straight (dest, src, leftover);
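/* Worked example (editorial note, using a hypothetical BYTES_PER_ITER
   of 16): for LENGTH = 100 the code above copies 96 bytes in six loop
   iterations and then falls back on mips_block_move_straight for the
   remaining 4 bytes.  The real iteration size is
   MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER, passed in by
   mips_expand_block_move below.  */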
6373 /* Expand a movmemsi instruction, which copies LENGTH bytes from
6374 memory reference SRC to memory reference DEST. */
6376 bool
6377 mips_expand_block_move (rtx dest, rtx src, rtx length)
6379 if (GET_CODE (length) == CONST_INT)
6381 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6383 mips_block_move_straight (dest, src, INTVAL (length));
6384 return true;
6386 else if (optimize)
6388 mips_block_move_loop (dest, src, INTVAL (length),
6389 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
6390 return true;
6393 return false;
6396 /* Expand a loop of synci insns for the address range [BEGIN, END). */
6398 void
6399 mips_expand_synci_loop (rtx begin, rtx end)
6401 rtx inc, label, cmp, cmp_result;
6403 /* Load INC with the cache line size (rdhwr INC,$1). */
6404 inc = gen_reg_rtx (SImode);
6405 emit_insn (gen_rdhwr (inc, const1_rtx));
6407 /* Loop back to here. */
6408 label = gen_label_rtx ();
6409 emit_label (label);
6411 emit_insn (gen_synci (begin));
6413 cmp = mips_force_binary (Pmode, GTU, begin, end);
6415 mips_emit_binary (PLUS, begin, begin, inc);
6417 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6418 emit_jump_insn (gen_condjump (cmp_result, label));
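/* Rough sketch of the generated loop (hypothetical assembly, ignoring
   register allocation):

     rdhwr   $t0,$1          # $t0 = cache line size
     1:
     synci   0($a0)
     sltu    $t1,$a1,$a0     # done once BEGIN > END
     addu    $a0,$a0,$t0
     beqz    $t1,1b

   The actual comparison and branch come from mips_force_binary and
   gen_condjump, so the exact instructions may differ.  */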
6421 /* Expand a QI or HI mode atomic memory operation.
6423 GENERATOR contains a pointer to the gen_* function that generates
6424 the SI mode underlying atomic operation using masks that we
6425 calculate.
6427 RESULT is the return register for the operation. Its value is NULL
6428 if unused.
6430 MEM is the location of the atomic access.
6432 OLDVAL is the first operand for the operation.
6434 NEWVAL is the optional second operand for the operation. Its value
6435 is NULL if unused. */
6437 void
6438 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6439 rtx result, rtx mem, rtx oldval, rtx newval)
6441 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6442 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6443 rtx res = NULL;
6444 enum machine_mode mode;
6446 mode = GET_MODE (mem);
6448 /* Compute the address of the containing SImode value. */
6449 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6450 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6451 force_reg (Pmode, GEN_INT (-4)));
6453 /* Create a memory reference for it. */
6454 memsi = gen_rtx_MEM (SImode, memsi_addr);
6455 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6456 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6458 /* Work out the byte offset of the QImode or HImode value,
6459 counting from the least significant byte. */
6460 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6461 if (TARGET_BIG_ENDIAN)
6462 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6464 /* Multiply by eight to convert the shift value from bytes to bits. */
6465 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6467 /* Make the final shift an SImode value, so that it can be used in
6468 SImode operations. */
6469 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6471 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6472 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6473 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6474 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6476 /* Compute the equivalent exclusive mask. */
6477 inverted_mask = gen_reg_rtx (SImode);
6478 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6479 gen_rtx_NOT (SImode, mask)));
6481 /* Shift the old value into place. */
6482 if (oldval != const0_rtx)
6484 oldval = convert_modes (SImode, mode, oldval, true);
6485 oldval = force_reg (SImode, oldval);
6486 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6489 /* Do the same for the new value. */
6490 if (newval && newval != const0_rtx)
6492 newval = convert_modes (SImode, mode, newval, true);
6493 newval = force_reg (SImode, newval);
6494 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6497 /* Do the SImode atomic access. */
6498 if (result)
6499 res = gen_reg_rtx (SImode);
6500 if (newval)
6501 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6502 else if (result)
6503 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6504 else
6505 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6507 emit_insn (si_op);
6509 if (result)
6511 /* Shift and convert the result. */
6512 mips_emit_binary (AND, res, res, mask);
6513 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6514 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
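/* Worked example (editorial note): for a HImode access at byte offset 2
   within its containing word on a little-endian target, the code above
   computes

     shift = (addr & 3) * 8   = 16
     mask  = 0xffff << 16     = 0xffff0000
     inverted_mask            = 0x0000ffff

   so the SImode LL/SC sequence only ever modifies the upper half-word.
   On a big-endian target the byte offset is first XORed with 2 (or 3
   for QImode), placing the mask over the half-word that holds the
   value there.  */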
6518 /* Return true if it is possible to use left/right accesses for a
6519 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6520 returning true, update *OP, *LEFT and *RIGHT as follows:
6522 *OP is a BLKmode reference to the whole field.
6524 *LEFT is a QImode reference to the first byte if big endian or
6525 the last byte if little endian. This address can be used in the
6526 left-side instructions (LWL, SWL, LDL, SDL).
6528 *RIGHT is a QImode reference to the opposite end of the field and
6529 can be used in the matching right-side instructions (LWR, SWR, LDR, SDR). */
6531 static bool
6532 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6533 rtx *left, rtx *right)
6535 rtx first, last;
6537 /* Check that the operand really is a MEM. Not all the extv and
6538 extzv predicates are checked. */
6539 if (!MEM_P (*op))
6540 return false;
6542 /* Check that the size is valid. */
6543 if (width != 32 && (!TARGET_64BIT || width != 64))
6544 return false;
6546 /* We can only access byte-aligned values. Since we are always passed
6547 a reference to the first byte of the field, it is not necessary to
6548 do anything with BITPOS after this check. */
6549 if (bitpos % BITS_PER_UNIT != 0)
6550 return false;
6552 /* Reject aligned bitfields: we want to use a normal load or store
6553 instead of a left/right pair. */
6554 if (MEM_ALIGN (*op) >= width)
6555 return false;
6557 /* Adjust *OP to refer to the whole field. This also has the effect
6558 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6559 *op = adjust_address (*op, BLKmode, 0);
6560 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6562 /* Get references to both ends of the field. We deliberately don't
6563 use the original QImode *OP for FIRST since the new BLKmode one
6564 might have a simpler address. */
6565 first = adjust_address (*op, QImode, 0);
6566 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6568 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6569 correspond to the MSB and RIGHT to the LSB. */
6570 if (TARGET_BIG_ENDIAN)
6571 *left = first, *right = last;
6572 else
6573 *left = last, *right = first;
6575 return true;
6578 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6579 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6580 the operation is the equivalent of:
6582 (set DEST (*_extract SRC WIDTH BITPOS))
6584 Return true on success. */
6586 bool
6587 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6588 HOST_WIDE_INT bitpos)
6590 rtx left, right, temp;
6592 /* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
6593 be a paradoxical word_mode subreg. This is the only case in which
6594 we allow the destination to be larger than the source. */
6595 if (GET_CODE (dest) == SUBREG
6596 && GET_MODE (dest) == DImode
6597 && GET_MODE (SUBREG_REG (dest)) == SImode)
6598 dest = SUBREG_REG (dest);
6600 /* After the above adjustment, the destination must be the same
6601 width as the source. */
6602 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6603 return false;
6605 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6606 return false;
6608 temp = gen_reg_rtx (GET_MODE (dest));
6609 if (GET_MODE (dest) == DImode)
6611 emit_insn (gen_mov_ldl (temp, src, left));
6612 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6614 else
6616 emit_insn (gen_mov_lwl (temp, src, left));
6617 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6619 return true;
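/* Illustration (editorial sketch): on a big-endian 32-bit target,
   loading an unaligned word that starts at 1($a0) through the path
   above produces in effect

     lwl     $v0,1($a0)      # bytes at the "left" (MSB) end
     lwr     $v0,4($a0)      # bytes at the "right" (LSB) end

   where the two addresses are the *LEFT and *RIGHT references built by
   mips_get_unaligned_mem.  On little-endian targets the two addresses
   are swapped.  */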
6622 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6623 BITPOS and SRC are the operands passed to the expander; the operation
6624 is the equivalent of:
6626 (set (zero_extract DEST WIDTH BITPOS) SRC)
6628 Return true on success. */
6630 bool
6631 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6632 HOST_WIDE_INT bitpos)
6634 rtx left, right;
6635 enum machine_mode mode;
6637 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6638 return false;
6640 mode = mode_for_size (width, MODE_INT, 0);
6641 src = gen_lowpart (mode, src);
6642 if (mode == DImode)
6644 emit_insn (gen_mov_sdl (dest, src, left));
6645 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6647 else
6649 emit_insn (gen_mov_swl (dest, src, left));
6650 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6652 return true;
6655 /* Return true if X is a MEM with the same size as MODE. */
6657 bool
6658 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6660 rtx size;
6662 if (!MEM_P (x))
6663 return false;
6665 size = MEM_SIZE (x);
6666 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6669 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6670 source of an "ext" instruction or the destination of an "ins"
6671 instruction. OP must be a register operand and the following
6672 conditions must hold:
6674 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6675 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6676 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6678 Also reject lengths equal to a word as they are better handled
6679 by the move patterns. */
6681 bool
6682 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6684 if (!ISA_HAS_EXT_INS
6685 || !register_operand (op, VOIDmode)
6686 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6687 return false;
6689 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6690 return false;
6692 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6693 return false;
6695 return true;
6698 /* Return true if MASK and SHIFT are valid for a mask-low-and-shift-left
6699 operation, where MAXLEN is the maximum length of consecutive bits that
6700 can make up MASK. MODE is the mode of the operation. See
6701 mask_low_and_shift_len for the actual definition. */
6703 bool
6704 mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
6706 return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
6709 /* The canonical form of a mask-low-and-shift-left operation is
6710 (and (ashift X SHIFT) MASK), where MASK has its low SHIFT bits
6711 cleared. We therefore need to shift MASK to the right before checking
6712 whether it is a valid mask value. MODE is the mode of the operation.
6713 Return the length of the mask if it is valid, otherwise return -1. */
6716 mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
6718 HOST_WIDE_INT shval;
6720 shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
6721 return exact_log2 ((UINTVAL (mask) >> shval) + 1);
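/* Worked example (editorial note): in SImode, for SHIFT = 8 and
   MASK = 0xffffff00,

     shval                        = 8
     UINTVAL (mask) >> shval      = 0x00ffffff
     exact_log2 (0x00ffffff + 1)  = 24

   so this is a valid mask-low-and-shift-left of length 24.  A mask such
   as 0xff00ff00 fails the exact_log2 test and yields -1.  */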
6724 /* Return true if -msplit-addresses is selected and should be honored.
6726 -msplit-addresses is a half-way house between explicit relocations
6727 and the traditional assembler macros. It can split absolute 32-bit
6728 symbolic constants into a high/lo_sum pair but uses macros for other
6729 sorts of access.
6731 Like explicit relocation support for REL targets, it relies
6732 on GNU extensions in the assembler and the linker.
6734 Although this code should work for -O0, it has traditionally
6735 been treated as an optimization. */
6737 static bool
6738 mips_split_addresses_p (void)
6740 return (TARGET_SPLIT_ADDRESSES
6741 && optimize
6742 && !TARGET_MIPS16
6743 && !flag_pic
6744 && !ABI_HAS_64BIT_SYMBOLS);
6747 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6749 static void
6750 mips_init_relocs (void)
6752 memset (mips_split_p, '\0', sizeof (mips_split_p));
6753 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
6754 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6755 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6757 if (ABI_HAS_64BIT_SYMBOLS)
6759 if (TARGET_EXPLICIT_RELOCS)
6761 mips_split_p[SYMBOL_64_HIGH] = true;
6762 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6763 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6765 mips_split_p[SYMBOL_64_MID] = true;
6766 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6767 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6769 mips_split_p[SYMBOL_64_LOW] = true;
6770 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6771 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6773 mips_split_p[SYMBOL_ABSOLUTE] = true;
6774 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6777 else
6779 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6781 mips_split_p[SYMBOL_ABSOLUTE] = true;
6782 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6783 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6785 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6789 if (TARGET_MIPS16)
6791 /* The high part is provided by a pseudo copy of $gp. */
6792 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6793 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6795 else if (TARGET_EXPLICIT_RELOCS)
6796 /* Small data constants are kept whole until after reload,
6797 then lowered by mips_rewrite_small_data. */
6798 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6800 if (TARGET_EXPLICIT_RELOCS)
6802 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6803 if (TARGET_NEWABI)
6805 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6806 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6808 else
6810 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6811 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6813 if (TARGET_MIPS16)
6814 /* Expose the use of $28 as soon as possible. */
6815 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
6817 if (TARGET_XGOT)
6819 /* The HIGH and LO_SUM are matched by special .md patterns. */
6820 mips_split_p[SYMBOL_GOT_DISP] = true;
6822 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6823 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6824 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6826 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6827 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6828 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6830 else
6832 if (TARGET_NEWABI)
6833 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6834 else
6835 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6836 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6837 if (TARGET_MIPS16)
6838 /* Expose the use of $28 as soon as possible. */
6839 mips_split_p[SYMBOL_GOT_DISP] = true;
6843 if (TARGET_NEWABI)
6845 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6846 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6847 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6850 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6851 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6853 mips_split_p[SYMBOL_DTPREL] = true;
6854 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6855 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6857 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6859 mips_split_p[SYMBOL_TPREL] = true;
6860 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6861 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6863 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6866 /* If OP is an UNSPEC address, return the address to which it refers,
6867 otherwise return OP itself. */
6869 static rtx
6870 mips_strip_unspec_address (rtx op)
6872 rtx base, offset;
6874 split_const (op, &base, &offset);
6875 if (UNSPEC_ADDRESS_P (base))
6876 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6877 return op;
6880 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6881 in context CONTEXT. RELOCS is the array of relocations to use. */
6883 static void
6884 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6885 const char **relocs)
6887 enum mips_symbol_type symbol_type;
6888 const char *p;
6890 symbol_type = mips_classify_symbolic_expression (op, context);
6891 gcc_assert (relocs[symbol_type]);
6893 fputs (relocs[symbol_type], file);
6894 output_addr_const (file, mips_strip_unspec_address (op));
6895 for (p = relocs[symbol_type]; *p != 0; p++)
6896 if (*p == '(')
6897 fputc (')', file);
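/* Illustration (editorial note): the loop above closes one parenthesis
   for every '(' in the relocation prefix, so a single-level prefix such
   as "%hi(" prints "%hi(sym)", while the three-level NewABI prefix
   "%hi(%neg(%gp_rel(" prints "%hi(%neg(%gp_rel(sym)))".  */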
6900 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6901 The punctuation characters are:
6903 '(' Start a nested ".set noreorder" block.
6904 ')' End a nested ".set noreorder" block.
6905 '[' Start a nested ".set noat" block.
6906 ']' End a nested ".set noat" block.
6907 '<' Start a nested ".set nomacro" block.
6908 '>' End a nested ".set nomacro" block.
6909 '*' Behave like %(%< if generating a delayed-branch sequence.
6910 '#' Print a nop if in a ".set noreorder" block.
6911 '/' Like '#', but do nothing within a delayed-branch sequence.
6912 '?' Print "l" if mips_branch_likely is true
6913 '~' Print a nop if mips_branch_likely is true
6914 '.' Print the name of the register with a hard-wired zero (zero or $0).
6915 '@' Print the name of the assembler temporary register (at or $1).
6916 '^' Print the name of the pic call-through register (t9 or $25).
6917 '+' Print the name of the gp register (usually gp or $28).
6918 '$' Print the name of the stack pointer register (sp or $29).
6919 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6920 '-' Print ".set pop" under the same conditions as '|'.
6922 See also mips_init_print_operand_punct. */
6924 static void
6925 mips_print_operand_punctuation (FILE *file, int ch)
6927 switch (ch)
6929 case '(':
6930 if (set_noreorder++ == 0)
6931 fputs (".set\tnoreorder\n\t", file);
6932 break;
6934 case ')':
6935 gcc_assert (set_noreorder > 0);
6936 if (--set_noreorder == 0)
6937 fputs ("\n\t.set\treorder", file);
6938 break;
6940 case '[':
6941 if (set_noat++ == 0)
6942 fputs (".set\tnoat\n\t", file);
6943 break;
6945 case ']':
6946 gcc_assert (set_noat > 0);
6947 if (--set_noat == 0)
6948 fputs ("\n\t.set\tat", file);
6949 break;
6951 case '<':
6952 if (set_nomacro++ == 0)
6953 fputs (".set\tnomacro\n\t", file);
6954 break;
6956 case '>':
6957 gcc_assert (set_nomacro > 0);
6958 if (--set_nomacro == 0)
6959 fputs ("\n\t.set\tmacro", file);
6960 break;
6962 case '*':
6963 if (final_sequence != 0)
6965 mips_print_operand_punctuation (file, '(');
6966 mips_print_operand_punctuation (file, '<');
6968 break;
6970 case '#':
6971 if (set_noreorder != 0)
6972 fputs ("\n\tnop", file);
6973 break;
6975 case '/':
6976 /* Print an extra newline so that the delayed insn is separated
6977 from the following ones. This looks neater and is consistent
6978 with non-nop delayed sequences. */
6979 if (set_noreorder != 0 && final_sequence == 0)
6980 fputs ("\n\tnop\n", file);
6981 break;
6983 case '?':
6984 if (mips_branch_likely)
6985 putc ('l', file);
6986 break;
6988 case '~':
6989 if (mips_branch_likely)
6990 fputs ("\n\tnop", file);
6991 break;
6993 case '.':
6994 fputs (reg_names[GP_REG_FIRST + 0], file);
6995 break;
6997 case '@':
6998 fputs (reg_names[GP_REG_FIRST + 1], file);
6999 break;
7001 case '^':
7002 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
7003 break;
7005 case '+':
7006 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
7007 break;
7009 case '$':
7010 fputs (reg_names[STACK_POINTER_REGNUM], file);
7011 break;
7013 case '|':
7014 if (!ISA_HAS_LL_SC)
7015 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
7016 break;
7018 case '-':
7019 if (!ISA_HAS_LL_SC)
7020 fputs ("\n\t.set\tpop", file);
7021 break;
7023 default:
7024 gcc_unreachable ();
7025 break;
7029 /* Initialize mips_print_operand_punct. */
7031 static void
7032 mips_init_print_operand_punct (void)
7034 const char *p;
7036 for (p = "()[]<>*#/?~.@^+$|-"; *p; p++)
7037 mips_print_operand_punct[(unsigned char) *p] = true;
7040 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
7041 associated with condition CODE. Print the condition part of the
7042 opcode to FILE. */
7044 static void
7045 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
7047 switch (code)
7049 case EQ:
7050 case NE:
7051 case GT:
7052 case GE:
7053 case LT:
7054 case LE:
7055 case GTU:
7056 case GEU:
7057 case LTU:
7058 case LEU:
7059 /* Conveniently, the MIPS names for these conditions are the same
7060 as their RTL equivalents. */
7061 fputs (GET_RTX_NAME (code), file);
7062 break;
7064 default:
7065 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7066 break;
7070 /* Likewise floating-point branches. */
7072 static void
7073 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
7075 switch (code)
7077 case EQ:
7078 fputs ("c1f", file);
7079 break;
7081 case NE:
7082 fputs ("c1t", file);
7083 break;
7085 default:
7086 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7087 break;
7091 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
7093 'X' Print CONST_INT OP in hexadecimal format.
7094 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
7095 'd' Print CONST_INT OP in decimal.
7096 'm' Print one less than CONST_INT OP in decimal.
7097 'h' Print the high-part relocation associated with OP, after stripping
7098 any outermost HIGH.
7099 'R' Print the low-part relocation associated with OP.
7100 'C' Print the integer branch condition for comparison OP.
7101 'N' Print the inverse of the integer branch condition for comparison OP.
7102 'F' Print the FPU branch condition for comparison OP.
7103 'W' Print the inverse of the FPU branch condition for comparison OP.
7104 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7105 'z' for (eq:?I ...), 'n' for (ne:?I ...).
7106 't' Like 'T', but with the EQ/NE cases reversed
7107 'Y' Print mips_fp_conditions[INTVAL (OP)]
7108 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7109 'q' Print a DSP accumulator register.
7110 'D' Print the second part of a double-word register or memory operand.
7111 'L' Print the low-order register in a double-word register operand.
7112 'M' Print the high-order register in a double-word register operand.
7113 'z' Print $0 if OP is zero, otherwise print OP normally. */
7115 void
7116 mips_print_operand (FILE *file, rtx op, int letter)
7118 enum rtx_code code;
7120 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
7122 mips_print_operand_punctuation (file, letter);
7123 return;
7126 gcc_assert (op);
7127 code = GET_CODE (op);
7129 switch (letter)
7131 case 'X':
7132 if (GET_CODE (op) == CONST_INT)
7133 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
7134 else
7135 output_operand_lossage ("invalid use of '%%%c'", letter);
7136 break;
7138 case 'x':
7139 if (GET_CODE (op) == CONST_INT)
7140 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
7141 else
7142 output_operand_lossage ("invalid use of '%%%c'", letter);
7143 break;
7145 case 'd':
7146 if (GET_CODE (op) == CONST_INT)
7147 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
7148 else
7149 output_operand_lossage ("invalid use of '%%%c'", letter);
7150 break;
7152 case 'm':
7153 if (GET_CODE (op) == CONST_INT)
7154 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
7155 else
7156 output_operand_lossage ("invalid use of '%%%c'", letter);
7157 break;
7159 case 'h':
7160 if (code == HIGH)
7161 op = XEXP (op, 0);
7162 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
7163 break;
7165 case 'R':
7166 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
7167 break;
7169 case 'C':
7170 mips_print_int_branch_condition (file, code, letter);
7171 break;
7173 case 'N':
7174 mips_print_int_branch_condition (file, reverse_condition (code), letter);
7175 break;
7177 case 'F':
7178 mips_print_float_branch_condition (file, code, letter);
7179 break;
7181 case 'W':
7182 mips_print_float_branch_condition (file, reverse_condition (code),
7183 letter);
7184 break;
7186 case 'T':
7187 case 't':
7189 int truth = (code == NE) == (letter == 'T');
7190 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
7192 break;
7194 case 'Y':
7195 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
7196 fputs (mips_fp_conditions[UINTVAL (op)], file);
7197 else
7198 output_operand_lossage ("'%%%c' is not a valid operand prefix",
7199 letter);
7200 break;
7202 case 'Z':
7203 if (ISA_HAS_8CC)
7205 mips_print_operand (file, op, 0);
7206 fputc (',', file);
7208 break;
7210 case 'q':
7211 if (code == REG && MD_REG_P (REGNO (op)))
7212 fprintf (file, "$ac0");
7213 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
7214 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
7215 else
7216 output_operand_lossage ("invalid use of '%%%c'", letter);
7217 break;
7219 default:
7220 switch (code)
7222 case REG:
7224 unsigned int regno = REGNO (op);
7225 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
7226 || (letter == 'L' && TARGET_BIG_ENDIAN)
7227 || letter == 'D')
7228 regno++;
7229 fprintf (file, "%s", reg_names[regno]);
7231 break;
7233 case MEM:
7234 if (letter == 'D')
7235 output_address (plus_constant (XEXP (op, 0), 4));
7236 else
7237 output_address (XEXP (op, 0));
7238 break;
7240 default:
7241 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
7242 fputs (reg_names[GP_REG_FIRST], file);
7243 else if (CONST_GP_P (op))
7244 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
7245 else
7246 output_addr_const (file, mips_strip_unspec_address (op));
7247 break;
7252 /* Output address operand X to FILE. */
7254 void
7255 mips_print_operand_address (FILE *file, rtx x)
7257 struct mips_address_info addr;
7259 if (mips_classify_address (&addr, x, word_mode, true))
7260 switch (addr.type)
7262 case ADDRESS_REG:
7263 mips_print_operand (file, addr.offset, 0);
7264 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7265 return;
7267 case ADDRESS_LO_SUM:
7268 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
7269 mips_lo_relocs);
7270 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7271 return;
7273 case ADDRESS_CONST_INT:
7274 output_addr_const (file, x);
7275 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
7276 return;
7278 case ADDRESS_SYMBOLIC:
7279 output_addr_const (file, mips_strip_unspec_address (x));
7280 return;
7282 gcc_unreachable ();
7285 /* Implement TARGET_ENCODE_SECTION_INFO. */
7287 static void
7288 mips_encode_section_info (tree decl, rtx rtl, int first)
7290 default_encode_section_info (decl, rtl, first);
7292 if (TREE_CODE (decl) == FUNCTION_DECL)
7294 rtx symbol = XEXP (rtl, 0);
7295 tree type = TREE_TYPE (decl);
7297 /* Encode whether the symbol is short or long. */
7298 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
7299 || mips_far_type_p (type))
7300 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
7304 /* Implement TARGET_SELECT_RTX_SECTION. */
7306 static section *
7307 mips_select_rtx_section (enum machine_mode mode, rtx x,
7308 unsigned HOST_WIDE_INT align)
7310 /* ??? Consider using mergeable small data sections. */
7311 if (mips_rtx_constant_in_small_data_p (mode))
7312 return get_named_section (NULL, ".sdata", 0);
7314 return default_elf_select_rtx_section (mode, x, align);
7317 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7319 The complication here is that, with the combination TARGET_ABICALLS
7320 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7321 absolute addresses, and should therefore not be included in the
7322 read-only part of a DSO. Handle such cases by selecting a normal
7323 data section instead of a read-only one. The logic apes that in
7324 default_function_rodata_section. */
7326 static section *
7327 mips_function_rodata_section (tree decl)
7329 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
7330 return default_function_rodata_section (decl);
7332 if (decl && DECL_SECTION_NAME (decl))
7334 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7335 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7337 char *rname = ASTRDUP (name);
7338 rname[14] = 'd';
7339 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7341 else if (flag_function_sections
7342 && flag_data_sections
7343 && strncmp (name, ".text.", 6) == 0)
7345 char *rname = ASTRDUP (name);
7346 memcpy (rname + 1, "data", 4);
7347 return get_section (rname, SECTION_WRITE, decl);
7350 return data_section;
7353 /* Implement TARGET_IN_SMALL_DATA_P. */
7355 static bool
7356 mips_in_small_data_p (const_tree decl)
7358 unsigned HOST_WIDE_INT size;
7360 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7361 return false;
7363 /* We don't yet generate small-data references for -mabicalls
7364 or VxWorks RTP code. See the related -G handling in
7365 mips_override_options. */
7366 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
7367 return false;
7369 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7371 const char *name;
7373 /* Reject anything that isn't in a known small-data section. */
7374 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7375 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7376 return false;
7378 /* If a symbol is defined externally, the assembler will use the
7379 usual -G rules when deciding how to implement macros. */
7380 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
7381 return true;
7383 else if (TARGET_EMBEDDED_DATA)
7385 /* Don't put constants into the small data section: we want them
7386 to be in ROM rather than RAM. */
7387 if (TREE_CODE (decl) != VAR_DECL)
7388 return false;
7390 if (TREE_READONLY (decl)
7391 && !TREE_SIDE_EFFECTS (decl)
7392 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7393 return false;
7396 /* Enforce -mlocal-sdata. */
7397 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
7398 return false;
7400 /* Enforce -mextern-sdata. */
7401 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
7403 if (DECL_EXTERNAL (decl))
7404 return false;
7405 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
7406 return false;
7409 /* We have traditionally not treated zero-sized objects as small data,
7410 so this is now effectively part of the ABI. */
7411 size = int_size_in_bytes (TREE_TYPE (decl));
7412 return size > 0 && size <= mips_small_data_threshold;
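/* Illustrative example, assuming small data is enabled and the default
   -G threshold of 8 bytes: a global "int x;" (4 bytes) is placed in
   .sdata/.sbss and accessed relative to $gp, whereas "int y[4];"
   (16 bytes) is not.  */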
7415 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7416 anchors for small data: the GP register acts as an anchor in that
7417 case. We also don't want to use them for PC-relative accesses,
7418 where the PC acts as an anchor. */
7420 static bool
7421 mips_use_anchors_for_symbol_p (const_rtx symbol)
7423 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
7425 case SYMBOL_PC_RELATIVE:
7426 case SYMBOL_GP_RELATIVE:
7427 return false;
7429 default:
7430 return default_use_anchors_for_symbol_p (symbol);
7434 /* The MIPS debug format wants all automatic variables and arguments
7435 to be in terms of the virtual frame pointer (stack pointer before
7436 any adjustment in the function), while the MIPS 3.0 linker wants
7437 the frame pointer to be the stack pointer after the initial
7438 adjustment. So, we do the adjustment here. The arg pointer (which
7439 is eliminated) points to the virtual frame pointer, while the frame
7440 pointer (which may be eliminated) points to the stack pointer after
7441 the initial adjustments. */
7443 HOST_WIDE_INT
7444 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
7446 rtx offset2 = const0_rtx;
7447 rtx reg = eliminate_constant_term (addr, &offset2);
7449 if (offset == 0)
7450 offset = INTVAL (offset2);
7452 if (reg == stack_pointer_rtx
7453 || reg == frame_pointer_rtx
7454 || reg == hard_frame_pointer_rtx)
7456 offset -= cfun->machine->frame.total_size;
7457 if (reg == hard_frame_pointer_rtx)
7458 offset += cfun->machine->frame.hard_frame_pointer_offset;
7461 /* sdbout_parms does not want this to crash for unrecognized cases. */
7462 #if 0
7463 else if (reg != arg_pointer_rtx)
7464 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7465 addr);
7466 #endif
7468 return offset;
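/* Worked example (illustrative): if the total frame size is 48 bytes,
   then an incoming argument at $sp + 56 is reported to the debugger at
   offset 56 - 48 = 8 from the virtual frame pointer described above.  */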
7471 /* Implement ASM_OUTPUT_EXTERNAL. */
7473 void
7474 mips_output_external (FILE *file, tree decl, const char *name)
7476 default_elf_asm_output_external (file, decl, name);
7478 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7479 set in order to avoid putting out names that are never really
7480 used. */
7481 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7483 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7485 /* When using assembler macros, emit .extern directives for
7486 all small-data externs so that the assembler knows how
7487 big they are.
7489 In most cases it would be safe (though pointless) to emit
7490 .externs for other symbols too. One exception is when an
7491 object is within the -G limit but declared by the user to
7492 be in a section other than .sbss or .sdata. */
7493 fputs ("\t.extern\t", file);
7494 assemble_name (file, name);
7495 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7496 int_size_in_bytes (TREE_TYPE (decl)));
7498 else if (TARGET_IRIX
7499 && mips_abi == ABI_32
7500 && TREE_CODE (decl) == FUNCTION_DECL)
7502 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7503 `.global name .text' directive for every used but
7504 undefined function. If we don't, the linker may perform
7505 an optimization (skipping over the insns that set $gp)
7506 when it is unsafe. */
7507 fputs ("\t.globl ", file);
7508 assemble_name (file, name);
7509 fputs (" .text\n", file);
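/* The directives emitted above look like this (illustrative):

	.extern	foo, 4		# small-data extern, 4 bytes wide
	.globl bar .text	# IRIX o32, used but undefined function  */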
7514 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7516 void
7517 mips_output_filename (FILE *stream, const char *name)
7519 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7520 directives. */
7521 if (write_symbols == DWARF2_DEBUG)
7522 return;
7523 else if (mips_output_filename_first_time)
7525 mips_output_filename_first_time = 0;
7526 num_source_filenames += 1;
7527 current_function_file = name;
7528 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7529 output_quoted_string (stream, name);
7530 putc ('\n', stream);
7532 /* If we are emitting stabs, let dbxout.c handle this (except for
7533 the mips_output_filename_first_time case). */
7534 else if (write_symbols == DBX_DEBUG)
7535 return;
7536 else if (name != current_function_file
7537 && strcmp (name, current_function_file) != 0)
7539 num_source_filenames += 1;
7540 current_function_file = name;
7541 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7542 output_quoted_string (stream, name);
7543 putc ('\n', stream);
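/* Illustrative output: the first file seen produces

	.file	1 "foo.c"

   and a later switch to another source file produces ".file 2"
   followed by the new quoted name, with DWARF-2 and stabs output
   handled elsewhere as described above.  */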
7547 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7549 static void ATTRIBUTE_UNUSED
7550 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7552 switch (size)
7554 case 4:
7555 fputs ("\t.dtprelword\t", file);
7556 break;
7558 case 8:
7559 fputs ("\t.dtpreldword\t", file);
7560 break;
7562 default:
7563 gcc_unreachable ();
7565 output_addr_const (file, x);
7566 fputs ("+0x8000", file);
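/* Illustrative output for a 4-byte entry that refers to the TLS
   symbol "x":

	.dtprelword	x+0x8000  */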
7569 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7571 static rtx
7572 mips_dwarf_register_span (rtx reg)
7574 rtx high, low;
7575 enum machine_mode mode;
7577 /* By default, GCC maps increasing register numbers to increasing
7578 memory locations, but paired FPRs are always little-endian,
7579 regardless of the prevailing endianness. */
7580 mode = GET_MODE (reg);
7581 if (FP_REG_P (REGNO (reg))
7582 && TARGET_BIG_ENDIAN
7583 && MAX_FPRS_PER_FMT > 1
7584 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7586 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7587 high = mips_subword (reg, true);
7588 low = mips_subword (reg, false);
7589 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7592 return NULL_RTX;
7595 /* Implement ASM_OUTPUT_ASCII. */
7597 void
7598 mips_output_ascii (FILE *stream, const char *string, size_t len)
7600 size_t i;
7601 int cur_pos;
7603 cur_pos = 17;
7604 fprintf (stream, "\t.ascii\t\"");
7605 for (i = 0; i < len; i++)
7607 int c;
7609 c = (unsigned char) string[i];
7610 if (ISPRINT (c))
7612 if (c == '\\' || c == '\"')
7614 putc ('\\', stream);
7615 cur_pos++;
7617 putc (c, stream);
7618 cur_pos++;
7620 else
7622 fprintf (stream, "\\%03o", c);
7623 cur_pos += 4;
7626 if (cur_pos > 72 && i+1 < len)
7628 cur_pos = 17;
7629 fprintf (stream, "\"\n\t.ascii\t\"");
7632 fprintf (stream, "\"\n");
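/* Illustrative example: the four-byte string a, double quote, b,
   newline is emitted as

	.ascii	"a\"b\012"

   with the quotation mark backslash-escaped and the non-printing
   newline written as a three-digit octal escape.  */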
7635 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7636 macros, mark the symbol as written so that mips_asm_output_external
7637 won't emit an .extern for it. STREAM is the output file, NAME is the
7638 name of the symbol, INIT_STRING is the string that should be written
7639 before the symbol and FINAL_STRING is the string that should be
7640 written after it. FINAL_STRING is a printf format that consumes the
7641 remaining arguments. */
7643 void
7644 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7645 const char *final_string, ...)
7647 va_list ap;
7649 fputs (init_string, stream);
7650 assemble_name (stream, name);
7651 va_start (ap, final_string);
7652 vfprintf (stream, final_string, ap);
7653 va_end (ap);
7655 if (!TARGET_EXPLICIT_RELOCS)
7657 tree name_tree = get_identifier (name);
7658 TREE_ASM_WRITTEN (name_tree) = 1;
7662 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7663 NAME is the name of the object and ALIGN is the required alignment
7664 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7665 alignment argument. */
7667 void
7668 mips_declare_common_object (FILE *stream, const char *name,
7669 const char *init_string,
7670 unsigned HOST_WIDE_INT size,
7671 unsigned int align, bool takes_alignment_p)
7673 if (!takes_alignment_p)
7675 size += (align / BITS_PER_UNIT) - 1;
7676 size -= size % (align / BITS_PER_UNIT);
7677 mips_declare_object (stream, name, init_string,
7678 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7680 else
7681 mips_declare_object (stream, name, init_string,
7682 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7683 size, align / BITS_PER_UNIT);
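/* Worked example (illustrative): for a 10-byte object with 64-bit
   alignment and a directive that takes no alignment argument, the size
   above is rounded to 10 + 7 = 17 and then down to 17 - (17 % 8) = 16,
   so the next object emitted after it stays 8-byte aligned.  */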
7686 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7687 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7689 void
7690 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7691 unsigned HOST_WIDE_INT size,
7692 unsigned int align)
7694 /* If the target wants uninitialized const declarations in
7695 .rdata then don't put them in .comm. */
7696 if (TARGET_EMBEDDED_DATA
7697 && TARGET_UNINIT_CONST_IN_RODATA
7698 && TREE_CODE (decl) == VAR_DECL
7699 && TREE_READONLY (decl)
7700 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7702 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7703 targetm.asm_out.globalize_label (stream, name);
7705 switch_to_section (readonly_data_section);
7706 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7707 mips_declare_object (stream, name, "",
7708 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7709 size);
7711 else
7712 mips_declare_common_object (stream, name, "\n\t.comm\t",
7713 size, align, true);
7716 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7717 extern int size_directive_output;
7719 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7720 definitions except that it uses mips_declare_object to emit the label. */
7722 void
7723 mips_declare_object_name (FILE *stream, const char *name,
7724 tree decl ATTRIBUTE_UNUSED)
7726 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7727 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7728 #endif
7730 size_directive_output = 0;
7731 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7733 HOST_WIDE_INT size;
7735 size_directive_output = 1;
7736 size = int_size_in_bytes (TREE_TYPE (decl));
7737 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7740 mips_declare_object (stream, name, "", ":\n");
7743 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7745 void
7746 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7748 const char *name;
7750 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7751 if (!flag_inhibit_size_directive
7752 && DECL_SIZE (decl) != 0
7753 && !at_end
7754 && top_level
7755 && DECL_INITIAL (decl) == error_mark_node
7756 && !size_directive_output)
7758 HOST_WIDE_INT size;
7760 size_directive_output = 1;
7761 size = int_size_in_bytes (TREE_TYPE (decl));
7762 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7765 #endif
7767 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7768 with the current ABI. */
7770 static const char *
7771 mips_mdebug_abi_name (void)
7773 switch (mips_abi)
7775 case ABI_32:
7776 return "abi32";
7777 case ABI_O64:
7778 return "abiO64";
7779 case ABI_N32:
7780 return "abiN32";
7781 case ABI_64:
7782 return "abiN64";
7783 case ABI_EABI:
7784 return TARGET_64BIT ? "eabi64" : "eabi32";
7785 default:
7786 gcc_unreachable ();
7790 /* Implement TARGET_ASM_FILE_START. */
7792 static void
7793 mips_file_start (void)
7795 default_file_start ();
7797 /* Generate a special section to describe the ABI switches used to
7798 produce the resultant binary. This is unnecessary on IRIX and
7799 causes unwanted warnings from the native linker. */
7800 if (!TARGET_IRIX)
7802 /* Record the ABI itself. Modern versions of binutils encode
7803 this information in the ELF header flags, but GDB needs the
7804 information in order to correctly debug binaries produced by
7805 older binutils. See the function mips_gdbarch_init in
7806 gdb/mips-tdep.c. */
7807 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7808 mips_mdebug_abi_name ());
7810 /* There is no ELF header flag to distinguish long32 forms of the
7811 EABI from long64 forms. Emit a special section to help tools
7812 such as GDB. Do the same for o64, which is sometimes used with
7813 -mlong64. */
7814 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7815 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7816 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7818 #ifdef HAVE_AS_GNU_ATTRIBUTE
7819 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7820 (TARGET_HARD_FLOAT_ABI
7821 ? (TARGET_DOUBLE_FLOAT
7822 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7823 #endif
7826 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7827 if (TARGET_ABICALLS)
7829 fprintf (asm_out_file, "\t.abicalls\n");
7830 if (TARGET_ABICALLS_PIC0)
7831 fprintf (asm_out_file, "\t.option\tpic0\n");
7834 if (flag_verbose_asm)
7835 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7836 ASM_COMMENT_START,
7837 mips_small_data_threshold, mips_arch_info->name, mips_isa);
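/* Illustrative output from the code above for o32 -mabicalls code with
   a hard-float, double-precision ABI, assuming the assembler supports
   .gnu_attribute:

	.section .mdebug.abi32
	.previous
	.gnu_attribute 4, 1
	.abicalls  */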
7840 /* Make the last instruction frame-related and note that it performs
7841 the operation described by FRAME_PATTERN. */
7843 static void
7844 mips_set_frame_expr (rtx frame_pattern)
7846 rtx insn;
7848 insn = get_last_insn ();
7849 RTX_FRAME_RELATED_P (insn) = 1;
7850 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7851 frame_pattern,
7852 REG_NOTES (insn));
7855 /* Return a frame-related rtx that stores REG at MEM.
7856 REG must be a single register. */
7858 static rtx
7859 mips_frame_set (rtx mem, rtx reg)
7861 rtx set;
7863 /* If we're saving the return address register and the DWARF return
7864 address column differs from the hard register number, adjust the
7865 note reg to refer to the former. */
7866 if (REGNO (reg) == GP_REG_FIRST + 31
7867 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7868 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7870 set = gen_rtx_SET (VOIDmode, mem, reg);
7871 RTX_FRAME_RELATED_P (set) = 1;
7873 return set;
7876 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7877 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7878 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7879 static const unsigned char mips16e_s2_s8_regs[] = {
7880 30, 23, 22, 21, 20, 19, 18
7882 static const unsigned char mips16e_a0_a3_regs[] = {
7883 4, 5, 6, 7
7886 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7887 ordered from the uppermost in memory to the lowest in memory. */
7888 static const unsigned char mips16e_save_restore_regs[] = {
7889 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7892 /* Return the index of the lowest X in the range [0, SIZE) for which
7893 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7895 static unsigned int
7896 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7897 unsigned int size)
7899 unsigned int i;
7901 for (i = 0; i < size; i++)
7902 if (BITSET_P (mask, regs[i]))
7903 break;
7905 return i;
7908 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7909 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7910 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7911 is true for all indexes (X, SIZE). */
7913 static void
7914 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7915 unsigned int size, unsigned int *num_regs_ptr)
7917 unsigned int i;
7919 i = mips16e_find_first_register (*mask_ptr, regs, size);
7920 for (i++; i < size; i++)
7921 if (!BITSET_P (*mask_ptr, regs[i]))
7923 *num_regs_ptr += 1;
7924 *mask_ptr |= 1 << regs[i];
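/* Worked example (illustrative): if *MASK_PTR initially contains only
   $20, then searching mips16e_s2_s8_regs ({30, 23, 22, 21, 20, 19, 18})
   finds $20 at index 4, so the loop above also sets the bits for $19
   and $18 and adds 2 to *NUM_REGS_PTR, reflecting the SAVE/RESTORE
   requirement that a register range extend to the end of the list.  */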
7928 /* Return a simplified form of X using the register values in REG_VALUES.
7929 REG_VALUES[R] is the last value assigned to hard register R, or null
7930 if R has not been modified.
7932 This function is rather limited, but is good enough for our purposes. */
7934 static rtx
7935 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7937 x = avoid_constant_pool_reference (x);
7939 if (UNARY_P (x))
7941 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7942 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7943 x0, GET_MODE (XEXP (x, 0)));
7946 if (ARITHMETIC_P (x))
7948 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7949 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7950 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7953 if (REG_P (x)
7954 && reg_values[REGNO (x)]
7955 && !rtx_unstable_p (reg_values[REGNO (x)]))
7956 return reg_values[REGNO (x)];
7958 return x;
7961 /* Return true if (set DEST SRC) stores an argument register into its
7962 caller-allocated save slot, storing the number of that argument
7963 register in *REGNO_PTR if so. REG_VALUES is as for
7964 mips16e_collect_propagate_value. */
7966 static bool
7967 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7968 unsigned int *regno_ptr)
7970 unsigned int argno, regno;
7971 HOST_WIDE_INT offset, required_offset;
7972 rtx addr, base;
7974 /* Check that this is a word-mode store. */
7975 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7976 return false;
7978 /* Check that the register being saved is an unmodified argument
7979 register. */
7980 regno = REGNO (src);
7981 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7982 return false;
7983 argno = regno - GP_ARG_FIRST;
7985 /* Check whether the address is an appropriate stack-pointer or
7986 frame-pointer access. */
7987 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7988 mips_split_plus (addr, &base, &offset);
7989 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7990 if (base == hard_frame_pointer_rtx)
7991 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7992 else if (base != stack_pointer_rtx)
7993 return false;
7994 if (offset != required_offset)
7995 return false;
7997 *regno_ptr = regno;
7998 return true;
8001 /* A subroutine of mips_expand_prologue, called only when generating
8002 MIPS16e SAVE instructions. Search the start of the function for any
8003 instructions that save argument registers into their caller-allocated
8004 save slots. Delete such instructions and return a value N such that
8005 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8006 instructions redundant. */
8008 static unsigned int
8009 mips16e_collect_argument_saves (void)
8011 rtx reg_values[FIRST_PSEUDO_REGISTER];
8012 rtx insn, next, set, dest, src;
8013 unsigned int nargs, regno;
8015 push_topmost_sequence ();
8016 nargs = 0;
8017 memset (reg_values, 0, sizeof (reg_values));
8018 for (insn = get_insns (); insn; insn = next)
8020 next = NEXT_INSN (insn);
8021 if (NOTE_P (insn))
8022 continue;
8024 if (!INSN_P (insn))
8025 break;
8027 set = PATTERN (insn);
8028 if (GET_CODE (set) != SET)
8029 break;
8031 dest = SET_DEST (set);
8032 src = SET_SRC (set);
8033 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
8035 if (!BITSET_P (cfun->machine->frame.mask, regno))
8037 delete_insn (insn);
8038 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8041 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8042 reg_values[REGNO (dest)]
8043 = mips16e_collect_propagate_value (src, reg_values);
8044 else
8045 break;
8047 pop_topmost_sequence ();
8049 return nargs;
8052 /* Return a move between register REGNO and memory location SP + OFFSET.
8053 Make the move a load if RESTORE_P, otherwise make it a frame-related
8054 store. */
8056 static rtx
8057 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8058 unsigned int regno)
8060 rtx reg, mem;
8062 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8063 reg = gen_rtx_REG (SImode, regno);
8064 return (restore_p
8065 ? gen_rtx_SET (VOIDmode, reg, mem)
8066 : mips_frame_set (mem, reg));
8069 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8070 The instruction must:
8072 - Allocate or deallocate SIZE bytes in total; SIZE is known
8073 to be nonzero.
8075 - Save or restore as many registers in *MASK_PTR as possible.
8076 The instruction saves the first registers at the top of the
8077 allocated area, with the other registers below it.
8079 - Save NARGS argument registers above the allocated area.
8081 (NARGS is always zero if RESTORE_P.)
8083 The SAVE and RESTORE instructions cannot save and restore all general
8084 registers, so there may be some registers left over for the caller to
8085 handle. Destructively modify *MASK_PTR so that it contains the registers
8086 that still need to be saved or restored. The caller can save these
8087 registers in the memory immediately below *OFFSET_PTR, which is a
8088 byte offset from the bottom of the allocated stack area. */
8090 static rtx
8091 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8092 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8093 HOST_WIDE_INT size)
8095 rtx pattern, set;
8096 HOST_WIDE_INT offset, top_offset;
8097 unsigned int i, regno;
8098 int n;
8100 gcc_assert (cfun->machine->frame.num_fp == 0);
8102 /* Calculate the number of elements in the PARALLEL. We need one element
8103 for the stack adjustment, one for each argument register save, and one
8104 for each additional register move. */
8105 n = 1 + nargs;
8106 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8107 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8108 n++;
8110 /* Create the final PARALLEL. */
8111 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8112 n = 0;
8114 /* Add the stack pointer adjustment. */
8115 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8116 plus_constant (stack_pointer_rtx,
8117 restore_p ? size : -size));
8118 RTX_FRAME_RELATED_P (set) = 1;
8119 XVECEXP (pattern, 0, n++) = set;
8121 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8122 top_offset = restore_p ? size : 0;
8124 /* Save the arguments. */
8125 for (i = 0; i < nargs; i++)
8127 offset = top_offset + i * UNITS_PER_WORD;
8128 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8129 XVECEXP (pattern, 0, n++) = set;
8132 /* Then fill in the other register moves. */
8133 offset = top_offset;
8134 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8136 regno = mips16e_save_restore_regs[i];
8137 if (BITSET_P (*mask_ptr, regno))
8139 offset -= UNITS_PER_WORD;
8140 set = mips16e_save_restore_reg (restore_p, offset, regno);
8141 XVECEXP (pattern, 0, n++) = set;
8142 *mask_ptr &= ~(1 << regno);
8146 /* Tell the caller what offset it should use for the remaining registers. */
8147 *offset_ptr = size + (offset - top_offset);
8149 gcc_assert (n == XVECLEN (pattern, 0));
8151 return pattern;
8154 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8155 pointer. Return true if PATTERN matches the kind of instruction
8156 generated by mips16e_build_save_restore. If INFO is nonnull,
8157 initialize it when returning true. */
8159 bool
8160 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8161 struct mips16e_save_restore_info *info)
8163 unsigned int i, nargs, mask, extra;
8164 HOST_WIDE_INT top_offset, save_offset, offset;
8165 rtx set, reg, mem, base;
8166 int n;
8168 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8169 return false;
8171 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8172 top_offset = adjust > 0 ? adjust : 0;
8174 /* Interpret all other members of the PARALLEL. */
8175 save_offset = top_offset - UNITS_PER_WORD;
8176 mask = 0;
8177 nargs = 0;
8178 i = 0;
8179 for (n = 1; n < XVECLEN (pattern, 0); n++)
8181 /* Check that we have a SET. */
8182 set = XVECEXP (pattern, 0, n);
8183 if (GET_CODE (set) != SET)
8184 return false;
8186 /* Check that the SET is a load (if restoring) or a store
8187 (if saving). */
8188 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8189 if (!MEM_P (mem))
8190 return false;
8192 /* Check that the address is the sum of the stack pointer and a
8193 possibly-zero constant offset. */
8194 mips_split_plus (XEXP (mem, 0), &base, &offset);
8195 if (base != stack_pointer_rtx)
8196 return false;
8198 /* Check that SET's other operand is a register. */
8199 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8200 if (!REG_P (reg))
8201 return false;
8203 /* Check for argument saves. */
8204 if (offset == top_offset + nargs * UNITS_PER_WORD
8205 && REGNO (reg) == GP_ARG_FIRST + nargs)
8206 nargs++;
8207 else if (offset == save_offset)
8209 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8210 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8211 return false;
8213 mask |= 1 << REGNO (reg);
8214 save_offset -= UNITS_PER_WORD;
8216 else
8217 return false;
8220 /* Check that the restrictions on register ranges are met. */
8221 extra = 0;
8222 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8223 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8224 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8225 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8226 if (extra != 0)
8227 return false;
8229 /* Make sure that the topmost argument register is not saved twice.
8230 The checks above ensure that the same is then true for the other
8231 argument registers. */
8232 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8233 return false;
8235 /* Pass back information, if requested. */
8236 if (info)
8238 info->nargs = nargs;
8239 info->mask = mask;
8240 info->size = (adjust > 0 ? adjust : -adjust);
8243 return true;
8246 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8247 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8248 the null terminator. */
8250 static char *
8251 mips16e_add_register_range (char *s, unsigned int min_reg,
8252 unsigned int max_reg)
8254 if (min_reg != max_reg)
8255 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8256 else
8257 s += sprintf (s, ",%s", reg_names[min_reg]);
8258 return s;
8261 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8262 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8264 const char *
8265 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8267 static char buffer[300];
8269 struct mips16e_save_restore_info info;
8270 unsigned int i, end;
8271 char *s;
8273 /* Parse the pattern. */
8274 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8275 gcc_unreachable ();
8277 /* Add the mnemonic. */
8278 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8279 s += strlen (s);
8281 /* Save the arguments. */
8282 if (info.nargs > 1)
8283 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8284 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8285 else if (info.nargs == 1)
8286 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8288 /* Emit the amount of stack space to allocate or deallocate. */
8289 s += sprintf (s, "%d", (int) info.size);
8291 /* Save or restore $16. */
8292 if (BITSET_P (info.mask, 16))
8293 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8295 /* Save or restore $17. */
8296 if (BITSET_P (info.mask, 17))
8297 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8299 /* Save or restore registers in the range $s2...$s8, which
8300 mips16e_s2_s8_regs lists in decreasing order. Note that this
8301 is a software register range; the hardware registers are not
8302 numbered consecutively. */
8303 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8304 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8305 if (i < end)
8306 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8307 mips16e_s2_s8_regs[i]);
8309 /* Save or restore registers in the range $a0...$a3. */
8310 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8311 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8312 if (i < end)
8313 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8314 mips16e_a0_a3_regs[end - 1]);
8316 /* Save or restore $31. */
8317 if (BITSET_P (info.mask, 31))
8318 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8320 return buffer;
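/* Illustrative example of the string built above, assuming the default
   numeric register names: saving argument register $4 above a 32-byte
   frame together with $16, $17 and $31 gives

	save	$4,32,$16,$17,$31

   and the matching epilogue instruction is "restore 32,$16,$17,$31"
   (argument registers are never restored).  */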
8323 /* Return true if the current function has an insn that implicitly
8324 refers to $gp. */
8326 static bool
8327 mips_function_has_gp_insn (void)
8329 /* Don't bother rechecking if we found one last time. */
8330 if (!cfun->machine->has_gp_insn_p)
8332 rtx insn;
8334 push_topmost_sequence ();
8335 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8336 if (USEFUL_INSN_P (insn)
8337 && (get_attr_got (insn) != GOT_UNSET
8338 || mips_small_data_pattern_p (PATTERN (insn))))
8340 cfun->machine->has_gp_insn_p = true;
8341 break;
8343 pop_topmost_sequence ();
8345 return cfun->machine->has_gp_insn_p;
8348 /* Return true if the current function returns its value in a floating-point
8349 register in MIPS16 mode. */
8351 static bool
8352 mips16_cfun_returns_in_fpr_p (void)
8354 tree return_type = DECL_RESULT (current_function_decl);
8355 return (TARGET_MIPS16
8356 && TARGET_HARD_FLOAT_ABI
8357 && !aggregate_value_p (return_type, current_function_decl)
8358 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
8361 /* Return the register that should be used as the global pointer
8362 within this function. Return INVALID_REGNUM if the function
8363 doesn't need a global pointer. */
8365 static unsigned int
8366 mips_global_pointer (void)
8368 unsigned int regno;
8370 /* $gp is always available unless we're using a GOT. */
8371 if (!TARGET_USE_GOT)
8372 return GLOBAL_POINTER_REGNUM;
8374 /* We must always provide $gp when it is used implicitly. */
8375 if (!TARGET_EXPLICIT_RELOCS)
8376 return GLOBAL_POINTER_REGNUM;
8378 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
8379 a valid gp. */
8380 if (crtl->profile)
8381 return GLOBAL_POINTER_REGNUM;
8383 /* If the function has a nonlocal goto, $gp must hold the correct
8384 global pointer for the target function. */
8385 if (crtl->has_nonlocal_goto)
8386 return GLOBAL_POINTER_REGNUM;
8388 /* There's no need to initialize $gp if it isn't referenced now,
8389 and if we can be sure that no new references will be added during
8390 or after reload. */
8391 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
8392 && !mips_function_has_gp_insn ())
8394 /* The function doesn't use $gp at the moment. If we're generating
8395 -call_nonpic code, no new uses will be introduced during or after
8396 reload. */
8397 if (TARGET_ABICALLS_PIC0)
8398 return INVALID_REGNUM;
8400 /* We need to handle the following implicit gp references:
8402 - Reload can sometimes introduce constant pool references
8403 into a function that otherwise didn't need them. For example,
8404 suppose we have an instruction like:
8406 (set (reg:DF R1) (float:DF (reg:SI R2)))
8408 If R2 turns out to be a constant such as 1, the instruction may
8409 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
8410 the option of using this constant if R2 doesn't get allocated
8411 to a register.
8413 In cases like these, reload will have added the constant to the
8414 pool but no instruction will yet refer to it.
8416 - MIPS16 functions that return in FPRs need to call an
8417 external libgcc routine. */
8418 if (!crtl->uses_const_pool
8419 && !mips16_cfun_returns_in_fpr_p ())
8420 return INVALID_REGNUM;
8423 /* We need a global pointer, but perhaps we can use a call-clobbered
8424 register instead of $gp. */
8425 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
8426 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8427 if (!df_regs_ever_live_p (regno)
8428 && call_really_used_regs[regno]
8429 && !fixed_regs[regno]
8430 && regno != PIC_FUNCTION_ADDR_REGNUM)
8431 return regno;
8433 return GLOBAL_POINTER_REGNUM;
8436 /* Return true if the current function should treat register REGNO
8437 as call-saved. */
8439 static bool
8440 mips_cfun_call_saved_reg_p (unsigned int regno)
8442 /* call_insns preserve $28 unless they explicitly say otherwise,
8443 so call_really_used_regs[] treats $28 as call-saved. However,
8444 we want the ABI property rather than the default call_insn
8445 property here. */
8446 return (regno == GLOBAL_POINTER_REGNUM
8447 ? TARGET_CALL_SAVED_GP
8448 : !call_really_used_regs[regno]);
8451 /* Return true if the function body might clobber register REGNO.
8452 We know that REGNO is call-saved. */
8454 static bool
8455 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
8457 /* Some functions should be treated as clobbering all call-saved
8458 registers. */
8459 if (crtl->saves_all_registers)
8460 return true;
8462 /* DF handles cases where a register is explicitly referenced in
8463 the rtl. Incoming values are passed in call-clobbered registers,
8464 so we can assume that any live call-saved register is set within
8465 the function. */
8466 if (df_regs_ever_live_p (regno))
8467 return true;
8469 /* Check for registers that are clobbered by FUNCTION_PROFILER.
8470 These clobbers are not explicit in the rtl. */
8471 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
8472 return true;
8474 /* If we're using a call-saved global pointer, the function's
8475 prologue will need to set it up. */
8476 if (cfun->machine->global_pointer == regno)
8477 return true;
8479 /* The function's prologue will need to set the frame pointer if
8480 frame_pointer_needed. */
8481 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
8482 return true;
8484 /* If a MIPS16 function returns a value in FPRs, its epilogue
8485 will need to call an external libgcc routine. This yet-to-be
8486 generated call_insn will clobber $31. */
8487 if (regno == GP_REG_FIRST + 31 && mips16_cfun_returns_in_fpr_p ())
8488 return true;
8490 return false;
8493 /* Return true if the current function must save register REGNO. */
8495 static bool
8496 mips_save_reg_p (unsigned int regno)
8498 if (mips_cfun_call_saved_reg_p (regno))
8500 if (mips_cfun_might_clobber_call_saved_reg_p (regno))
8501 return true;
8503 /* Save both registers in an FPR pair if either one is used. This is
8504 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
8505 register to be used without the even register. */
8506 if (FP_REG_P (regno)
8507 && MAX_FPRS_PER_FMT == 2
8508 && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
8509 return true;
8512 /* We need to save the incoming return address if __builtin_eh_return
8513 is being used to set a different return address. */
8514 if (regno == GP_REG_FIRST + 31 && crtl->calls_eh_return)
8515 return true;
8517 return false;
8520 /* Populate the current function's mips_frame_info structure.
8522 MIPS stack frames look like:
8524 +-------------------------------+
8526 | incoming stack arguments |
8528 +-------------------------------+
8530 | caller-allocated save area |
8531 A | for register arguments |
8533 +-------------------------------+ <-- incoming stack pointer
8535 | callee-allocated save area |
8536 B | for arguments that are |
8537 | split between registers and |
8538 | the stack |
8540 +-------------------------------+ <-- arg_pointer_rtx
8542 C | callee-allocated save area |
8543 | for register varargs |
8545 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
8546 | | + UNITS_PER_HWFPVALUE
8547 | FPR save area |
8549 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8550 | | + UNITS_PER_WORD
8551 | GPR save area |
8553 +-------------------------------+
8554 | | \
8555 | local variables | | var_size
8556 | | /
8557 +-------------------------------+
8558 | | \
8559 | $gp save area | | cprestore_size
8560 | | /
8561 P +-------------------------------+ <-- hard_frame_pointer_rtx for
8562 | | MIPS16 code
8563 | outgoing stack arguments |
8565 +-------------------------------+
8567 | caller-allocated save area |
8568 | for register arguments |
8570 +-------------------------------+ <-- stack_pointer_rtx
8571 frame_pointer_rtx
8572 hard_frame_pointer_rtx for
8573 non-MIPS16 code.
8575 At least two of A, B and C will be empty.
8577 Dynamic stack allocations such as alloca insert data at point P.
8578 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8579 hard_frame_pointer_rtx unchanged. */
8581 static void
8582 mips_compute_frame_info (void)
8584 struct mips_frame_info *frame;
8585 HOST_WIDE_INT offset, size;
8586 unsigned int regno, i;
8588 frame = &cfun->machine->frame;
8589 memset (frame, 0, sizeof (*frame));
8590 size = get_frame_size ();
8592 cfun->machine->global_pointer = mips_global_pointer ();
8594 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8595 area and the $gp save slot. This area isn't needed in leaf functions,
8596 but if the target-independent frame size is nonzero, we're committed
8597 to allocating it anyway. */
8598 if (size == 0 && current_function_is_leaf)
8600 /* The MIPS 3.0 linker does not like functions that dynamically
8601 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8602 looks like we are trying to create a second frame pointer to the
8603 function, so allocate some stack space to make it happy. */
8604 if (cfun->calls_alloca)
8605 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8606 else
8607 frame->args_size = 0;
8608 frame->cprestore_size = 0;
8610 else
8612 frame->args_size = crtl->outgoing_args_size;
8613 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8615 offset = frame->args_size + frame->cprestore_size;
8617 /* Move above the local variables. */
8618 frame->var_size = MIPS_STACK_ALIGN (size);
8619 offset += frame->var_size;
8621 /* Find out which GPRs we need to save. */
8622 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8623 if (mips_save_reg_p (regno))
8625 frame->num_gp++;
8626 frame->mask |= 1 << (regno - GP_REG_FIRST);
8629 /* If this function calls eh_return, we must also save and restore the
8630 EH data registers. */
8631 if (crtl->calls_eh_return)
8632 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8634 frame->num_gp++;
8635 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8638 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8639 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8640 save all later registers too. */
8641 if (GENERATE_MIPS16E_SAVE_RESTORE)
8643 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8644 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8645 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8646 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8649 /* Move above the GPR save area. */
8650 if (frame->num_gp > 0)
8652 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8653 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8656 /* Find out which FPRs we need to save. This loop must iterate over
8657 the same space as its companion in mips_for_each_saved_reg. */
8658 if (TARGET_HARD_FLOAT)
8659 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8660 if (mips_save_reg_p (regno))
8662 frame->num_fp += MAX_FPRS_PER_FMT;
8663 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8666 /* Move above the FPR save area. */
8667 if (frame->num_fp > 0)
8669 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8670 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8673 /* Move above the callee-allocated varargs save area. */
8674 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8675 frame->arg_pointer_offset = offset;
8677 /* Move above the callee-allocated area for pretend stack arguments. */
8678 offset += crtl->args.pretend_args_size;
8679 frame->total_size = offset;
8681 /* Work out the offsets of the save areas from the top of the frame. */
8682 if (frame->gp_sp_offset > 0)
8683 frame->gp_save_offset = frame->gp_sp_offset - offset;
8684 if (frame->fp_sp_offset > 0)
8685 frame->fp_save_offset = frame->fp_sp_offset - offset;
8687 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8688 arguments. This tends to increase the chances of using unextended
8689 instructions for local variables and incoming arguments. */
8690 if (TARGET_MIPS16)
8691 frame->hard_frame_pointer_offset = frame->args_size;
8694 /* Return the style of GP load sequence that is being used for the
8695 current function. */
8697 enum mips_loadgp_style
8698 mips_current_loadgp_style (void)
8700 if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
8701 return LOADGP_NONE;
8703 if (TARGET_RTP_PIC)
8704 return LOADGP_RTP;
8706 if (TARGET_ABSOLUTE_ABICALLS)
8707 return LOADGP_ABSOLUTE;
8709 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8712 /* Implement FRAME_POINTER_REQUIRED. */
8714 bool
8715 mips_frame_pointer_required (void)
8717 /* If the function contains dynamic stack allocations, we need to
8718 use the frame pointer to access the static parts of the frame. */
8719 if (cfun->calls_alloca)
8720 return true;
8722 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8723 reload may be unable to compute the address of a local variable,
8724 since there is no way to add a large constant to the stack pointer
8725 without using a second temporary register. */
8726 if (TARGET_MIPS16)
8728 mips_compute_frame_info ();
8729 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8730 return true;
8733 return false;
8736 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8737 or argument pointer. TO is either the stack pointer or hard frame
8738 pointer. */
8740 HOST_WIDE_INT
8741 mips_initial_elimination_offset (int from, int to)
8743 HOST_WIDE_INT offset;
8745 mips_compute_frame_info ();
8747 /* Set OFFSET to the offset from the soft frame pointer, which is also
8748 the offset from the end-of-prologue stack pointer. */
8749 switch (from)
8751 case FRAME_POINTER_REGNUM:
8752 offset = 0;
8753 break;
8755 case ARG_POINTER_REGNUM:
8756 offset = cfun->machine->frame.arg_pointer_offset;
8757 break;
8759 default:
8760 gcc_unreachable ();
8763 if (to == HARD_FRAME_POINTER_REGNUM)
8764 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8766 return offset;
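/* Worked example (illustrative): if arg_pointer_offset is 56, then
   eliminating the argument pointer to the stack pointer yields 56,
   while eliminating it to the hard frame pointer in MIPS16 code with
   16 bytes of outgoing arguments (hard_frame_pointer_offset == 16)
   yields 56 - 16 = 40.  */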
8769 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8771 static void
8772 mips_extra_live_on_entry (bitmap regs)
8774 if (TARGET_USE_GOT)
8776 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8777 the global pointer. */
8778 if (!TARGET_ABSOLUTE_ABICALLS)
8779 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8781 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
8782 the global pointer. */
8783 if (TARGET_MIPS16)
8784 bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
8786 /* See the comment above load_call<mode> for details. */
8787 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8791 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8792 previous frame. */
8794 rtx
8795 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8797 if (count != 0)
8798 return const0_rtx;
8800 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8803 /* Emit code to change the current function's return address to
8804 ADDRESS. SCRATCH is available as a scratch register, if needed.
8805 ADDRESS and SCRATCH are both word-mode GPRs. */
8807 void
8808 mips_set_return_address (rtx address, rtx scratch)
8810 rtx slot_address;
8812 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8813 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8814 cfun->machine->frame.gp_sp_offset);
8815 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8818 /* Return a MEM rtx for the cprestore slot, using TEMP as a temporary base
8819 register if need be. */
8821 static rtx
8822 mips_cprestore_slot (rtx temp)
8824 const struct mips_frame_info *frame;
8825 rtx base;
8826 HOST_WIDE_INT offset;
8828 frame = &cfun->machine->frame;
8829 if (frame_pointer_needed)
8831 base = hard_frame_pointer_rtx;
8832 offset = frame->args_size - frame->hard_frame_pointer_offset;
8834 else
8836 base = stack_pointer_rtx;
8837 offset = frame->args_size;
8839 return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
8842 /* Restore $gp from its save slot, using TEMP as a temporary base register
8843 if need be. This function is for o32 and o64 abicalls only. */
8845 void
8846 mips_restore_gp (rtx temp)
8848 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8850 if (cfun->machine->global_pointer == INVALID_REGNUM)
8851 return;
8853 if (TARGET_MIPS16)
8855 mips_emit_move (temp, mips_cprestore_slot (temp));
8856 mips_emit_move (pic_offset_table_rtx, temp);
8858 else
8859 mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp));
8860 if (!TARGET_EXPLICIT_RELOCS)
8861 emit_insn (gen_blockage ());
8864 /* A function to save or store a register. The first argument is the
8865 register and the second is the stack slot. */
8866 typedef void (*mips_save_restore_fn) (rtx, rtx);
8868 /* Use FN to save or restore register REGNO. MODE is the register's
8869 mode and OFFSET is the offset of its save slot from the current
8870 stack pointer. */
8872 static void
8873 mips_save_restore_reg (enum machine_mode mode, int regno,
8874 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8876 rtx mem;
8878 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8879 fn (gen_rtx_REG (mode, regno), mem);
8882 /* Call FN for each register that is saved by the current function.
8883 SP_OFFSET is the offset of the current stack pointer from the start
8884 of the frame. */
8886 static void
8887 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8889 enum machine_mode fpr_mode;
8890 HOST_WIDE_INT offset;
8891 int regno;
8893 /* Save registers starting from high to low. The debuggers prefer that at
8894 least the return register be stored at func+4, and this order also avoids
8895 the need for a nop in the epilogue when at least one register is reloaded
8896 in addition to the return address. */
8897 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8898 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8899 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8901 mips_save_restore_reg (word_mode, regno, offset, fn);
8902 offset -= UNITS_PER_WORD;
8905 /* This loop must iterate over the same space as its companion in
8906 mips_compute_frame_info. */
8907 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8908 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8909 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8910 regno >= FP_REG_FIRST;
8911 regno -= MAX_FPRS_PER_FMT)
8912 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8914 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8915 offset -= GET_MODE_SIZE (fpr_mode);
8919 /* If we're generating n32 or n64 abicalls, and the current function
8920 does not use $28 as its global pointer, emit a cplocal directive.
8921 Use pic_offset_table_rtx as the argument to the directive. */
8923 static void
8924 mips_output_cplocal (void)
8926 if (!TARGET_EXPLICIT_RELOCS
8927 && cfun->machine->global_pointer != INVALID_REGNUM
8928 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8929 output_asm_insn (".cplocal %+", 0);
8932 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8934 static void
8935 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8937 const char *fnname;
8939 #ifdef SDB_DEBUGGING_INFO
8940 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8941 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8942 #endif
8944 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8945 floating-point arguments. */
8946 if (TARGET_MIPS16
8947 && TARGET_HARD_FLOAT_ABI
8948 && crtl->args.info.fp_code != 0)
8949 mips16_build_function_stub ();
8951 /* Get the function name the same way that toplev.c does before calling
8952 assemble_start_function. This is needed so that the name used here
8953 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8954 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8955 mips_start_function_definition (fnname, TARGET_MIPS16);
8957 /* Stop mips_file_end from treating this function as external. */
8958 if (TARGET_IRIX && mips_abi == ABI_32)
8959 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8961 /* Output MIPS-specific frame information. */
8962 if (!flag_inhibit_size_directive)
8964 const struct mips_frame_info *frame;
8966 frame = &cfun->machine->frame;
8968 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8969 fprintf (file,
8970 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8971 "# vars= " HOST_WIDE_INT_PRINT_DEC
8972 ", regs= %d/%d"
8973 ", args= " HOST_WIDE_INT_PRINT_DEC
8974 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8975 reg_names[frame_pointer_needed
8976 ? HARD_FRAME_POINTER_REGNUM
8977 : STACK_POINTER_REGNUM],
8978 (frame_pointer_needed
8979 ? frame->total_size - frame->hard_frame_pointer_offset
8980 : frame->total_size),
8981 reg_names[GP_REG_FIRST + 31],
8982 frame->var_size,
8983 frame->num_gp, frame->num_fp,
8984 frame->args_size,
8985 frame->cprestore_size);
8987 /* .mask MASK, OFFSET. */
8988 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8989 frame->mask, frame->gp_save_offset);
8991 /* .fmask MASK, OFFSET. */
8992 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8993 frame->fmask, frame->fp_save_offset);
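/* For a function that saves only $31 in a 32-byte frame with 16 bytes
   of outgoing arguments and 8 bytes of locals, the directives above
   come out as (illustrative):

	.frame	$sp,32,$31		# vars= 8, regs= 1/0, args= 16, gp= 0
	.mask	0x80000000,-4
	.fmask	0x00000000,0  */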
8996 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8997 Also emit the ".set noreorder; .set nomacro" sequence for functions
8998 that need it. */
8999 if (mips_current_loadgp_style () == LOADGP_OLDABI)
9001 if (TARGET_MIPS16)
9003 /* This is a fixed-form sequence. The position of the
9004 first two instructions is important because of the
9005 way _gp_disp is defined. */
9006 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
9007 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
9008 output_asm_insn ("sll\t$2,16", 0);
9009 output_asm_insn ("addu\t$2,$3", 0);
9011 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
9012 else if (!cfun->machine->all_noreorder_p)
9013 output_asm_insn ("%(.cpload\t%^%)", 0);
9014 else
9015 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
9017 else if (cfun->machine->all_noreorder_p)
9018 output_asm_insn ("%(%<", 0);
9020 /* Tell the assembler which register we're using as the global
9021 pointer. This is needed for thunks, since they can use either
9022 explicit relocs or assembler macros. */
9023 mips_output_cplocal ();
9026 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
9028 static void
9029 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9030 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9032 const char *fnname;
9034 /* Reinstate the normal $gp. */
9035 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
9036 mips_output_cplocal ();
9038 if (cfun->machine->all_noreorder_p)
9040 /* Avoid using %>%) since it adds excess whitespace. */
9041 output_asm_insn (".set\tmacro", 0);
9042 output_asm_insn (".set\treorder", 0);
9043 set_noreorder = set_nomacro = 0;
9046 /* Get the function name the same way that toplev.c does before calling
9047 assemble_start_function. This is needed so that the name used here
9048 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
9049 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9050 mips_end_function_definition (fnname);
9053 /* Save register REG to MEM. Make the instruction frame-related. */
9055 static void
9056 mips_save_reg (rtx reg, rtx mem)
9058 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
9060 rtx x1, x2;
9062 if (mips_split_64bit_move_p (mem, reg))
9063 mips_split_doubleword_move (mem, reg);
9064 else
9065 mips_emit_move (mem, reg);
9067 x1 = mips_frame_set (mips_subword (mem, false),
9068 mips_subword (reg, false));
9069 x2 = mips_frame_set (mips_subword (mem, true),
9070 mips_subword (reg, true));
9071 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
9073 else
9075 if (TARGET_MIPS16
9076 && REGNO (reg) != GP_REG_FIRST + 31
9077 && !M16_REG_P (REGNO (reg)))
9079 /* Save a non-MIPS16 register by moving it through a temporary.
9080 We don't need to do this for $31 since there's a special
9081 instruction for it. */
9082 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
9083 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
9085 else
9086 mips_emit_move (mem, reg);
9088 mips_set_frame_expr (mips_frame_set (mem, reg));
9092 /* The __gnu_local_gp symbol. */
9094 static GTY(()) rtx mips_gnu_local_gp;
9096 /* If we're generating n32 or n64 abicalls, emit instructions
9097 to set up the global pointer. */
9099 static void
9100 mips_emit_loadgp (void)
9102 rtx addr, offset, incoming_address, base, index, pic_reg;
9104 pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
9105 switch (mips_current_loadgp_style ())
9107 case LOADGP_ABSOLUTE:
9108 if (mips_gnu_local_gp == NULL)
9110 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
9111 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
9113 emit_insn (Pmode == SImode
9114 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
9115 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
9116 break;
9118 case LOADGP_OLDABI:
9119 /* Added by mips_output_function_prologue. */
9120 break;
9122 case LOADGP_NEWABI:
9123 addr = XEXP (DECL_RTL (current_function_decl), 0);
9124 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
9125 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
9126 emit_insn (Pmode == SImode
9127 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
9128 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
9129 break;
9131 case LOADGP_RTP:
9132 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
9133 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
9134 emit_insn (Pmode == SImode
9135 ? gen_loadgp_rtp_si (pic_reg, base, index)
9136 : gen_loadgp_rtp_di (pic_reg, base, index));
9137 break;
9139 default:
9140 return;
9143 if (TARGET_MIPS16)
9144 emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));
9146 /* Emit a blockage if there are implicit uses of the GP register.
9147 This includes profiled functions, because FUNCTION_PROFILER uses
9148 a jal macro. */
9149 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
9150 emit_insn (gen_loadgp_blockage ());
9153 /* Expand the "prologue" pattern. */
9155 void
9156 mips_expand_prologue (void)
9158 const struct mips_frame_info *frame;
9159 HOST_WIDE_INT size;
9160 unsigned int nargs;
9161 rtx insn;
9163 if (cfun->machine->global_pointer != INVALID_REGNUM)
9164 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
9166 frame = &cfun->machine->frame;
9167 size = frame->total_size;
9169 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
9170 bytes beforehand; this is enough to cover the register save area
9171 without going out of range. */
9172 if ((frame->mask | frame->fmask) != 0)
9174 HOST_WIDE_INT step1;
9176 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
9177 if (GENERATE_MIPS16E_SAVE_RESTORE)
9179 HOST_WIDE_INT offset;
9180 unsigned int mask, regno;
9182 /* Try to merge argument stores into the save instruction. */
9183 nargs = mips16e_collect_argument_saves ();
9185 /* Build the save instruction. */
9186 mask = frame->mask;
9187 insn = mips16e_build_save_restore (false, &mask, &offset,
9188 nargs, step1);
9189 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9190 size -= step1;
9192 /* Check if we need to save other registers. */
9193 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9194 if (BITSET_P (mask, regno - GP_REG_FIRST))
9196 offset -= UNITS_PER_WORD;
9197 mips_save_restore_reg (word_mode, regno,
9198 offset, mips_save_reg);
9201 else
9203 insn = gen_add3_insn (stack_pointer_rtx,
9204 stack_pointer_rtx,
9205 GEN_INT (-step1));
9206 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9207 size -= step1;
9208 mips_for_each_saved_reg (size, mips_save_reg);
9212 /* Allocate the rest of the frame. */
9213 if (size > 0)
9215 if (SMALL_OPERAND (-size))
9216 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
9217 stack_pointer_rtx,
9218 GEN_INT (-size)))) = 1;
9219 else
9221 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
9222 if (TARGET_MIPS16)
9224 /* There are no instructions to add or subtract registers
9225 from the stack pointer, so use the frame pointer as a
9226 temporary. We should always be using a frame pointer
9227 in this case anyway. */
9228 gcc_assert (frame_pointer_needed);
9229 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9230 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
9231 hard_frame_pointer_rtx,
9232 MIPS_PROLOGUE_TEMP (Pmode)));
9233 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
9235 else
9236 emit_insn (gen_sub3_insn (stack_pointer_rtx,
9237 stack_pointer_rtx,
9238 MIPS_PROLOGUE_TEMP (Pmode)));
9240 /* Describe the combined effect of the previous instructions. */
9241 mips_set_frame_expr
9242 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9243 plus_constant (stack_pointer_rtx, -size)));
9247 /* Set up the frame pointer, if we're using one. */
9248 if (frame_pointer_needed)
9250 HOST_WIDE_INT offset;
9252 offset = frame->hard_frame_pointer_offset;
9253 if (offset == 0)
9255 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9256 RTX_FRAME_RELATED_P (insn) = 1;
9258 else if (SMALL_OPERAND (offset))
9260 insn = gen_add3_insn (hard_frame_pointer_rtx,
9261 stack_pointer_rtx, GEN_INT (offset));
9262 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9264 else
9266 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
9267 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9268 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
9269 hard_frame_pointer_rtx,
9270 MIPS_PROLOGUE_TEMP (Pmode)));
9271 mips_set_frame_expr
9272 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
9273 plus_constant (stack_pointer_rtx, offset)));
9277 mips_emit_loadgp ();
9279 /* Initialize the $gp save slot. */
9280 if (frame->cprestore_size > 0
9281 && cfun->machine->global_pointer != INVALID_REGNUM)
9283 if (TARGET_MIPS16)
9284 mips_emit_move (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9285 MIPS16_PIC_TEMP);
9286 else if (TARGET_ABICALLS_PIC2)
9287 emit_insn (gen_cprestore (GEN_INT (frame->args_size)));
9288 else
9289 emit_move_insn (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9290 pic_offset_table_rtx);
9293 /* If we are profiling, make sure no instructions are scheduled before
9294 the call to mcount. */
9295 if (crtl->profile)
9296 emit_insn (gen_blockage ());
9299 /* Emit instructions to restore register REG from slot MEM. */
9301 static void
9302 mips_restore_reg (rtx reg, rtx mem)
9304 /* There's no MIPS16 instruction to load $31 directly. Load into
9305 $7 instead and adjust the return insn appropriately. */
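/* In other words, a MIPS16 epilogue ends with something like the
   following (illustrative) rather than reloading and jumping through
   $31:

       lw      $7,28($sp)
       ...
       jr      $7  */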
9306 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
9307 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
9309 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
9311 /* Can't restore directly; move through a temporary. */
9312 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
9313 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
9315 else
9316 mips_emit_move (reg, mem);
9319 /* Emit any instructions needed before a return. */
9321 void
9322 mips_expand_before_return (void)
9324 /* When using a call-clobbered gp, we start out with unified call
9325 insns that include instructions to restore the gp. We then split
9326 these unified calls after reload. These split calls explicitly
9327 clobber gp, so there is no need to define
9328 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
9330 For consistency, we should also insert an explicit clobber of $28
9331 before return insns, so that the post-reload optimizers know that
9332 the register is not live on exit. */
9333 if (TARGET_CALL_CLOBBERED_GP)
9334 emit_clobber (pic_offset_table_rtx);
9337 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
9338 says which. */
9340 void
9341 mips_expand_epilogue (bool sibcall_p)
9343 const struct mips_frame_info *frame;
9344 HOST_WIDE_INT step1, step2;
9345 rtx base, target;
9347 if (!sibcall_p && mips_can_use_return_insn ())
9349 emit_jump_insn (gen_return ());
9350 return;
9353 /* In MIPS16 mode, if the return value should go into a floating-point
9354 register, we need to call a helper routine to copy it over. */
9355 if (mips16_cfun_returns_in_fpr_p ())
9356 mips16_copy_fpr_return_value ();
9358 /* Split the frame into two. STEP1 is the amount of stack we should
9359 deallocate before restoring the registers. STEP2 is the amount we
9360 should deallocate afterwards.
9362 Start off by assuming that no registers need to be restored. */
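/* For example, if the frame is MIPS_MAX_FIRST_STACK_STEP + 64 bytes,
   some registers were saved and no frame pointer is in use, STEP1 ends
   up as 64 and STEP2 as MIPS_MAX_FIRST_STACK_STEP: $sp is first bumped
   up by 64, the register save area is then reachable from the new $sp
   without out-of-range offsets, and the final addition of STEP2
   releases the rest of the frame.  */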
9363 frame = &cfun->machine->frame;
9364 step1 = frame->total_size;
9365 step2 = 0;
9367 /* Work out which register holds the frame address. */
9368 if (!frame_pointer_needed)
9369 base = stack_pointer_rtx;
9370 else
9372 base = hard_frame_pointer_rtx;
9373 step1 -= frame->hard_frame_pointer_offset;
9376 /* If we need to restore registers, deallocate as much stack as
9377 possible in the second step without going out of range. */
9378 if ((frame->mask | frame->fmask) != 0)
9380 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
9381 step1 -= step2;
9384 /* Set TARGET to BASE + STEP1. */
9385 target = base;
9386 if (step1 > 0)
9388 rtx adjust;
9390 /* Get an rtx for STEP1 that we can add to BASE. */
9391 adjust = GEN_INT (step1);
9392 if (!SMALL_OPERAND (step1))
9394 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
9395 adjust = MIPS_EPILOGUE_TEMP (Pmode);
9398 /* Normal mode code can copy the result straight into $sp. */
9399 if (!TARGET_MIPS16)
9400 target = stack_pointer_rtx;
9402 emit_insn (gen_add3_insn (target, base, adjust));
9405 /* Copy TARGET into the stack pointer. */
9406 if (target != stack_pointer_rtx)
9407 mips_emit_move (stack_pointer_rtx, target);
9409 /* If we're using addressing macros, $gp is implicitly used by all
9410 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
9411 from the stack. */
9412 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
9413 emit_insn (gen_blockage ());
9415 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
9417 unsigned int regno, mask;
9418 HOST_WIDE_INT offset;
9419 rtx restore;
9421 /* Generate the restore instruction. */
9422 mask = frame->mask;
9423 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
9425 /* Restore any other registers manually. */
9426 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9427 if (BITSET_P (mask, regno - GP_REG_FIRST))
9429 offset -= UNITS_PER_WORD;
9430 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
9433 /* Restore the remaining registers and deallocate the final bit
9434 of the frame. */
9435 emit_insn (restore);
9437 else
9439 /* Restore the registers. */
9440 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
9442 /* Deallocate the final bit of the frame. */
9443 if (step2 > 0)
9444 emit_insn (gen_add3_insn (stack_pointer_rtx,
9445 stack_pointer_rtx,
9446 GEN_INT (step2)));
9449 /* Add in the __builtin_eh_return stack adjustment. We need to
9450 use a temporary in MIPS16 code. */
9451 if (crtl->calls_eh_return)
9453 if (TARGET_MIPS16)
9455 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
9456 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
9457 MIPS_EPILOGUE_TEMP (Pmode),
9458 EH_RETURN_STACKADJ_RTX));
9459 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
9461 else
9462 emit_insn (gen_add3_insn (stack_pointer_rtx,
9463 stack_pointer_rtx,
9464 EH_RETURN_STACKADJ_RTX));
9467 if (!sibcall_p)
9469 unsigned int regno;
9471 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
9472 path will restore the return address into $7 rather than $31. */
9473 if (TARGET_MIPS16
9474 && !GENERATE_MIPS16E_SAVE_RESTORE
9475 && BITSET_P (frame->mask, 31))
9476 regno = GP_REG_FIRST + 7;
9477 else
9478 regno = GP_REG_FIRST + 31;
9479 mips_expand_before_return ();
9480 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
9484 /* Return nonzero if this function is known to have a null epilogue.
9485 This allows the optimizer to omit jumps to jumps if no stack
9486 was created. */
9488 bool
9489 mips_can_use_return_insn (void)
9491 if (!reload_completed)
9492 return false;
9494 if (crtl->profile)
9495 return false;
9497 /* In MIPS16 mode, a function that returns a floating-point value
9498 needs to arrange to copy the return value into the floating-point
9499 registers. */
9500 if (mips16_cfun_returns_in_fpr_p ())
9501 return false;
9503 return cfun->machine->frame.total_size == 0;
9506 /* Return true if register REGNO can store a value of mode MODE.
9507 The result of this function is cached in mips_hard_regno_mode_ok. */
9509 static bool
9510 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
9512 unsigned int size;
9513 enum mode_class mclass;
9515 if (mode == CCV2mode)
9516 return (ISA_HAS_8CC
9517 && ST_REG_P (regno)
9518 && (regno - ST_REG_FIRST) % 2 == 0);
9520 if (mode == CCV4mode)
9521 return (ISA_HAS_8CC
9522 && ST_REG_P (regno)
9523 && (regno - ST_REG_FIRST) % 4 == 0);
9525 if (mode == CCmode)
9527 if (!ISA_HAS_8CC)
9528 return regno == FPSW_REGNUM;
9530 return (ST_REG_P (regno)
9531 || GP_REG_P (regno)
9532 || FP_REG_P (regno));
9535 size = GET_MODE_SIZE (mode);
9536 mclass = GET_MODE_CLASS (mode);
9538 if (GP_REG_P (regno))
9539 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
9541 if (FP_REG_P (regno)
9542 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
9543 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
9545 /* Allow TFmode for CCmode reloads. */
9546 if (mode == TFmode && ISA_HAS_8CC)
9547 return true;
9549 /* Allow 64-bit vector modes for Loongson-2E/2F. */
9550 if (TARGET_LOONGSON_VECTORS
9551 && (mode == V2SImode
9552 || mode == V4HImode
9553 || mode == V8QImode
9554 || mode == DImode))
9555 return true;
9557 if (mclass == MODE_FLOAT
9558 || mclass == MODE_COMPLEX_FLOAT
9559 || mclass == MODE_VECTOR_FLOAT)
9560 return size <= UNITS_PER_FPVALUE;
9562 /* Allow integer modes that fit into a single register. We need
9563 to put integers into FPRs when using instructions like CVT
9564 and TRUNC. There's no point allowing sizes smaller than a word,
9565 because the FPU has no appropriate load/store instructions. */
9566 if (mclass == MODE_INT)
9567 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
9570 if (ACC_REG_P (regno)
9571 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
9573 if (MD_REG_P (regno))
9575 /* After a multiplication or division, clobbering HI makes
9576 the value of LO unpredictable, and vice versa. This means
9577 that, for all interesting cases, HI and LO are effectively
9578 a single register.
9580 We model this by requiring that any value that uses HI
9581 also uses LO. */
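/* For example, a word-sized value is only allowed in LO itself, while
   a doubleword value must start at MD_REG_FIRST so that it occupies
   the whole HI/LO pair.  */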
9582 if (size <= UNITS_PER_WORD * 2)
9583 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
9585 else
9587 /* DSP accumulators do not have the same restrictions as
9588 HI and LO, so we can treat them as normal doubleword
9589 registers. */
9590 if (size <= UNITS_PER_WORD)
9591 return true;
9593 if (size <= UNITS_PER_WORD * 2
9594 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
9595 return true;
9599 if (ALL_COP_REG_P (regno))
9600 return mclass == MODE_INT && size <= UNITS_PER_WORD;
9602 if (regno == GOT_VERSION_REGNUM)
9603 return mode == SImode;
9605 return false;
9608 /* Implement HARD_REGNO_NREGS. */
9610 unsigned int
9611 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9613 if (ST_REG_P (regno))
9614 /* The size of FP status registers is always 4, because they only hold
9615 CCmode values, and CCmode is always considered to be 4 bytes wide. */
9616 return (GET_MODE_SIZE (mode) + 3) / 4;
9618 if (FP_REG_P (regno))
9619 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9621 /* All other registers are word-sized. */
9622 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9625 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
9626 in mips_hard_regno_nregs. */
9629 mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
9631 int size;
9632 HARD_REG_SET left;
9634 size = 0x8000;
9635 COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
9636 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
9638 size = MIN (size, 4);
9639 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
9641 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
9643 size = MIN (size, UNITS_PER_FPREG);
9644 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
9646 if (!hard_reg_set_empty_p (left))
9647 size = MIN (size, UNITS_PER_WORD);
9648 return (GET_MODE_SIZE (mode) + size - 1) / size;
9651 /* Implement CANNOT_CHANGE_MODE_CLASS. */
9653 bool
9654 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
9655 enum machine_mode to ATTRIBUTE_UNUSED,
9656 enum reg_class rclass)
9658 /* There are several problems with changing the modes of values
9659 in floating-point registers:
9661 - When a multi-word value is stored in paired floating-point
9662 registers, the first register always holds the low word.
9663 We therefore can't allow FPRs to change between single-word
9664 and multi-word modes on big-endian targets.
9666 - GCC assumes that each word of a multiword register can be accessed
9667 individually using SUBREGs. This is not true for floating-point
9668 registers if they are bigger than a word.
9670 - Loading a 32-bit value into a 64-bit floating-point register
9671 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9672 We can't allow FPRs to change from SImode to a wider mode on
9673 64-bit targets.
9675 - If the FPU has already interpreted a value in one format, we must
9676 not ask it to treat the value as having a different format.
9678 We therefore disallow all mode changes involving FPRs. */
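/* As one concrete case (illustrative): on a big-endian target with
   32-bit FPRs, the high word of a DFmode value in the pair $f20/$f21
   lives in $f21, whereas a big-endian GPR pair would hold it in the
   first register, so word-based SUBREG accesses would pick the wrong
   half if the mode change were allowed.  */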
9679 return reg_classes_intersect_p (FP_REGS, rclass);
9682 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
9684 static bool
9685 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
9687 switch (mode)
9689 case SFmode:
9690 return TARGET_HARD_FLOAT;
9692 case DFmode:
9693 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
9695 case V2SFmode:
9696 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
9698 default:
9699 return false;
9703 /* Implement MODES_TIEABLE_P. */
9705 bool
9706 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9708 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
9709 prefer to put one of them in FPRs. */
9710 return (mode1 == mode2
9711 || (!mips_mode_ok_for_mov_fmt_p (mode1)
9712 && !mips_mode_ok_for_mov_fmt_p (mode2)));
9715 /* Implement PREFERRED_RELOAD_CLASS. */
9717 enum reg_class
9718 mips_preferred_reload_class (rtx x, enum reg_class rclass)
9720 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
9721 return LEA_REGS;
9723 if (reg_class_subset_p (FP_REGS, rclass)
9724 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
9725 return FP_REGS;
9727 if (reg_class_subset_p (GR_REGS, rclass))
9728 rclass = GR_REGS;
9730 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
9731 rclass = M16_REGS;
9733 return rclass;
9736 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
9737 Return a "canonical" class to represent it in later calculations. */
9739 static enum reg_class
9740 mips_canonicalize_move_class (enum reg_class rclass)
9742 /* All moves involving accumulator registers have the same cost. */
9743 if (reg_class_subset_p (rclass, ACC_REGS))
9744 rclass = ACC_REGS;
9746 /* Likewise promote subclasses of general registers to the most
9747 interesting containing class. */
9748 if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
9749 rclass = M16_REGS;
9750 else if (reg_class_subset_p (rclass, GENERAL_REGS))
9751 rclass = GENERAL_REGS;
9753 return rclass;
9756 /* Return the cost of moving a value of mode MODE from a register of
9757 class FROM to a GPR. Return 0 for classes that are unions of other
9758 classes handled by this function. */
9760 static int
9761 mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9762 enum reg_class from)
9764 switch (from)
9766 case GENERAL_REGS:
9767 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
9768 return 2;
9770 case ACC_REGS:
9771 /* MFLO and MFHI. */
9772 return 6;
9774 case FP_REGS:
9775 /* MFC1, etc. */
9776 return 4;
9778 case ST_REGS:
9779 /* LUI followed by MOVF. */
9780 return 4;
9782 case COP0_REGS:
9783 case COP2_REGS:
9784 case COP3_REGS:
9785 /* This choice of value is historical. */
9786 return 5;
9788 default:
9789 return 0;
9793 /* Return the cost of moving a value of mode MODE from a GPR to a
9794 register of class TO. Return 0 for classes that are unions of
9795 other classes handled by this function. */
9797 static int
9798 mips_move_from_gpr_cost (enum machine_mode mode, enum reg_class to)
9800 switch (to)
9802 case GENERAL_REGS:
9803 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
9804 return 2;
9806 case ACC_REGS:
9807 /* MTLO and MTHI. */
9808 return 6;
9810 case FP_REGS:
9811 /* MTC1, etc. */
9812 return 4;
9814 case ST_REGS:
9815 /* A secondary reload through an FPR scratch. */
9816 return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
9817 + mips_register_move_cost (mode, FP_REGS, ST_REGS));
9819 case COP0_REGS:
9820 case COP2_REGS:
9821 case COP3_REGS:
9822 /* This choice of value is historical. */
9823 return 5;
9825 default:
9826 return 0;
9830 /* Implement REGISTER_MOVE_COST. Return 0 for classes whose cost is
9831 the maximum of the move costs of their subclasses; regclass will
9832 work out that maximum for us. */
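/* For example, an ACC_REGS -> FP_REGS move has no direct path, so it
   is costed below as a move to a GPR (MFLO/MFHI, cost 6) plus a move
   from a GPR (MTC1, cost 4), i.e. 10 in total.  */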
9835 mips_register_move_cost (enum machine_mode mode,
9836 enum reg_class from, enum reg_class to)
9838 enum reg_class dregs;
9839 int cost1, cost2;
9841 from = mips_canonicalize_move_class (from);
9842 to = mips_canonicalize_move_class (to);
9844 /* Handle moves that can be done without using general-purpose registers. */
9845 if (from == FP_REGS)
9847 if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
9848 /* MOV.FMT. */
9849 return 4;
9850 if (to == ST_REGS)
9851 /* The sequence generated by mips_expand_fcc_reload. */
9852 return 8;
9855 /* Handle cases in which only one class deviates from the ideal. */
9856 dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
9857 if (from == dregs)
9858 return mips_move_from_gpr_cost (mode, to);
9859 if (to == dregs)
9860 return mips_move_to_gpr_cost (mode, from);
9862 /* Handle cases that require a GPR temporary. */
9863 cost1 = mips_move_to_gpr_cost (mode, from);
9864 if (cost1 != 0)
9866 cost2 = mips_move_from_gpr_cost (mode, to);
9867 if (cost2 != 0)
9868 return cost1 + cost2;
9871 return 0;
9874 /* Implement TARGET_IRA_COVER_CLASSES. */
9876 static const enum reg_class *
9877 mips_ira_cover_classes (void)
9879 static const enum reg_class acc_classes[] = {
9880 GR_AND_ACC_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
9881 ST_REGS, LIM_REG_CLASSES
9883 static const enum reg_class no_acc_classes[] = {
9884 GR_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
9885 ST_REGS, LIM_REG_CLASSES
9888 /* Don't allow the register allocators to use LO and HI in MIPS16 mode,
9889 which has no MTLO or MTHI instructions. Also, using GR_AND_ACC_REGS
9890 as a cover class only works well when we keep per-register costs.
9891 Using it when not optimizing can cause us to think accumulators
9892 have the same cost as GPRs in cases where GPRs are actually much
9893 cheaper. */
9894 return TARGET_MIPS16 || !optimize ? no_acc_classes : acc_classes;
9897 /* Return the register class required for a secondary register when
9898 copying between one of the registers in RCLASS and value X, which
9899 has mode MODE. X is the source of the move if IN_P, otherwise it
9900 is the destination. Return NO_REGS if no secondary register is
9901 needed. */
9903 enum reg_class
9904 mips_secondary_reload_class (enum reg_class rclass,
9905 enum machine_mode mode, rtx x, bool in_p)
9907 int regno;
9909 /* If X is a constant that cannot be loaded into $25, it must be loaded
9910 into some other GPR. No other register class allows a direct move. */
9911 if (mips_dangerous_for_la25_p (x))
9912 return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
9914 regno = true_regnum (x);
9915 if (TARGET_MIPS16)
9917 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9918 if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
9919 return M16_REGS;
9921 return NO_REGS;
9924 /* Copying from accumulator registers to anywhere other than a general
9925 register requires a temporary general register. */
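/* For example, spilling LO to a stack slot goes through a GPR scratch:
   roughly "mflo <tmp>; sw <tmp>,<offset>($sp)" (illustrative).  */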
9926 if (reg_class_subset_p (rclass, ACC_REGS))
9927 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9928 if (ACC_REG_P (regno))
9929 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9931 /* We can only copy a value to a condition code register from a
9932 floating-point register, and even then we require a scratch
9933 floating-point register. We can only copy a value out of a
9934 condition-code register into a general register. */
9935 if (reg_class_subset_p (rclass, ST_REGS))
9937 if (in_p)
9938 return FP_REGS;
9939 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9941 if (ST_REG_P (regno))
9943 if (!in_p)
9944 return FP_REGS;
9945 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9948 if (reg_class_subset_p (rclass, FP_REGS))
9950 if (MEM_P (x)
9951 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9952 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9953 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9954 return NO_REGS;
9956 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9957 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9958 return NO_REGS;
9960 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9961 /* We can force the constant to memory and use lwc1
9962 and ldc1. As above, we will use pairs of lwc1s if
9963 ldc1 is not supported. */
9964 return NO_REGS;
9966 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9967 /* In this case we can use mov.fmt. */
9968 return NO_REGS;
9970 /* Otherwise, we need to reload through an integer register. */
9971 return GR_REGS;
9973 if (FP_REG_P (regno))
9974 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9976 return NO_REGS;
9979 /* Implement TARGET_MODE_REP_EXTENDED. */
9981 static int
9982 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9984 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9985 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9986 return SIGN_EXTEND;
9988 return UNKNOWN;
9991 /* Implement TARGET_VALID_POINTER_MODE. */
9993 static bool
9994 mips_valid_pointer_mode (enum machine_mode mode)
9996 return mode == SImode || (TARGET_64BIT && mode == DImode);
9999 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
10001 static bool
10002 mips_vector_mode_supported_p (enum machine_mode mode)
10004 switch (mode)
10006 case V2SFmode:
10007 return TARGET_PAIRED_SINGLE_FLOAT;
10009 case V2HImode:
10010 case V4QImode:
10011 case V2HQmode:
10012 case V2UHQmode:
10013 case V2HAmode:
10014 case V2UHAmode:
10015 case V4QQmode:
10016 case V4UQQmode:
10017 return TARGET_DSP;
10019 case V2SImode:
10020 case V4HImode:
10021 case V8QImode:
10022 return TARGET_LOONGSON_VECTORS;
10024 default:
10025 return false;
10029 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
10031 static bool
10032 mips_scalar_mode_supported_p (enum machine_mode mode)
10034 if (ALL_FIXED_POINT_MODE_P (mode)
10035 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
10036 return true;
10038 return default_scalar_mode_supported_p (mode);
10041 /* Implement TARGET_INIT_LIBFUNCS. */
10043 #include "config/gofast.h"
10045 static void
10046 mips_init_libfuncs (void)
10048 if (TARGET_FIX_VR4120)
10050 /* Register the special divsi3 and modsi3 functions needed to work
10051 around VR4120 division errata. */
10052 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10053 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10056 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10058 /* Register the MIPS16 -mhard-float stubs. */
10059 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10060 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10061 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10062 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10064 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10065 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10066 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10067 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10068 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10069 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10070 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10072 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10073 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10074 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10076 if (TARGET_DOUBLE_FLOAT)
10078 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10079 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10080 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10081 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10083 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10084 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10085 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10086 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10087 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10088 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10089 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10091 set_conv_libfunc (sext_optab, DFmode, SFmode,
10092 "__mips16_extendsfdf2");
10093 set_conv_libfunc (trunc_optab, SFmode, DFmode,
10094 "__mips16_truncdfsf2");
10095 set_conv_libfunc (sfix_optab, SImode, DFmode,
10096 "__mips16_fix_truncdfsi");
10097 set_conv_libfunc (sfloat_optab, DFmode, SImode,
10098 "__mips16_floatsidf");
10099 set_conv_libfunc (ufloat_optab, DFmode, SImode,
10100 "__mips16_floatunsidf");
10103 else
10104 /* Register the gofast functions if selected using --enable-gofast. */
10105 gofast_maybe_init_libfuncs ();
10107 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
10108 on an external non-MIPS16 routine to implement __sync_synchronize. */
10109 if (TARGET_MIPS16)
10110 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
10113 /* Return the length of INSN. LENGTH is the initial length computed by
10114 attributes in the machine-description file. */
10117 mips_adjust_insn_length (rtx insn, int length)
10119 /* An unconditional jump has an unfilled delay slot if it is not part
10120 of a sequence. A conditional jump normally has a delay slot, but
10121 does not on MIPS16. */
10122 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10123 length += 4;
10125 /* See how many nops might be needed to avoid hardware hazards. */
10126 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10127 switch (get_attr_hazard (insn))
10129 case HAZARD_NONE:
10130 break;
10132 case HAZARD_DELAY:
10133 length += 4;
10134 break;
10136 case HAZARD_HILO:
10137 length += 8;
10138 break;
10141 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
10142 the .md file length attributes are 4-based for both modes.
10143 Adjust the MIPS16 ones here. */
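/* For example (illustrative), an unextended MIPS16 instruction has a
   4-based length of 4 but occupies 2 bytes, while an extended one has
   length 8 and occupies 4, so halving gives the real size.  */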
10144 if (TARGET_MIPS16)
10145 length /= 2;
10147 return length;
10150 /* Return an asm sequence to start a noat block and load the address
10151 of a label into $1. */
10153 const char *
10154 mips_output_load_label (void)
10156 if (TARGET_EXPLICIT_RELOCS)
10157 switch (mips_abi)
10159 case ABI_N32:
10160 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10162 case ABI_64:
10163 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10165 default:
10166 if (ISA_HAS_LOAD_DELAY)
10167 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10168 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10170 else
10172 if (Pmode == DImode)
10173 return "%[dla\t%@,%0";
10174 else
10175 return "%[la\t%@,%0";
10179 /* Return the assembly code for INSN, which has the operands given by
10180 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10181 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10182 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10183 version of BRANCH_IF_TRUE. */
10185 const char *
10186 mips_output_conditional_branch (rtx insn, rtx *operands,
10187 const char *branch_if_true,
10188 const char *branch_if_false)
10190 unsigned int length;
10191 rtx taken, not_taken;
10193 length = get_attr_length (insn);
10194 if (length <= 8)
10196 /* Just a simple conditional branch. */
10197 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10198 return branch_if_true;
10201 /* Generate a reversed branch around a direct jump. This fallback does
10202 not use branch-likely instructions. */
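/* For a branch such as "beq $4,$5,foo" whose target is out of range,
   the output is therefore roughly (illustrative, delay slots not
   shown):

       bne     $4,$5,1f
       j       foo
   1:  */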
10203 mips_branch_likely = false;
10204 not_taken = gen_label_rtx ();
10205 taken = operands[1];
10207 /* Generate the reversed branch to NOT_TAKEN. */
10208 operands[1] = not_taken;
10209 output_asm_insn (branch_if_false, operands);
10211 /* If INSN has a delay slot, we must provide delay slots for both the
10212 branch to NOT_TAKEN and the conditional jump. We must also ensure
10213 that INSN's delay slot is executed in the appropriate cases. */
10214 if (final_sequence)
10216 /* This first delay slot will always be executed, so use INSN's
10217 delay slot if it is not annulled. */
10218 if (!INSN_ANNULLED_BRANCH_P (insn))
10220 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10221 asm_out_file, optimize, 1, NULL);
10222 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10224 else
10225 output_asm_insn ("nop", 0);
10226 fprintf (asm_out_file, "\n");
10229 /* Output the unconditional branch to TAKEN. */
10230 if (length <= 16)
10231 output_asm_insn ("j\t%0%/", &taken);
10232 else
10234 output_asm_insn (mips_output_load_label (), &taken);
10235 output_asm_insn ("jr\t%@%]%/", 0);
10238 /* Now deal with its delay slot; see above. */
10239 if (final_sequence)
10241 /* This delay slot will only be executed if the branch is taken.
10242 Use INSN's delay slot if it is annulled. */
10243 if (INSN_ANNULLED_BRANCH_P (insn))
10245 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10246 asm_out_file, optimize, 1, NULL);
10247 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10249 else
10250 output_asm_insn ("nop", 0);
10251 fprintf (asm_out_file, "\n");
10254 /* Output NOT_TAKEN. */
10255 targetm.asm_out.internal_label (asm_out_file, "L",
10256 CODE_LABEL_NUMBER (not_taken));
10257 return "";
10260 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10261 if some ordering condition is true. The condition is given by
10262 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10263 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10264 its second is always zero. */
10266 const char *
10267 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10269 const char *branch[2];
10271 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10272 Make BRANCH[0] branch on the inverse condition. */
10273 switch (GET_CODE (operands[0]))
10275 /* These cases are equivalent to comparisons against zero. */
10276 case LEU:
10277 inverted_p = !inverted_p;
10278 /* Fall through. */
10279 case GTU:
10280 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10281 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10282 break;
10284 /* These cases are always true or always false. */
10285 case LTU:
10286 inverted_p = !inverted_p;
10287 /* Fall through. */
10288 case GEU:
10289 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10290 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10291 break;
10293 default:
10294 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10295 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10296 break;
10298 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10301 /* Return the assembly code for __sync_*() loop LOOP. The loop should support
10302 both normal and likely branches, using %? and %~ where appropriate. */
10304 const char *
10305 mips_output_sync_loop (const char *loop)
10307 /* Use branch-likely instructions to work around the LL/SC R10000 errata. */
10308 mips_branch_likely = TARGET_FIX_R10000;
10309 return loop;
10312 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
10313 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
10315 When working around R4000 and R4400 errata, we need to make sure that
10316 the division is not immediately followed by a shift[1][2]. We also
10317 need to stop the division from being put into a branch delay slot[3].
10318 The easiest way to avoid both problems is to add a nop after the
10319 division. When a divide-by-zero check is needed, this nop can be
10320 used to fill the branch delay slot.
10322 [1] If a double-word or a variable shift executes immediately
10323 after starting an integer division, the shift may give an
10324 incorrect result. See quotations of errata #16 and #28 from
10325 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10326 in mips.md for details.
10328 [2] A similar bug to [1] exists for all revisions of the
10329 R4000 and the R4400 when run in an MC configuration.
10330 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10332 "19. In this following sequence:
10334 ddiv (or ddivu or div or divu)
10335 dsll32 (or dsrl32, dsra32)
10337 if an MPT stall occurs, while the divide is slipping the cpu
10338 pipeline, then the following double shift would end up with an
10339 incorrect result.
10341 Workaround: The compiler needs to avoid generating any
10342 sequence with divide followed by extended double shift."
10344 This erratum is also present in "MIPS R4400MC Errata, Processor
10345 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10346 & 3.0" as errata #10 and #4, respectively.
10348 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10349 (also valid for MIPS R4000MC processors):
10351 "52. R4000SC: This bug does not apply for the R4000PC.
10353 There are two flavors of this bug:
10355 1) If the instruction just after divide takes an RF exception
10356 (tlb-refill, tlb-invalid) and gets an instruction cache
10357 miss (both primary and secondary) and the line which is
10358 currently in secondary cache at this index had the first
10359 data word, where the bits 5..2 are set, then R4000 would
10360 get a wrong result for the div.
10364 div r8, r9
10365 ------------------- # end-of page. -tlb-refill
10369 div r8, r9
10370 ------------------- # end-of page. -tlb-invalid
10373 2) If the divide is in the taken branch delay slot, where the
10374 target takes RF exception and gets an I-cache miss for the
10375 exception vector or where I-cache miss occurs for the
10376 target address, under the above mentioned scenarios, the
10377 div would get wrong results.
10380 j r2 # to next page mapped or unmapped
10381 div r8,r9 # this bug would be there as long
10382 # as there is an ICache miss and
10383 nop # the "data pattern" is present
10386 beq r0, r0, NextPage # to Next page
10387 div r8,r9
10390 This bug is present for div, divu, ddiv, and ddivu
10391 instructions.
10393 Workaround: For item 1), OS could make sure that the next page
10394 after the divide instruction is also mapped. For item 2), the
10395 compiler could make sure that the divide instruction is not in
10396 the branch delay slot."
10398 These processors have PRId values of 0x00004220 and 0x00004300 for
10399 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
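/* With the R4000/R4400 workaround and -mcheck-zero-division in effect,
   but no conditional trap in use, the output is therefore roughly as
   follows (illustrative register choices):

       div     $0,$4,$5
       bne     $5,$0,1f
       nop                     # workaround nop doubles as the
                               # branch delay slot
       break   7
   1:  */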
10401 const char *
10402 mips_output_division (const char *division, rtx *operands)
10404 const char *s;
10406 s = division;
10407 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10409 output_asm_insn (s, operands);
10410 s = "nop";
10412 if (TARGET_CHECK_ZERO_DIV)
10414 if (TARGET_MIPS16)
10416 output_asm_insn (s, operands);
10417 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10419 else if (GENERATE_DIVIDE_TRAPS)
10421 output_asm_insn (s, operands);
10422 s = "teq\t%2,%.,7";
10424 else
10426 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10427 output_asm_insn (s, operands);
10428 s = "break\t7%)\n1:";
10431 return s;
10434 /* Return true if IN_INSN is a multiply-add or multiply-subtract
10435 instruction and if OUT_INSN assigns to the accumulator operand. */
10437 bool
10438 mips_linked_madd_p (rtx out_insn, rtx in_insn)
10440 rtx x;
10442 x = single_set (in_insn);
10443 if (x == 0)
10444 return false;
10446 x = SET_SRC (x);
10448 if (GET_CODE (x) == PLUS
10449 && GET_CODE (XEXP (x, 0)) == MULT
10450 && reg_set_p (XEXP (x, 1), out_insn))
10451 return true;
10453 if (GET_CODE (x) == MINUS
10454 && GET_CODE (XEXP (x, 1)) == MULT
10455 && reg_set_p (XEXP (x, 0), out_insn))
10456 return true;
10458 return false;
10461 /* True if the dependency between OUT_INSN and IN_INSN is on the store
10462 data rather than the address. We need this because the cprestore
10463 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
10464 which causes the default routine to abort. We just return false
10465 for that case. */
10467 bool
10468 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10470 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10471 return false;
10473 return !store_data_bypass_p (out_insn, in_insn);
10477 /* Variables and flags used in scheduler hooks when tuning for
10478 Loongson 2E/2F. */
10479 static struct
10481 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
10482 strategy. */
10484 /* If true, then the next ALU1/2 instruction will go to ALU1. */
10485 bool alu1_turn_p;
10487 /* If true, then the next FALU1/2 instruction will go to FALU1. */
10488 bool falu1_turn_p;
10490 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
10491 int alu1_core_unit_code;
10492 int alu2_core_unit_code;
10493 int falu1_core_unit_code;
10494 int falu2_core_unit_code;
10496 /* True if the current cycle has a 'multi' instruction.
10497 This flag is used in mips_ls2_dfa_post_advance_cycle. */
10498 bool cycle_has_multi_p;
10500 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
10501 These are used in mips_ls2_dfa_post_advance_cycle to initialize
10502 DFA state.
10503 E.g., when alu1_turn_enabled_insn is issued, it makes the next ALU1/2
10504 instruction go to ALU1. */
10505 rtx alu1_turn_enabled_insn;
10506 rtx alu2_turn_enabled_insn;
10507 rtx falu1_turn_enabled_insn;
10508 rtx falu2_turn_enabled_insn;
10509 } mips_ls2;
10511 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10512 dependencies have no cost, except on the 20Kc where output-dependence
10513 is treated like input-dependence. */
10515 static int
10516 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10517 rtx dep ATTRIBUTE_UNUSED, int cost)
10519 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10520 && TUNE_20KC)
10521 return cost;
10522 if (REG_NOTE_KIND (link) != 0)
10523 return 0;
10524 return cost;
10527 /* Return the number of instructions that can be issued per cycle. */
10529 static int
10530 mips_issue_rate (void)
10532 switch (mips_tune)
10534 case PROCESSOR_74KC:
10535 case PROCESSOR_74KF2_1:
10536 case PROCESSOR_74KF1_1:
10537 case PROCESSOR_74KF3_2:
10538 /* The 74k is not strictly a quad-issue CPU, but can be seen as one
10539 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10540 but in reality only a maximum of 3 insns can be issued as
10541 floating-point loads and stores also require a slot in the
10542 AGEN pipe. */
10543 case PROCESSOR_R10000:
10544 /* All R10K processors are quad-issue (being the first MIPS
10545 processors to support this feature). */
10546 return 4;
10548 case PROCESSOR_20KC:
10549 case PROCESSOR_R4130:
10550 case PROCESSOR_R5400:
10551 case PROCESSOR_R5500:
10552 case PROCESSOR_R7000:
10553 case PROCESSOR_R9000:
10554 case PROCESSOR_OCTEON:
10555 return 2;
10557 case PROCESSOR_SB1:
10558 case PROCESSOR_SB1A:
10559 /* This is actually 4, but we get better performance if we claim 3.
10560 This is partly because of unwanted speculative code motion with the
10561 larger number, and partly because in most common cases we can't
10562 reach the theoretical max of 4. */
10563 return 3;
10565 case PROCESSOR_LOONGSON_2E:
10566 case PROCESSOR_LOONGSON_2F:
10567 return 4;
10569 default:
10570 return 1;
10574 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
10576 static void
10577 mips_ls2_init_dfa_post_cycle_insn (void)
10579 start_sequence ();
10580 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
10581 mips_ls2.alu1_turn_enabled_insn = get_insns ();
10582 end_sequence ();
10584 start_sequence ();
10585 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
10586 mips_ls2.alu2_turn_enabled_insn = get_insns ();
10587 end_sequence ();
10589 start_sequence ();
10590 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
10591 mips_ls2.falu1_turn_enabled_insn = get_insns ();
10592 end_sequence ();
10594 start_sequence ();
10595 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
10596 mips_ls2.falu2_turn_enabled_insn = get_insns ();
10597 end_sequence ();
10599 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
10600 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
10601 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
10602 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
10605 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
10606 Init data used in mips_dfa_post_advance_cycle. */
10608 static void
10609 mips_init_dfa_post_cycle_insn (void)
10611 if (TUNE_LOONGSON_2EF)
10612 mips_ls2_init_dfa_post_cycle_insn ();
10615 /* Initialize STATE when scheduling for Loongson 2E/2F.
10616 Support round-robin dispatch scheme by enabling only one of
10617 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
10618 respectively. */
10620 static void
10621 mips_ls2_dfa_post_advance_cycle (state_t state)
10623 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
10625 /* Though there are no non-pipelined ALU1 insns,
10626 we can get an instruction of type 'multi' before reload. */
10627 gcc_assert (mips_ls2.cycle_has_multi_p);
10628 mips_ls2.alu1_turn_p = false;
10631 mips_ls2.cycle_has_multi_p = false;
10633 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
10634 /* We have a non-pipelined alu instruction in the core;
10635 adjust the round-robin counter. */
10636 mips_ls2.alu1_turn_p = true;
10638 if (mips_ls2.alu1_turn_p)
10640 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
10641 gcc_unreachable ();
10643 else
10645 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
10646 gcc_unreachable ();
10649 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
10651 /* There are no non-pipelined FALU1 insns. */
10652 gcc_unreachable ();
10653 mips_ls2.falu1_turn_p = false;
10656 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
10657 /* We have a non-pipelined falu instruction in the core;
10658 adjust the round-robin counter. */
10659 mips_ls2.falu1_turn_p = true;
10661 if (mips_ls2.falu1_turn_p)
10663 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
10664 gcc_unreachable ();
10666 else
10668 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
10669 gcc_unreachable ();
10673 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
10674 This hook is being called at the start of each cycle. */
10676 static void
10677 mips_dfa_post_advance_cycle (void)
10679 if (TUNE_LOONGSON_2EF)
10680 mips_ls2_dfa_post_advance_cycle (curr_state);
10683 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10684 be as wide as the scheduling freedom in the DFA. */
10686 static int
10687 mips_multipass_dfa_lookahead (void)
10689 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10690 if (TUNE_SB1)
10691 return 4;
10693 if (TUNE_LOONGSON_2EF)
10694 return 4;
10696 if (TUNE_OCTEON)
10697 return 2;
10699 return 0;
10702 /* Remove the instruction at index LOWER from ready queue READY and
10703 reinsert it in front of the instruction at index HIGHER. LOWER must
10704 be <= HIGHER. */
10706 static void
10707 mips_promote_ready (rtx *ready, int lower, int higher)
10709 rtx new_head;
10710 int i;
10712 new_head = ready[lower];
10713 for (i = lower; i < higher; i++)
10714 ready[i] = ready[i + 1];
10715 ready[i] = new_head;
10718 /* If the priority of the instruction at POS2 in the ready queue READY
10719 is within LIMIT units of that of the instruction at POS1, swap the
10720 instructions if POS2 is not already less than POS1. */
10722 static void
10723 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
10725 if (pos1 < pos2
10726 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
10728 rtx temp;
10730 temp = ready[pos1];
10731 ready[pos1] = ready[pos2];
10732 ready[pos2] = temp;
10736 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10737 that may clobber hi or lo. */
10738 static rtx mips_macc_chains_last_hilo;
10740 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10741 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10743 static void
10744 mips_macc_chains_record (rtx insn)
10746 if (get_attr_may_clobber_hilo (insn))
10747 mips_macc_chains_last_hilo = insn;
10750 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10751 has NREADY elements, looking for a multiply-add or multiply-subtract
10752 instruction that is cumulative with mips_macc_chains_last_hilo.
10753 If there is one, promote it ahead of anything else that might
10754 clobber hi or lo. */
10756 static void
10757 mips_macc_chains_reorder (rtx *ready, int nready)
10759 int i, j;
10761 if (mips_macc_chains_last_hilo != 0)
10762 for (i = nready - 1; i >= 0; i--)
10763 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10765 for (j = nready - 1; j > i; j--)
10766 if (recog_memoized (ready[j]) >= 0
10767 && get_attr_may_clobber_hilo (ready[j]))
10769 mips_promote_ready (ready, i, j);
10770 break;
10772 break;
10776 /* The last instruction to be scheduled. */
10777 static rtx vr4130_last_insn;
10779 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10780 points to an rtx that is initially an instruction. Nullify the rtx
10781 if the instruction uses the value of register X. */
10783 static void
10784 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
10785 void *data)
10787 rtx *insn_ptr;
10789 insn_ptr = (rtx *) data;
10790 if (REG_P (x)
10791 && *insn_ptr != 0
10792 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10793 *insn_ptr = 0;
10796 /* Return true if there is true register dependence between vr4130_last_insn
10797 and INSN. */
10799 static bool
10800 vr4130_true_reg_dependence_p (rtx insn)
10802 note_stores (PATTERN (vr4130_last_insn),
10803 vr4130_true_reg_dependence_p_1, &insn);
10804 return insn == 0;
10807 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10808 the ready queue and that INSN2 is the instruction after it, return
10809 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10810 in which INSN1 and INSN2 can probably issue in parallel, but for
10811 which (INSN2, INSN1) should be less sensitive to instruction
10812 alignment than (INSN1, INSN2). See 4130.md for more details. */
10814 static bool
10815 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10817 sd_iterator_def sd_it;
10818 dep_t dep;
10820 /* Check for the following case:
10822 1) there is some other instruction X with an anti dependence on INSN1;
10823 2) X has a higher priority than INSN2; and
10824 3) X is an arithmetic instruction (and thus has no unit restrictions).
10826 If INSN1 is the last instruction blocking X, it would be better to
10827 choose (INSN1, X) over (INSN2, INSN1). */
10828 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10829 if (DEP_TYPE (dep) == REG_DEP_ANTI
10830 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10831 && recog_memoized (DEP_CON (dep)) >= 0
10832 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10833 return false;
10835 if (vr4130_last_insn != 0
10836 && recog_memoized (insn1) >= 0
10837 && recog_memoized (insn2) >= 0)
10839 /* See whether INSN1 and INSN2 use different execution units,
10840 or if they are both ALU-type instructions. If so, they can
10841 probably execute in parallel. */
10842 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10843 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10844 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10846 /* If only one of the instructions has a dependence on
10847 vr4130_last_insn, prefer to schedule the other one first. */
10848 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
10849 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
10850 if (dep1_p != dep2_p)
10851 return dep1_p;
10853 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10854 is not an ALU-type instruction and if INSN1 uses the same
10855 execution unit. (Note that if this condition holds, we already
10856 know that INSN2 uses a different execution unit.) */
10857 if (class1 != VR4130_CLASS_ALU
10858 && recog_memoized (vr4130_last_insn) >= 0
10859 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10860 return true;
10863 return false;
10866 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10867 queue with at least two instructions. Swap the first two if
10868 vr4130_swap_insns_p says that it could be worthwhile. */
10870 static void
10871 vr4130_reorder (rtx *ready, int nready)
10873 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10874 mips_promote_ready (ready, nready - 2, nready - 1);
10877 /* Record whether the last 74k AGEN instruction was a load or a store. */
10878 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
10880 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10881 resets it to the TYPE_UNKNOWN state. */
10883 static void
10884 mips_74k_agen_init (rtx insn)
10886 if (!insn || !NONJUMP_INSN_P (insn))
10887 mips_last_74k_agen_insn = TYPE_UNKNOWN;
10888 else
10890 enum attr_type type = get_attr_type (insn);
10891 if (type == TYPE_LOAD || type == TYPE_STORE)
10892 mips_last_74k_agen_insn = type;
10896 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10897 loads to be grouped together, and multiple stores to be grouped
10898 together. Swap things around in the ready queue to make this happen. */
10900 static void
10901 mips_74k_agen_reorder (rtx *ready, int nready)
10903 int i;
10904 int store_pos, load_pos;
10906 store_pos = -1;
10907 load_pos = -1;
10909 for (i = nready - 1; i >= 0; i--)
10911 rtx insn = ready[i];
10912 if (USEFUL_INSN_P (insn))
10913 switch (get_attr_type (insn))
10915 case TYPE_STORE:
10916 if (store_pos == -1)
10917 store_pos = i;
10918 break;
10920 case TYPE_LOAD:
10921 if (load_pos == -1)
10922 load_pos = i;
10923 break;
10925 default:
10926 break;
10930 if (load_pos == -1 || store_pos == -1)
10931 return;
10933 switch (mips_last_74k_agen_insn)
10935 case TYPE_UNKNOWN:
10936 /* Prefer to schedule loads since they have a higher latency. */
10937 case TYPE_LOAD:
10938 /* Swap loads to the front of the queue. */
10939 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
10940 break;
10941 case TYPE_STORE:
10942 /* Swap stores to the front of the queue. */
10943 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
10944 break;
10945 default:
10946 break;
10950 /* Implement TARGET_SCHED_INIT. */
10952 static void
10953 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10954 int max_ready ATTRIBUTE_UNUSED)
10956 mips_macc_chains_last_hilo = 0;
10957 vr4130_last_insn = 0;
10958 mips_74k_agen_init (NULL_RTX);
10960 /* When scheduling for Loongson2, branch instructions go to ALU1,
10961 therefore a basic block is most likely to start with the round-robin
10962 counter pointing to ALU2. */
10963 mips_ls2.alu1_turn_p = false;
10964 mips_ls2.falu1_turn_p = true;
10967 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10969 static int
10970 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10971 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
10973 if (!reload_completed
10974 && TUNE_MACC_CHAINS
10975 && *nreadyp > 0)
10976 mips_macc_chains_reorder (ready, *nreadyp);
10978 if (reload_completed
10979 && TUNE_MIPS4130
10980 && !TARGET_VR4130_ALIGN
10981 && *nreadyp > 1)
10982 vr4130_reorder (ready, *nreadyp);
10984 if (TUNE_74K)
10985 mips_74k_agen_reorder (ready, *nreadyp);
10987 return mips_issue_rate ();
10990 /* Update round-robin counters for ALU1/2 and FALU1/2. */
10992 static void
10993 mips_ls2_variable_issue (rtx insn)
10995 if (mips_ls2.alu1_turn_p)
10997 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
10998 mips_ls2.alu1_turn_p = false;
11000 else
11002 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
11003 mips_ls2.alu1_turn_p = true;
11006 if (mips_ls2.falu1_turn_p)
11008 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
11009 mips_ls2.falu1_turn_p = false;
11011 else
11013 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
11014 mips_ls2.falu1_turn_p = true;
11017 if (recog_memoized (insn) >= 0)
11018 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
11021 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11023 static int
11024 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11025 rtx insn, int more)
11027 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
11028 if (USEFUL_INSN_P (insn))
11030 more--;
11031 if (!reload_completed && TUNE_MACC_CHAINS)
11032 mips_macc_chains_record (insn);
11033 vr4130_last_insn = insn;
11034 if (TUNE_74K)
11035 mips_74k_agen_init (insn);
11036 else if (TUNE_LOONGSON_2EF)
11037 mips_ls2_variable_issue (insn);
11040 /* Instructions of type 'multi' should all be split before
11041 the second scheduling pass. */
11042 gcc_assert (!reload_completed
11043 || recog_memoized (insn) < 0
11044 || get_attr_type (insn) != TYPE_MULTI);
11046 return more;
11049 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11050 return the first operand of the associated PREF or PREFX insn. */
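/* For example (illustrative), __builtin_prefetch (p, 1, 0) has WRITE 1
   and LOCALITY 0, giving hint 1 + 4 == 5 (store_streamed), while
   __builtin_prefetch (p, 0, 3) gives hint 0 + 6 == 6 (load_retained).  */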
11053 mips_prefetch_cookie (rtx write, rtx locality)
11055 /* store_streamed / load_streamed. */
11056 if (INTVAL (locality) <= 0)
11057 return GEN_INT (INTVAL (write) + 4);
11059 /* store / load. */
11060 if (INTVAL (locality) <= 2)
11061 return write;
11063 /* store_retained / load_retained. */
11064 return GEN_INT (INTVAL (write) + 6);
11067 /* Flags that indicate when a built-in function is available.
11069 BUILTIN_AVAIL_NON_MIPS16
11070 The function is available on the current target, but only
11071 in non-MIPS16 mode. */
11072 #define BUILTIN_AVAIL_NON_MIPS16 1
11074 /* Declare an availability predicate for built-in functions that
11075 require non-MIPS16 mode and also require COND to be true.
11076 NAME is the main part of the predicate's name. */
11077 #define AVAIL_NON_MIPS16(NAME, COND) \
11078 static unsigned int \
11079 mips_builtin_avail_##NAME (void) \
11081 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
11084 /* This structure describes a single built-in function. */
11085 struct mips_builtin_description {
11086 /* The code of the main .md file instruction. See mips_builtin_type
11087 for more information. */
11088 enum insn_code icode;
11090 /* The floating-point comparison code to use with ICODE, if any. */
11091 enum mips_fp_condition cond;
11093 /* The name of the built-in function. */
11094 const char *name;
11096 /* Specifies how the function should be expanded. */
11097 enum mips_builtin_type builtin_type;
11099 /* The function's prototype. */
11100 enum mips_function_type function_type;
11102 /* Whether the function is available. */
11103 unsigned int (*avail) (void);
11106 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
11107 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
11108 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
11109 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
11110 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
11111 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
11112 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
11113 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
11114 AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
11116 /* Construct a mips_builtin_description from the given arguments.
11118 INSN is the name of the associated instruction pattern, without the
11119 leading CODE_FOR_mips_.
11121 CODE is the floating-point condition code associated with the
11122 function. It can be 'f' if the field is not applicable.
11124 NAME is the name of the function itself, without the leading
11125 "__builtin_mips_".
11127 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
11129 AVAIL is the name of the availability predicate, without the leading
11130 mips_builtin_avail_. */
11131 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
11132 FUNCTION_TYPE, AVAIL) \
11133 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
11134 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
11135 mips_builtin_avail_ ## AVAIL }
11137 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
11138 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
11139 are as for MIPS_BUILTIN. */
11140 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
11141 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
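/* For illustration, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   paired_single) expands to:

     { CODE_FOR_mips_pll_ps, MIPS_FP_COND_f, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       mips_builtin_avail_paired_single }  */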
11143 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
11144 are subject to mips_builtin_avail_<AVAIL>. */
11145 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
11146 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
11147 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
11148 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
11149 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
11151 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11152 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
11153 while the any and all forms are subject to mips_builtin_avail_mips3d. */
11154 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
11155 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
11156 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
11157 mips3d), \
11158 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
11159 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
11160 mips3d), \
11161 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
11162 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
11163 AVAIL), \
11164 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
11165 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
11166 AVAIL)
11168 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11169 are subject to mips_builtin_avail_mips3d. */
11170 #define CMP_4S_BUILTINS(INSN, COND) \
11171 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
11172 MIPS_BUILTIN_CMP_ANY, \
11173 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
11174 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
11175 MIPS_BUILTIN_CMP_ALL, \
11176 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
11178 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11179 instruction requires mips_builtin_avail_<AVAIL>. */
11180 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
11181 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
11182 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11183 AVAIL), \
11184 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
11185 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11186 AVAIL)
11188 /* Define all the built-in functions related to C.cond.fmt condition COND. */
11189 #define CMP_BUILTINS(COND) \
11190 MOVTF_BUILTINS (c, COND, paired_single), \
11191 MOVTF_BUILTINS (cabs, COND, mips3d), \
11192 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
11193 CMP_PS_BUILTINS (c, COND, paired_single), \
11194 CMP_PS_BUILTINS (cabs, COND, mips3d), \
11195 CMP_4S_BUILTINS (c, COND), \
11196 CMP_4S_BUILTINS (cabs, COND)
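/* MIPS_FP_CONDITIONS (CMP_BUILTINS), used in the table below, expands
   CMP_BUILTINS once for each mips_fp_condition, giving every C.cond.fmt
   condition its full set of comparison and conditional-move built-in
   functions.  */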
11198 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
11199 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
11200 and AVAIL are as for MIPS_BUILTIN. */
11201 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
11202 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
11203 FUNCTION_TYPE, AVAIL)
11205 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11206 branch instruction. AVAIL is as for MIPS_BUILTIN. */
11207 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
11208 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
11209 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
11211 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
11212 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11213 builtin_description field. */
11214 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
11215 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
11216 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
11218 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
11219 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11220 builtin_description field. */
11221 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
11222 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
11224 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
11225 We use functions of this form when the same insn can be usefully applied
11226 to more than one datatype. */
11227 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
11228 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
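/* For example, LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI)
   declares __builtin_loongson_paddw_u and maps it to CODE_FOR_loongson_paddw.

   The #defines below alias the CODE_FOR_mips_* and CODE_FOR_loongson_*
   names expected by the macros above to the generic .md pattern names
   that actually implement them.  */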
11230 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11231 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11232 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11233 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11234 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11235 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11237 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
11238 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
11239 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
11240 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
11241 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
11242 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
11243 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
11244 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
11245 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
11246 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
11247 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
11248 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
11249 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
11250 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
11251 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
11252 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
11253 #define CODE_FOR_loongson_biadd CODE_FOR_reduc_uplus_v8qi
11254 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
11255 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
11256 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
11257 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
11258 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
11259 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
11260 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
11261 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
11262 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
11263 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
11264 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
11265 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
11266 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
11268 static const struct mips_builtin_description mips_builtins[] = {
11269 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11270 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11271 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11272 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11273 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
11274 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
11275 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
11276 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
11278 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
11279 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11280 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11281 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11282 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
11284 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
11285 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
11286 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11287 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11288 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11289 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11291 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
11292 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
11293 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11294 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11295 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11296 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11298 MIPS_FP_CONDITIONS (CMP_BUILTINS),
11300 /* Built-in functions for the SB-1 processor. */
11301 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
11303 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
11304 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11305 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11306 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11307 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11308 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11309 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11310 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11311 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11312 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11313 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11314 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
11315 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
11316 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
11317 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
11318 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
11319 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
11320 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11321 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11322 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11323 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11324 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
11325 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
11326 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11327 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11328 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11329 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11330 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11331 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11332 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11333 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11334 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11335 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11336 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11337 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11338 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11339 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11340 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11341 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
11342 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11343 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11344 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11345 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11346 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11347 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
11348 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
11349 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
11350 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
11351 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11352 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11353 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11354 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11355 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11356 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11357 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11358 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11359 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11360 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11361 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11362 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11363 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
11364 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
11365 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
11366 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11367 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11368 BPOSGE_BUILTIN (32, dsp),
11370 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
11371 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
11372 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11373 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11374 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11375 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11376 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11377 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11378 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11379 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11380 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11381 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11382 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11383 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11384 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11385 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11386 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
11387 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11388 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11389 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11390 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11391 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11392 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
11393 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11394 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11395 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11396 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11397 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11398 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11399 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11400 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11401 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11402 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11403 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11404 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11406 /* Built-in functions for the DSP ASE (32-bit only). */
11407 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11408 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11409 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11410 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11411 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11412 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11413 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11414 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11415 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11416 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11417 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11418 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11419 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11420 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11421 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11422 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11423 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
11424 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11425 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11426 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
11427 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
11429 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
11430 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11431 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11432 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11433 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11434 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11435 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11436 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11437 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
11438 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
11439 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11440 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11441 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11442 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11443 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11444 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11446 /* Built-in functions for ST Microelectronics Loongson-2E/2F cores. */
11447 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
11448 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
11449 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
11450 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11451 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11452 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11453 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11454 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11455 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11456 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
11457 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
11458 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11459 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11460 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11461 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11462 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
11463 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11464 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11465 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11466 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
11467 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
11468 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11469 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11470 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11471 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11472 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11473 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11474 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11475 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11476 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11477 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11478 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11479 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11480 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11481 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11482 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11483 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11484 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
11485 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
11486 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11487 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11488 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11489 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11490 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11491 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11492 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11493 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11494 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
11495 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11496 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11497 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11498 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11499 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
11500 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
11501 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11502 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11503 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11504 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
11505 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11506 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
11507 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
11508 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
11509 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
11510 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11511 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11512 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11513 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11514 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11515 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11516 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11517 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11518 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11519 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11520 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11521 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11522 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11523 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11524 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11525 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11526 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11527 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11528 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
11529 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
11530 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11531 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11532 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11533 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11534 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11535 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11536 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11537 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11538 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11539 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11540 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11541 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11542 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11543 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11544 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11545 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11547 /* Sundry other built-in functions. */
11548 DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
11551 /* MODE is a vector mode whose elements have type TYPE. Return the type
11552 of the vector itself. */
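/* Signed and unsigned element types share the same machine mode, so the
   cache below keeps two entries per mode; V4HImode paired with
   intHI_type_node and with unsigned_intHI_type_node therefore map to
   distinct vector types.  */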
11554 static tree
11555 mips_builtin_vector_type (tree type, enum machine_mode mode)
11557 static tree types[2 * (int) MAX_MACHINE_MODE];
11558 int mode_index;
11560 mode_index = (int) mode;
11562 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
11563 mode_index += MAX_MACHINE_MODE;
11565 if (types[mode_index] == NULL_TREE)
11566 types[mode_index] = build_vector_type_for_mode (type, mode);
11567 return types[mode_index];
11570 /* Return a type for 'const volatile void *'. */
11572 static tree
11573 mips_build_cvpointer_type (void)
11575 static tree cache;
11577 if (cache == NULL_TREE)
11578 cache = build_pointer_type (build_qualified_type
11579 (void_type_node,
11580 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
11581 return cache;
11584 /* Source-level argument types. */
11585 #define MIPS_ATYPE_VOID void_type_node
11586 #define MIPS_ATYPE_INT integer_type_node
11587 #define MIPS_ATYPE_POINTER ptr_type_node
11588 #define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()
11590 /* Standard mode-based argument types. */
11591 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
11592 #define MIPS_ATYPE_SI intSI_type_node
11593 #define MIPS_ATYPE_USI unsigned_intSI_type_node
11594 #define MIPS_ATYPE_DI intDI_type_node
11595 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
11596 #define MIPS_ATYPE_SF float_type_node
11597 #define MIPS_ATYPE_DF double_type_node
11599 /* Vector argument types. */
11600 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
11601 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
11602 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
11603 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
11604 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
11605 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
11606 #define MIPS_ATYPE_UV2SI \
11607 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
11608 #define MIPS_ATYPE_UV4HI \
11609 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
11610 #define MIPS_ATYPE_UV8QI \
11611 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
11613 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
11614 their associated MIPS_ATYPEs. */
11615 #define MIPS_FTYPE_ATYPES1(A, B) \
11616 MIPS_ATYPE_##A, MIPS_ATYPE_##B
11618 #define MIPS_FTYPE_ATYPES2(A, B, C) \
11619 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
11621 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
11622 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
11624 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
11625 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
11626 MIPS_ATYPE_##E
11628 /* Return the function type associated with function prototype TYPE. */
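/* For illustration (the actual entries live in mips-ftypes.def): an
   entry such as DEF_MIPS_FTYPE (2, (INT, SF, SF)) would give
   MIPS_INT_FTYPE_SF_SF the type
   build_function_type_list (integer_type_node, float_type_node,
   float_type_node, NULL_TREE), i.e. "int (float, float)", the prototype
   used by the scalar comparison built-in functions above.  */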
11630 static tree
11631 mips_build_function_type (enum mips_function_type type)
11633 static tree types[(int) MIPS_MAX_FTYPE_MAX];
11635 if (types[(int) type] == NULL_TREE)
11636 switch (type)
11638 #define DEF_MIPS_FTYPE(NUM, ARGS) \
11639 case MIPS_FTYPE_NAME##NUM ARGS: \
11640 types[(int) type] \
11641 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
11642 NULL_TREE); \
11643 break;
11644 #include "config/mips/mips-ftypes.def"
11645 #undef DEF_MIPS_FTYPE
11646 default:
11647 gcc_unreachable ();
11650 return types[(int) type];
11653 /* Implement TARGET_INIT_BUILTINS. */
11655 static void
11656 mips_init_builtins (void)
11658 const struct mips_builtin_description *d;
11659 unsigned int i;
11661 /* Iterate through all of the entries in mips_builtins, registering each
11662 built-in function that is available on the current target. */
11663 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
11665 d = &mips_builtins[i];
11666 if (d->avail ())
11667 add_builtin_function (d->name,
11668 mips_build_function_type (d->function_type),
11669 i, BUILT_IN_MD, NULL, NULL);
11673 /* Take argument ARGNO from EXP's argument list and convert it into a
11674 form suitable for input operand OPNO of instruction ICODE. Return the
11675 value. */
11677 static rtx
11678 mips_prepare_builtin_arg (enum insn_code icode,
11679 unsigned int opno, tree exp, unsigned int argno)
11681 rtx value;
11682 enum machine_mode mode;
11684 value = expand_normal (CALL_EXPR_ARG (exp, argno));
11685 mode = insn_data[icode].operand[opno].mode;
11686 if (!insn_data[icode].operand[opno].predicate (value, mode))
11688 /* Cope with address operands, where MODE is not the mode of
11689 VALUE itself. */
11690 if (GET_MODE (value) == VOIDmode)
11691 value = copy_to_mode_reg (mode, value);
11692 else
11693 value = copy_to_reg (value);
11695 /* Check the predicate again. */
11696 if (!insn_data[icode].operand[opno].predicate (value, mode))
11698 error ("invalid argument to built-in function");
11699 return const0_rtx;
11703 return value;
11706 /* Return an rtx suitable for output operand OP of instruction ICODE.
11707 If TARGET is non-null, try to use it where possible. */
11709 static rtx
11710 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11712 enum machine_mode mode;
11714 mode = insn_data[icode].operand[op].mode;
11715 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11716 target = gen_reg_rtx (mode);
11718 return target;
11721 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
11722 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
11723 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
11724 suggests a good place to put the result. */
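/* For example, a two-argument MIPS_BUILTIN_DIRECT function such as
   __builtin_mips_addq_ph maps its result to operand 0 and its arguments
   to operands 1 and 2, and so is emitted through the three-operand case
   below.  */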
11726 static rtx
11727 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11728 bool has_target_p)
11730 rtx ops[MAX_RECOG_OPERANDS];
11731 int opno, argno;
11733 /* Map any target to operand 0. */
11734 opno = 0;
11735 if (has_target_p)
11737 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
11738 opno++;
11741 /* Map the arguments to the other operands. The n_operands value
11742 for an expander includes match_dups and match_scratches as well as
11743 match_operands, so n_operands is only an upper bound on the number
11744 of arguments to the expander function. */
11745 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
11746 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
11747 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
11749 switch (opno)
11751 case 2:
11752 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11753 break;
11755 case 3:
11756 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11757 break;
11759 case 4:
11760 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11761 break;
11763 default:
11764 gcc_unreachable ();
11766 return target;
11769 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
11770 function; TYPE says which. EXP is the CALL_EXPR that calls the
11771 function, ICODE is the instruction that should be used to compare
11772 the first two arguments, and COND is the condition it should test.
11773 TARGET, if nonnull, suggests a good place to put the result. */
11775 static rtx
11776 mips_expand_builtin_movtf (enum mips_builtin_type type,
11777 enum insn_code icode, enum mips_fp_condition cond,
11778 rtx target, tree exp)
11780 rtx cmp_result, op0, op1;
11782 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11783 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11784 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11785 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11787 icode = CODE_FOR_mips_cond_move_tf_ps;
11788 target = mips_prepare_builtin_target (icode, 0, target);
11789 if (type == MIPS_BUILTIN_MOVT)
11791 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11792 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11794 else
11796 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11797 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11799 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11800 return target;
11803 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11804 into TARGET otherwise. Return TARGET. */
11806 static rtx
11807 mips_builtin_branch_and_move (rtx condition, rtx target,
11808 rtx value_if_true, rtx value_if_false)
11810 rtx true_label, done_label;
11812 true_label = gen_label_rtx ();
11813 done_label = gen_label_rtx ();
11815 /* First assume that CONDITION is false. */
11816 mips_emit_move (target, value_if_false);
11818 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11819 emit_jump_insn (gen_condjump (condition, true_label));
11820 emit_jump_insn (gen_jump (done_label));
11821 emit_barrier ();
11823 /* Fix TARGET if CONDITION is true. */
11824 emit_label (true_label);
11825 mips_emit_move (target, value_if_true);
11827 emit_label (done_label);
11828 return target;
11831 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
11832 the CALL_EXPR that calls the function, ICODE is the code of the
11833 comparison instruction, and COND is the condition it should test.
11834 TARGET, if nonnull, suggests a good place to put the boolean result. */
11836 static rtx
11837 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11838 enum insn_code icode, enum mips_fp_condition cond,
11839 rtx target, tree exp)
11841 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
11842 int argno;
11844 if (target == 0 || GET_MODE (target) != SImode)
11845 target = gen_reg_rtx (SImode);
11847 /* The instruction should have a target operand, an operand for each
11848 argument, and an operand for COND. */
11849 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
11851 /* Prepare the operands to the comparison. */
11852 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11853 for (argno = 0; argno < call_expr_nargs (exp); argno++)
11854 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
11856 switch (insn_data[icode].n_operands)
11858 case 4:
11859 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11860 GEN_INT (cond)));
11861 break;
11863 case 6:
11864 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11865 args[2], args[3], GEN_INT (cond)));
11866 break;
11868 default:
11869 gcc_unreachable ();
11872 /* If the comparison sets more than one register, we define the result
11873 to be 0 if all registers are false and -1 if all registers are true.
11874 The value of the complete result is indeterminate otherwise. */
11875 switch (builtin_type)
11877 case MIPS_BUILTIN_CMP_ALL:
11878 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11879 return mips_builtin_branch_and_move (condition, target,
11880 const0_rtx, const1_rtx);
11882 case MIPS_BUILTIN_CMP_UPPER:
11883 case MIPS_BUILTIN_CMP_LOWER:
11884 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11885 condition = gen_single_cc (cmp_result, offset);
11886 return mips_builtin_branch_and_move (condition, target,
11887 const1_rtx, const0_rtx);
11889 default:
11890 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11891 return mips_builtin_branch_and_move (condition, target,
11892 const1_rtx, const0_rtx);
11896 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
11897 if nonnull, suggests a good place to put the boolean result. */
11899 static rtx
11900 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11902 rtx condition, cmp_result;
11903 int cmp_value;
11905 if (target == 0 || GET_MODE (target) != SImode)
11906 target = gen_reg_rtx (SImode);
11908 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11910 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11911 cmp_value = 32;
11912 else
11913 gcc_assert (0);
11915 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11916 return mips_builtin_branch_and_move (condition, target,
11917 const1_rtx, const0_rtx);
11920 /* Implement TARGET_EXPAND_BUILTIN. */
11922 static rtx
11923 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11924 enum machine_mode mode ATTRIBUTE_UNUSED,
11925 int ignore ATTRIBUTE_UNUSED)
11927 tree fndecl;
11928 unsigned int fcode, avail;
11929 const struct mips_builtin_description *d;
11931 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11932 fcode = DECL_FUNCTION_CODE (fndecl);
11933 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
11934 d = &mips_builtins[fcode];
11935 avail = d->avail ();
11936 gcc_assert (avail != 0);
11937 if (TARGET_MIPS16)
11939 error ("built-in function %qs not supported for MIPS16",
11940 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
11941 return const0_rtx;
11943 switch (d->builtin_type)
11945 case MIPS_BUILTIN_DIRECT:
11946 return mips_expand_builtin_direct (d->icode, target, exp, true);
11948 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11949 return mips_expand_builtin_direct (d->icode, target, exp, false);
11951 case MIPS_BUILTIN_MOVT:
11952 case MIPS_BUILTIN_MOVF:
11953 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
11954 d->cond, target, exp);
11956 case MIPS_BUILTIN_CMP_ANY:
11957 case MIPS_BUILTIN_CMP_ALL:
11958 case MIPS_BUILTIN_CMP_UPPER:
11959 case MIPS_BUILTIN_CMP_LOWER:
11960 case MIPS_BUILTIN_CMP_SINGLE:
11961 return mips_expand_builtin_compare (d->builtin_type, d->icode,
11962 d->cond, target, exp);
11964 case MIPS_BUILTIN_BPOSGE32:
11965 return mips_expand_builtin_bposge (d->builtin_type, target);
11967 gcc_unreachable ();
11970 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
11971 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
11972 struct mips16_constant {
11973 struct mips16_constant *next;
11974 rtx value;
11975 rtx label;
11976 enum machine_mode mode;
11979 /* Information about an incomplete MIPS16 constant pool. FIRST is the
11980 first constant, HIGHEST_ADDRESS is the highest address that the first
11981 byte of the pool can have, and INSN_ADDRESS is the current instruction
11982 address. */
11983 struct mips16_constant_pool {
11984 struct mips16_constant *first;
11985 int highest_address;
11986 int insn_address;
11989 /* Add constant VALUE to POOL and return its label. MODE is the
11990 value's mode (used for CONST_INTs, etc.). */
11992 static rtx
11993 mips16_add_constant (struct mips16_constant_pool *pool,
11994 rtx value, enum machine_mode mode)
11996 struct mips16_constant **p, *c;
11997 bool first_of_size_p;
11999 /* See whether the constant is already in the pool. If so, return the
12000 existing label, otherwise leave P pointing to the place where the
12001 constant should be added.
12003 Keep the pool sorted in increasing order of mode size so that we can
12004 reduce the number of alignments needed. */
12005 first_of_size_p = true;
12006 for (p = &pool->first; *p != 0; p = &(*p)->next)
12008 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
12009 return (*p)->label;
12010 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
12011 break;
12012 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
12013 first_of_size_p = false;
12016 /* In the worst case, the constant needed by the earliest instruction
12017 will end up at the end of the pool. The entire pool must then be
12018 accessible from that instruction.
12020 When adding the first constant, set the pool's highest address to
12021 the address of the first out-of-range byte. Adjust this address
12022 downwards each time a new constant is added. */
12023 if (pool->first == 0)
12024 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
12025 of the instruction with the lowest two bits clear. The base PC
12026 value for LDPC has the lowest three bits clear. Assume the worst
12027 case here; namely that the PC-relative instruction occupies the
12028 last 2 bytes in an aligned word. */
12029 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
12030 pool->highest_address -= GET_MODE_SIZE (mode);
12031 if (first_of_size_p)
12032 /* Take into account the worst possible padding due to alignment. */
12033 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
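/* For example, when the first DImode entry joins a pool that so far
   contains only SImode entries, the alignment emitted for it may add
   padding; subtracting GET_MODE_SIZE (mode) - 1 covers the worst case
   assumed here.  */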
12035 /* Create a new entry. */
12036 c = XNEW (struct mips16_constant);
12037 c->value = value;
12038 c->mode = mode;
12039 c->label = gen_label_rtx ();
12040 c->next = *p;
12041 *p = c;
12043 return c->label;
12046 /* Output constant VALUE after instruction INSN and return the last
12047 instruction emitted. MODE is the mode of the constant. */
12049 static rtx
12050 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
12052 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
12054 rtx size = GEN_INT (GET_MODE_SIZE (mode));
12055 return emit_insn_after (gen_consttable_int (value, size), insn);
12058 if (SCALAR_FLOAT_MODE_P (mode))
12059 return emit_insn_after (gen_consttable_float (value), insn);
12061 if (VECTOR_MODE_P (mode))
12063 int i;
12065 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
12066 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
12067 CONST_VECTOR_ELT (value, i), insn);
12068 return insn;
12071 gcc_unreachable ();
12074 /* Dump out the constants in CONSTANTS after INSN. */
12076 static void
12077 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
12079 struct mips16_constant *c, *next;
12080 int align;
12082 align = 0;
12083 for (c = constants; c != NULL; c = next)
12085 /* If necessary, increase the alignment of PC. */
12086 if (align < GET_MODE_SIZE (c->mode))
12088 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
12089 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
12091 align = GET_MODE_SIZE (c->mode);
12093 insn = emit_label_after (c->label, insn);
12094 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
12096 next = c->next;
12097 free (c);
12100 emit_barrier_after (insn);
12103 /* Return the length of instruction INSN. */
12105 static int
12106 mips16_insn_length (rtx insn)
12108 if (JUMP_P (insn))
12110 rtx body = PATTERN (insn);
12111 if (GET_CODE (body) == ADDR_VEC)
12112 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
12113 if (GET_CODE (body) == ADDR_DIFF_VEC)
12114 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
12116 return get_attr_length (insn);
12119 /* If *X is a symbolic constant that refers to the constant pool, add
12120 the constant to POOL and rewrite *X to use the constant's label. */
12122 static void
12123 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
12125 rtx base, offset, label;
12127 split_const (*x, &base, &offset);
12128 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
12130 label = mips16_add_constant (pool, get_pool_constant (base),
12131 get_pool_mode (base));
12132 base = gen_rtx_LABEL_REF (Pmode, label);
12133 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
12137 /* This structure is used to communicate with mips16_rewrite_pool_refs.
12138 INSN is the instruction we're rewriting and POOL points to the current
12139 constant pool. */
12140 struct mips16_rewrite_pool_refs_info {
12141 rtx insn;
12142 struct mips16_constant_pool *pool;
12145 /* Rewrite *X so that constant pool references refer to the constant's
12146 label instead. DATA points to a mips16_rewrite_pool_refs_info
12147 structure. */
12149 static int
12150 mips16_rewrite_pool_refs (rtx *x, void *data)
12152 struct mips16_rewrite_pool_refs_info *info =
12153 (struct mips16_rewrite_pool_refs_info *) data;
12155 if (force_to_mem_operand (*x, Pmode))
12157 rtx mem = force_const_mem (GET_MODE (*x), *x);
12158 validate_change (info->insn, x, mem, false);
12161 if (MEM_P (*x))
12163 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
12164 return -1;
12167 if (TARGET_MIPS16_TEXT_LOADS)
12168 mips16_rewrite_pool_constant (info->pool, x);
12170 return GET_CODE (*x) == CONST ? -1 : 0;
12173 /* Build MIPS16 constant pools. */
12175 static void
12176 mips16_lay_out_constants (void)
12178 struct mips16_constant_pool pool;
12179 struct mips16_rewrite_pool_refs_info info;
12180 rtx insn, barrier;
12182 if (!TARGET_MIPS16_PCREL_LOADS)
12183 return;
12185 split_all_insns_noflow ();
12186 barrier = 0;
12187 memset (&pool, 0, sizeof (pool));
12188 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12190 /* Rewrite constant pool references in INSN. */
12191 if (INSN_P (insn))
12193 info.insn = insn;
12194 info.pool = &pool;
12195 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
12198 pool.insn_address += mips16_insn_length (insn);
12200 if (pool.first != NULL)
12202 /* If there are no natural barriers between the first user of
12203 the pool and the highest acceptable address, we'll need to
12204 create a new instruction to jump around the constant pool.
12205 In the worst case, this instruction will be 4 bytes long.
12207 If it's too late to do this transformation after INSN,
12208 do it immediately before INSN. */
12209 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
12211 rtx label, jump;
12213 label = gen_label_rtx ();
12215 jump = emit_jump_insn_before (gen_jump (label), insn);
12216 JUMP_LABEL (jump) = label;
12217 LABEL_NUSES (label) = 1;
12218 barrier = emit_barrier_after (jump);
12220 emit_label_after (label, barrier);
12221 pool.insn_address += 4;
12224 /* See whether the constant pool is now out of range of the first
12225 user. If so, output the constants after the previous barrier.
12226 Note that any instructions between BARRIER and INSN (inclusive)
12227 will use negative offsets to refer to the pool. */
12228 if (pool.insn_address > pool.highest_address)
12230 mips16_emit_constants (pool.first, barrier);
12231 pool.first = NULL;
12232 barrier = 0;
12234 else if (BARRIER_P (insn))
12235 barrier = insn;
12238 mips16_emit_constants (pool.first, get_last_insn ());
12241 /* Return true if it is worth r10k_simplify_address's while replacing
12242 an address with X. We are looking for constants, and for addresses
12243 at a known offset from the incoming stack pointer. */
12245 static bool
12246 r10k_simplified_address_p (rtx x)
12248 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
12249 x = XEXP (x, 0);
12250 return x == virtual_incoming_args_rtx || CONSTANT_P (x);
12253 /* X is an expression that appears in INSN. Try to use the UD chains
12254 to simplify it, returning the simplified form on success and the
12255 original form otherwise. Replace the incoming value of $sp with
12256 virtual_incoming_args_rtx (which should never occur in X otherwise). */
12258 static rtx
12259 r10k_simplify_address (rtx x, rtx insn)
12261 rtx newx, op0, op1, set, def_insn, note;
12262 df_ref use, def;
12263 struct df_link *defs;
12265 newx = NULL_RTX;
12266 if (UNARY_P (x))
12268 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12269 if (op0 != XEXP (x, 0))
12270 newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
12271 op0, GET_MODE (XEXP (x, 0)));
12273 else if (BINARY_P (x))
12275 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12276 op1 = r10k_simplify_address (XEXP (x, 1), insn);
12277 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
12278 newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
12280 else if (GET_CODE (x) == LO_SUM)
12282 /* LO_SUMs can be offset from HIGHs, if we know they won't
12283 overflow. See mips_classify_address for the rationale behind
12284 the lax check. */
12285 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12286 if (GET_CODE (op0) == HIGH)
12287 newx = XEXP (x, 1);
12289 else if (REG_P (x))
12291 /* Uses are recorded by regno_reg_rtx, not X itself. */
12292 use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
12293 gcc_assert (use);
12294 defs = DF_REF_CHAIN (use);
12296 /* Require a single definition. */
12297 if (defs && defs->next == NULL)
12299 def = defs->ref;
12300 if (DF_REF_IS_ARTIFICIAL (def))
12302 /* Replace the incoming value of $sp with
12303 virtual_incoming_args_rtx. */
12304 if (x == stack_pointer_rtx
12305 && DF_REF_BB (def) == ENTRY_BLOCK_PTR)
12306 newx = virtual_incoming_args_rtx;
12308 else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
12309 DF_REF_BB (def)))
12311 /* Make sure that DEF_INSN is a single set of REG. */
12312 def_insn = DF_REF_INSN (def);
12313 if (NONJUMP_INSN_P (def_insn))
12315 set = single_set (def_insn);
12316 if (set && rtx_equal_p (SET_DEST (set), x))
12318 /* Prefer to use notes, since the def-use chains
12319 are often shorter. */
12320 note = find_reg_equal_equiv_note (def_insn);
12321 if (note)
12322 newx = XEXP (note, 0);
12323 else
12324 newx = SET_SRC (set);
12325 newx = r10k_simplify_address (newx, def_insn);
12331 if (newx && r10k_simplified_address_p (newx))
12332 return newx;
12333 return x;
12336 /* Return true if ADDRESS is known to be an uncached address
12337 on R10K systems. */
12339 static bool
12340 r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
12342 unsigned HOST_WIDE_INT upper;
12344 /* Check for KSEG1. */
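/* With the usual sign-extended representation of 32-bit addresses,
   KSEG1 addresses such as 0xffffffffa0000000 wrap to a value below
   0x20000000 once 0x60000000 is added, whereas cached KSEG0 addresses
   (0xffffffff80000000 and up) do not.  */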
12345 if (address + 0x60000000 < 0x20000000)
12346 return true;
12348 /* Check for uncached XKPHYS addresses. */
12349 if (Pmode == DImode)
12351 upper = (address >> 40) & 0xf9ffff;
12352 if (upper == 0x900000 || upper == 0xb80000)
12353 return true;
12355 return false;
12358 /* Return true if we can prove that an access to address X in instruction
12359 INSN would be safe from R10K speculation. This X is a general
12360 expression; it might not be a legitimate address. */
12362 static bool
12363 r10k_safe_address_p (rtx x, rtx insn)
12365 rtx base, offset;
12366 HOST_WIDE_INT offset_val;
12368 x = r10k_simplify_address (x, insn);
12370 /* Check for references to the stack frame. It doesn't really matter
12371 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
12372 allows us to assume that accesses to any part of the eventual frame
12373 are safe from speculation at any point in the function. */
12374 mips_split_plus (x, &base, &offset_val);
12375 if (base == virtual_incoming_args_rtx
12376 && offset_val >= -cfun->machine->frame.total_size
12377 && offset_val < cfun->machine->frame.args_size)
12378 return true;
12380 /* Check for uncached addresses. */
12381 if (CONST_INT_P (x))
12382 return r10k_uncached_address_p (INTVAL (x));
12384 /* Check for accesses to a static object. */
12385 split_const (x, &base, &offset);
12386 return offset_within_block_p (base, INTVAL (offset));
12389 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
12390 an in-range access to an automatic variable, or to an object with
12391 a link-time-constant address. */
12393 static bool
12394 r10k_safe_mem_expr_p (tree expr, rtx offset)
12396 if (expr == NULL_TREE
12397 || offset == NULL_RTX
12398 || !CONST_INT_P (offset)
12399 || INTVAL (offset) < 0
12400 || INTVAL (offset) >= int_size_in_bytes (TREE_TYPE (expr)))
12401 return false;
12403 while (TREE_CODE (expr) == COMPONENT_REF)
12405 expr = TREE_OPERAND (expr, 0);
12406 if (expr == NULL_TREE)
12407 return false;
12410 return DECL_P (expr);
12413 /* A for_each_rtx callback for which DATA points to the instruction
12414 containing *X. Stop the search if we find a MEM that is not safe
12415 from R10K speculation. */
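/* for_each_rtx interprets the return value as follows: 0 continues the
   walk, -1 skips the subexpressions of *LOC, and any other value stops
   the walk and is returned, which r10k_needs_protection_p treats as
   "needs a cache barrier".  */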
12417 static int
12418 r10k_needs_protection_p_1 (rtx *loc, void *data)
12420 rtx mem;
12422 mem = *loc;
12423 if (!MEM_P (mem))
12424 return 0;
12426 if (r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
12427 return -1;
12429 if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
12430 return -1;
12432 return 1;
12435 /* A note_stores callback for which DATA points to an instruction pointer.
12436 If *DATA is nonnull, make it null if X contains a MEM that is not
12437 safe from R10K speculation. */
12439 static void
12440 r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
12441 void *data)
12443 rtx *insn_ptr;
12445 insn_ptr = (rtx *) data;
12446 if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
12447 *insn_ptr = NULL_RTX;
12450 /* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
12451 Return nonzero if the call is not to a declared function. */
12453 static int
12454 r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
12456 rtx x;
12458 x = *loc;
12459 if (!MEM_P (x))
12460 return 0;
12462 x = XEXP (x, 0);
12463 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
12464 return -1;
12466 return 1;
12469 /* Return true if instruction INSN needs to be protected by an R10K
12470 cache barrier. */
12472 static bool
12473 r10k_needs_protection_p (rtx insn)
12475 if (CALL_P (insn))
12476 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
12478 if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
12480 note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
12481 return insn == NULL_RTX;
12484 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
12487 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
12488 edge is unconditional. */
12490 static bool
12491 r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
12493 edge_iterator ei;
12494 edge e;
12496 FOR_EACH_EDGE (e, ei, bb->preds)
12497 if (!single_succ_p (e->src)
12498 || !TEST_BIT (protected_bbs, e->src->index)
12499 || (e->flags & EDGE_COMPLEX) != 0)
12500 return false;
12501 return true;
12504 /* Implement -mr10k-cache-barrier= for the current function. */
12506 static void
12507 r10k_insert_cache_barriers (void)
12509 int *rev_post_order;
12510 unsigned int i, n;
12511 basic_block bb;
12512 sbitmap protected_bbs;
12513 rtx insn, end, unprotected_region;
12515 if (TARGET_MIPS16)
12517 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
12518 return;
12521 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. */
12522 compute_bb_for_insn ();
12524 /* Create def-use chains. */
12525 df_set_flags (DF_EQ_NOTES);
12526 df_chain_add_problem (DF_UD_CHAIN);
12527 df_analyze ();
12529 /* Calculate dominators. */
12530 calculate_dominance_info (CDI_DOMINATORS);
12532 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
12533 X is protected by a cache barrier. */
12534 protected_bbs = sbitmap_alloc (last_basic_block);
12535 sbitmap_zero (protected_bbs);
12537 /* Iterate over the basic blocks in reverse post-order. */
12538 rev_post_order = XNEWVEC (int, last_basic_block);
12539 n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
12540 for (i = 0; i < n; i++)
12542 bb = BASIC_BLOCK (rev_post_order[i]);
12544 /* If this block is only reached by unconditional edges, and if the
12545 source of every edge is protected, the beginning of the block is
12546 also protected. */
12547 if (r10k_protected_bb_p (bb, protected_bbs))
12548 unprotected_region = NULL_RTX;
12549 else
12550 unprotected_region = pc_rtx;
12551 end = NEXT_INSN (BB_END (bb));
12553 /* UNPROTECTED_REGION is:
12555 - null if we are processing a protected region,
12556 - pc_rtx if we are processing an unprotected region but have
12557 not yet found the first instruction in it
12558 - the first instruction in an unprotected region otherwise. */
12559 for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
12561 if (unprotected_region && INSN_P (insn))
12563 if (recog_memoized (insn) == CODE_FOR_mips_cache)
12564 /* This CACHE instruction protects the following code. */
12565 unprotected_region = NULL_RTX;
12566 else
12568 /* See if INSN is the first instruction in this
12569 unprotected region. */
12570 if (unprotected_region == pc_rtx)
12571 unprotected_region = insn;
12573 /* See if INSN needs to be protected. If so,
12574 we must insert a cache barrier somewhere between
12575 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
12576 clear which position is better performance-wise,
12577 but as a tie-breaker, we assume that it is better
12578 to allow delay slots to be back-filled where
12579 possible, and that it is better not to insert
12580 barriers in the middle of already-scheduled code.
12581 We therefore insert the barrier at the beginning
12582 of the region. */
12583 if (r10k_needs_protection_p (insn))
12585 emit_insn_before (gen_r10k_cache_barrier (),
12586 unprotected_region);
12587 unprotected_region = NULL_RTX;
12592 if (CALL_P (insn))
12593 /* The called function is not required to protect the exit path.
12594 The code that follows a call is therefore unprotected. */
12595 unprotected_region = pc_rtx;
12598 /* Record whether the end of this block is protected. */
12599 if (unprotected_region == NULL_RTX)
12600 SET_BIT (protected_bbs, bb->index);
12602 XDELETEVEC (rev_post_order);
12604 sbitmap_free (protected_bbs);
12606 free_dominance_info (CDI_DOMINATORS);
12608 df_finish_pass (false);
12610 free_bb_for_insn ();
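/* A simplified, self-contained model of the per-block scan performed by
   r10k_insert_cache_barriers above.  The sketch_insn_kind values and the
   NEEDS_PROTECTION array are hypothetical stand-ins for GCC's rtl insns
   and for r10k_needs_protection_p; only the control flow of the
   "unprotected region" bookkeeping is meant to match.  */
enum sketch_insn_kind { SKETCH_OTHER, SKETCH_CACHE_BARRIER, SKETCH_CALL };

/* Return the number of barriers the scan would insert for a block of
   N_INSNS instructions.  START_PROTECTED says whether the start of the
   block is known to be protected (all predecessors protected and
   reached by unconditional edges).  */
static int
sketch_count_barriers (const enum sketch_insn_kind *kinds,
                       const int *needs_protection,
                       int n_insns, int start_protected)
{
  int i, barriers = 0;
  /* -1 means "protected"; N_INSNS means "unprotected, first insn not
     yet seen"; otherwise the index of the first unprotected insn.  */
  int unprotected_start = start_protected ? -1 : n_insns;

  for (i = 0; i < n_insns; i++)
    {
      if (unprotected_start != -1)
        {
          if (kinds[i] == SKETCH_CACHE_BARRIER)
            /* An existing CACHE instruction protects what follows.  */
            unprotected_start = -1;
          else
            {
              if (unprotected_start == n_insns)
                unprotected_start = i;
              if (needs_protection[i])
                {
                  /* One barrier goes at the start of the region.  */
                  barriers++;
                  unprotected_start = -1;
                }
            }
        }
      if (kinds[i] == SKETCH_CALL)
        /* Code after a call is unprotected again.  */
        unprotected_start = n_insns;
    }
  return barriers;
}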
12613 /* A temporary variable used by for_each_rtx callbacks, etc. */
12614 static rtx mips_sim_insn;
12616 /* A structure representing the state of the processor pipeline.
12617 Used by the mips_sim_* family of functions. */
12618 struct mips_sim {
12619 /* The maximum number of instructions that can be issued in a cycle.
12620 (Caches mips_issue_rate.) */
12621 unsigned int issue_rate;
12623 /* The current simulation time. */
12624 unsigned int time;
12626 /* How many more instructions can be issued in the current cycle. */
12627 unsigned int insns_left;
12629 /* LAST_SET[X].INSN is the last instruction to set register X.
12630 LAST_SET[X].TIME is the time at which that instruction was issued.
12631 INSN is null if no instruction has yet set register X. */
12632 struct {
12633 rtx insn;
12634 unsigned int time;
12635 } last_set[FIRST_PSEUDO_REGISTER];
12637 /* The pipeline's current DFA state. */
12638 state_t dfa_state;
12641 /* Reset STATE to the initial simulation state. */
12643 static void
12644 mips_sim_reset (struct mips_sim *state)
12646 state->time = 0;
12647 state->insns_left = state->issue_rate;
12648 memset (&state->last_set, 0, sizeof (state->last_set));
12649 state_reset (state->dfa_state);
12652 /* Initialize STATE before its first use. DFA_STATE points to an
12653 allocated but uninitialized DFA state. */
12655 static void
12656 mips_sim_init (struct mips_sim *state, state_t dfa_state)
12658 state->issue_rate = mips_issue_rate ();
12659 state->dfa_state = dfa_state;
12660 mips_sim_reset (state);
12663 /* Advance STATE by one clock cycle. */
12665 static void
12666 mips_sim_next_cycle (struct mips_sim *state)
12668 state->time++;
12669 state->insns_left = state->issue_rate;
12670 state_transition (state->dfa_state, 0);
12673 /* Advance simulation state STATE until instruction INSN can read
12674 register REG. */
12676 static void
12677 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
12679 unsigned int regno, end_regno;
12681 end_regno = END_REGNO (reg);
12682 for (regno = REGNO (reg); regno < end_regno; regno++)
12683 if (state->last_set[regno].insn != 0)
12685 unsigned int t;
12687 t = (state->last_set[regno].time
12688 + insn_latency (state->last_set[regno].insn, insn));
12689 while (state->time < t)
12690 mips_sim_next_cycle (state);
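/* A minimal model of the dependence wait implemented by mips_sim_wait_reg
   above.  LAST_SET_TIME[R] and LATENCY[R] are hypothetical per-register
   arrays standing in for state->last_set[] and for insn_latency () between
   the last setter of R and the waiting instruction; the point is only that
   the simulated clock advances to the cycle at which every input register
   becomes available.  */
static unsigned int
sketch_wait_for_regs (unsigned int time, const unsigned int *last_set_time,
                      const unsigned int *latency,
                      const unsigned int *used_regs, int n_used)
{
  int i;

  for (i = 0; i < n_used; i++)
    {
      unsigned int regno = used_regs[i];
      unsigned int ready = last_set_time[regno] + latency[regno];

      while (time < ready)
        time++;                /* One mips_sim_next_cycle () per step.  */
    }
  return time;
}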
12694 /* A for_each_rtx callback. If *X is a register, advance simulation state
12695 DATA until mips_sim_insn can read the register's value. */
12697 static int
12698 mips_sim_wait_regs_2 (rtx *x, void *data)
12700 if (REG_P (*x))
12701 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
12702 return 0;
12705 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
12707 static void
12708 mips_sim_wait_regs_1 (rtx *x, void *data)
12710 for_each_rtx (x, mips_sim_wait_regs_2, data);
12713 /* Advance simulation state STATE until all of INSN's register
12714 dependencies are satisfied. */
12716 static void
12717 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
12719 mips_sim_insn = insn;
12720 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
12723 /* Advance simulation state STATE until the units required by
12724 instruction INSN are available. */
12726 static void
12727 mips_sim_wait_units (struct mips_sim *state, rtx insn)
12729 state_t tmp_state;
12731 tmp_state = alloca (state_size ());
12732 while (state->insns_left == 0
12733 || (memcpy (tmp_state, state->dfa_state, state_size ()),
12734 state_transition (tmp_state, insn) >= 0))
12735 mips_sim_next_cycle (state);
12738 /* Advance simulation state STATE until INSN is ready to issue. */
12740 static void
12741 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
12743 mips_sim_wait_regs (state, insn);
12744 mips_sim_wait_units (state, insn);
12747 /* mips_sim_insn has just set X. Update the LAST_SET array
12748 in simulation state DATA. */
12750 static void
12751 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
12753 struct mips_sim *state;
12755 state = (struct mips_sim *) data;
12756 if (REG_P (x))
12758 unsigned int regno, end_regno;
12760 end_regno = END_REGNO (x);
12761 for (regno = REGNO (x); regno < end_regno; regno++)
12763 state->last_set[regno].insn = mips_sim_insn;
12764 state->last_set[regno].time = state->time;
12769 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
12770 can issue immediately (i.e., that mips_sim_wait_insn has already
12771 been called). */
12773 static void
12774 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
12776 state_transition (state->dfa_state, insn);
12777 state->insns_left--;
12779 mips_sim_insn = insn;
12780 note_stores (PATTERN (insn), mips_sim_record_set, state);
12783 /* Simulate issuing a NOP in state STATE. */
12785 static void
12786 mips_sim_issue_nop (struct mips_sim *state)
12788 if (state->insns_left == 0)
12789 mips_sim_next_cycle (state);
12790 state->insns_left--;
12793 /* Update simulation state STATE so that it's ready to accept the instruction
12794 after INSN. INSN should be part of the main rtl chain, not a member of a
12795 SEQUENCE. */
12797 static void
12798 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
12800 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
12801 if (JUMP_P (insn))
12802 mips_sim_issue_nop (state);
12804 switch (GET_CODE (SEQ_BEGIN (insn)))
12806 case CODE_LABEL:
12807 case CALL_INSN:
12808 /* We can't predict the processor state after a call or label. */
12809 mips_sim_reset (state);
12810 break;
12812 case JUMP_INSN:
12813 /* The delay slots of branch likely instructions are only executed
12814 when the branch is taken. Therefore, if the caller has simulated
12815 the delay slot instruction, STATE does not really reflect the state
12816 of the pipeline for the instruction after the delay slot. Also,
12817 branch likely instructions tend to incur a penalty when not taken,
12818 so there will probably be an extra delay between the branch and
12819 the instruction after the delay slot. */
12820 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
12821 mips_sim_reset (state);
12822 break;
12824 default:
12825 break;
12829 /* The VR4130 pipeline issues aligned pairs of instructions together,
12830 but it stalls the second instruction if it depends on the first.
12831 In order to cut down the amount of logic required, this dependence
12832 check is not based on a full instruction decode. Instead, any non-SPECIAL
12833 instruction is assumed to modify the register specified by bits 20-16
12834 (which is usually the "rt" field).
12836 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
12837 input, so we can end up with a false dependence between the branch
12838 and its delay slot. If this situation occurs in instruction INSN,
12839 try to avoid it by swapping rs and rt. */
12841 static void
12842 vr4130_avoid_branch_rt_conflict (rtx insn)
12844 rtx first, second;
12846 first = SEQ_BEGIN (insn);
12847 second = SEQ_END (insn);
12848 if (JUMP_P (first)
12849 && NONJUMP_INSN_P (second)
12850 && GET_CODE (PATTERN (first)) == SET
12851 && GET_CODE (SET_DEST (PATTERN (first))) == PC
12852 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
12854 /* Check for the right kind of condition. */
12855 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
12856 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
12857 && REG_P (XEXP (cond, 0))
12858 && REG_P (XEXP (cond, 1))
12859 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
12860 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
12862 /* SECOND mentions the rt register but not the rs register. */
12863 rtx tmp = XEXP (cond, 0);
12864 XEXP (cond, 0) = XEXP (cond, 1);
12865 XEXP (cond, 1) = tmp;
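/* A stand-alone illustration of the operand swap performed by
   vr4130_avoid_branch_rt_conflict above, using plain register numbers for
   rs/rt.  DELAY_USES is a hypothetical predicate standing in for
   reg_referenced_p on the delay-slot instruction's pattern.  */
static void
sketch_swap_branch_operands (int *rs, int *rt, int (*delay_uses) (int regno))
{
  /* BEQ/BNE and BEQL/BNEL are symmetric in their two inputs, so if the
     delay slot reads rt but not rs, exchanging the operands removes the
     false dependence on rt.  */
  if (delay_uses (*rt) && !delay_uses (*rs))
    {
      int tmp = *rs;

      *rs = *rt;
      *rt = tmp;
    }
}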
12870 /* Implement -mvr4130-align. Go through each basic block and simulate the
12871 processor pipeline. If we find that a pair of instructions could execute
12872 in parallel, and the first of those instructions is not 8-byte aligned,
12873 insert a nop to make it aligned. */
12875 static void
12876 vr4130_align_insns (void)
12878 struct mips_sim state;
12879 rtx insn, subinsn, last, last2, next;
12880 bool aligned_p;
12882 dfa_start ();
12884 /* LAST is the last instruction before INSN to have a nonzero length.
12885 LAST2 is the last such instruction before LAST. */
12886 last = 0;
12887 last2 = 0;
12889 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
12890 aligned_p = true;
12892 mips_sim_init (&state, alloca (state_size ()));
12893 for (insn = get_insns (); insn != 0; insn = next)
12895 unsigned int length;
12897 next = NEXT_INSN (insn);
12899 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
12900 This isn't really related to the alignment pass, but we do it on
12901 the fly to avoid a separate instruction walk. */
12902 vr4130_avoid_branch_rt_conflict (insn);
12904 if (USEFUL_INSN_P (insn))
12905 FOR_EACH_SUBINSN (subinsn, insn)
12907 mips_sim_wait_insn (&state, subinsn);
12909 /* If we want this instruction to issue in parallel with the
12910 previous one, make sure that the previous instruction is
12911 aligned. There are several reasons why this isn't worthwhile
12912 when the second instruction is a call:
12914 	     - Calls are less likely to be performance critical;
12915 	     - There's a good chance that the delay slot can execute
12916 	       in parallel with the call;
12917 - The return address would then be unaligned.
12919 In general, if we're going to insert a nop between instructions
12920 X and Y, it's better to insert it immediately after X. That
12921 way, if the nop makes Y aligned, it will also align any labels
12922 between X and Y. */
12923 if (state.insns_left != state.issue_rate
12924 && !CALL_P (subinsn))
12926 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
12928 /* SUBINSN is the first instruction in INSN and INSN is
12929 aligned. We want to align the previous instruction
12930 instead, so insert a nop between LAST2 and LAST.
12932 Note that LAST could be either a single instruction
12933 or a branch with a delay slot. In the latter case,
12934 LAST, like INSN, is already aligned, but the delay
12935 slot must have some extra delay that stops it from
12936 issuing at the same time as the branch. We therefore
12937 insert a nop before the branch in order to align its
12938 delay slot. */
12939 emit_insn_after (gen_nop (), last2);
12940 aligned_p = false;
12942 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
12944 /* SUBINSN is the delay slot of INSN, but INSN is
12945 currently unaligned. Insert a nop between
12946 LAST and INSN to align it. */
12947 emit_insn_after (gen_nop (), last);
12948 aligned_p = true;
12951 mips_sim_issue_insn (&state, subinsn);
12953 mips_sim_finish_insn (&state, insn);
12955 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
12956 length = get_attr_length (insn);
12957 if (length > 0)
12959 /* If the instruction is an asm statement or multi-instruction
12960 	     mips.md pattern, the length is only an estimate.  Insert an
12961 8 byte alignment after it so that the following instructions
12962 can be handled correctly. */
12963 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
12964 && (recog_memoized (insn) < 0 || length >= 8))
12966 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
12967 next = NEXT_INSN (next);
12968 mips_sim_next_cycle (&state);
12969 aligned_p = true;
12971 else if (length & 4)
12972 aligned_p = !aligned_p;
12973 last2 = last;
12974 last = insn;
12977 /* See whether INSN is an aligned label. */
12978 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
12979 aligned_p = true;
12981 dfa_finish ();
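/* A toy model of the ALIGNED_P bookkeeping in vr4130_align_insns above:
   given the byte lengths of the instructions emitted so far (each a
   multiple of 4), decide whether the next instruction starts on an 8-byte
   boundary.  LENGTHS is a hypothetical array; the body mirrors the
   "length & 4" parity flip used by the pass.  */
static int
sketch_next_insn_aligned_p (const unsigned int *lengths, int n_insns)
{
  int aligned_p = 1;            /* Function entry is assumed aligned.  */
  int i;

  for (i = 0; i < n_insns; i++)
    if (lengths[i] & 4)
      aligned_p = !aligned_p;   /* An odd number of words flips parity.  */
  return aligned_p;
}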
12984 /* This structure records that the current function has a LO_SUM
12985 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
12986 the largest offset applied to BASE by all such LO_SUMs. */
12987 struct mips_lo_sum_offset {
12988 rtx base;
12989 HOST_WIDE_INT offset;
12992 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
12994 static hashval_t
12995 mips_hash_base (rtx base)
12997 int do_not_record_p;
12999 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
13002 /* Hash-table callbacks for mips_lo_sum_offsets. */
13004 static hashval_t
13005 mips_lo_sum_offset_hash (const void *entry)
13007 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
13010 static int
13011 mips_lo_sum_offset_eq (const void *entry, const void *value)
13013 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
13014 (const_rtx) value);
13017 /* Look up symbolic constant X in HTAB, which is a hash table of
13018 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
13019 paired with a recorded LO_SUM, otherwise record X in the table. */
13021 static bool
13022 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
13024 rtx base, offset;
13025 void **slot;
13026 struct mips_lo_sum_offset *entry;
13028 /* Split X into a base and offset. */
13029 split_const (x, &base, &offset);
13030 if (UNSPEC_ADDRESS_P (base))
13031 base = UNSPEC_ADDRESS (base);
13033 /* Look up the base in the hash table. */
13034 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
13035 if (slot == NULL)
13036 return false;
13038 entry = (struct mips_lo_sum_offset *) *slot;
13039 if (option == INSERT)
13041 if (entry == NULL)
13043 entry = XNEW (struct mips_lo_sum_offset);
13044 entry->base = base;
13045 entry->offset = INTVAL (offset);
13046 *slot = entry;
13048 else
13050 if (INTVAL (offset) > entry->offset)
13051 entry->offset = INTVAL (offset);
13054 return INTVAL (offset) <= entry->offset;
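/* A simplified model of the lookup above, using a plain array indexed by a
   small integer BASE id instead of a hash table of rtx bases.  With INSERT
   nonzero it records the largest offset seen for BASE; with INSERT zero it
   answers the question mips_orphaned_high_part_p needs answered: is OFFSET
   covered by some recorded LO_SUM of the same base?  */
struct sketch_lo_sum_entry
{
  int used;
  long max_offset;
};

static int
sketch_lo_sum_lookup (struct sketch_lo_sum_entry *table, int base,
                      long offset, int insert)
{
  struct sketch_lo_sum_entry *entry = &table[base];

  if (insert)
    {
      if (!entry->used || offset > entry->max_offset)
        {
          entry->used = 1;
          entry->max_offset = offset;
        }
    }
  else if (!entry->used)
    return 0;
  return offset <= entry->max_offset;
}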
13057 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
13058 Record every LO_SUM in *LOC. */
13060 static int
13061 mips_record_lo_sum (rtx *loc, void *data)
13063 if (GET_CODE (*loc) == LO_SUM)
13064 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
13065 return 0;
13068 /* Return true if INSN is a SET of an orphaned high-part relocation.
13069 HTAB is a hash table of mips_lo_sum_offsets that describes all the
13070 LO_SUMs in the current function. */
13072 static bool
13073 mips_orphaned_high_part_p (htab_t htab, rtx insn)
13075 enum mips_symbol_type type;
13076 rtx x, set;
13078 set = single_set (insn);
13079 if (set)
13081 /* Check for %his. */
13082 x = SET_SRC (set);
13083 if (GET_CODE (x) == HIGH
13084 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
13085 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
13087 /* Check for local %gots (and %got_pages, which is redundant but OK). */
13088 if (GET_CODE (x) == UNSPEC
13089 && XINT (x, 1) == UNSPEC_LOAD_GOT
13090 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
13091 SYMBOL_CONTEXT_LEA, &type)
13092 && type == SYMBOL_GOTOFF_PAGE)
13093 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
13095 return false;
13098 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
13099 INSN and a previous instruction, avoid it by inserting nops after
13100 instruction AFTER.
13102 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
13103 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
13104 before using the value of that register. *HILO_DELAY counts the
13105 number of instructions since the last hilo hazard (that is,
13106 the number of instructions since the last MFLO or MFHI).
13108 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
13109 for the next instruction.
13111 LO_REG is an rtx for the LO register, used in dependence checking. */
13113 static void
13114 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
13115 rtx *delayed_reg, rtx lo_reg)
13117 rtx pattern, set;
13118 int nops, ninsns;
13120 pattern = PATTERN (insn);
13122 /* Do not put the whole function in .set noreorder if it contains
13123 an asm statement. We don't know whether there will be hazards
13124 between the asm statement and the gcc-generated code. */
13125 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
13126 cfun->machine->all_noreorder_p = false;
13128 /* Ignore zero-length instructions (barriers and the like). */
13129 ninsns = get_attr_length (insn) / 4;
13130 if (ninsns == 0)
13131 return;
13133 /* Work out how many nops are needed. Note that we only care about
13134 registers that are explicitly mentioned in the instruction's pattern.
13135 It doesn't matter that calls use the argument registers or that they
13136 clobber hi and lo. */
13137 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
13138 nops = 2 - *hilo_delay;
13139 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
13140 nops = 1;
13141 else
13142 nops = 0;
13144 /* Insert the nops between this instruction and the previous one.
13145 Each new nop takes us further from the last hilo hazard. */
13146 *hilo_delay += nops;
13147 while (nops-- > 0)
13148 emit_insn_after (gen_hazard_nop (), after);
13150 /* Set up the state for the next instruction. */
13151 *hilo_delay += ninsns;
13152 *delayed_reg = 0;
13153 if (INSN_CODE (insn) >= 0)
13154 switch (get_attr_hazard (insn))
13156 case HAZARD_NONE:
13157 break;
13159 case HAZARD_HILO:
13160 *hilo_delay = 0;
13161 break;
13163 case HAZARD_DELAY:
13164 set = single_set (insn);
13165 gcc_assert (set);
13166 *delayed_reg = SET_DEST (set);
13167 break;
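/* A stand-alone restatement of the nop computation in mips_avoid_hazard
   above.  SETS_LO and USES_DELAYED_REG are hypothetical flags standing in
   for the reg_set_p / reg_referenced_p tests on the instruction's pattern;
   HILO_DELAY has the same meaning as in the function.  */
static int
sketch_hazard_nops (int hilo_delay, int sets_lo, int uses_delayed_reg)
{
  /* An instruction that sets LO must come at least two instructions
     after the last MFLO or MFHI.  */
  if (hilo_delay < 2 && sets_lo)
    return 2 - hilo_delay;
  /* An instruction that reads *DELAYED_REG must wait one cycle.  */
  if (uses_delayed_reg)
    return 1;
  return 0;
}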
13171 /* Go through the instruction stream and insert nops where necessary.
13172 Also delete any high-part relocations whose partnering low parts
13173 are now all dead. See if the whole function can then be put into
13174 .set noreorder and .set nomacro. */
13176 static void
13177 mips_reorg_process_insns (void)
13179 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
13180 int hilo_delay;
13181 htab_t htab;
13183 /* Force all instructions to be split into their final form. */
13184 split_all_insns_noflow ();
13186 /* Recalculate instruction lengths without taking nops into account. */
13187 cfun->machine->ignore_hazard_length_p = true;
13188 shorten_branches (get_insns ());
13190 cfun->machine->all_noreorder_p = true;
13192 /* We don't track MIPS16 PC-relative offsets closely enough to make
13193      a good job of ".set noreorder" code in MIPS16 mode.  */
13194 if (TARGET_MIPS16)
13195 cfun->machine->all_noreorder_p = false;
13197 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
13198 if (!TARGET_EXPLICIT_RELOCS)
13199 cfun->machine->all_noreorder_p = false;
13201 /* Profiled functions can't be all noreorder because the profiler
13202 support uses assembler macros. */
13203 if (crtl->profile)
13204 cfun->machine->all_noreorder_p = false;
13206 /* Code compiled with -mfix-vr4120 can't be all noreorder because
13207 we rely on the assembler to work around some errata. */
13208 if (TARGET_FIX_VR4120)
13209 cfun->machine->all_noreorder_p = false;
13211 /* The same is true for -mfix-vr4130 if we might generate MFLO or
13212 MFHI instructions. Note that we avoid using MFLO and MFHI if
13213 the VR4130 MACC and DMACC instructions are available instead;
13214 see the *mfhilo_{si,di}_macc patterns. */
13215 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
13216 cfun->machine->all_noreorder_p = false;
13218 htab = htab_create (37, mips_lo_sum_offset_hash,
13219 mips_lo_sum_offset_eq, free);
13221 /* Make a first pass over the instructions, recording all the LO_SUMs. */
13222 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
13223 FOR_EACH_SUBINSN (subinsn, insn)
13224 if (INSN_P (subinsn))
13225 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
13227 last_insn = 0;
13228 hilo_delay = 2;
13229 delayed_reg = 0;
13230 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
13232 /* Make a second pass over the instructions. Delete orphaned
13233 high-part relocations or turn them into NOPs. Avoid hazards
13234 by inserting NOPs. */
13235 for (insn = get_insns (); insn != 0; insn = next_insn)
13237 next_insn = NEXT_INSN (insn);
13238 if (INSN_P (insn))
13240 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
13242 /* If we find an orphaned high-part relocation in a delay
13243 slot, it's easier to turn that instruction into a NOP than
13244 to delete it. The delay slot will be a NOP either way. */
13245 FOR_EACH_SUBINSN (subinsn, insn)
13246 if (INSN_P (subinsn))
13248 if (mips_orphaned_high_part_p (htab, subinsn))
13250 PATTERN (subinsn) = gen_nop ();
13251 INSN_CODE (subinsn) = CODE_FOR_nop;
13253 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
13254 &delayed_reg, lo_reg);
13256 last_insn = insn;
13258 else
13260 /* INSN is a single instruction. Delete it if it's an
13261 orphaned high-part relocation. */
13262 if (mips_orphaned_high_part_p (htab, insn))
13263 delete_insn (insn);
13264 /* Also delete cache barriers if the last instruction
13265 was an annulled branch. INSN will not be speculatively
13266 executed. */
13267 else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
13268 && last_insn
13269 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
13270 delete_insn (insn);
13271 else
13273 mips_avoid_hazard (last_insn, insn, &hilo_delay,
13274 &delayed_reg, lo_reg);
13275 last_insn = insn;
13281 htab_delete (htab);
13284 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
13286 static void
13287 mips_reorg (void)
13289 mips16_lay_out_constants ();
13290 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
13291 r10k_insert_cache_barriers ();
13292 if (mips_base_delayed_branch)
13293 dbr_schedule (get_insns ());
13294 mips_reorg_process_insns ();
13295 if (!TARGET_MIPS16
13296 && TARGET_EXPLICIT_RELOCS
13297 && TUNE_MIPS4130
13298 && TARGET_VR4130_ALIGN)
13299 vr4130_align_insns ();
13302 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
13303 in order to avoid duplicating too much logic from elsewhere. */
13305 static void
13306 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
13307 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13308 tree function)
13310 rtx this_rtx, temp1, temp2, insn, fnaddr;
13311 bool use_sibcall_p;
13313 /* Pretend to be a post-reload pass while generating rtl. */
13314 reload_completed = 1;
13316 /* Mark the end of the (empty) prologue. */
13317 emit_note (NOTE_INSN_PROLOGUE_END);
13319 /* Determine if we can use a sibcall to call FUNCTION directly. */
13320 fnaddr = XEXP (DECL_RTL (function), 0);
13321 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
13322 && const_call_insn_operand (fnaddr, Pmode));
13324 /* Determine if we need to load FNADDR from the GOT. */
13325 if (!use_sibcall_p
13326 && (mips_got_symbol_type_p
13327 (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
13329 /* Pick a global pointer. Use a call-clobbered register if
13330 TARGET_CALL_SAVED_GP. */
13331 cfun->machine->global_pointer
13332 = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
13333 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
13335 /* Set up the global pointer for n32 or n64 abicalls. */
13336 mips_emit_loadgp ();
13339 /* We need two temporary registers in some cases. */
13340 temp1 = gen_rtx_REG (Pmode, 2);
13341 temp2 = gen_rtx_REG (Pmode, 3);
13343 /* Find out which register contains the "this" pointer. */
13344 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13345 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
13346 else
13347 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
13349 /* Add DELTA to THIS_RTX. */
13350 if (delta != 0)
13352 rtx offset = GEN_INT (delta);
13353 if (!SMALL_OPERAND (delta))
13355 mips_emit_move (temp1, offset);
13356 offset = temp1;
13358 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
13361 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
13362 if (vcall_offset != 0)
13364 rtx addr;
13366 /* Set TEMP1 to *THIS_RTX. */
13367 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
13369 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
13370 addr = mips_add_offset (temp2, temp1, vcall_offset);
13372 /* Load the offset and add it to THIS_RTX. */
13373 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
13374 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
13377 /* Jump to the target function. Use a sibcall if direct jumps are
13378 allowed, otherwise load the address into a register first. */
13379 if (use_sibcall_p)
13381 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
13382 SIBLING_CALL_P (insn) = 1;
13384 else
13386 /* This is messy. GAS treats "la $25,foo" as part of a call
13387 sequence and may allow a global "foo" to be lazily bound.
13388 The general move patterns therefore reject this combination.
13390 In this context, lazy binding would actually be OK
13391 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
13392 TARGET_CALL_SAVED_GP; see mips_load_call_address.
13393 We must therefore load the address via a temporary
13394 register if mips_dangerous_for_la25_p.
13396 If we jump to the temporary register rather than $25,
13397 the assembler can use the move insn to fill the jump's
13398 delay slot.
13400 We can use the same technique for MIPS16 code, where $25
13401 is not a valid JR register. */
13402 if (TARGET_USE_PIC_FN_ADDR_REG
13403 && !TARGET_MIPS16
13404 && !mips_dangerous_for_la25_p (fnaddr))
13405 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
13406 mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);
13408 if (TARGET_USE_PIC_FN_ADDR_REG
13409 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
13410 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
13411 emit_jump_insn (gen_indirect_jump (temp1));
13414 /* Run just enough of rest_of_compilation. This sequence was
13415 "borrowed" from alpha.c. */
13416 insn = get_insns ();
13417 insn_locators_alloc ();
13418 split_all_insns_noflow ();
13419 mips16_lay_out_constants ();
13420 shorten_branches (insn);
13421 final_start_function (insn, file, 1);
13422 final (insn, file, 1);
13423 final_end_function ();
13424 free_after_compilation (cfun);
13426 /* Clean up the vars set above. Note that final_end_function resets
13427 the global pointer for us. */
13428 reload_completed = 0;
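/* The rtl emitted by mips_output_mi_thunk above implements, in effect, the
   following pointer adjustment before tail-calling FUNCTION.  THIS_PTR,
   DELTA and VCALL_OFFSET correspond to the hook's arguments; the casts and
   the use of long for the loaded value only model the Pmode loads done
   through TEMP1 and TEMP2.  */
static void *
sketch_thunk_this_adjustment (char *this_ptr, long delta, long vcall_offset)
{
  this_ptr += delta;
  if (vcall_offset != 0)
    {
      /* *THIS_PTR is the vtable pointer; add the value stored
         VCALL_OFFSET bytes into the vtable.  */
      char *vptr = *(char **) this_ptr;

      this_ptr += *(long *) (vptr + vcall_offset);
    }
  return this_ptr;
}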
13431 /* The last argument passed to mips_set_mips16_mode, or negative if the
13432 function hasn't been called yet.
13434 There are two copies of this information. One is saved and restored
13435 by the PCH process while the other is specific to this compiler
13436 invocation. The information calculated by mips_set_mips16_mode
13437 is invalid unless the two variables are the same. */
13438 static int was_mips16_p = -1;
13439 static GTY(()) int was_mips16_pch_p = -1;
13441 /* Set up the target-dependent global state so that it matches the
13442 current function's ISA mode. */
13444 static void
13445 mips_set_mips16_mode (int mips16_p)
13447 if (mips16_p == was_mips16_p
13448 && mips16_p == was_mips16_pch_p)
13449 return;
13451 /* Restore base settings of various flags. */
13452 target_flags = mips_base_target_flags;
13453 flag_schedule_insns = mips_base_schedule_insns;
13454 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
13455 flag_move_loop_invariants = mips_base_move_loop_invariants;
13456 align_loops = mips_base_align_loops;
13457 align_jumps = mips_base_align_jumps;
13458 align_functions = mips_base_align_functions;
13460 if (mips16_p)
13462 /* Switch to MIPS16 mode. */
13463 target_flags |= MASK_MIPS16;
13465 /* Don't run the scheduler before reload, since it tends to
13466 increase register pressure. */
13467 flag_schedule_insns = 0;
13469 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
13470 the whole function to be in a single section. */
13471 flag_reorder_blocks_and_partition = 0;
13473 /* Don't move loop invariants, because it tends to increase
13474 register pressure. It also introduces an extra move in cases
13475 where the constant is the first operand in a two-operand binary
13476 	 instruction, or when it forms a register argument to a function
13477 call. */
13478 flag_move_loop_invariants = 0;
13480 target_flags |= MASK_EXPLICIT_RELOCS;
13482 /* Experiments suggest we get the best overall section-anchor
13483 results from using the range of an unextended LW or SW. Code
13484 that makes heavy use of byte or short accesses can do better
13485 with ranges of 0...31 and 0...63 respectively, but most code is
13486 sensitive to the range of LW and SW instead. */
13487 targetm.min_anchor_offset = 0;
13488 targetm.max_anchor_offset = 127;
13490 if (flag_pic && !TARGET_OLDABI)
13491 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
13493 if (TARGET_XGOT)
13494 sorry ("MIPS16 -mxgot code");
13496 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
13497 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
13499 else
13501 /* Switch to normal (non-MIPS16) mode. */
13502 target_flags &= ~MASK_MIPS16;
13504 /* Provide default values for align_* for 64-bit targets. */
13505 if (TARGET_64BIT)
13507 if (align_loops == 0)
13508 align_loops = 8;
13509 if (align_jumps == 0)
13510 align_jumps = 8;
13511 if (align_functions == 0)
13512 align_functions = 8;
13515 targetm.min_anchor_offset = -32768;
13516 targetm.max_anchor_offset = 32767;
13519 /* (Re)initialize MIPS target internals for new ISA. */
13520 mips_init_relocs ();
13522 if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
13523 /* Reinitialize target-dependent state. */
13524 target_reinit ();
13526 was_mips16_p = mips16_p;
13527 was_mips16_pch_p = mips16_p;
13530 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
13531 function should use the MIPS16 ISA and switch modes accordingly. */
13533 static void
13534 mips_set_current_function (tree fndecl)
13536 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
13539 /* Allocate a chunk of memory for per-function machine-dependent data. */
13541 static struct machine_function *
13542 mips_init_machine_status (void)
13544 return ((struct machine_function *)
13545 ggc_alloc_cleared (sizeof (struct machine_function)));
13548 /* Return the processor associated with the given ISA level, or null
13549 if the ISA isn't valid. */
13551 static const struct mips_cpu_info *
13552 mips_cpu_info_from_isa (int isa)
13554 unsigned int i;
13556 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
13557 if (mips_cpu_info_table[i].isa == isa)
13558 return mips_cpu_info_table + i;
13560 return NULL;
13563 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
13564 with a final "000" replaced by "k". Ignore case.
13566 Note: this function is shared between GCC and GAS. */
13568 static bool
13569 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
13571 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
13572 given++, canonical++;
13574 return ((*given == 0 && *canonical == 0)
13575 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
13578 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
13579 CPU name. We've traditionally allowed a lot of variation here.
13581 Note: this function is shared between GCC and GAS. */
13583 static bool
13584 mips_matching_cpu_name_p (const char *canonical, const char *given)
13586 /* First see if the name matches exactly, or with a final "000"
13587 turned into "k". */
13588 if (mips_strict_matching_cpu_name_p (canonical, given))
13589 return true;
13591 /* If not, try comparing based on numerical designation alone.
13592 See if GIVEN is an unadorned number, or 'r' followed by a number. */
13593 if (TOLOWER (*given) == 'r')
13594 given++;
13595 if (!ISDIGIT (*given))
13596 return false;
13598 /* Skip over some well-known prefixes in the canonical name,
13599 hoping to find a number there too. */
13600 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
13601 canonical += 2;
13602 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
13603 canonical += 2;
13604 else if (TOLOWER (canonical[0]) == 'r')
13605 canonical += 1;
13607 return mips_strict_matching_cpu_name_p (canonical, given);
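/* Some concrete pairs accepted by the matching rules above, assuming
   "r4000", "vr4100" and "4kc" are the canonical spellings in
   mips_cpu_info_table (the pairs are illustrative, not a dump of the
   table).  */
static const struct
{
  const char *canonical;
  const char *given;
} sketch_cpu_name_matches[] = {
  { "r4000",  "r4000" },   /* Exact match.  */
  { "r4000",  "r4k"   },   /* Trailing "000" spelled as "k".  */
  { "r4000",  "4000"  },   /* Bare number; canonical's "r" is skipped.  */
  { "vr4100", "r4100" },   /* "r" stripped from GIVEN, "vr" from CANONICAL.  */
  { "vr4100", "4100"  },   /* Number-only form.  */
  { "4kc",    "4kc"   }    /* Names that are already exact.  */
};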
13610 /* Return the mips_cpu_info entry for the processor or ISA given
13611 by CPU_STRING. Return null if the string isn't recognized.
13613 A similar function exists in GAS. */
13615 static const struct mips_cpu_info *
13616 mips_parse_cpu (const char *cpu_string)
13618 unsigned int i;
13619 const char *s;
13621 /* In the past, we allowed upper-case CPU names, but it doesn't
13622 work well with the multilib machinery. */
13623 for (s = cpu_string; *s != 0; s++)
13624 if (ISUPPER (*s))
13626 warning (0, "CPU names must be lower case");
13627 break;
13630 /* 'from-abi' selects the most compatible architecture for the given
13631 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
13632 EABIs, we have to decide whether we're using the 32-bit or 64-bit
13633 version. */
13634 if (strcasecmp (cpu_string, "from-abi") == 0)
13635 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
13636 : ABI_NEEDS_64BIT_REGS ? 3
13637 : (TARGET_64BIT ? 3 : 1));
13639 /* 'default' has traditionally been a no-op. Probably not very useful. */
13640 if (strcasecmp (cpu_string, "default") == 0)
13641 return NULL;
13643 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
13644 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
13645 return mips_cpu_info_table + i;
13647 return NULL;
13650 /* Set up globals to generate code for the ISA or processor
13651 described by INFO. */
13653 static void
13654 mips_set_architecture (const struct mips_cpu_info *info)
13656 if (info != 0)
13658 mips_arch_info = info;
13659 mips_arch = info->cpu;
13660 mips_isa = info->isa;
13664 /* Likewise for tuning. */
13666 static void
13667 mips_set_tune (const struct mips_cpu_info *info)
13669 if (info != 0)
13671 mips_tune_info = info;
13672 mips_tune = info->cpu;
13676 /* Implement TARGET_HANDLE_OPTION. */
13678 static bool
13679 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
13681 switch (code)
13683 case OPT_mabi_:
13684 if (strcmp (arg, "32") == 0)
13685 mips_abi = ABI_32;
13686 else if (strcmp (arg, "o64") == 0)
13687 mips_abi = ABI_O64;
13688 else if (strcmp (arg, "n32") == 0)
13689 mips_abi = ABI_N32;
13690 else if (strcmp (arg, "64") == 0)
13691 mips_abi = ABI_64;
13692 else if (strcmp (arg, "eabi") == 0)
13693 mips_abi = ABI_EABI;
13694 else
13695 return false;
13696 return true;
13698 case OPT_march_:
13699 case OPT_mtune_:
13700 return mips_parse_cpu (arg) != 0;
13702 case OPT_mips:
13703 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
13704 return mips_isa_option_info != 0;
13706 case OPT_mno_flush_func:
13707 mips_cache_flush_func = NULL;
13708 return true;
13710 case OPT_mcode_readable_:
13711 if (strcmp (arg, "yes") == 0)
13712 mips_code_readable = CODE_READABLE_YES;
13713 else if (strcmp (arg, "pcrel") == 0)
13714 mips_code_readable = CODE_READABLE_PCREL;
13715 else if (strcmp (arg, "no") == 0)
13716 mips_code_readable = CODE_READABLE_NO;
13717 else
13718 return false;
13719 return true;
13721 case OPT_mr10k_cache_barrier_:
13722 if (strcmp (arg, "load-store") == 0)
13723 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_LOAD_STORE;
13724 else if (strcmp (arg, "store") == 0)
13725 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_STORE;
13726 else if (strcmp (arg, "none") == 0)
13727 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
13728 else
13729 return false;
13730 return true;
13732 default:
13733 return true;
13737 /* Implement OVERRIDE_OPTIONS. */
13739 void
13740 mips_override_options (void)
13742 int i, start, regno, mode;
13744 /* Process flags as though we were generating non-MIPS16 code. */
13745 mips_base_mips16 = TARGET_MIPS16;
13746 target_flags &= ~MASK_MIPS16;
13748 #ifdef SUBTARGET_OVERRIDE_OPTIONS
13749 SUBTARGET_OVERRIDE_OPTIONS;
13750 #endif
13752 /* Set the small data limit. */
13753 mips_small_data_threshold = (g_switch_set
13754 ? g_switch_value
13755 : MIPS_DEFAULT_GVALUE);
13757 /* The following code determines the architecture and register size.
13758 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
13759 The GAS and GCC code should be kept in sync as much as possible. */
13761 if (mips_arch_string != 0)
13762 mips_set_architecture (mips_parse_cpu (mips_arch_string));
13764 if (mips_isa_option_info != 0)
13766 if (mips_arch_info == 0)
13767 mips_set_architecture (mips_isa_option_info);
13768 else if (mips_arch_info->isa != mips_isa_option_info->isa)
13769 error ("%<-%s%> conflicts with the other architecture options, "
13770 "which specify a %s processor",
13771 mips_isa_option_info->name,
13772 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
13775 if (mips_arch_info == 0)
13777 #ifdef MIPS_CPU_STRING_DEFAULT
13778 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
13779 #else
13780 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
13781 #endif
13784 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
13785 error ("%<-march=%s%> is not compatible with the selected ABI",
13786 mips_arch_info->name);
13788 /* Optimize for mips_arch, unless -mtune selects a different processor. */
13789 if (mips_tune_string != 0)
13790 mips_set_tune (mips_parse_cpu (mips_tune_string));
13792 if (mips_tune_info == 0)
13793 mips_set_tune (mips_arch_info);
13795 if ((target_flags_explicit & MASK_64BIT) != 0)
13797 /* The user specified the size of the integer registers. Make sure
13798 it agrees with the ABI and ISA. */
13799 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
13800 error ("%<-mgp64%> used with a 32-bit processor");
13801 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
13802 error ("%<-mgp32%> used with a 64-bit ABI");
13803 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
13804 error ("%<-mgp64%> used with a 32-bit ABI");
13806 else
13808 /* Infer the integer register size from the ABI and processor.
13809 Restrict ourselves to 32-bit registers if that's all the
13810 processor has, or if the ABI cannot handle 64-bit registers. */
13811 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
13812 target_flags &= ~MASK_64BIT;
13813 else
13814 target_flags |= MASK_64BIT;
13817 if ((target_flags_explicit & MASK_FLOAT64) != 0)
13819 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
13820 error ("unsupported combination: %s", "-mfp64 -msingle-float");
13821 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
13822 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
13823 else if (!TARGET_64BIT && TARGET_FLOAT64)
13825 if (!ISA_HAS_MXHC1)
13826 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
13827 " the target supports the mfhc1 and mthc1 instructions");
13828 else if (mips_abi != ABI_32)
13829 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
13830 " the o32 ABI");
13833 else
13835 /* -msingle-float selects 32-bit float registers. Otherwise the
13836 float registers should be the same size as the integer ones. */
13837 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
13838 target_flags |= MASK_FLOAT64;
13839 else
13840 target_flags &= ~MASK_FLOAT64;
13843 /* End of code shared with GAS. */
13845 /* If no -mlong* option was given, infer it from the other options. */
13846 if ((target_flags_explicit & MASK_LONG64) == 0)
13848 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
13849 target_flags |= MASK_LONG64;
13850 else
13851 target_flags &= ~MASK_LONG64;
13854 if (!TARGET_OLDABI)
13855 flag_pcc_struct_return = 0;
13857 /* Decide which rtx_costs structure to use. */
13858 if (optimize_size)
13859 mips_cost = &mips_rtx_cost_optimize_size;
13860 else
13861 mips_cost = &mips_rtx_cost_data[mips_tune];
13863 /* If the user hasn't specified a branch cost, use the processor's
13864 default. */
13865 if (mips_branch_cost == 0)
13866 mips_branch_cost = mips_cost->branch_cost;
13868 /* If neither -mbranch-likely nor -mno-branch-likely was given
13869 on the command line, set MASK_BRANCHLIKELY based on the target
13870 architecture and tuning flags. Annulled delay slots are a
13871 size win, so we only consider the processor-specific tuning
13872 for !optimize_size. */
13873 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
13875 if (ISA_HAS_BRANCHLIKELY
13876 && (optimize_size
13877 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
13878 target_flags |= MASK_BRANCHLIKELY;
13879 else
13880 target_flags &= ~MASK_BRANCHLIKELY;
13882 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
13883 warning (0, "the %qs architecture does not support branch-likely"
13884 " instructions", mips_arch_info->name);
13886 /* The effect of -mabicalls isn't defined for the EABI. */
13887 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
13889 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
13890 target_flags &= ~MASK_ABICALLS;
13893 if (TARGET_ABICALLS_PIC2)
13894 /* We need to set flag_pic for executables as well as DSOs
13895 because we may reference symbols that are not defined in
13896 the final executable. (MIPS does not use things like
13897 copy relocs, for example.)
13899 There is a body of code that uses __PIC__ to distinguish
13900 between -mabicalls and -mno-abicalls code. The non-__PIC__
13901 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
13902 long as any indirect jumps use $25. */
13903 flag_pic = 1;
13905 /* -mvr4130-align is a "speed over size" optimization: it usually produces
13906 faster code, but at the expense of more nops. Enable it at -O3 and
13907 above. */
13908 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
13909 target_flags |= MASK_VR4130_ALIGN;
13911 /* Prefer a call to memcpy over inline code when optimizing for size,
13912 though see MOVE_RATIO in mips.h. */
13913 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
13914 target_flags |= MASK_MEMCPY;
13916 /* If we have a nonzero small-data limit, check that the -mgpopt
13917 setting is consistent with the other target flags. */
13918 if (mips_small_data_threshold > 0)
13920 if (!TARGET_GPOPT)
13922 if (!TARGET_EXPLICIT_RELOCS)
13923 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
13925 TARGET_LOCAL_SDATA = false;
13926 TARGET_EXTERN_SDATA = false;
13928 else
13930 if (TARGET_VXWORKS_RTP)
13931 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
13933 if (TARGET_ABICALLS)
13934 warning (0, "cannot use small-data accesses for %qs",
13935 "-mabicalls");
13939 #ifdef MIPS_TFMODE_FORMAT
13940 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
13941 #endif
13943 /* Make sure that the user didn't turn off paired single support when
13944 MIPS-3D support is requested. */
13945 if (TARGET_MIPS3D
13946 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
13947 && !TARGET_PAIRED_SINGLE_FLOAT)
13948 error ("%<-mips3d%> requires %<-mpaired-single%>");
13950 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
13951 if (TARGET_MIPS3D)
13952 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
13954 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
13955 and TARGET_HARD_FLOAT_ABI are both true. */
13956 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
13957 error ("%qs must be used with %qs",
13958 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
13959 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
13961 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
13962 enabled. */
13963 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
13964 warning (0, "the %qs architecture does not support paired-single"
13965 " instructions", mips_arch_info->name);
13967 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
13968 && !TARGET_CACHE_BUILTIN)
13970 error ("%qs requires a target that provides the %qs instruction",
13971 "-mr10k-cache-barrier", "cache");
13972 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
13975 /* If TARGET_DSPR2, enable MASK_DSP. */
13976 if (TARGET_DSPR2)
13977 target_flags |= MASK_DSP;
13979 mips_init_print_operand_punct ();
13981 /* Set up array to map GCC register number to debug register number.
13982 Ignore the special purpose register numbers. */
13984 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
13986 mips_dbx_regno[i] = INVALID_REGNUM;
13987 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
13988 mips_dwarf_regno[i] = i;
13989 else
13990 mips_dwarf_regno[i] = INVALID_REGNUM;
13993 start = GP_DBX_FIRST - GP_REG_FIRST;
13994 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
13995 mips_dbx_regno[i] = i + start;
13997 start = FP_DBX_FIRST - FP_REG_FIRST;
13998 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
13999 mips_dbx_regno[i] = i + start;
14001 /* Accumulator debug registers use big-endian ordering. */
14002 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
14003 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
14004 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
14005 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
14006 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
14008 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
14009 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
14012 /* Set up mips_hard_regno_mode_ok. */
14013 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
14014 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
14015 mips_hard_regno_mode_ok[(int)mode][regno]
14016 = mips_hard_regno_mode_ok_p (regno, mode);
14018 /* Function to allocate machine-dependent function status. */
14019 init_machine_status = &mips_init_machine_status;
14021 /* Default to working around R4000 errata only if the processor
14022 was selected explicitly. */
14023 if ((target_flags_explicit & MASK_FIX_R4000) == 0
14024 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
14025 target_flags |= MASK_FIX_R4000;
14027 /* Default to working around R4400 errata only if the processor
14028 was selected explicitly. */
14029 if ((target_flags_explicit & MASK_FIX_R4400) == 0
14030 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
14031 target_flags |= MASK_FIX_R4400;
14033 /* Default to working around R10000 errata only if the processor
14034 was selected explicitly. */
14035 if ((target_flags_explicit & MASK_FIX_R10000) == 0
14036 && mips_matching_cpu_name_p (mips_arch_info->name, "r10000"))
14037 target_flags |= MASK_FIX_R10000;
14039   /* Make sure that branch-likely instructions are available when using
14040 -mfix-r10000. The instructions are not available if either:
14042 1. -mno-branch-likely was passed.
14043 2. The selected ISA does not support branch-likely and
14044 the command line does not include -mbranch-likely. */
14045 if (TARGET_FIX_R10000
14046 && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
14047 ? !ISA_HAS_BRANCHLIKELY
14048 : !TARGET_BRANCHLIKELY))
14049 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
14051 /* Save base state of options. */
14052 mips_base_target_flags = target_flags;
14053 mips_base_delayed_branch = flag_delayed_branch;
14054 mips_base_schedule_insns = flag_schedule_insns;
14055 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
14056 mips_base_move_loop_invariants = flag_move_loop_invariants;
14057 mips_base_align_loops = align_loops;
14058 mips_base_align_jumps = align_jumps;
14059 mips_base_align_functions = align_functions;
14061 /* Now select the ISA mode.
14063 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
14064 MIPS16 mode afterwards if need be. */
14065 mips_set_mips16_mode (false);
14067 /* We call dbr_schedule from within mips_reorg. */
14068 flag_delayed_branch = 0;
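/* A condensed restatement of the register-size inference near the top of
   mips_override_options above, for the case in which neither -mgp32 nor
   -mgp64 was given: fall back to 32-bit registers if the ABI requires them
   or the processor lacks 64-bit ones, otherwise use 64-bit registers.  The
   two parameters are hypothetical stand-ins for ABI_NEEDS_32BIT_REGS and
   ISA_HAS_64BIT_REGS.  */
static int
sketch_default_gp64_p (int abi_needs_32bit_regs, int isa_has_64bit_regs)
{
  if (abi_needs_32bit_regs || !isa_has_64bit_regs)
    return 0;   /* target_flags &= ~MASK_64BIT;  */
  return 1;     /* target_flags |= MASK_64BIT;   */
}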
14071 /* Swap the register information for registers I and I + 1, which
14072 currently have the wrong endianness. Note that the registers'
14073 fixedness and call-clobberedness might have been set on the
14074 command line. */
14076 static void
14077 mips_swap_registers (unsigned int i)
14079 int tmpi;
14080 const char *tmps;
14082 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
14083 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
14085 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
14086 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
14087 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
14088 SWAP_STRING (reg_names[i], reg_names[i + 1]);
14090 #undef SWAP_STRING
14091 #undef SWAP_INT
14094 /* Implement CONDITIONAL_REGISTER_USAGE. */
14096 void
14097 mips_conditional_register_usage (void)
14100 if (ISA_HAS_DSP)
14102 /* These DSP control register fields are global. */
14103 global_regs[CCDSP_PO_REGNUM] = 1;
14104 global_regs[CCDSP_SC_REGNUM] = 1;
14106 else
14108 int regno;
14110 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
14111 fixed_regs[regno] = call_used_regs[regno] = 1;
14113 if (!TARGET_HARD_FLOAT)
14115 int regno;
14117 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
14118 fixed_regs[regno] = call_used_regs[regno] = 1;
14119 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
14120 fixed_regs[regno] = call_used_regs[regno] = 1;
14122 else if (! ISA_HAS_8CC)
14124 int regno;
14126 /* We only have a single condition-code register. We implement
14127 this by fixing all the condition-code registers and generating
14128 RTL that refers directly to ST_REG_FIRST. */
14129 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
14130 fixed_regs[regno] = call_used_regs[regno] = 1;
14132 /* In MIPS16 mode, we permit the $t temporary registers to be used
14133 for reload. We prohibit the unused $s registers, since they
14134 are call-saved, and saving them via a MIPS16 register would
14135 probably waste more time than just reloading the value. */
14136 if (TARGET_MIPS16)
14138 fixed_regs[18] = call_used_regs[18] = 1;
14139 fixed_regs[19] = call_used_regs[19] = 1;
14140 fixed_regs[20] = call_used_regs[20] = 1;
14141 fixed_regs[21] = call_used_regs[21] = 1;
14142 fixed_regs[22] = call_used_regs[22] = 1;
14143 fixed_regs[23] = call_used_regs[23] = 1;
14144 fixed_regs[26] = call_used_regs[26] = 1;
14145 fixed_regs[27] = call_used_regs[27] = 1;
14146 fixed_regs[30] = call_used_regs[30] = 1;
14148 /* $f20-$f23 are call-clobbered for n64. */
14149 if (mips_abi == ABI_64)
14151 int regno;
14152 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
14153 call_really_used_regs[regno] = call_used_regs[regno] = 1;
14155 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
14156 for n32. */
14157 if (mips_abi == ABI_N32)
14159 int regno;
14160 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
14161 call_really_used_regs[regno] = call_used_regs[regno] = 1;
14163 /* Make sure that double-register accumulator values are correctly
14164 ordered for the current endianness. */
14165 if (TARGET_LITTLE_ENDIAN)
14167 unsigned int regno;
14169 mips_swap_registers (MD_REG_FIRST);
14170 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
14171 mips_swap_registers (regno);
14175 /* Initialize vector TARGET to VALS. */
14177 void
14178 mips_expand_vector_init (rtx target, rtx vals)
14180 enum machine_mode mode;
14181 enum machine_mode inner;
14182 unsigned int i, n_elts;
14183 rtx mem;
14185 mode = GET_MODE (target);
14186 inner = GET_MODE_INNER (mode);
14187 n_elts = GET_MODE_NUNITS (mode);
14189 gcc_assert (VECTOR_MODE_P (mode));
14191 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
14192 for (i = 0; i < n_elts; i++)
14193 emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
14194 XVECEXP (vals, 0, i));
14196 emit_move_insn (target, mem);
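/* At the rtl level, mips_expand_vector_init above does roughly what the
   following C does: store each element into a stack temporary of the full
   vector mode, then copy the temporary to the target in a single
   vector-mode move.  The two-element float aggregate here is only a
   hypothetical stand-in for the paired-single V2SF mode.  */
struct sketch_v2sf { float e[2]; };

static void
sketch_vector_init (struct sketch_v2sf *target, const float *elements)
{
  struct sketch_v2sf temp;   /* assign_stack_temp equivalent.  */
  int i;

  for (i = 0; i < 2; i++)
    temp.e[i] = elements[i]; /* Per-element emit_move_insn.  */
  *target = temp;            /* Single whole-vector move to TARGET.  */
}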
14199 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
14200 other registers for instructions for which it is possible. This
14201 encourages the compiler to use CMP in cases where an XOR would
14202 require some register shuffling. */
14204 void
14205 mips_order_regs_for_local_alloc (void)
14207 int i;
14209 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14210 reg_alloc_order[i] = i;
14212 if (TARGET_MIPS16)
14214 /* It really doesn't matter where we put register 0, since it is
14215 a fixed register anyhow. */
14216 reg_alloc_order[0] = 24;
14217 reg_alloc_order[24] = 0;
14221 /* Initialize the GCC target structure. */
14222 #undef TARGET_ASM_ALIGNED_HI_OP
14223 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
14224 #undef TARGET_ASM_ALIGNED_SI_OP
14225 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
14226 #undef TARGET_ASM_ALIGNED_DI_OP
14227 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
14229 #undef TARGET_ASM_FUNCTION_PROLOGUE
14230 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
14231 #undef TARGET_ASM_FUNCTION_EPILOGUE
14232 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
14233 #undef TARGET_ASM_SELECT_RTX_SECTION
14234 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
14235 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
14236 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
14238 #undef TARGET_SCHED_INIT
14239 #define TARGET_SCHED_INIT mips_sched_init
14240 #undef TARGET_SCHED_REORDER
14241 #define TARGET_SCHED_REORDER mips_sched_reorder
14242 #undef TARGET_SCHED_REORDER2
14243 #define TARGET_SCHED_REORDER2 mips_sched_reorder
14244 #undef TARGET_SCHED_VARIABLE_ISSUE
14245 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
14246 #undef TARGET_SCHED_ADJUST_COST
14247 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
14248 #undef TARGET_SCHED_ISSUE_RATE
14249 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
14250 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
14251 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
14252 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
14253 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
14254 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14255 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
14256 mips_multipass_dfa_lookahead
14258 #undef TARGET_DEFAULT_TARGET_FLAGS
14259 #define TARGET_DEFAULT_TARGET_FLAGS \
14260 (TARGET_DEFAULT \
14261 | TARGET_CPU_DEFAULT \
14262 | TARGET_ENDIAN_DEFAULT \
14263 | TARGET_FP_EXCEPTIONS_DEFAULT \
14264 | MASK_CHECK_ZERO_DIV \
14265 | MASK_FUSED_MADD)
14266 #undef TARGET_HANDLE_OPTION
14267 #define TARGET_HANDLE_OPTION mips_handle_option
14269 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
14270 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
14272 #undef TARGET_INSERT_ATTRIBUTES
14273 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
14274 #undef TARGET_MERGE_DECL_ATTRIBUTES
14275 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
14276 #undef TARGET_SET_CURRENT_FUNCTION
14277 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
14279 #undef TARGET_VALID_POINTER_MODE
14280 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
14281 #undef TARGET_RTX_COSTS
14282 #define TARGET_RTX_COSTS mips_rtx_costs
14283 #undef TARGET_ADDRESS_COST
14284 #define TARGET_ADDRESS_COST mips_address_cost
14286 #undef TARGET_IN_SMALL_DATA_P
14287 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
14289 #undef TARGET_MACHINE_DEPENDENT_REORG
14290 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
14292 #undef TARGET_ASM_FILE_START
14293 #define TARGET_ASM_FILE_START mips_file_start
14294 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
14295 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
14297 #undef TARGET_INIT_LIBFUNCS
14298 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
14300 #undef TARGET_BUILD_BUILTIN_VA_LIST
14301 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
14302 #undef TARGET_EXPAND_BUILTIN_VA_START
14303 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
14304 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
14305 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
14307 #undef TARGET_PROMOTE_FUNCTION_ARGS
14308 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
14309 #undef TARGET_PROMOTE_FUNCTION_RETURN
14310 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
14311 #undef TARGET_PROMOTE_PROTOTYPES
14312 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
14314 #undef TARGET_RETURN_IN_MEMORY
14315 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
14316 #undef TARGET_RETURN_IN_MSB
14317 #define TARGET_RETURN_IN_MSB mips_return_in_msb
14319 #undef TARGET_ASM_OUTPUT_MI_THUNK
14320 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
14321 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14322 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
14324 #undef TARGET_SETUP_INCOMING_VARARGS
14325 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
14326 #undef TARGET_STRICT_ARGUMENT_NAMING
14327 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
14328 #undef TARGET_MUST_PASS_IN_STACK
14329 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
14330 #undef TARGET_PASS_BY_REFERENCE
14331 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
14332 #undef TARGET_CALLEE_COPIES
14333 #define TARGET_CALLEE_COPIES mips_callee_copies
14334 #undef TARGET_ARG_PARTIAL_BYTES
14335 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
14337 #undef TARGET_MODE_REP_EXTENDED
14338 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
14340 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14341 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
14343 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14344 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
14346 #undef TARGET_INIT_BUILTINS
14347 #define TARGET_INIT_BUILTINS mips_init_builtins
14348 #undef TARGET_EXPAND_BUILTIN
14349 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
14351 #undef TARGET_HAVE_TLS
14352 #define TARGET_HAVE_TLS HAVE_AS_TLS
14354 #undef TARGET_CANNOT_FORCE_CONST_MEM
14355 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
14357 #undef TARGET_ENCODE_SECTION_INFO
14358 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
14360 #undef TARGET_ATTRIBUTE_TABLE
14361 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
14362 /* All our function attributes are related to how out-of-line copies should
14363 be compiled or called. They don't in themselves prevent inlining. */
14364 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14365 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14367 #undef TARGET_EXTRA_LIVE_ON_ENTRY
14368 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
14370 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
14371 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
14372 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
14373 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
14375 #undef TARGET_COMP_TYPE_ATTRIBUTES
14376 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
14378 #ifdef HAVE_AS_DTPRELWORD
14379 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14380 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
14381 #endif
14382 #undef TARGET_DWARF_REGISTER_SPAN
14383 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
14385 #undef TARGET_IRA_COVER_CLASSES
14386 #define TARGET_IRA_COVER_CLASSES mips_ira_cover_classes
14388 struct gcc_target targetm = TARGET_INITIALIZER;
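/* Editor's illustrative note, not part of the original file: the
   initializer above stitches every TARGET_* override in this file into
   the target vector that machine-independent code calls through.  For
   example, the scheduler queries the issue rate roughly like this
   (assumed call site, modelled on haifa-sched.c):

     issue_rate = targetm.sched.issue_rate ();

   which, via the TARGET_SCHED_ISSUE_RATE definition above, resolves to
   mips_issue_rate.  */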
14390 #include "gt-mips.h"