hashtab.h: Update GTY annotations to new syntax
[official-gcc.git] / gcc / config / mips / mips.c
bloba677f9392b4d3cf674416db7ed8366eceaba7be5
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "libfuncs.h"
46 #include "flags.h"
47 #include "reload.h"
48 #include "tm_p.h"
49 #include "ggc.h"
50 #include "gstab.h"
51 #include "hashtab.h"
52 #include "debug.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "gimple.h"
60 #include "bitmap.h"
61 #include "diagnostic.h"
63 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
64 #define UNSPEC_ADDRESS_P(X) \
65 (GET_CODE (X) == UNSPEC \
66 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
67 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
69 /* Extract the symbol or label from UNSPEC wrapper X. */
70 #define UNSPEC_ADDRESS(X) \
71 XVECEXP (X, 0, 0)
73 /* Extract the symbol type from UNSPEC wrapper X. */
74 #define UNSPEC_ADDRESS_TYPE(X) \
75 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
77 /* The maximum distance between the top of the stack frame and the
78 value $sp has when we save and restore registers.
80 The value for normal-mode code must be a SMALL_OPERAND and must
81 preserve the maximum stack alignment. We therefore use a value
82 of 0x7ff0 in this case.
84 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
85 up to 0x7f8 bytes and can usually save or restore all the registers
86 that we need to save or restore. (Note that we can only use these
87 instructions for o32, for which the stack alignment is 8 bytes.)
89 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
90 RESTORE are not available. We can then use unextended instructions
91 to save and restore registers, and to allocate and deallocate the top
92 part of the frame. */
93 #define MIPS_MAX_FIRST_STACK_STEP \
94 (!TARGET_MIPS16 ? 0x7ff0 \
95 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
96 : TARGET_64BIT ? 0x100 : 0x400)
98 /* True if INSN is a mips.md pattern or asm statement. */
99 #define USEFUL_INSN_P(INSN) \
100 (INSN_P (INSN) \
101 && GET_CODE (PATTERN (INSN)) != USE \
102 && GET_CODE (PATTERN (INSN)) != CLOBBER \
103 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
104 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
106 /* If INSN is a delayed branch sequence, return the first instruction
107 in the sequence, otherwise return INSN itself. */
108 #define SEQ_BEGIN(INSN) \
109 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
110 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 : (INSN))
113 /* Likewise for the last instruction in a delayed branch sequence. */
114 #define SEQ_END(INSN) \
115 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
116 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
117 : (INSN))
119 /* Execute the following loop body with SUBINSN set to each instruction
120 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
121 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
122 for ((SUBINSN) = SEQ_BEGIN (INSN); \
123 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
124 (SUBINSN) = NEXT_INSN (SUBINSN))
126 /* True if bit BIT is set in VALUE. */
127 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
129 /* Classifies an address.
131 ADDRESS_REG
132 A natural register + offset address. The register satisfies
133 mips_valid_base_register_p and the offset is a const_arith_operand.
135 ADDRESS_LO_SUM
136 A LO_SUM rtx. The first operand is a valid base register and
137 the second operand is a symbolic address.
139 ADDRESS_CONST_INT
140 A signed 16-bit constant address.
142 ADDRESS_SYMBOLIC:
143 A constant symbolic address. */
144 enum mips_address_type {
145 ADDRESS_REG,
146 ADDRESS_LO_SUM,
147 ADDRESS_CONST_INT,
148 ADDRESS_SYMBOLIC
151 /* Enumerates the setting of the -mr10k-cache-barrier option. */
152 enum mips_r10k_cache_barrier_setting {
153 R10K_CACHE_BARRIER_NONE,
154 R10K_CACHE_BARRIER_STORE,
155 R10K_CACHE_BARRIER_LOAD_STORE
158 /* Macros to create an enumeration identifier for a function prototype. */
159 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
160 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
161 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
162 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
164 /* Classifies the prototype of a built-in function. */
165 enum mips_function_type {
166 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
167 #include "config/mips/mips-ftypes.def"
168 #undef DEF_MIPS_FTYPE
169 MIPS_MAX_FTYPE_MAX
172 /* Specifies how a built-in function should be converted into rtl. */
173 enum mips_builtin_type {
174 /* The function corresponds directly to an .md pattern. The return
175 value is mapped to operand 0 and the arguments are mapped to
176 operands 1 and above. */
177 MIPS_BUILTIN_DIRECT,
179 /* The function corresponds directly to an .md pattern. There is no return
180 value and the arguments are mapped to operands 0 and above. */
181 MIPS_BUILTIN_DIRECT_NO_TARGET,
183 /* The function corresponds to a comparison instruction followed by
184 a mips_cond_move_tf_ps pattern. The first two arguments are the
185 values to compare and the second two arguments are the vector
186 operands for the movt.ps or movf.ps instruction (in assembly order). */
187 MIPS_BUILTIN_MOVF,
188 MIPS_BUILTIN_MOVT,
190 /* The function corresponds to a V2SF comparison instruction. Operand 0
191 of this instruction is the result of the comparison, which has mode
192 CCV2 or CCV4. The function arguments are mapped to operands 1 and
193 above. The function's return value is an SImode boolean that is
194 true under the following conditions:
196 MIPS_BUILTIN_CMP_ANY: one of the registers is true
197 MIPS_BUILTIN_CMP_ALL: all of the registers are true
198 MIPS_BUILTIN_CMP_LOWER: the first register is true
199 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
200 MIPS_BUILTIN_CMP_ANY,
201 MIPS_BUILTIN_CMP_ALL,
202 MIPS_BUILTIN_CMP_UPPER,
203 MIPS_BUILTIN_CMP_LOWER,
205 /* As above, but the instruction only sets a single $fcc register. */
206 MIPS_BUILTIN_CMP_SINGLE,
208 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
209 MIPS_BUILTIN_BPOSGE32
212 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
213 #define MIPS_FP_CONDITIONS(MACRO) \
214 MACRO (f), \
215 MACRO (un), \
216 MACRO (eq), \
217 MACRO (ueq), \
218 MACRO (olt), \
219 MACRO (ult), \
220 MACRO (ole), \
221 MACRO (ule), \
222 MACRO (sf), \
223 MACRO (ngle), \
224 MACRO (seq), \
225 MACRO (ngl), \
226 MACRO (lt), \
227 MACRO (nge), \
228 MACRO (le), \
229 MACRO (ngt)
231 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
232 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
233 enum mips_fp_condition {
234 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
237 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
238 #define STRINGIFY(X) #X
239 static const char *const mips_fp_conditions[] = {
240 MIPS_FP_CONDITIONS (STRINGIFY)
243 /* Information about a function's frame layout. */
244 struct GTY(()) mips_frame_info {
245 /* The size of the frame in bytes. */
246 HOST_WIDE_INT total_size;
248 /* The number of bytes allocated to variables. */
249 HOST_WIDE_INT var_size;
251 /* The number of bytes allocated to outgoing function arguments. */
252 HOST_WIDE_INT args_size;
254 /* The number of bytes allocated to the .cprestore slot, or 0 if there
255 is no such slot. */
256 HOST_WIDE_INT cprestore_size;
258 /* Bit X is set if the function saves or restores GPR X. */
259 unsigned int mask;
261 /* Likewise FPR X. */
262 unsigned int fmask;
264 /* Likewise doubleword accumulator X ($acX). */
265 unsigned int acc_mask;
267 /* The number of GPRs, FPRs, doubleword accumulators and COP0
268 registers saved. */
269 unsigned int num_gp;
270 unsigned int num_fp;
271 unsigned int num_acc;
272 unsigned int num_cop0_regs;
274 /* The offset of the topmost GPR, FPR, accumulator and COP0-register
275 save slots from the top of the frame, or zero if no such slots are
276 needed. */
277 HOST_WIDE_INT gp_save_offset;
278 HOST_WIDE_INT fp_save_offset;
279 HOST_WIDE_INT acc_save_offset;
280 HOST_WIDE_INT cop0_save_offset;
282 /* Likewise, but giving offsets from the bottom of the frame. */
283 HOST_WIDE_INT gp_sp_offset;
284 HOST_WIDE_INT fp_sp_offset;
285 HOST_WIDE_INT acc_sp_offset;
286 HOST_WIDE_INT cop0_sp_offset;
288 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
289 HOST_WIDE_INT arg_pointer_offset;
291 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
292 HOST_WIDE_INT hard_frame_pointer_offset;
295 struct GTY(()) machine_function {
296 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
297 rtx mips16_gp_pseudo_rtx;
299 /* The number of extra stack bytes taken up by register varargs.
300 This area is allocated by the callee at the very top of the frame. */
301 int varargs_size;
303 /* The current frame information, calculated by mips_compute_frame_info. */
304 struct mips_frame_info frame;
306 /* The register to use as the function's global pointer, or INVALID_REGNUM
307 if the function doesn't need one. */
308 unsigned int global_pointer;
310 /* True if mips_adjust_insn_length should ignore an instruction's
311 hazard attribute. */
312 bool ignore_hazard_length_p;
314 /* True if the whole function is suitable for .set noreorder and
315 .set nomacro. */
316 bool all_noreorder_p;
318 /* True if the function is known to have an instruction that needs $gp. */
319 bool has_gp_insn_p;
321 /* True if we have emitted an instruction to initialize
322 mips16_gp_pseudo_rtx. */
323 bool initialized_mips16_gp_pseudo_p;
325 /* True if this is an interrupt handler. */
326 bool interrupt_handler_p;
328 /* True if this is an interrupt handler that uses shadow registers. */
329 bool use_shadow_register_set_p;
331 /* True if this is an interrupt handler that should keep interrupts
332 masked. */
333 bool keep_interrupts_masked_p;
335 /* True if this is an interrupt handler that should use DERET
336 instead of ERET. */
337 bool use_debug_exception_return_p;
340 /* Information about a single argument. */
341 struct mips_arg_info {
342 /* True if the argument is passed in a floating-point register, or
343 would have been if we hadn't run out of registers. */
344 bool fpr_p;
346 /* The number of words passed in registers, rounded up. */
347 unsigned int reg_words;
349 /* For EABI, the offset of the first register from GP_ARG_FIRST or
350 FP_ARG_FIRST. For other ABIs, the offset of the first register from
351 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
352 comment for details).
354 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
355 on the stack. */
356 unsigned int reg_offset;
358 /* The number of words that must be passed on the stack, rounded up. */
359 unsigned int stack_words;
361 /* The offset from the start of the stack overflow area of the argument's
362 first stack word. Only meaningful when STACK_WORDS is nonzero. */
363 unsigned int stack_offset;
366 /* Information about an address described by mips_address_type.
368 ADDRESS_CONST_INT
369 No fields are used.
371 ADDRESS_REG
372 REG is the base register and OFFSET is the constant offset.
374 ADDRESS_LO_SUM
375 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
376 is the type of symbol it references.
378 ADDRESS_SYMBOLIC
379 SYMBOL_TYPE is the type of symbol that the address references. */
380 struct mips_address_info {
381 enum mips_address_type type;
382 rtx reg;
383 rtx offset;
384 enum mips_symbol_type symbol_type;
387 /* One stage in a constant building sequence. These sequences have
388 the form:
390 A = VALUE[0]
391 A = A CODE[1] VALUE[1]
392 A = A CODE[2] VALUE[2]
395 where A is an accumulator, each CODE[i] is a binary rtl operation
396 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
397 struct mips_integer_op {
398 enum rtx_code code;
399 unsigned HOST_WIDE_INT value;
402 /* The largest number of operations needed to load an integer constant.
403 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
404 When the lowest bit is clear, we can try, but reject a sequence with
405 an extra SLL at the end. */
406 #define MIPS_MAX_INTEGER_OPS 7
408 /* Information about a MIPS16e SAVE or RESTORE instruction. */
409 struct mips16e_save_restore_info {
410 /* The number of argument registers saved by a SAVE instruction.
411 0 for RESTORE instructions. */
412 unsigned int nargs;
414 /* Bit X is set if the instruction saves or restores GPR X. */
415 unsigned int mask;
417 /* The total number of bytes to allocate. */
418 HOST_WIDE_INT size;
421 /* Global variables for machine-dependent things. */
423 /* The -G setting, or the configuration's default small-data limit if
424 no -G option is given. */
425 static unsigned int mips_small_data_threshold;
427 /* The number of file directives written by mips_output_filename. */
428 int num_source_filenames;
430 /* The name that appeared in the last .file directive written by
431 mips_output_filename, or "" if mips_output_filename hasn't
432 written anything yet. */
433 const char *current_function_file = "";
435 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
436 int sdb_label_count;
438 /* Arrays that map GCC register numbers to debugger register numbers. */
439 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
440 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
442 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
443 int set_noreorder;
444 int set_nomacro;
445 static int set_noat;
447 /* True if we're writing out a branch-likely instruction rather than a
448 normal branch. */
449 static bool mips_branch_likely;
451 /* The operands passed to the last cmpMM expander. */
452 rtx cmp_operands[2];
454 /* The current instruction-set architecture. */
455 enum processor_type mips_arch;
456 const struct mips_cpu_info *mips_arch_info;
458 /* The processor that we should tune the code for. */
459 enum processor_type mips_tune;
460 const struct mips_cpu_info *mips_tune_info;
462 /* The ISA level associated with mips_arch. */
463 int mips_isa;
465 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
466 static const struct mips_cpu_info *mips_isa_option_info;
468 /* Which ABI to use. */
469 int mips_abi = MIPS_ABI_DEFAULT;
471 /* Which cost information to use. */
472 const struct mips_rtx_cost_data *mips_cost;
474 /* The ambient target flags, excluding MASK_MIPS16. */
475 static int mips_base_target_flags;
477 /* True if MIPS16 is the default mode. */
478 bool mips_base_mips16;
480 /* The ambient values of other global variables. */
481 static int mips_base_schedule_insns; /* flag_schedule_insns */
482 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
483 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
484 static int mips_base_align_loops; /* align_loops */
485 static int mips_base_align_jumps; /* align_jumps */
486 static int mips_base_align_functions; /* align_functions */
488 /* The -mcode-readable setting. */
489 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
491 /* The -mr10k-cache-barrier setting. */
492 static enum mips_r10k_cache_barrier_setting mips_r10k_cache_barrier;
494 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
495 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
497 /* Index C is true if character C is a valid PRINT_OPERAND punctation
498 character. */
499 bool mips_print_operand_punct[256];
501 static GTY (()) int mips_output_filename_first_time = 1;
503 /* mips_split_p[X] is true if symbols of type X can be split by
504 mips_split_symbol. */
505 bool mips_split_p[NUM_SYMBOL_TYPES];
507 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
508 can be split by mips_split_symbol. */
509 bool mips_split_hi_p[NUM_SYMBOL_TYPES];
511 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
512 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
513 if they are matched by a special .md file pattern. */
514 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
516 /* Likewise for HIGHs. */
517 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
519 /* Index R is the smallest register class that contains register R. */
520 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
521 LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
522 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
523 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
524 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
525 M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
526 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
527 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
528 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
529 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
530 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
531 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
532 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
533 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
534 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
535 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
536 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
537 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
538 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
539 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
540 NO_REGS, FRAME_REGS, FRAME_REGS, NO_REGS,
541 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
542 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
543 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
544 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
545 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
546 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
547 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
548 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
549 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
550 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
551 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
552 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
553 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
554 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
555 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
556 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
557 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
558 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
559 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
560 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
561 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
562 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
563 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
564 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
565 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
566 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
567 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
570 /* The value of TARGET_ATTRIBUTE_TABLE. */
571 const struct attribute_spec mips_attribute_table[] = {
572 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
573 { "long_call", 0, 0, false, true, true, NULL },
574 { "far", 0, 0, false, true, true, NULL },
575 { "near", 0, 0, false, true, true, NULL },
576 /* We would really like to treat "mips16" and "nomips16" as type
577 attributes, but GCC doesn't provide the hooks we need to support
578 the right conversion rules. As declaration attributes, they affect
579 code generation but don't carry other semantics. */
580 { "mips16", 0, 0, true, false, false, NULL },
581 { "nomips16", 0, 0, true, false, false, NULL },
582 /* Allow functions to be specified as interrupt handlers */
583 { "interrupt", 0, 0, false, true, true, NULL },
584 { "use_shadow_register_set", 0, 0, false, true, true, NULL },
585 { "keep_interrupts_masked", 0, 0, false, true, true, NULL },
586 { "use_debug_exception_return", 0, 0, false, true, true, NULL },
587 { NULL, 0, 0, false, false, false, NULL }
590 /* A table describing all the processors GCC knows about. Names are
591 matched in the order listed. The first mention of an ISA level is
592 taken as the canonical name for that ISA.
594 To ease comparison, please keep this table in the same order
595 as GAS's mips_cpu_info_table. Please also make sure that
596 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
597 options correctly. */
598 static const struct mips_cpu_info mips_cpu_info_table[] = {
599 /* Entries for generic ISAs. */
600 { "mips1", PROCESSOR_R3000, 1, 0 },
601 { "mips2", PROCESSOR_R6000, 2, 0 },
602 { "mips3", PROCESSOR_R4000, 3, 0 },
603 { "mips4", PROCESSOR_R8000, 4, 0 },
604 /* Prefer not to use branch-likely instructions for generic MIPS32rX
605 and MIPS64rX code. The instructions were officially deprecated
606 in revisions 2 and earlier, but revision 3 is likely to downgrade
607 that to a recommendation to avoid the instructions in code that
608 isn't tuned to a specific processor. */
609 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
610 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
611 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
612 /* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
613 { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },
615 /* MIPS I processors. */
616 { "r3000", PROCESSOR_R3000, 1, 0 },
617 { "r2000", PROCESSOR_R3000, 1, 0 },
618 { "r3900", PROCESSOR_R3900, 1, 0 },
620 /* MIPS II processors. */
621 { "r6000", PROCESSOR_R6000, 2, 0 },
623 /* MIPS III processors. */
624 { "r4000", PROCESSOR_R4000, 3, 0 },
625 { "vr4100", PROCESSOR_R4100, 3, 0 },
626 { "vr4111", PROCESSOR_R4111, 3, 0 },
627 { "vr4120", PROCESSOR_R4120, 3, 0 },
628 { "vr4130", PROCESSOR_R4130, 3, 0 },
629 { "vr4300", PROCESSOR_R4300, 3, 0 },
630 { "r4400", PROCESSOR_R4000, 3, 0 },
631 { "r4600", PROCESSOR_R4600, 3, 0 },
632 { "orion", PROCESSOR_R4600, 3, 0 },
633 { "r4650", PROCESSOR_R4650, 3, 0 },
634 /* ST Loongson 2E/2F processors. */
635 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
636 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
638 /* MIPS IV processors. */
639 { "r8000", PROCESSOR_R8000, 4, 0 },
640 { "r10000", PROCESSOR_R10000, 4, 0 },
641 { "r12000", PROCESSOR_R10000, 4, 0 },
642 { "r14000", PROCESSOR_R10000, 4, 0 },
643 { "r16000", PROCESSOR_R10000, 4, 0 },
644 { "vr5000", PROCESSOR_R5000, 4, 0 },
645 { "vr5400", PROCESSOR_R5400, 4, 0 },
646 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
647 { "rm7000", PROCESSOR_R7000, 4, 0 },
648 { "rm9000", PROCESSOR_R9000, 4, 0 },
650 /* MIPS32 processors. */
651 { "4kc", PROCESSOR_4KC, 32, 0 },
652 { "4km", PROCESSOR_4KC, 32, 0 },
653 { "4kp", PROCESSOR_4KP, 32, 0 },
654 { "4ksc", PROCESSOR_4KC, 32, 0 },
656 /* MIPS32 Release 2 processors. */
657 { "m4k", PROCESSOR_M4K, 33, 0 },
658 { "4kec", PROCESSOR_4KC, 33, 0 },
659 { "4kem", PROCESSOR_4KC, 33, 0 },
660 { "4kep", PROCESSOR_4KP, 33, 0 },
661 { "4ksd", PROCESSOR_4KC, 33, 0 },
663 { "24kc", PROCESSOR_24KC, 33, 0 },
664 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
665 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
666 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
667 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
668 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
670 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
671 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
672 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
673 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
674 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
675 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
677 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
678 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
679 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
680 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
681 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
682 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
684 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
685 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
686 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
687 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
688 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
689 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
690 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
692 /* MIPS64 processors. */
693 { "5kc", PROCESSOR_5KC, 64, 0 },
694 { "5kf", PROCESSOR_5KF, 64, 0 },
695 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
696 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
697 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
698 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
699 { "xlr", PROCESSOR_XLR, 64, 0 },
701 /* MIPS64 Release 2 processors. */
702 { "octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY }
705 /* Default costs. If these are used for a processor we should look
706 up the actual costs. */
707 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
708 COSTS_N_INSNS (7), /* fp_mult_sf */ \
709 COSTS_N_INSNS (8), /* fp_mult_df */ \
710 COSTS_N_INSNS (23), /* fp_div_sf */ \
711 COSTS_N_INSNS (36), /* fp_div_df */ \
712 COSTS_N_INSNS (10), /* int_mult_si */ \
713 COSTS_N_INSNS (10), /* int_mult_di */ \
714 COSTS_N_INSNS (69), /* int_div_si */ \
715 COSTS_N_INSNS (69), /* int_div_di */ \
716 2, /* branch_cost */ \
717 4 /* memory_latency */
719 /* Floating-point costs for processors without an FPU. Just assume that
720 all floating-point libcalls are very expensive. */
721 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
722 COSTS_N_INSNS (256), /* fp_mult_sf */ \
723 COSTS_N_INSNS (256), /* fp_mult_df */ \
724 COSTS_N_INSNS (256), /* fp_div_sf */ \
725 COSTS_N_INSNS (256) /* fp_div_df */
727 /* Costs to use when optimizing for size. */
728 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
729 COSTS_N_INSNS (1), /* fp_add */
730 COSTS_N_INSNS (1), /* fp_mult_sf */
731 COSTS_N_INSNS (1), /* fp_mult_df */
732 COSTS_N_INSNS (1), /* fp_div_sf */
733 COSTS_N_INSNS (1), /* fp_div_df */
734 COSTS_N_INSNS (1), /* int_mult_si */
735 COSTS_N_INSNS (1), /* int_mult_di */
736 COSTS_N_INSNS (1), /* int_div_si */
737 COSTS_N_INSNS (1), /* int_div_di */
738 2, /* branch_cost */
739 4 /* memory_latency */
742 /* Costs to use when optimizing for speed, indexed by processor. */
743 static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
744 { /* R3000 */
745 COSTS_N_INSNS (2), /* fp_add */
746 COSTS_N_INSNS (4), /* fp_mult_sf */
747 COSTS_N_INSNS (5), /* fp_mult_df */
748 COSTS_N_INSNS (12), /* fp_div_sf */
749 COSTS_N_INSNS (19), /* fp_div_df */
750 COSTS_N_INSNS (12), /* int_mult_si */
751 COSTS_N_INSNS (12), /* int_mult_di */
752 COSTS_N_INSNS (35), /* int_div_si */
753 COSTS_N_INSNS (35), /* int_div_di */
754 1, /* branch_cost */
755 4 /* memory_latency */
757 { /* 4KC */
758 SOFT_FP_COSTS,
759 COSTS_N_INSNS (6), /* int_mult_si */
760 COSTS_N_INSNS (6), /* int_mult_di */
761 COSTS_N_INSNS (36), /* int_div_si */
762 COSTS_N_INSNS (36), /* int_div_di */
763 1, /* branch_cost */
764 4 /* memory_latency */
766 { /* 4KP */
767 SOFT_FP_COSTS,
768 COSTS_N_INSNS (36), /* int_mult_si */
769 COSTS_N_INSNS (36), /* int_mult_di */
770 COSTS_N_INSNS (37), /* int_div_si */
771 COSTS_N_INSNS (37), /* int_div_di */
772 1, /* branch_cost */
773 4 /* memory_latency */
775 { /* 5KC */
776 SOFT_FP_COSTS,
777 COSTS_N_INSNS (4), /* int_mult_si */
778 COSTS_N_INSNS (11), /* int_mult_di */
779 COSTS_N_INSNS (36), /* int_div_si */
780 COSTS_N_INSNS (68), /* int_div_di */
781 1, /* branch_cost */
782 4 /* memory_latency */
784 { /* 5KF */
785 COSTS_N_INSNS (4), /* fp_add */
786 COSTS_N_INSNS (4), /* fp_mult_sf */
787 COSTS_N_INSNS (5), /* fp_mult_df */
788 COSTS_N_INSNS (17), /* fp_div_sf */
789 COSTS_N_INSNS (32), /* fp_div_df */
790 COSTS_N_INSNS (4), /* int_mult_si */
791 COSTS_N_INSNS (11), /* int_mult_di */
792 COSTS_N_INSNS (36), /* int_div_si */
793 COSTS_N_INSNS (68), /* int_div_di */
794 1, /* branch_cost */
795 4 /* memory_latency */
797 { /* 20KC */
798 COSTS_N_INSNS (4), /* fp_add */
799 COSTS_N_INSNS (4), /* fp_mult_sf */
800 COSTS_N_INSNS (5), /* fp_mult_df */
801 COSTS_N_INSNS (17), /* fp_div_sf */
802 COSTS_N_INSNS (32), /* fp_div_df */
803 COSTS_N_INSNS (4), /* int_mult_si */
804 COSTS_N_INSNS (7), /* int_mult_di */
805 COSTS_N_INSNS (42), /* int_div_si */
806 COSTS_N_INSNS (72), /* int_div_di */
807 1, /* branch_cost */
808 4 /* memory_latency */
810 { /* 24KC */
811 SOFT_FP_COSTS,
812 COSTS_N_INSNS (5), /* int_mult_si */
813 COSTS_N_INSNS (5), /* int_mult_di */
814 COSTS_N_INSNS (41), /* int_div_si */
815 COSTS_N_INSNS (41), /* int_div_di */
816 1, /* branch_cost */
817 4 /* memory_latency */
819 { /* 24KF2_1 */
820 COSTS_N_INSNS (8), /* fp_add */
821 COSTS_N_INSNS (8), /* fp_mult_sf */
822 COSTS_N_INSNS (10), /* fp_mult_df */
823 COSTS_N_INSNS (34), /* fp_div_sf */
824 COSTS_N_INSNS (64), /* fp_div_df */
825 COSTS_N_INSNS (5), /* int_mult_si */
826 COSTS_N_INSNS (5), /* int_mult_di */
827 COSTS_N_INSNS (41), /* int_div_si */
828 COSTS_N_INSNS (41), /* int_div_di */
829 1, /* branch_cost */
830 4 /* memory_latency */
832 { /* 24KF1_1 */
833 COSTS_N_INSNS (4), /* fp_add */
834 COSTS_N_INSNS (4), /* fp_mult_sf */
835 COSTS_N_INSNS (5), /* fp_mult_df */
836 COSTS_N_INSNS (17), /* fp_div_sf */
837 COSTS_N_INSNS (32), /* fp_div_df */
838 COSTS_N_INSNS (5), /* int_mult_si */
839 COSTS_N_INSNS (5), /* int_mult_di */
840 COSTS_N_INSNS (41), /* int_div_si */
841 COSTS_N_INSNS (41), /* int_div_di */
842 1, /* branch_cost */
843 4 /* memory_latency */
845 { /* 74KC */
846 SOFT_FP_COSTS,
847 COSTS_N_INSNS (5), /* int_mult_si */
848 COSTS_N_INSNS (5), /* int_mult_di */
849 COSTS_N_INSNS (41), /* int_div_si */
850 COSTS_N_INSNS (41), /* int_div_di */
851 1, /* branch_cost */
852 4 /* memory_latency */
854 { /* 74KF2_1 */
855 COSTS_N_INSNS (8), /* fp_add */
856 COSTS_N_INSNS (8), /* fp_mult_sf */
857 COSTS_N_INSNS (10), /* fp_mult_df */
858 COSTS_N_INSNS (34), /* fp_div_sf */
859 COSTS_N_INSNS (64), /* fp_div_df */
860 COSTS_N_INSNS (5), /* int_mult_si */
861 COSTS_N_INSNS (5), /* int_mult_di */
862 COSTS_N_INSNS (41), /* int_div_si */
863 COSTS_N_INSNS (41), /* int_div_di */
864 1, /* branch_cost */
865 4 /* memory_latency */
867 { /* 74KF1_1 */
868 COSTS_N_INSNS (4), /* fp_add */
869 COSTS_N_INSNS (4), /* fp_mult_sf */
870 COSTS_N_INSNS (5), /* fp_mult_df */
871 COSTS_N_INSNS (17), /* fp_div_sf */
872 COSTS_N_INSNS (32), /* fp_div_df */
873 COSTS_N_INSNS (5), /* int_mult_si */
874 COSTS_N_INSNS (5), /* int_mult_di */
875 COSTS_N_INSNS (41), /* int_div_si */
876 COSTS_N_INSNS (41), /* int_div_di */
877 1, /* branch_cost */
878 4 /* memory_latency */
880 { /* 74KF3_2 */
881 COSTS_N_INSNS (6), /* fp_add */
882 COSTS_N_INSNS (6), /* fp_mult_sf */
883 COSTS_N_INSNS (7), /* fp_mult_df */
884 COSTS_N_INSNS (25), /* fp_div_sf */
885 COSTS_N_INSNS (48), /* fp_div_df */
886 COSTS_N_INSNS (5), /* int_mult_si */
887 COSTS_N_INSNS (5), /* int_mult_di */
888 COSTS_N_INSNS (41), /* int_div_si */
889 COSTS_N_INSNS (41), /* int_div_di */
890 1, /* branch_cost */
891 4 /* memory_latency */
893 { /* Loongson-2E */
894 DEFAULT_COSTS
896 { /* Loongson-2F */
897 DEFAULT_COSTS
899 { /* M4k */
900 DEFAULT_COSTS
902 /* Octeon */
904 SOFT_FP_COSTS,
905 COSTS_N_INSNS (5), /* int_mult_si */
906 COSTS_N_INSNS (5), /* int_mult_di */
907 COSTS_N_INSNS (72), /* int_div_si */
908 COSTS_N_INSNS (72), /* int_div_di */
909 1, /* branch_cost */
910 4 /* memory_latency */
912 { /* R3900 */
913 COSTS_N_INSNS (2), /* fp_add */
914 COSTS_N_INSNS (4), /* fp_mult_sf */
915 COSTS_N_INSNS (5), /* fp_mult_df */
916 COSTS_N_INSNS (12), /* fp_div_sf */
917 COSTS_N_INSNS (19), /* fp_div_df */
918 COSTS_N_INSNS (2), /* int_mult_si */
919 COSTS_N_INSNS (2), /* int_mult_di */
920 COSTS_N_INSNS (35), /* int_div_si */
921 COSTS_N_INSNS (35), /* int_div_di */
922 1, /* branch_cost */
923 4 /* memory_latency */
925 { /* R6000 */
926 COSTS_N_INSNS (3), /* fp_add */
927 COSTS_N_INSNS (5), /* fp_mult_sf */
928 COSTS_N_INSNS (6), /* fp_mult_df */
929 COSTS_N_INSNS (15), /* fp_div_sf */
930 COSTS_N_INSNS (16), /* fp_div_df */
931 COSTS_N_INSNS (17), /* int_mult_si */
932 COSTS_N_INSNS (17), /* int_mult_di */
933 COSTS_N_INSNS (38), /* int_div_si */
934 COSTS_N_INSNS (38), /* int_div_di */
935 2, /* branch_cost */
936 6 /* memory_latency */
938 { /* R4000 */
939 COSTS_N_INSNS (6), /* fp_add */
940 COSTS_N_INSNS (7), /* fp_mult_sf */
941 COSTS_N_INSNS (8), /* fp_mult_df */
942 COSTS_N_INSNS (23), /* fp_div_sf */
943 COSTS_N_INSNS (36), /* fp_div_df */
944 COSTS_N_INSNS (10), /* int_mult_si */
945 COSTS_N_INSNS (10), /* int_mult_di */
946 COSTS_N_INSNS (69), /* int_div_si */
947 COSTS_N_INSNS (69), /* int_div_di */
948 2, /* branch_cost */
949 6 /* memory_latency */
951 { /* R4100 */
952 DEFAULT_COSTS
954 { /* R4111 */
955 DEFAULT_COSTS
957 { /* R4120 */
958 DEFAULT_COSTS
960 { /* R4130 */
961 /* The only costs that appear to be updated here are
962 integer multiplication. */
963 SOFT_FP_COSTS,
964 COSTS_N_INSNS (4), /* int_mult_si */
965 COSTS_N_INSNS (6), /* int_mult_di */
966 COSTS_N_INSNS (69), /* int_div_si */
967 COSTS_N_INSNS (69), /* int_div_di */
968 1, /* branch_cost */
969 4 /* memory_latency */
971 { /* R4300 */
972 DEFAULT_COSTS
974 { /* R4600 */
975 DEFAULT_COSTS
977 { /* R4650 */
978 DEFAULT_COSTS
980 { /* R5000 */
981 COSTS_N_INSNS (6), /* fp_add */
982 COSTS_N_INSNS (4), /* fp_mult_sf */
983 COSTS_N_INSNS (5), /* fp_mult_df */
984 COSTS_N_INSNS (23), /* fp_div_sf */
985 COSTS_N_INSNS (36), /* fp_div_df */
986 COSTS_N_INSNS (5), /* int_mult_si */
987 COSTS_N_INSNS (5), /* int_mult_di */
988 COSTS_N_INSNS (36), /* int_div_si */
989 COSTS_N_INSNS (36), /* int_div_di */
990 1, /* branch_cost */
991 4 /* memory_latency */
993 { /* R5400 */
994 COSTS_N_INSNS (6), /* fp_add */
995 COSTS_N_INSNS (5), /* fp_mult_sf */
996 COSTS_N_INSNS (6), /* fp_mult_df */
997 COSTS_N_INSNS (30), /* fp_div_sf */
998 COSTS_N_INSNS (59), /* fp_div_df */
999 COSTS_N_INSNS (3), /* int_mult_si */
1000 COSTS_N_INSNS (4), /* int_mult_di */
1001 COSTS_N_INSNS (42), /* int_div_si */
1002 COSTS_N_INSNS (74), /* int_div_di */
1003 1, /* branch_cost */
1004 4 /* memory_latency */
1006 { /* R5500 */
1007 COSTS_N_INSNS (6), /* fp_add */
1008 COSTS_N_INSNS (5), /* fp_mult_sf */
1009 COSTS_N_INSNS (6), /* fp_mult_df */
1010 COSTS_N_INSNS (30), /* fp_div_sf */
1011 COSTS_N_INSNS (59), /* fp_div_df */
1012 COSTS_N_INSNS (5), /* int_mult_si */
1013 COSTS_N_INSNS (9), /* int_mult_di */
1014 COSTS_N_INSNS (42), /* int_div_si */
1015 COSTS_N_INSNS (74), /* int_div_di */
1016 1, /* branch_cost */
1017 4 /* memory_latency */
1019 { /* R7000 */
1020 /* The only costs that are changed here are
1021 integer multiplication. */
1022 COSTS_N_INSNS (6), /* fp_add */
1023 COSTS_N_INSNS (7), /* fp_mult_sf */
1024 COSTS_N_INSNS (8), /* fp_mult_df */
1025 COSTS_N_INSNS (23), /* fp_div_sf */
1026 COSTS_N_INSNS (36), /* fp_div_df */
1027 COSTS_N_INSNS (5), /* int_mult_si */
1028 COSTS_N_INSNS (9), /* int_mult_di */
1029 COSTS_N_INSNS (69), /* int_div_si */
1030 COSTS_N_INSNS (69), /* int_div_di */
1031 1, /* branch_cost */
1032 4 /* memory_latency */
1034 { /* R8000 */
1035 DEFAULT_COSTS
1037 { /* R9000 */
1038 /* The only costs that are changed here are
1039 integer multiplication. */
1040 COSTS_N_INSNS (6), /* fp_add */
1041 COSTS_N_INSNS (7), /* fp_mult_sf */
1042 COSTS_N_INSNS (8), /* fp_mult_df */
1043 COSTS_N_INSNS (23), /* fp_div_sf */
1044 COSTS_N_INSNS (36), /* fp_div_df */
1045 COSTS_N_INSNS (3), /* int_mult_si */
1046 COSTS_N_INSNS (8), /* int_mult_di */
1047 COSTS_N_INSNS (69), /* int_div_si */
1048 COSTS_N_INSNS (69), /* int_div_di */
1049 1, /* branch_cost */
1050 4 /* memory_latency */
1052 { /* R1x000 */
1053 COSTS_N_INSNS (2), /* fp_add */
1054 COSTS_N_INSNS (2), /* fp_mult_sf */
1055 COSTS_N_INSNS (2), /* fp_mult_df */
1056 COSTS_N_INSNS (12), /* fp_div_sf */
1057 COSTS_N_INSNS (19), /* fp_div_df */
1058 COSTS_N_INSNS (5), /* int_mult_si */
1059 COSTS_N_INSNS (9), /* int_mult_di */
1060 COSTS_N_INSNS (34), /* int_div_si */
1061 COSTS_N_INSNS (66), /* int_div_di */
1062 1, /* branch_cost */
1063 4 /* memory_latency */
1065 { /* SB1 */
1066 /* These costs are the same as the SB-1A below. */
1067 COSTS_N_INSNS (4), /* fp_add */
1068 COSTS_N_INSNS (4), /* fp_mult_sf */
1069 COSTS_N_INSNS (4), /* fp_mult_df */
1070 COSTS_N_INSNS (24), /* fp_div_sf */
1071 COSTS_N_INSNS (32), /* fp_div_df */
1072 COSTS_N_INSNS (3), /* int_mult_si */
1073 COSTS_N_INSNS (4), /* int_mult_di */
1074 COSTS_N_INSNS (36), /* int_div_si */
1075 COSTS_N_INSNS (68), /* int_div_di */
1076 1, /* branch_cost */
1077 4 /* memory_latency */
1079 { /* SB1-A */
1080 /* These costs are the same as the SB-1 above. */
1081 COSTS_N_INSNS (4), /* fp_add */
1082 COSTS_N_INSNS (4), /* fp_mult_sf */
1083 COSTS_N_INSNS (4), /* fp_mult_df */
1084 COSTS_N_INSNS (24), /* fp_div_sf */
1085 COSTS_N_INSNS (32), /* fp_div_df */
1086 COSTS_N_INSNS (3), /* int_mult_si */
1087 COSTS_N_INSNS (4), /* int_mult_di */
1088 COSTS_N_INSNS (36), /* int_div_si */
1089 COSTS_N_INSNS (68), /* int_div_di */
1090 1, /* branch_cost */
1091 4 /* memory_latency */
1093 { /* SR71000 */
1094 DEFAULT_COSTS
1096 { /* XLR */
1097 SOFT_FP_COSTS,
1098 COSTS_N_INSNS (8), /* int_mult_si */
1099 COSTS_N_INSNS (8), /* int_mult_di */
1100 COSTS_N_INSNS (72), /* int_div_si */
1101 COSTS_N_INSNS (72), /* int_div_di */
1102 1, /* branch_cost */
1103 4 /* memory_latency */
1107 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1108 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1109 struct GTY (()) mflip_mips16_entry {
1110 const char *name;
1111 bool mips16_p;
1113 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1115 /* Hash table callbacks for mflip_mips16_htab. */
1117 static hashval_t
1118 mflip_mips16_htab_hash (const void *entry)
1120 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1123 static int
1124 mflip_mips16_htab_eq (const void *entry, const void *name)
1126 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1127 (const char *) name) == 0;
1130 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1131 mode, false if it should next add an attribute for the opposite mode. */
1132 static GTY(()) bool mips16_flipper;
1134 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1135 for -mflip-mips16. Return true if it should use "mips16" and false if
1136 it should use "nomips16". */
1138 static bool
1139 mflip_mips16_use_mips16_p (tree decl)
1141 struct mflip_mips16_entry *entry;
1142 const char *name;
1143 hashval_t hash;
1144 void **slot;
1146 /* Use the opposite of the command-line setting for anonymous decls. */
1147 if (!DECL_NAME (decl))
1148 return !mips_base_mips16;
1150 if (!mflip_mips16_htab)
1151 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1152 mflip_mips16_htab_eq, NULL);
1154 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1155 hash = htab_hash_string (name);
1156 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1157 entry = (struct mflip_mips16_entry *) *slot;
1158 if (!entry)
1160 mips16_flipper = !mips16_flipper;
1161 entry = GGC_NEW (struct mflip_mips16_entry);
1162 entry->name = name;
1163 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1164 *slot = entry;
1166 return entry->mips16_p;
1169 /* Predicates to test for presence of "near" and "far"/"long_call"
1170 attributes on the given TYPE. */
1172 static bool
1173 mips_near_type_p (const_tree type)
1175 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1178 static bool
1179 mips_far_type_p (const_tree type)
1181 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1182 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1185 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1187 static bool
1188 mips_mips16_decl_p (const_tree decl)
1190 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1193 static bool
1194 mips_nomips16_decl_p (const_tree decl)
1196 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1199 /* Check if the interrupt attribute is set for a function. */
1201 static bool
1202 mips_interrupt_type_p (tree type)
1204 return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
1207 /* Check if the attribute to use shadow register set is set for a function. */
1209 static bool
1210 mips_use_shadow_register_set_p (tree type)
1212 return lookup_attribute ("use_shadow_register_set",
1213 TYPE_ATTRIBUTES (type)) != NULL;
1216 /* Check if the attribute to keep interrupts masked is set for a function. */
1218 static bool
1219 mips_keep_interrupts_masked_p (tree type)
1221 return lookup_attribute ("keep_interrupts_masked",
1222 TYPE_ATTRIBUTES (type)) != NULL;
1225 /* Check if the attribute to use debug exception return is set for
1226 a function. */
1228 static bool
1229 mips_use_debug_exception_return_p (tree type)
1231 return lookup_attribute ("use_debug_exception_return",
1232 TYPE_ATTRIBUTES (type)) != NULL;
1235 /* Return true if function DECL is a MIPS16 function. Return the ambient
1236 setting if DECL is null. */
1238 static bool
1239 mips_use_mips16_mode_p (tree decl)
1241 if (decl)
1243 /* Nested functions must use the same frame pointer as their
1244 parent and must therefore use the same ISA mode. */
1245 tree parent = decl_function_context (decl);
1246 if (parent)
1247 decl = parent;
1248 if (mips_mips16_decl_p (decl))
1249 return true;
1250 if (mips_nomips16_decl_p (decl))
1251 return false;
1253 return mips_base_mips16;
1256 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1258 static int
1259 mips_comp_type_attributes (const_tree type1, const_tree type2)
1261 /* Disallow mixed near/far attributes. */
1262 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1263 return 0;
1264 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1265 return 0;
1266 return 1;
1269 /* Implement TARGET_INSERT_ATTRIBUTES. */
1271 static void
1272 mips_insert_attributes (tree decl, tree *attributes)
1274 const char *name;
1275 bool mips16_p, nomips16_p;
1277 /* Check for "mips16" and "nomips16" attributes. */
1278 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1279 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1280 if (TREE_CODE (decl) != FUNCTION_DECL)
1282 if (mips16_p)
1283 error ("%qs attribute only applies to functions", "mips16");
1284 if (nomips16_p)
1285 error ("%qs attribute only applies to functions", "nomips16");
1287 else
1289 mips16_p |= mips_mips16_decl_p (decl);
1290 nomips16_p |= mips_nomips16_decl_p (decl);
1291 if (mips16_p || nomips16_p)
1293 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1294 if (mips16_p && nomips16_p)
1295 error ("%qs cannot have both %<mips16%> and "
1296 "%<nomips16%> attributes",
1297 IDENTIFIER_POINTER (DECL_NAME (decl)));
1299 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1301 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1302 "mips16" attribute, arbitrarily pick one. We must pick the same
1303 setting for duplicate declarations of a function. */
1304 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1305 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1310 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1312 static tree
1313 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1315 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1316 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1317 error ("%qs redeclared with conflicting %qs attributes",
1318 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1319 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1320 error ("%qs redeclared with conflicting %qs attributes",
1321 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1323 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1324 DECL_ATTRIBUTES (newdecl));
1327 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1328 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1330 static void
1331 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1333 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1335 *base_ptr = XEXP (x, 0);
1336 *offset_ptr = INTVAL (XEXP (x, 1));
1338 else
1340 *base_ptr = x;
1341 *offset_ptr = 0;
1345 static unsigned int mips_build_integer (struct mips_integer_op *,
1346 unsigned HOST_WIDE_INT);
1348 /* A subroutine of mips_build_integer, with the same interface.
1349 Assume that the final action in the sequence should be a left shift. */
1351 static unsigned int
1352 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1354 unsigned int i, shift;
1356 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1357 since signed numbers are easier to load than unsigned ones. */
1358 shift = 0;
1359 while ((value & 1) == 0)
1360 value /= 2, shift++;
1362 i = mips_build_integer (codes, value);
1363 codes[i].code = ASHIFT;
1364 codes[i].value = shift;
1365 return i + 1;
1368 /* As for mips_build_shift, but assume that the final action will be
1369 an IOR or PLUS operation. */
1371 static unsigned int
1372 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1374 unsigned HOST_WIDE_INT high;
1375 unsigned int i;
1377 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1378 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1380 /* The constant is too complex to load with a simple LUI/ORI pair,
1381 so we want to give the recursive call as many trailing zeros as
1382 possible. In this case, we know bit 16 is set and that the
1383 low 16 bits form a negative number. If we subtract that number
1384 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1385 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1386 codes[i].code = PLUS;
1387 codes[i].value = CONST_LOW_PART (value);
1389 else
1391 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1392 bits gives a value with at least 17 trailing zeros. */
1393 i = mips_build_integer (codes, high);
1394 codes[i].code = IOR;
1395 codes[i].value = value & 0xffff;
1397 return i + 1;
1400 /* Fill CODES with a sequence of rtl operations to load VALUE.
1401 Return the number of operations needed. */
1403 static unsigned int
1404 mips_build_integer (struct mips_integer_op *codes,
1405 unsigned HOST_WIDE_INT value)
1407 if (SMALL_OPERAND (value)
1408 || SMALL_OPERAND_UNSIGNED (value)
1409 || LUI_OPERAND (value))
1411 /* The value can be loaded with a single instruction. */
1412 codes[0].code = UNKNOWN;
1413 codes[0].value = value;
1414 return 1;
1416 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1418 /* Either the constant is a simple LUI/ORI combination or its
1419 lowest bit is set. We don't want to shift in this case. */
1420 return mips_build_lower (codes, value);
1422 else if ((value & 0xffff) == 0)
1424 /* The constant will need at least three actions. The lowest
1425 16 bits are clear, so the final action will be a shift. */
1426 return mips_build_shift (codes, value);
1428 else
1430 /* The final action could be a shift, add or inclusive OR.
1431 Rather than use a complex condition to select the best
1432 approach, try both mips_build_shift and mips_build_lower
1433 and pick the one that gives the shortest sequence.
1434 Note that this case is only used once per constant. */
1435 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1436 unsigned int cost, alt_cost;
1438 cost = mips_build_shift (codes, value);
1439 alt_cost = mips_build_lower (alt_codes, value);
1440 if (alt_cost < cost)
1442 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1443 cost = alt_cost;
1445 return cost;
1449 /* Return true if symbols of type TYPE require a GOT access. */
1451 static bool
1452 mips_got_symbol_type_p (enum mips_symbol_type type)
1454 switch (type)
1456 case SYMBOL_GOT_PAGE_OFST:
1457 case SYMBOL_GOT_DISP:
1458 return true;
1460 default:
1461 return false;
1465 /* Return true if X is a thread-local symbol. */
1467 static bool
1468 mips_tls_symbol_p (rtx x)
1470 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1473 /* Return true if SYMBOL_REF X is associated with a global symbol
1474 (in the STB_GLOBAL sense). */
1476 static bool
1477 mips_global_symbol_p (const_rtx x)
1479 const_tree decl = SYMBOL_REF_DECL (x);
1481 if (!decl)
1482 return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1484 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1485 or weak symbols. Relocations in the object file will be against
1486 the target symbol, so it's that symbol's binding that matters here. */
1487 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1490 /* Return true if function X is a libgcc MIPS16 stub function. */
1492 static bool
1493 mips16_stub_function_p (const_rtx x)
1495 return (GET_CODE (x) == SYMBOL_REF
1496 && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1499 /* Return true if function X is a locally-defined and locally-binding
1500 MIPS16 function. */
1502 static bool
1503 mips16_local_function_p (const_rtx x)
1505 return (GET_CODE (x) == SYMBOL_REF
1506 && SYMBOL_REF_LOCAL_P (x)
1507 && !SYMBOL_REF_EXTERNAL_P (x)
1508 && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
1511 /* Return true if SYMBOL_REF X binds locally. */
1513 static bool
1514 mips_symbol_binds_local_p (const_rtx x)
1516 return (SYMBOL_REF_DECL (x)
1517 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1518 : SYMBOL_REF_LOCAL_P (x));
1521 /* Return true if rtx constants of mode MODE should be put into a small
1522 data section. */
1524 static bool
1525 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1527 return (!TARGET_EMBEDDED_DATA
1528 && TARGET_LOCAL_SDATA
1529 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1532 /* Return true if X should not be moved directly into register $25.
1533 We need this because many versions of GAS will treat "la $25,foo" as
1534 part of a call sequence and so allow a global "foo" to be lazily bound. */
1536 bool
1537 mips_dangerous_for_la25_p (rtx x)
1539 return (!TARGET_EXPLICIT_RELOCS
1540 && TARGET_USE_GOT
1541 && GET_CODE (x) == SYMBOL_REF
1542 && mips_global_symbol_p (x));
1545 /* Return true if calls to X might need $25 to be valid on entry. */
1547 bool
1548 mips_use_pic_fn_addr_reg_p (const_rtx x)
1550 if (!TARGET_USE_PIC_FN_ADDR_REG)
1551 return false;
1553 /* MIPS16 stub functions are guaranteed not to use $25. */
1554 if (mips16_stub_function_p (x))
1555 return false;
1557 if (GET_CODE (x) == SYMBOL_REF)
1559 /* If PLTs and copy relocations are available, the static linker
1560 will make sure that $25 is valid on entry to the target function. */
1561 if (TARGET_ABICALLS_PIC0)
1562 return false;
1564 /* Locally-defined functions use absolute accesses to set up
1565 the global pointer. */
1566 if (TARGET_ABSOLUTE_ABICALLS
1567 && mips_symbol_binds_local_p (x)
1568 && !SYMBOL_REF_EXTERNAL_P (x))
1569 return false;
1572 return true;
1575 /* Return the method that should be used to access SYMBOL_REF or
1576 LABEL_REF X in context CONTEXT. */
1578 static enum mips_symbol_type
1579 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1581 if (TARGET_RTP_PIC)
1582 return SYMBOL_GOT_DISP;
1584 if (GET_CODE (x) == LABEL_REF)
1586 /* LABEL_REFs are used for jump tables as well as text labels.
1587 Only return SYMBOL_PC_RELATIVE if we know the label is in
1588 the text section. */
1589 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1590 return SYMBOL_PC_RELATIVE;
1592 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1593 return SYMBOL_GOT_PAGE_OFST;
1595 return SYMBOL_ABSOLUTE;
1598 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1600 if (SYMBOL_REF_TLS_MODEL (x))
1601 return SYMBOL_TLS;
1603 if (CONSTANT_POOL_ADDRESS_P (x))
1605 if (TARGET_MIPS16_TEXT_LOADS)
1606 return SYMBOL_PC_RELATIVE;
1608 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1609 return SYMBOL_PC_RELATIVE;
1611 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1612 return SYMBOL_GP_RELATIVE;
1615 /* Do not use small-data accesses for weak symbols; they may end up
1616 being zero. */
1617 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1618 return SYMBOL_GP_RELATIVE;
1620 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1621 is in effect. */
1622 if (TARGET_ABICALLS_PIC2
1623 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1625 /* There are three cases to consider:
1627 - o32 PIC (either with or without explicit relocs)
1628 - n32/n64 PIC without explicit relocs
1629 - n32/n64 PIC with explicit relocs
1631 In the first case, both local and global accesses will use an
1632 R_MIPS_GOT16 relocation. We must correctly predict which of
1633 the two semantics (local or global) the assembler and linker
1634 will apply. The choice depends on the symbol's binding rather
1635 than its visibility.
1637 In the second case, the assembler will not use R_MIPS_GOT16
1638 relocations, but it chooses between local and global accesses
1639 in the same way as for o32 PIC.
1641 In the third case we have more freedom since both forms of
1642 access will work for any kind of symbol. However, there seems
1643 little point in doing things differently. */
1644 if (mips_global_symbol_p (x))
1645 return SYMBOL_GOT_DISP;
1647 return SYMBOL_GOT_PAGE_OFST;
1650 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1651 return SYMBOL_FORCE_TO_MEM;
1653 return SYMBOL_ABSOLUTE;
1656 /* Classify the base of symbolic expression X, given that X appears in
1657 context CONTEXT. */
1659 static enum mips_symbol_type
1660 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1662 rtx offset;
1664 split_const (x, &x, &offset);
1665 if (UNSPEC_ADDRESS_P (x))
1666 return UNSPEC_ADDRESS_TYPE (x);
1668 return mips_classify_symbol (x, context);
1671 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1672 is the alignment in bytes of SYMBOL_REF X. */
1674 static bool
1675 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1677 HOST_WIDE_INT align;
1679 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1680 return IN_RANGE (offset, 0, align - 1);
1683 /* Return true if X is a symbolic constant that can be used in context
1684 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1686 bool
1687 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1688 enum mips_symbol_type *symbol_type)
1690 rtx offset;
1692 split_const (x, &x, &offset);
1693 if (UNSPEC_ADDRESS_P (x))
1695 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1696 x = UNSPEC_ADDRESS (x);
1698 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1700 *symbol_type = mips_classify_symbol (x, context);
1701 if (*symbol_type == SYMBOL_TLS)
1702 return false;
1704 else
1705 return false;
1707 if (offset == const0_rtx)
1708 return true;
1710 /* Check whether a nonzero offset is valid for the underlying
1711 relocations. */
1712 switch (*symbol_type)
1714 case SYMBOL_ABSOLUTE:
1715 case SYMBOL_FORCE_TO_MEM:
1716 case SYMBOL_32_HIGH:
1717 case SYMBOL_64_HIGH:
1718 case SYMBOL_64_MID:
1719 case SYMBOL_64_LOW:
1720 /* If the target has 64-bit pointers and the object file only
1721 supports 32-bit symbols, the values of those symbols will be
1722 sign-extended. In this case we can't allow an arbitrary offset
1723 in case the 32-bit value X + OFFSET has a different sign from X. */
1724 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1725 return offset_within_block_p (x, INTVAL (offset));
1727 /* In other cases the relocations can handle any offset. */
1728 return true;
1730 case SYMBOL_PC_RELATIVE:
1731 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1732 In this case, we no longer have access to the underlying constant,
1733 but the original symbol-based access was known to be valid. */
1734 if (GET_CODE (x) == LABEL_REF)
1735 return true;
1737 /* Fall through. */
1739 case SYMBOL_GP_RELATIVE:
1740 /* Make sure that the offset refers to something within the
1741 same object block. This should guarantee that the final
1742 PC- or GP-relative offset is within the 16-bit limit. */
1743 return offset_within_block_p (x, INTVAL (offset));
1745 case SYMBOL_GOT_PAGE_OFST:
1746 case SYMBOL_GOTOFF_PAGE:
1747 /* If the symbol is global, the GOT entry will contain the symbol's
1748 address, and we will apply a 16-bit offset after loading it.
1749 If the symbol is local, the linker should provide enough local
1750 GOT entries for a 16-bit offset, but larger offsets may lead
1751 to GOT overflow. */
1752 return SMALL_INT (offset);
1754 case SYMBOL_TPREL:
1755 case SYMBOL_DTPREL:
1756 /* There is no carry between the HI and LO REL relocations, so the
1757 offset is only valid if we know it won't lead to such a carry. */
1758 return mips_offset_within_alignment_p (x, INTVAL (offset));
1760 case SYMBOL_GOT_DISP:
1761 case SYMBOL_GOTOFF_DISP:
1762 case SYMBOL_GOTOFF_CALL:
1763 case SYMBOL_GOTOFF_LOADGP:
1764 case SYMBOL_TLSGD:
1765 case SYMBOL_TLSLDM:
1766 case SYMBOL_GOTTPREL:
1767 case SYMBOL_TLS:
1768 case SYMBOL_HALF:
1769 return false;
1771 gcc_unreachable ();
1774 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1775 single instruction. We rely on the fact that, in the worst case,
1776 all instructions involved in a MIPS16 address calculation are usually
1777 extended ones. */
1779 static int
1780 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1782 switch (type)
1784 case SYMBOL_ABSOLUTE:
1785 /* When using 64-bit symbols, we need 5 preparatory instructions,
1786 such as:
1788 lui $at,%highest(symbol)
1789 daddiu $at,$at,%higher(symbol)
1790 dsll $at,$at,16
1791 daddiu $at,$at,%hi(symbol)
1792 dsll $at,$at,16
1794 The final address is then $at + %lo(symbol). With 32-bit
1795 symbols we just need a preparatory LUI for normal mode and
1796 a preparatory LI and SLL for MIPS16. */
1797 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1799 case SYMBOL_GP_RELATIVE:
1800 /* Treat GP-relative accesses as taking a single instruction on
1801 MIPS16 too; the copy of $gp can often be shared. */
1802 return 1;
1804 case SYMBOL_PC_RELATIVE:
1805 /* PC-relative constants can be only be used with ADDIUPC,
1806 DADDIUPC, LWPC and LDPC. */
1807 if (mode == MAX_MACHINE_MODE
1808 || GET_MODE_SIZE (mode) == 4
1809 || GET_MODE_SIZE (mode) == 8)
1810 return 1;
1812 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1813 return 0;
1815 case SYMBOL_FORCE_TO_MEM:
1816 /* LEAs will be converted into constant-pool references by
1817 mips_reorg. */
1818 if (mode == MAX_MACHINE_MODE)
1819 return 1;
1821 /* The constant must be loaded and then dereferenced. */
1822 return 0;
1824 case SYMBOL_GOT_DISP:
1825 /* The constant will have to be loaded from the GOT before it
1826 is used in an address. */
1827 if (mode != MAX_MACHINE_MODE)
1828 return 0;
1830 /* Fall through. */
1832 case SYMBOL_GOT_PAGE_OFST:
1833 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1834 local/global classification is accurate. The worst cases are:
1836 (1) For local symbols when generating o32 or o64 code. The assembler
1837 will use:
1839 lw $at,%got(symbol)
1842 ...and the final address will be $at + %lo(symbol).
1844 (2) For global symbols when -mxgot. The assembler will use:
1846 lui $at,%got_hi(symbol)
1847 (d)addu $at,$at,$gp
1849 ...and the final address will be $at + %got_lo(symbol). */
1850 return 3;
1852 case SYMBOL_GOTOFF_PAGE:
1853 case SYMBOL_GOTOFF_DISP:
1854 case SYMBOL_GOTOFF_CALL:
1855 case SYMBOL_GOTOFF_LOADGP:
1856 case SYMBOL_32_HIGH:
1857 case SYMBOL_64_HIGH:
1858 case SYMBOL_64_MID:
1859 case SYMBOL_64_LOW:
1860 case SYMBOL_TLSGD:
1861 case SYMBOL_TLSLDM:
1862 case SYMBOL_DTPREL:
1863 case SYMBOL_GOTTPREL:
1864 case SYMBOL_TPREL:
1865 case SYMBOL_HALF:
1866 /* A 16-bit constant formed by a single relocation, or a 32-bit
1867 constant formed from a high 16-bit relocation and a low 16-bit
1868 relocation. Use mips_split_p to determine which. 32-bit
1869 constants need an "lui; addiu" sequence for normal mode and
1870 an "li; sll; addiu" sequence for MIPS16 mode. */
1871 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1873 case SYMBOL_TLS:
1874 /* We don't treat a bare TLS symbol as a constant. */
1875 return 0;
1877 gcc_unreachable ();
1880 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1881 to load symbols of type TYPE into a register. Return 0 if the given
1882 type of symbol cannot be used as an immediate operand.
1884 Otherwise, return the number of instructions needed to load or store
1885 values of mode MODE to or from addresses of type TYPE. Return 0 if
1886 the given type of symbol is not valid in addresses.
1888 In both cases, treat extended MIPS16 instructions as two instructions. */
1890 static int
1891 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1893 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1896 /* A for_each_rtx callback. Stop the search if *X references a
1897 thread-local symbol. */
1899 static int
1900 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1902 return mips_tls_symbol_p (*x);
1905 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1907 static bool
1908 mips_cannot_force_const_mem (rtx x)
1910 enum mips_symbol_type type;
1911 rtx base, offset;
1913 /* There is no assembler syntax for expressing an address-sized
1914 high part. */
1915 if (GET_CODE (x) == HIGH)
1916 return true;
1918 /* As an optimization, reject constants that mips_legitimize_move
1919 can expand inline.
1921 Suppose we have a multi-instruction sequence that loads constant C
1922 into register R. If R does not get allocated a hard register, and
1923 R is used in an operand that allows both registers and memory
1924 references, reload will consider forcing C into memory and using
1925 one of the instruction's memory alternatives. Returning false
1926 here will force it to use an input reload instead. */
1927 if (GET_CODE (x) == CONST_INT && LEGITIMATE_CONSTANT_P (x))
1928 return true;
1930 split_const (x, &base, &offset);
1931 if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
1932 && type != SYMBOL_FORCE_TO_MEM)
1934 /* The same optimization as for CONST_INT. */
1935 if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
1936 return true;
1938 /* If MIPS16 constant pools live in the text section, they should
1939 not refer to anything that might need run-time relocation. */
1940 if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
1941 return true;
1944 /* TLS symbols must be computed by mips_legitimize_move. */
1945 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1946 return true;
1948 return false;
1951 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1952 constants when we're using a per-function constant pool. */
1954 static bool
1955 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1956 const_rtx x ATTRIBUTE_UNUSED)
1958 return !TARGET_MIPS16_PCREL_LOADS;
1961 /* Return true if register REGNO is a valid base register for mode MODE.
1962 STRICT_P is true if REG_OK_STRICT is in effect. */
1965 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1966 bool strict_p)
1968 if (!HARD_REGISTER_NUM_P (regno))
1970 if (!strict_p)
1971 return true;
1972 regno = reg_renumber[regno];
1975 /* These fake registers will be eliminated to either the stack or
1976 hard frame pointer, both of which are usually valid base registers.
1977 Reload deals with the cases where the eliminated form isn't valid. */
1978 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1979 return true;
1981 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1982 values, nothing smaller. There are two problems here:
1984 (a) Instantiating virtual registers can introduce new uses of the
1985 stack pointer. If these virtual registers are valid addresses,
1986 the stack pointer should be too.
1988 (b) Most uses of the stack pointer are not made explicit until
1989 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1990 We don't know until that stage whether we'll be eliminating to the
1991 stack pointer (which needs the restriction) or the hard frame
1992 pointer (which doesn't).
1994 All in all, it seems more consistent to only enforce this restriction
1995 during and after reload. */
1996 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1997 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1999 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
2002 /* Return true if X is a valid base register for mode MODE.
2003 STRICT_P is true if REG_OK_STRICT is in effect. */
2005 static bool
2006 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
2008 if (!strict_p && GET_CODE (x) == SUBREG)
2009 x = SUBREG_REG (x);
2011 return (REG_P (x)
2012 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
2015 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2016 can address a value of mode MODE. */
2018 static bool
2019 mips_valid_offset_p (rtx x, enum machine_mode mode)
2021 /* Check that X is a signed 16-bit number. */
2022 if (!const_arith_operand (x, Pmode))
2023 return false;
2025 /* We may need to split multiword moves, so make sure that every word
2026 is accessible. */
2027 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2028 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
2029 return false;
2031 return true;
2034 /* Return true if a LO_SUM can address a value of mode MODE when the
2035 LO_SUM symbol has type SYMBOL_TYPE. */
2037 static bool
2038 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
2040 /* Check that symbols of type SYMBOL_TYPE can be used to access values
2041 of mode MODE. */
2042 if (mips_symbol_insns (symbol_type, mode) == 0)
2043 return false;
2045 /* Check that there is a known low-part relocation. */
2046 if (mips_lo_relocs[symbol_type] == NULL)
2047 return false;
2049 /* We may need to split multiword moves, so make sure that each word
2050 can be accessed without inducing a carry. This is mainly needed
2051 for o64, which has historically only guaranteed 64-bit alignment
2052 for 128-bit types. */
2053 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2054 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
2055 return false;
2057 return true;
2060 /* Return true if X is a valid address for machine mode MODE. If it is,
2061 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2062 effect. */
2064 static bool
2065 mips_classify_address (struct mips_address_info *info, rtx x,
2066 enum machine_mode mode, bool strict_p)
2068 switch (GET_CODE (x))
2070 case REG:
2071 case SUBREG:
2072 info->type = ADDRESS_REG;
2073 info->reg = x;
2074 info->offset = const0_rtx;
2075 return mips_valid_base_register_p (info->reg, mode, strict_p);
2077 case PLUS:
2078 info->type = ADDRESS_REG;
2079 info->reg = XEXP (x, 0);
2080 info->offset = XEXP (x, 1);
2081 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2082 && mips_valid_offset_p (info->offset, mode));
2084 case LO_SUM:
2085 info->type = ADDRESS_LO_SUM;
2086 info->reg = XEXP (x, 0);
2087 info->offset = XEXP (x, 1);
2088 /* We have to trust the creator of the LO_SUM to do something vaguely
2089 sane. Target-independent code that creates a LO_SUM should also
2090 create and verify the matching HIGH. Target-independent code that
2091 adds an offset to a LO_SUM must prove that the offset will not
2092 induce a carry. Failure to do either of these things would be
2093 a bug, and we are not required to check for it here. The MIPS
2094 backend itself should only create LO_SUMs for valid symbolic
2095 constants, with the high part being either a HIGH or a copy
2096 of _gp. */
2097 info->symbol_type
2098 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
2099 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2100 && mips_valid_lo_sum_p (info->symbol_type, mode));
2102 case CONST_INT:
2103 /* Small-integer addresses don't occur very often, but they
2104 are legitimate if $0 is a valid base register. */
2105 info->type = ADDRESS_CONST_INT;
2106 return !TARGET_MIPS16 && SMALL_INT (x);
2108 case CONST:
2109 case LABEL_REF:
2110 case SYMBOL_REF:
2111 info->type = ADDRESS_SYMBOLIC;
2112 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2113 &info->symbol_type)
2114 && mips_symbol_insns (info->symbol_type, mode) > 0
2115 && !mips_split_p[info->symbol_type]);
2117 default:
2118 return false;
2122 /* Return true if X is a legitimate address for a memory operand of mode
2123 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
2125 bool
2126 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2128 struct mips_address_info addr;
2130 return mips_classify_address (&addr, x, mode, strict_p);
2133 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
2135 bool
2136 mips_stack_address_p (rtx x, enum machine_mode mode)
2138 struct mips_address_info addr;
2140 return (mips_classify_address (&addr, x, mode, false)
2141 && addr.type == ADDRESS_REG
2142 && addr.reg == stack_pointer_rtx);
2145 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2146 address instruction. Note that such addresses are not considered
2147 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
2148 is so restricted. */
2150 static bool
2151 mips_lwxs_address_p (rtx addr)
2153 if (ISA_HAS_LWXS
2154 && GET_CODE (addr) == PLUS
2155 && REG_P (XEXP (addr, 1)))
2157 rtx offset = XEXP (addr, 0);
2158 if (GET_CODE (offset) == MULT
2159 && REG_P (XEXP (offset, 0))
2160 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2161 && INTVAL (XEXP (offset, 1)) == 4)
2162 return true;
2164 return false;
2167 /* Return true if a value at OFFSET bytes from base register BASE can be
2168 accessed using an unextended MIPS16 instruction. MODE is the mode of
2169 the value.
2171 Usually the offset in an unextended instruction is a 5-bit field.
2172 The offset is unsigned and shifted left once for LH and SH, twice
2173 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2174 an 8-bit immediate field that's shifted left twice. */
2176 static bool
2177 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2178 unsigned HOST_WIDE_INT offset)
2180 if (offset % GET_MODE_SIZE (mode) == 0)
2182 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2183 return offset < 256U * GET_MODE_SIZE (mode);
2184 return offset < 32U * GET_MODE_SIZE (mode);
2186 return false;
2189 /* Return the number of instructions needed to load or store a value
2190 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2191 Assume that multiword moves may need to be split into word moves
2192 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2193 enough.
2195 For MIPS16 code, count extended instructions as two instructions. */
2198 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2200 struct mips_address_info addr;
2201 int factor;
2203 /* BLKmode is used for single unaligned loads and stores and should
2204 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2205 meaningless, so we have to single it out as a special case one way
2206 or the other.) */
2207 if (mode != BLKmode && might_split_p)
2208 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2209 else
2210 factor = 1;
2212 if (mips_classify_address (&addr, x, mode, false))
2213 switch (addr.type)
2215 case ADDRESS_REG:
2216 if (TARGET_MIPS16
2217 && !mips16_unextended_reference_p (mode, addr.reg,
2218 UINTVAL (addr.offset)))
2219 return factor * 2;
2220 return factor;
2222 case ADDRESS_LO_SUM:
2223 return TARGET_MIPS16 ? factor * 2 : factor;
2225 case ADDRESS_CONST_INT:
2226 return factor;
2228 case ADDRESS_SYMBOLIC:
2229 return factor * mips_symbol_insns (addr.symbol_type, mode);
2231 return 0;
2234 /* Return the number of instructions needed to load constant X.
2235 Return 0 if X isn't a valid constant. */
2238 mips_const_insns (rtx x)
2240 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2241 enum mips_symbol_type symbol_type;
2242 rtx offset;
2244 switch (GET_CODE (x))
2246 case HIGH:
2247 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2248 &symbol_type)
2249 || !mips_split_p[symbol_type])
2250 return 0;
2252 /* This is simply an LUI for normal mode. It is an extended
2253 LI followed by an extended SLL for MIPS16. */
2254 return TARGET_MIPS16 ? 4 : 1;
2256 case CONST_INT:
2257 if (TARGET_MIPS16)
2258 /* Unsigned 8-bit constants can be loaded using an unextended
2259 LI instruction. Unsigned 16-bit constants can be loaded
2260 using an extended LI. Negative constants must be loaded
2261 using LI and then negated. */
2262 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2263 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2264 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2265 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2266 : 0);
2268 return mips_build_integer (codes, INTVAL (x));
2270 case CONST_DOUBLE:
2271 case CONST_VECTOR:
2272 /* Allow zeros for normal mode, where we can use $0. */
2273 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2275 case CONST:
2276 if (CONST_GP_P (x))
2277 return 1;
2279 /* See if we can refer to X directly. */
2280 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2281 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2283 /* Otherwise try splitting the constant into a base and offset.
2284 If the offset is a 16-bit value, we can load the base address
2285 into a register and then use (D)ADDIU to add in the offset.
2286 If the offset is larger, we can load the base and offset
2287 into separate registers and add them together with (D)ADDU.
2288 However, the latter is only possible before reload; during
2289 and after reload, we must have the option of forcing the
2290 constant into the pool instead. */
2291 split_const (x, &x, &offset);
2292 if (offset != 0)
2294 int n = mips_const_insns (x);
2295 if (n != 0)
2297 if (SMALL_INT (offset))
2298 return n + 1;
2299 else if (!targetm.cannot_force_const_mem (x))
2300 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2303 return 0;
2305 case SYMBOL_REF:
2306 case LABEL_REF:
2307 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2308 MAX_MACHINE_MODE);
2310 default:
2311 return 0;
2315 /* X is a doubleword constant that can be handled by splitting it into
2316 two words and loading each word separately. Return the number of
2317 instructions required to do this. */
2320 mips_split_const_insns (rtx x)
2322 unsigned int low, high;
2324 low = mips_const_insns (mips_subword (x, false));
2325 high = mips_const_insns (mips_subword (x, true));
2326 gcc_assert (low > 0 && high > 0);
2327 return low + high;
2330 /* Return the number of instructions needed to implement INSN,
2331 given that it loads from or stores to MEM. Count extended
2332 MIPS16 instructions as two instructions. */
2335 mips_load_store_insns (rtx mem, rtx insn)
2337 enum machine_mode mode;
2338 bool might_split_p;
2339 rtx set;
2341 gcc_assert (MEM_P (mem));
2342 mode = GET_MODE (mem);
2344 /* Try to prove that INSN does not need to be split. */
2345 might_split_p = true;
2346 if (GET_MODE_BITSIZE (mode) == 64)
2348 set = single_set (insn);
2349 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2350 might_split_p = false;
2353 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2356 /* Return the number of instructions needed for an integer division. */
2359 mips_idiv_insns (void)
2361 int count;
2363 count = 1;
2364 if (TARGET_CHECK_ZERO_DIV)
2366 if (GENERATE_DIVIDE_TRAPS)
2367 count++;
2368 else
2369 count += 2;
2372 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2373 count++;
2374 return count;
2377 /* Emit a move from SRC to DEST. Assume that the move expanders can
2378 handle all moves if !can_create_pseudo_p (). The distinction is
2379 important because, unlike emit_move_insn, the move expanders know
2380 how to force Pmode objects into the constant pool even when the
2381 constant pool address is not itself legitimate. */
2384 mips_emit_move (rtx dest, rtx src)
2386 return (can_create_pseudo_p ()
2387 ? emit_move_insn (dest, src)
2388 : emit_move_insn_1 (dest, src));
2391 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2393 static void
2394 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2396 emit_insn (gen_rtx_SET (VOIDmode, target,
2397 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2400 /* Compute (CODE OP0 OP1) and store the result in a new register
2401 of mode MODE. Return that new register. */
2403 static rtx
2404 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2406 rtx reg;
2408 reg = gen_reg_rtx (mode);
2409 mips_emit_binary (code, reg, op0, op1);
2410 return reg;
2413 /* Copy VALUE to a register and return that register. If new pseudos
2414 are allowed, copy it into a new register, otherwise use DEST. */
2416 static rtx
2417 mips_force_temporary (rtx dest, rtx value)
2419 if (can_create_pseudo_p ())
2420 return force_reg (Pmode, value);
2421 else
2423 mips_emit_move (dest, value);
2424 return dest;
2428 /* Emit a call sequence with call pattern PATTERN and return the call
2429 instruction itself (which is not necessarily the last instruction
2430 emitted). ORIG_ADDR is the original, unlegitimized address,
2431 ADDR is the legitimized form, and LAZY_P is true if the call
2432 address is lazily-bound. */
2434 static rtx
2435 mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
2437 rtx insn, reg;
2439 insn = emit_call_insn (pattern);
2441 if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
2443 /* MIPS16 JALRs only take MIPS16 registers. If the target
2444 function requires $25 to be valid on entry, we must copy it
2445 there separately. The move instruction can be put in the
2446 call's delay slot. */
2447 reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
2448 emit_insn_before (gen_move_insn (reg, addr), insn);
2449 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
2452 if (lazy_p)
2453 /* Lazy-binding stubs require $gp to be valid on entry. */
2454 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2456 if (TARGET_USE_GOT)
2458 /* See the comment above load_call<mode> for details. */
2459 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2460 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2461 emit_insn (gen_update_got_version ());
2463 return insn;
2466 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2467 then add CONST_INT OFFSET to the result. */
2469 static rtx
2470 mips_unspec_address_offset (rtx base, rtx offset,
2471 enum mips_symbol_type symbol_type)
2473 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2474 UNSPEC_ADDRESS_FIRST + symbol_type);
2475 if (offset != const0_rtx)
2476 base = gen_rtx_PLUS (Pmode, base, offset);
2477 return gen_rtx_CONST (Pmode, base);
2480 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2481 type SYMBOL_TYPE. */
2484 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2486 rtx base, offset;
2488 split_const (address, &base, &offset);
2489 return mips_unspec_address_offset (base, offset, symbol_type);
2492 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2493 high part to BASE and return the result. Just return BASE otherwise.
2494 TEMP is as for mips_force_temporary.
2496 The returned expression can be used as the first operand to a LO_SUM. */
2498 static rtx
2499 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2500 enum mips_symbol_type symbol_type)
2502 if (mips_split_p[symbol_type])
2504 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2505 addr = mips_force_temporary (temp, addr);
2506 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2508 return base;
2511 /* Return an instruction that copies $gp into register REG. We want
2512 GCC to treat the register's value as constant, so that its value
2513 can be rematerialized on demand. */
2515 static rtx
2516 gen_load_const_gp (rtx reg)
2518 return (Pmode == SImode
2519 ? gen_load_const_gp_si (reg)
2520 : gen_load_const_gp_di (reg));
2523 /* Return a pseudo register that contains the value of $gp throughout
2524 the current function. Such registers are needed by MIPS16 functions,
2525 for which $gp itself is not a valid base register or addition operand. */
2527 static rtx
2528 mips16_gp_pseudo_reg (void)
2530 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2531 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2533 /* Don't emit an instruction to initialize the pseudo register if
2534 we are being called from the tree optimizers' cost-calculation
2535 routines. */
2536 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2537 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2539 rtx insn, scan;
2541 push_topmost_sequence ();
2543 scan = get_insns ();
2544 while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
2545 scan = NEXT_INSN (scan);
2547 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2548 emit_insn_after (insn, scan);
2550 pop_topmost_sequence ();
2552 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2555 return cfun->machine->mips16_gp_pseudo_rtx;
2558 /* Return a base register that holds pic_offset_table_rtx.
2559 TEMP, if nonnull, is a scratch Pmode base register. */
2562 mips_pic_base_register (rtx temp)
2564 if (!TARGET_MIPS16)
2565 return pic_offset_table_rtx;
2567 if (can_create_pseudo_p ())
2568 return mips16_gp_pseudo_reg ();
2570 if (TARGET_USE_GOT)
2571 /* The first post-reload split exposes all references to $gp
2572 (both uses and definitions). All references must remain
2573 explicit after that point.
2575 It is safe to introduce uses of $gp at any time, so for
2576 simplicity, we do that before the split too. */
2577 mips_emit_move (temp, pic_offset_table_rtx);
2578 else
2579 emit_insn (gen_load_const_gp (temp));
2580 return temp;
2583 /* Create and return a GOT reference of type TYPE for address ADDR.
2584 TEMP, if nonnull, is a scratch Pmode base register. */
2587 mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
2589 rtx base, high, lo_sum_symbol;
2591 base = mips_pic_base_register (temp);
2593 /* If we used the temporary register to load $gp, we can't use
2594 it for the high part as well. */
2595 if (temp != NULL && reg_overlap_mentioned_p (base, temp))
2596 temp = NULL;
2598 high = mips_unspec_offset_high (temp, base, addr, type);
2599 lo_sum_symbol = mips_unspec_address (addr, type);
2601 if (type == SYMBOL_GOTOFF_CALL)
2602 return (Pmode == SImode
2603 ? gen_unspec_callsi (high, lo_sum_symbol)
2604 : gen_unspec_calldi (high, lo_sum_symbol));
2605 else
2606 return (Pmode == SImode
2607 ? gen_unspec_gotsi (high, lo_sum_symbol)
2608 : gen_unspec_gotdi (high, lo_sum_symbol));
2611 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2612 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2613 constant in that context and can be split into high and low parts.
2614 If so, and if LOW_OUT is nonnull, emit the high part and store the
2615 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2617 TEMP is as for mips_force_temporary and is used to load the high
2618 part into a register.
2620 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2621 a legitimize SET_SRC for an .md pattern, otherwise the low part
2622 is guaranteed to be a legitimate address for mode MODE. */
2624 bool
2625 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2627 enum mips_symbol_context context;
2628 enum mips_symbol_type symbol_type;
2629 rtx high;
2631 context = (mode == MAX_MACHINE_MODE
2632 ? SYMBOL_CONTEXT_LEA
2633 : SYMBOL_CONTEXT_MEM);
2634 if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
2636 addr = XEXP (addr, 0);
2637 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2638 && mips_symbol_insns (symbol_type, mode) > 0
2639 && mips_split_hi_p[symbol_type])
2641 if (low_out)
2642 switch (symbol_type)
2644 case SYMBOL_GOT_PAGE_OFST:
2645 /* The high part of a page/ofst pair is loaded from the GOT. */
2646 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
2647 break;
2649 default:
2650 gcc_unreachable ();
2652 return true;
2655 else
2657 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2658 && mips_symbol_insns (symbol_type, mode) > 0
2659 && mips_split_p[symbol_type])
2661 if (low_out)
2662 switch (symbol_type)
2664 case SYMBOL_GOT_DISP:
2665 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
2666 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
2667 break;
2669 case SYMBOL_GP_RELATIVE:
2670 high = mips_pic_base_register (temp);
2671 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2672 break;
2674 default:
2675 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2676 high = mips_force_temporary (temp, high);
2677 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2678 break;
2680 return true;
2683 return false;
2686 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2687 mips_force_temporary; it is only needed when OFFSET is not a
2688 SMALL_OPERAND. */
2690 static rtx
2691 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2693 if (!SMALL_OPERAND (offset))
2695 rtx high;
2697 if (TARGET_MIPS16)
2699 /* Load the full offset into a register so that we can use
2700 an unextended instruction for the address itself. */
2701 high = GEN_INT (offset);
2702 offset = 0;
2704 else
2706 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2707 high = GEN_INT (CONST_HIGH_PART (offset));
2708 offset = CONST_LOW_PART (offset);
2710 high = mips_force_temporary (temp, high);
2711 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2713 return plus_constant (reg, offset);
2716 /* The __tls_get_attr symbol. */
2717 static GTY(()) rtx mips_tls_symbol;
2719 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2720 the TLS symbol we are referencing and TYPE is the symbol type to use
2721 (either global dynamic or local dynamic). V0 is an RTX for the
2722 return value location. */
2724 static rtx
2725 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2727 rtx insn, loc, a0;
2729 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2731 if (!mips_tls_symbol)
2732 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2734 loc = mips_unspec_address (sym, type);
2736 start_sequence ();
2738 emit_insn (gen_rtx_SET (Pmode, a0,
2739 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2740 insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
2741 const0_rtx, NULL_RTX, false);
2742 RTL_CONST_CALL_P (insn) = 1;
2743 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2744 insn = get_insns ();
2746 end_sequence ();
2748 return insn;
2751 /* Return a pseudo register that contains the current thread pointer. */
2753 static rtx
2754 mips_get_tp (void)
2756 rtx tp;
2758 tp = gen_reg_rtx (Pmode);
2759 if (Pmode == DImode)
2760 emit_insn (gen_tls_get_tp_di (tp));
2761 else
2762 emit_insn (gen_tls_get_tp_si (tp));
2763 return tp;
2766 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2767 its address. The return value will be both a valid address and a valid
2768 SET_SRC (either a REG or a LO_SUM). */
2770 static rtx
2771 mips_legitimize_tls_address (rtx loc)
2773 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2774 enum tls_model model;
2776 if (TARGET_MIPS16)
2778 sorry ("MIPS16 TLS");
2779 return gen_reg_rtx (Pmode);
2782 model = SYMBOL_REF_TLS_MODEL (loc);
2783 /* Only TARGET_ABICALLS code can have more than one module; other
2784 code must be be static and should not use a GOT. All TLS models
2785 reduce to local exec in this situation. */
2786 if (!TARGET_ABICALLS)
2787 model = TLS_MODEL_LOCAL_EXEC;
2789 switch (model)
2791 case TLS_MODEL_GLOBAL_DYNAMIC:
2792 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2793 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2794 dest = gen_reg_rtx (Pmode);
2795 emit_libcall_block (insn, dest, v0, loc);
2796 break;
2798 case TLS_MODEL_LOCAL_DYNAMIC:
2799 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2800 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2801 tmp1 = gen_reg_rtx (Pmode);
2803 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2804 share the LDM result with other LD model accesses. */
2805 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2806 UNSPEC_TLS_LDM);
2807 emit_libcall_block (insn, tmp1, v0, eqv);
2809 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2810 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2811 mips_unspec_address (loc, SYMBOL_DTPREL));
2812 break;
2814 case TLS_MODEL_INITIAL_EXEC:
2815 tp = mips_get_tp ();
2816 tmp1 = gen_reg_rtx (Pmode);
2817 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2818 if (Pmode == DImode)
2819 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2820 else
2821 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2822 dest = gen_reg_rtx (Pmode);
2823 emit_insn (gen_add3_insn (dest, tmp1, tp));
2824 break;
2826 case TLS_MODEL_LOCAL_EXEC:
2827 tp = mips_get_tp ();
2828 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2829 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2830 mips_unspec_address (loc, SYMBOL_TPREL));
2831 break;
2833 default:
2834 gcc_unreachable ();
2836 return dest;
2839 /* If X is not a valid address for mode MODE, force it into a register. */
2841 static rtx
2842 mips_force_address (rtx x, enum machine_mode mode)
2844 if (!mips_legitimate_address_p (mode, x, false))
2845 x = force_reg (Pmode, x);
2846 return x;
2849 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2850 be legitimized in a way that the generic machinery might not expect,
2851 put the new address in *XLOC and return true. MODE is the mode of
2852 the memory being accessed. */
2854 bool
2855 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2857 rtx base, addr;
2858 HOST_WIDE_INT offset;
2860 if (mips_tls_symbol_p (*xloc))
2862 *xloc = mips_legitimize_tls_address (*xloc);
2863 return true;
2866 /* See if the address can split into a high part and a LO_SUM. */
2867 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2869 *xloc = mips_force_address (addr, mode);
2870 return true;
2873 /* Handle BASE + OFFSET using mips_add_offset. */
2874 mips_split_plus (*xloc, &base, &offset);
2875 if (offset != 0)
2877 if (!mips_valid_base_register_p (base, mode, false))
2878 base = copy_to_mode_reg (Pmode, base);
2879 addr = mips_add_offset (NULL, base, offset);
2880 *xloc = mips_force_address (addr, mode);
2881 return true;
2883 return false;
2886 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2888 void
2889 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2891 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2892 enum machine_mode mode;
2893 unsigned int i, num_ops;
2894 rtx x;
2896 mode = GET_MODE (dest);
2897 num_ops = mips_build_integer (codes, value);
2899 /* Apply each binary operation to X. Invariant: X is a legitimate
2900 source operand for a SET pattern. */
2901 x = GEN_INT (codes[0].value);
2902 for (i = 1; i < num_ops; i++)
2904 if (!can_create_pseudo_p ())
2906 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2907 x = temp;
2909 else
2910 x = force_reg (mode, x);
2911 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2914 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2917 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2918 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2919 move_operand. */
2921 static void
2922 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2924 rtx base, offset;
2926 /* Split moves of big integers into smaller pieces. */
2927 if (splittable_const_int_operand (src, mode))
2929 mips_move_integer (dest, dest, INTVAL (src));
2930 return;
2933 /* Split moves of symbolic constants into high/low pairs. */
2934 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2936 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2937 return;
2940 /* Generate the appropriate access sequences for TLS symbols. */
2941 if (mips_tls_symbol_p (src))
2943 mips_emit_move (dest, mips_legitimize_tls_address (src));
2944 return;
2947 /* If we have (const (plus symbol offset)), and that expression cannot
2948 be forced into memory, load the symbol first and add in the offset.
2949 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2950 forced into memory, as it usually produces better code. */
2951 split_const (src, &base, &offset);
2952 if (offset != const0_rtx
2953 && (targetm.cannot_force_const_mem (src)
2954 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2956 base = mips_force_temporary (dest, base);
2957 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2958 return;
2961 src = force_const_mem (mode, src);
2963 /* When using explicit relocs, constant pool references are sometimes
2964 not legitimate addresses. */
2965 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2966 mips_emit_move (dest, src);
2969 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2970 sequence that is valid. */
2972 bool
2973 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2975 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2977 mips_emit_move (dest, force_reg (mode, src));
2978 return true;
2981 /* We need to deal with constants that would be legitimate
2982 immediate_operands but aren't legitimate move_operands. */
2983 if (CONSTANT_P (src) && !move_operand (src, mode))
2985 mips_legitimize_const_move (mode, dest, src);
2986 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2987 return true;
2989 return false;
2992 /* Return true if value X in context CONTEXT is a small-data address
2993 that can be rewritten as a LO_SUM. */
2995 static bool
2996 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2998 enum mips_symbol_type symbol_type;
3000 return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
3001 && !mips_split_p[SYMBOL_GP_RELATIVE]
3002 && mips_symbolic_constant_p (x, context, &symbol_type)
3003 && symbol_type == SYMBOL_GP_RELATIVE);
3006 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
3007 containing MEM, or null if none. */
3009 static int
3010 mips_small_data_pattern_1 (rtx *loc, void *data)
3012 enum mips_symbol_context context;
3014 if (GET_CODE (*loc) == LO_SUM)
3015 return -1;
3017 if (MEM_P (*loc))
3019 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
3020 return 1;
3021 return -1;
3024 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3025 return mips_rewrite_small_data_p (*loc, context);
3028 /* Return true if OP refers to small data symbols directly, not through
3029 a LO_SUM. */
3031 bool
3032 mips_small_data_pattern_p (rtx op)
3034 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
3037 /* A for_each_rtx callback, used by mips_rewrite_small_data.
3038 DATA is the containing MEM, or null if none. */
3040 static int
3041 mips_rewrite_small_data_1 (rtx *loc, void *data)
3043 enum mips_symbol_context context;
3045 if (MEM_P (*loc))
3047 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
3048 return -1;
3051 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3052 if (mips_rewrite_small_data_p (*loc, context))
3053 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
3055 if (GET_CODE (*loc) == LO_SUM)
3056 return -1;
3058 return 0;
3061 /* Rewrite instruction pattern PATTERN so that it refers to small data
3062 using explicit relocations. */
3065 mips_rewrite_small_data (rtx pattern)
3067 pattern = copy_insn (pattern);
3068 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
3069 return pattern;
3072 /* We need a lot of little routines to check the range of MIPS16 immediate
3073 operands. */
3075 static int
3076 m16_check_op (rtx op, int low, int high, int mask)
3078 return (GET_CODE (op) == CONST_INT
3079 && IN_RANGE (INTVAL (op), low, high)
3080 && (INTVAL (op) & mask) == 0);
3084 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3086 return m16_check_op (op, 0x1, 0x8, 0);
3090 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3092 return m16_check_op (op, -0x8, 0x7, 0);
3096 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3098 return m16_check_op (op, -0x7, 0x8, 0);
3102 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3104 return m16_check_op (op, -0x10, 0xf, 0);
3108 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3110 return m16_check_op (op, -0xf, 0x10, 0);
3114 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3116 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
3120 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3122 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
3126 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3128 return m16_check_op (op, -0x80, 0x7f, 0);
3132 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3134 return m16_check_op (op, -0x7f, 0x80, 0);
3138 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3140 return m16_check_op (op, 0x0, 0xff, 0);
3144 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3146 return m16_check_op (op, -0xff, 0x0, 0);
3150 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3152 return m16_check_op (op, -0x1, 0xfe, 0);
3156 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3158 return m16_check_op (op, 0x0, 0xff << 2, 3);
3162 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3164 return m16_check_op (op, -0xff << 2, 0x0, 3);
3168 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3170 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
3174 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3176 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
3179 /* The cost of loading values from the constant pool. It should be
3180 larger than the cost of any constant we want to synthesize inline. */
3181 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3183 /* Return the cost of X when used as an operand to the MIPS16 instruction
3184 that implements CODE. Return -1 if there is no such instruction, or if
3185 X is not a valid immediate operand for it. */
3187 static int
3188 mips16_constant_cost (int code, HOST_WIDE_INT x)
3190 switch (code)
3192 case ASHIFT:
3193 case ASHIFTRT:
3194 case LSHIFTRT:
3195 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3196 other shifts are extended. The shift patterns truncate the shift
3197 count to the right size, so there are no out-of-range values. */
3198 if (IN_RANGE (x, 1, 8))
3199 return 0;
3200 return COSTS_N_INSNS (1);
3202 case PLUS:
3203 if (IN_RANGE (x, -128, 127))
3204 return 0;
3205 if (SMALL_OPERAND (x))
3206 return COSTS_N_INSNS (1);
3207 return -1;
3209 case LEU:
3210 /* Like LE, but reject the always-true case. */
3211 if (x == -1)
3212 return -1;
3213 case LE:
3214 /* We add 1 to the immediate and use SLT. */
3215 x += 1;
3216 case XOR:
3217 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3218 case LT:
3219 case LTU:
3220 if (IN_RANGE (x, 0, 255))
3221 return 0;
3222 if (SMALL_OPERAND_UNSIGNED (x))
3223 return COSTS_N_INSNS (1);
3224 return -1;
3226 case EQ:
3227 case NE:
3228 /* Equality comparisons with 0 are cheap. */
3229 if (x == 0)
3230 return 0;
3231 return -1;
3233 default:
3234 return -1;
3238 /* Return true if there is a non-MIPS16 instruction that implements CODE
3239 and if that instruction accepts X as an immediate operand. */
3241 static int
3242 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
3244 switch (code)
3246 case ASHIFT:
3247 case ASHIFTRT:
3248 case LSHIFTRT:
3249 /* All shift counts are truncated to a valid constant. */
3250 return true;
3252 case ROTATE:
3253 case ROTATERT:
3254 /* Likewise rotates, if the target supports rotates at all. */
3255 return ISA_HAS_ROR;
3257 case AND:
3258 case IOR:
3259 case XOR:
3260 /* These instructions take 16-bit unsigned immediates. */
3261 return SMALL_OPERAND_UNSIGNED (x);
3263 case PLUS:
3264 case LT:
3265 case LTU:
3266 /* These instructions take 16-bit signed immediates. */
3267 return SMALL_OPERAND (x);
3269 case EQ:
3270 case NE:
3271 case GT:
3272 case GTU:
3273 /* The "immediate" forms of these instructions are really
3274 implemented as comparisons with register 0. */
3275 return x == 0;
3277 case GE:
3278 case GEU:
3279 /* Likewise, meaning that the only valid immediate operand is 1. */
3280 return x == 1;
3282 case LE:
3283 /* We add 1 to the immediate and use SLT. */
3284 return SMALL_OPERAND (x + 1);
3286 case LEU:
3287 /* Likewise SLTU, but reject the always-true case. */
3288 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3290 case SIGN_EXTRACT:
3291 case ZERO_EXTRACT:
3292 /* The bit position and size are immediate operands. */
3293 return ISA_HAS_EXT_INS;
3295 default:
3296 /* By default assume that $0 can be used for 0. */
3297 return x == 0;
3301 /* Return the cost of binary operation X, given that the instruction
3302 sequence for a word-sized or smaller operation has cost SINGLE_COST
3303 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3305 static int
3306 mips_binary_cost (rtx x, int single_cost, int double_cost)
3308 int cost;
3310 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3311 cost = double_cost;
3312 else
3313 cost = single_cost;
3314 return (cost
3315 + rtx_cost (XEXP (x, 0), SET, !optimize_size)
3316 + rtx_cost (XEXP (x, 1), GET_CODE (x), !optimize_size));
3319 /* Return the cost of floating-point multiplications of mode MODE. */
3321 static int
3322 mips_fp_mult_cost (enum machine_mode mode)
3324 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3327 /* Return the cost of floating-point divisions of mode MODE. */
3329 static int
3330 mips_fp_div_cost (enum machine_mode mode)
3332 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3335 /* Return the cost of sign-extending OP to mode MODE, not including the
3336 cost of OP itself. */
3338 static int
3339 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3341 if (MEM_P (op))
3342 /* Extended loads are as cheap as unextended ones. */
3343 return 0;
3345 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3346 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3347 return 0;
3349 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3350 /* We can use SEB or SEH. */
3351 return COSTS_N_INSNS (1);
3353 /* We need to use a shift left and a shift right. */
3354 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3357 /* Return the cost of zero-extending OP to mode MODE, not including the
3358 cost of OP itself. */
3360 static int
3361 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3363 if (MEM_P (op))
3364 /* Extended loads are as cheap as unextended ones. */
3365 return 0;
3367 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3368 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3369 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3371 if (GENERATE_MIPS16E)
3372 /* We can use ZEB or ZEH. */
3373 return COSTS_N_INSNS (1);
3375 if (TARGET_MIPS16)
3376 /* We need to load 0xff or 0xffff into a register and use AND. */
3377 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3379 /* We can use ANDI. */
3380 return COSTS_N_INSNS (1);
3383 /* Implement TARGET_RTX_COSTS. */
3385 static bool
3386 mips_rtx_costs (rtx x, int code, int outer_code, int *total,
3387 bool speed)
3389 enum machine_mode mode = GET_MODE (x);
3390 bool float_mode_p = FLOAT_MODE_P (mode);
3391 int cost;
3392 rtx addr;
3394 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3395 appear in the instruction stream, and the cost of a comparison is
3396 really the cost of the branch or scc condition. At the time of
3397 writing, GCC only uses an explicit outer COMPARE code when optabs
3398 is testing whether a constant is expensive enough to force into a
3399 register. We want optabs to pass such constants through the MIPS
3400 expanders instead, so make all constants very cheap here. */
3401 if (outer_code == COMPARE)
3403 gcc_assert (CONSTANT_P (x));
3404 *total = 0;
3405 return true;
3408 switch (code)
3410 case CONST_INT:
3411 /* Treat *clear_upper32-style ANDs as having zero cost in the
3412 second operand. The cost is entirely in the first operand.
3414 ??? This is needed because we would otherwise try to CSE
3415 the constant operand. Although that's the right thing for
3416 instructions that continue to be a register operation throughout
3417 compilation, it is disastrous for instructions that could
3418 later be converted into a memory operation. */
3419 if (TARGET_64BIT
3420 && outer_code == AND
3421 && UINTVAL (x) == 0xffffffff)
3423 *total = 0;
3424 return true;
3427 if (TARGET_MIPS16)
3429 cost = mips16_constant_cost (outer_code, INTVAL (x));
3430 if (cost >= 0)
3432 *total = cost;
3433 return true;
3436 else
3438 /* When not optimizing for size, we care more about the cost
3439 of hot code, and hot code is often in a loop. If a constant
3440 operand needs to be forced into a register, we will often be
3441 able to hoist the constant load out of the loop, so the load
3442 should not contribute to the cost. */
3443 if (!optimize_size
3444 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3446 *total = 0;
3447 return true;
3450 /* Fall through. */
3452 case CONST:
3453 case SYMBOL_REF:
3454 case LABEL_REF:
3455 case CONST_DOUBLE:
3456 if (force_to_mem_operand (x, VOIDmode))
3458 *total = COSTS_N_INSNS (1);
3459 return true;
3461 cost = mips_const_insns (x);
3462 if (cost > 0)
3464 /* If the constant is likely to be stored in a GPR, SETs of
3465 single-insn constants are as cheap as register sets; we
3466 never want to CSE them.
3468 Don't reduce the cost of storing a floating-point zero in
3469 FPRs. If we have a zero in an FPR for other reasons, we
3470 can get better cfg-cleanup and delayed-branch results by
3471 using it consistently, rather than using $0 sometimes and
3472 an FPR at other times. Also, moves between floating-point
3473 registers are sometimes cheaper than (D)MTC1 $0. */
3474 if (cost == 1
3475 && outer_code == SET
3476 && !(float_mode_p && TARGET_HARD_FLOAT))
3477 cost = 0;
3478 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3479 want to CSE the constant itself. It is usually better to
3480 have N copies of the last operation in the sequence and one
3481 shared copy of the other operations. (Note that this is
3482 not true for MIPS16 code, where the final operation in the
3483 sequence is often an extended instruction.)
3485 Also, if we have a CONST_INT, we don't know whether it is
3486 for a word or doubleword operation, so we cannot rely on
3487 the result of mips_build_integer. */
3488 else if (!TARGET_MIPS16
3489 && (outer_code == SET || mode == VOIDmode))
3490 cost = 1;
3491 *total = COSTS_N_INSNS (cost);
3492 return true;
3494 /* The value will need to be fetched from the constant pool. */
3495 *total = CONSTANT_POOL_COST;
3496 return true;
3498 case MEM:
3499 /* If the address is legitimate, return the number of
3500 instructions it needs. */
3501 addr = XEXP (x, 0);
3502 cost = mips_address_insns (addr, mode, true);
3503 if (cost > 0)
3505 *total = COSTS_N_INSNS (cost + 1);
3506 return true;
3508 /* Check for a scaled indexed address. */
3509 if (mips_lwxs_address_p (addr))
3511 *total = COSTS_N_INSNS (2);
3512 return true;
3514 /* Otherwise use the default handling. */
3515 return false;
3517 case FFS:
3518 *total = COSTS_N_INSNS (6);
3519 return false;
3521 case NOT:
3522 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3523 return false;
3525 case AND:
3526 /* Check for a *clear_upper32 pattern and treat it like a zero
3527 extension. See the pattern's comment for details. */
3528 if (TARGET_64BIT
3529 && mode == DImode
3530 && CONST_INT_P (XEXP (x, 1))
3531 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3533 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3534 + rtx_cost (XEXP (x, 0), SET, speed));
3535 return true;
3537 /* Fall through. */
3539 case IOR:
3540 case XOR:
3541 /* Double-word operations use two single-word operations. */
3542 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3543 return true;
3545 case ASHIFT:
3546 case ASHIFTRT:
3547 case LSHIFTRT:
3548 case ROTATE:
3549 case ROTATERT:
3550 if (CONSTANT_P (XEXP (x, 1)))
3551 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3552 else
3553 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3554 return true;
3556 case ABS:
3557 if (float_mode_p)
3558 *total = mips_cost->fp_add;
3559 else
3560 *total = COSTS_N_INSNS (4);
3561 return false;
3563 case LO_SUM:
3564 /* Low-part immediates need an extended MIPS16 instruction. */
3565 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3566 + rtx_cost (XEXP (x, 0), SET, speed));
3567 return true;
3569 case LT:
3570 case LTU:
3571 case LE:
3572 case LEU:
3573 case GT:
3574 case GTU:
3575 case GE:
3576 case GEU:
3577 case EQ:
3578 case NE:
3579 case UNORDERED:
3580 case LTGT:
3581 /* Branch comparisons have VOIDmode, so use the first operand's
3582 mode instead. */
3583 mode = GET_MODE (XEXP (x, 0));
3584 if (FLOAT_MODE_P (mode))
3586 *total = mips_cost->fp_add;
3587 return false;
3589 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3590 return true;
3592 case MINUS:
3593 if (float_mode_p
3594 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3595 && TARGET_FUSED_MADD
3596 && !HONOR_NANS (mode)
3597 && !HONOR_SIGNED_ZEROS (mode))
3599 /* See if we can use NMADD or NMSUB. See mips.md for the
3600 associated patterns. */
3601 rtx op0 = XEXP (x, 0);
3602 rtx op1 = XEXP (x, 1);
3603 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3605 *total = (mips_fp_mult_cost (mode)
3606 + rtx_cost (XEXP (XEXP (op0, 0), 0), SET, speed)
3607 + rtx_cost (XEXP (op0, 1), SET, speed)
3608 + rtx_cost (op1, SET, speed));
3609 return true;
3611 if (GET_CODE (op1) == MULT)
3613 *total = (mips_fp_mult_cost (mode)
3614 + rtx_cost (op0, SET, speed)
3615 + rtx_cost (XEXP (op1, 0), SET, speed)
3616 + rtx_cost (XEXP (op1, 1), SET, speed));
3617 return true;
3620 /* Fall through. */
3622 case PLUS:
3623 if (float_mode_p)
3625 /* If this is part of a MADD or MSUB, treat the PLUS as
3626 being free. */
3627 if (ISA_HAS_FP4
3628 && TARGET_FUSED_MADD
3629 && GET_CODE (XEXP (x, 0)) == MULT)
3630 *total = 0;
3631 else
3632 *total = mips_cost->fp_add;
3633 return false;
3636 /* Double-word operations require three single-word operations and
3637 an SLTU. The MIPS16 version then needs to move the result of
3638 the SLTU from $24 to a MIPS16 register. */
3639 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3640 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3641 return true;
3643 case NEG:
3644 if (float_mode_p
3645 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3646 && TARGET_FUSED_MADD
3647 && !HONOR_NANS (mode)
3648 && HONOR_SIGNED_ZEROS (mode))
3650 /* See if we can use NMADD or NMSUB. See mips.md for the
3651 associated patterns. */
3652 rtx op = XEXP (x, 0);
3653 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3654 && GET_CODE (XEXP (op, 0)) == MULT)
3656 *total = (mips_fp_mult_cost (mode)
3657 + rtx_cost (XEXP (XEXP (op, 0), 0), SET, speed)
3658 + rtx_cost (XEXP (XEXP (op, 0), 1), SET, speed)
3659 + rtx_cost (XEXP (op, 1), SET, speed));
3660 return true;
3664 if (float_mode_p)
3665 *total = mips_cost->fp_add;
3666 else
3667 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3668 return false;
3670 case MULT:
3671 if (float_mode_p)
3672 *total = mips_fp_mult_cost (mode);
3673 else if (mode == DImode && !TARGET_64BIT)
3674 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3675 where the mulsidi3 always includes an MFHI and an MFLO. */
3676 *total = (optimize_size
3677 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3678 : mips_cost->int_mult_si * 3 + 6);
3679 else if (optimize_size)
3680 *total = (ISA_HAS_MUL3 ? 1 : 2);
3681 else if (mode == DImode)
3682 *total = mips_cost->int_mult_di;
3683 else
3684 *total = mips_cost->int_mult_si;
3685 return false;
3687 case DIV:
3688 /* Check for a reciprocal. */
3689 if (float_mode_p
3690 && ISA_HAS_FP4
3691 && flag_unsafe_math_optimizations
3692 && XEXP (x, 0) == CONST1_RTX (mode))
3694 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3695 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3696 division as being free. */
3697 *total = rtx_cost (XEXP (x, 1), SET, speed);
3698 else
3699 *total = (mips_fp_div_cost (mode)
3700 + rtx_cost (XEXP (x, 1), SET, speed));
3701 return true;
3703 /* Fall through. */
3705 case SQRT:
3706 case MOD:
3707 if (float_mode_p)
3709 *total = mips_fp_div_cost (mode);
3710 return false;
3712 /* Fall through. */
3714 case UDIV:
3715 case UMOD:
3716 if (optimize_size)
3718 /* It is our responsibility to make division by a power of 2
3719 as cheap as 2 register additions if we want the division
3720 expanders to be used for such operations; see the setting
3721 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3722 should always produce shorter code than using
3723 expand_sdiv2_pow2. */
3724 if (TARGET_MIPS16
3725 && CONST_INT_P (XEXP (x, 1))
3726 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3728 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), SET, speed);
3729 return true;
3731 *total = COSTS_N_INSNS (mips_idiv_insns ());
3733 else if (mode == DImode)
3734 *total = mips_cost->int_div_di;
3735 else
3736 *total = mips_cost->int_div_si;
3737 return false;
3739 case SIGN_EXTEND:
3740 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3741 return false;
3743 case ZERO_EXTEND:
3744 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3745 return false;
3747 case FLOAT:
3748 case UNSIGNED_FLOAT:
3749 case FIX:
3750 case FLOAT_EXTEND:
3751 case FLOAT_TRUNCATE:
3752 *total = mips_cost->fp_add;
3753 return false;
3755 default:
3756 return false;
3760 /* Implement TARGET_ADDRESS_COST. */
3762 static int
3763 mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
3765 return mips_address_insns (addr, SImode, false);
3768 /* Return one word of double-word value OP, taking into account the fixed
3769 endianness of certain registers. HIGH_P is true to select the high part,
3770 false to select the low part. */
3773 mips_subword (rtx op, bool high_p)
3775 unsigned int byte, offset;
3776 enum machine_mode mode;
3778 mode = GET_MODE (op);
3779 if (mode == VOIDmode)
3780 mode = TARGET_64BIT ? TImode : DImode;
3782 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3783 byte = UNITS_PER_WORD;
3784 else
3785 byte = 0;
3787 if (FP_REG_RTX_P (op))
3789 /* Paired FPRs are always ordered little-endian. */
3790 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3791 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3794 if (MEM_P (op))
3795 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3797 return simplify_gen_subreg (word_mode, op, mode, byte);
3800 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3802 bool
3803 mips_split_64bit_move_p (rtx dest, rtx src)
3805 if (TARGET_64BIT)
3806 return false;
3808 /* FPR-to-FPR moves can be done in a single instruction, if they're
3809 allowed at all. */
3810 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3811 return false;
3813 /* Check for floating-point loads and stores. */
3814 if (ISA_HAS_LDC1_SDC1)
3816 if (FP_REG_RTX_P (dest) && MEM_P (src))
3817 return false;
3818 if (FP_REG_RTX_P (src) && MEM_P (dest))
3819 return false;
3821 return true;
3824 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3825 this function handles 64-bit moves for which mips_split_64bit_move_p
3826 holds. For 64-bit targets, this function handles 128-bit moves. */
3828 void
3829 mips_split_doubleword_move (rtx dest, rtx src)
3831 rtx low_dest;
3833 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3835 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3836 emit_insn (gen_move_doubleword_fprdi (dest, src));
3837 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3838 emit_insn (gen_move_doubleword_fprdf (dest, src));
3839 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3840 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3841 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
3842 emit_insn (gen_move_doubleword_fprv2si (dest, src));
3843 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
3844 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
3845 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
3846 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
3847 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3848 emit_insn (gen_move_doubleword_fprtf (dest, src));
3849 else
3850 gcc_unreachable ();
3852 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3854 low_dest = mips_subword (dest, false);
3855 mips_emit_move (low_dest, mips_subword (src, false));
3856 if (TARGET_64BIT)
3857 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3858 else
3859 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3861 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3863 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3864 if (TARGET_64BIT)
3865 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3866 else
3867 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3869 else
3871 /* The operation can be split into two normal moves. Decide in
3872 which order to do them. */
3873 low_dest = mips_subword (dest, false);
3874 if (REG_P (low_dest)
3875 && reg_overlap_mentioned_p (low_dest, src))
3877 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3878 mips_emit_move (low_dest, mips_subword (src, false));
3880 else
3882 mips_emit_move (low_dest, mips_subword (src, false));
3883 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3888 /* Return the appropriate instructions to move SRC into DEST. Assume
3889 that SRC is operand 1 and DEST is operand 0. */
3891 const char *
3892 mips_output_move (rtx dest, rtx src)
3894 enum rtx_code dest_code, src_code;
3895 enum machine_mode mode;
3896 enum mips_symbol_type symbol_type;
3897 bool dbl_p;
3899 dest_code = GET_CODE (dest);
3900 src_code = GET_CODE (src);
3901 mode = GET_MODE (dest);
3902 dbl_p = (GET_MODE_SIZE (mode) == 8);
3904 if (dbl_p && mips_split_64bit_move_p (dest, src))
3905 return "#";
3907 if ((src_code == REG && GP_REG_P (REGNO (src)))
3908 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3910 if (dest_code == REG)
3912 if (GP_REG_P (REGNO (dest)))
3913 return "move\t%0,%z1";
3915 /* Moves to HI are handled by special .md insns. */
3916 if (REGNO (dest) == LO_REGNUM)
3917 return "mtlo\t%z1";
3919 if (DSP_ACC_REG_P (REGNO (dest)))
3921 static char retval[] = "mt__\t%z1,%q0";
3923 retval[2] = reg_names[REGNO (dest)][4];
3924 retval[3] = reg_names[REGNO (dest)][5];
3925 return retval;
3928 if (FP_REG_P (REGNO (dest)))
3929 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3931 if (ALL_COP_REG_P (REGNO (dest)))
3933 static char retval[] = "dmtc_\t%z1,%0";
3935 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3936 return dbl_p ? retval : retval + 1;
3939 if (dest_code == MEM)
3940 switch (GET_MODE_SIZE (mode))
3942 case 1: return "sb\t%z1,%0";
3943 case 2: return "sh\t%z1,%0";
3944 case 4: return "sw\t%z1,%0";
3945 case 8: return "sd\t%z1,%0";
3948 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3950 if (src_code == REG)
3952 /* Moves from HI are handled by special .md insns. */
3953 if (REGNO (src) == LO_REGNUM)
3955 /* When generating VR4120 or VR4130 code, we use MACC and
3956 DMACC instead of MFLO. This avoids both the normal
3957 MIPS III HI/LO hazards and the errata related to
3958 -mfix-vr4130. */
3959 if (ISA_HAS_MACCHI)
3960 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3961 return "mflo\t%0";
3964 if (DSP_ACC_REG_P (REGNO (src)))
3966 static char retval[] = "mf__\t%0,%q1";
3968 retval[2] = reg_names[REGNO (src)][4];
3969 retval[3] = reg_names[REGNO (src)][5];
3970 return retval;
3973 if (FP_REG_P (REGNO (src)))
3974 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3976 if (ALL_COP_REG_P (REGNO (src)))
3978 static char retval[] = "dmfc_\t%0,%1";
3980 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3981 return dbl_p ? retval : retval + 1;
3984 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3985 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3988 if (src_code == MEM)
3989 switch (GET_MODE_SIZE (mode))
3991 case 1: return "lbu\t%0,%1";
3992 case 2: return "lhu\t%0,%1";
3993 case 4: return "lw\t%0,%1";
3994 case 8: return "ld\t%0,%1";
3997 if (src_code == CONST_INT)
3999 /* Don't use the X format for the operand itself, because that
4000 will give out-of-range numbers for 64-bit hosts and 32-bit
4001 targets. */
4002 if (!TARGET_MIPS16)
4003 return "li\t%0,%1\t\t\t# %X1";
4005 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
4006 return "li\t%0,%1";
4008 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
4009 return "#";
4012 if (src_code == HIGH)
4013 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
4015 if (CONST_GP_P (src))
4016 return "move\t%0,%1";
4018 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
4019 && mips_lo_relocs[symbol_type] != 0)
4021 /* A signed 16-bit constant formed by applying a relocation
4022 operator to a symbolic address. */
4023 gcc_assert (!mips_split_p[symbol_type]);
4024 return "li\t%0,%R1";
4027 if (symbolic_operand (src, VOIDmode))
4029 gcc_assert (TARGET_MIPS16
4030 ? TARGET_MIPS16_TEXT_LOADS
4031 : !TARGET_EXPLICIT_RELOCS);
4032 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
4035 if (src_code == REG && FP_REG_P (REGNO (src)))
4037 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4039 if (GET_MODE (dest) == V2SFmode)
4040 return "mov.ps\t%0,%1";
4041 else
4042 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
4045 if (dest_code == MEM)
4046 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
4048 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4050 if (src_code == MEM)
4051 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
4053 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
4055 static char retval[] = "l_c_\t%0,%1";
4057 retval[1] = (dbl_p ? 'd' : 'w');
4058 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
4059 return retval;
4061 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
4063 static char retval[] = "s_c_\t%1,%0";
4065 retval[1] = (dbl_p ? 'd' : 'w');
4066 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4067 return retval;
4069 gcc_unreachable ();
4072 /* Return true if CMP1 is a suitable second operand for integer ordering
4073 test CODE. See also the *sCC patterns in mips.md. */
4075 static bool
4076 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
4078 switch (code)
4080 case GT:
4081 case GTU:
4082 return reg_or_0_operand (cmp1, VOIDmode);
4084 case GE:
4085 case GEU:
4086 return !TARGET_MIPS16 && cmp1 == const1_rtx;
4088 case LT:
4089 case LTU:
4090 return arith_operand (cmp1, VOIDmode);
4092 case LE:
4093 return sle_operand (cmp1, VOIDmode);
4095 case LEU:
4096 return sleu_operand (cmp1, VOIDmode);
4098 default:
4099 gcc_unreachable ();
4103 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4104 integer ordering test *CODE, or if an equivalent combination can
4105 be formed by adjusting *CODE and *CMP1. When returning true, update
4106 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4107 them alone. */
4109 static bool
4110 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4111 enum machine_mode mode)
4113 HOST_WIDE_INT plus_one;
4115 if (mips_int_order_operand_ok_p (*code, *cmp1))
4116 return true;
4118 if (GET_CODE (*cmp1) == CONST_INT)
4119 switch (*code)
4121 case LE:
4122 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4123 if (INTVAL (*cmp1) < plus_one)
4125 *code = LT;
4126 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4127 return true;
4129 break;
4131 case LEU:
4132 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4133 if (plus_one != 0)
4135 *code = LTU;
4136 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4137 return true;
4139 break;
4141 default:
4142 break;
4144 return false;
4147 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4148 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4149 is nonnull, it's OK to set TARGET to the inverse of the result and
4150 flip *INVERT_PTR instead. */
4152 static void
4153 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4154 rtx target, rtx cmp0, rtx cmp1)
4156 enum machine_mode mode;
4158 /* First see if there is a MIPS instruction that can do this operation.
4159 If not, try doing the same for the inverse operation. If that also
4160 fails, force CMP1 into a register and try again. */
4161 mode = GET_MODE (cmp0);
4162 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4163 mips_emit_binary (code, target, cmp0, cmp1);
4164 else
4166 enum rtx_code inv_code = reverse_condition (code);
4167 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4169 cmp1 = force_reg (mode, cmp1);
4170 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4172 else if (invert_ptr == 0)
4174 rtx inv_target;
4176 inv_target = mips_force_binary (GET_MODE (target),
4177 inv_code, cmp0, cmp1);
4178 mips_emit_binary (XOR, target, inv_target, const1_rtx);
4180 else
4182 *invert_ptr = !*invert_ptr;
4183 mips_emit_binary (inv_code, target, cmp0, cmp1);
4188 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4189 The register will have the same mode as CMP0. */
4191 static rtx
4192 mips_zero_if_equal (rtx cmp0, rtx cmp1)
4194 if (cmp1 == const0_rtx)
4195 return cmp0;
4197 if (uns_arith_operand (cmp1, VOIDmode))
4198 return expand_binop (GET_MODE (cmp0), xor_optab,
4199 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4201 return expand_binop (GET_MODE (cmp0), sub_optab,
4202 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4205 /* Convert *CODE into a code that can be used in a floating-point
4206 scc instruction (C.cond.fmt). Return true if the values of
4207 the condition code registers will be inverted, with 0 indicating
4208 that the condition holds. */
4210 static bool
4211 mips_reversed_fp_cond (enum rtx_code *code)
4213 switch (*code)
4215 case NE:
4216 case LTGT:
4217 case ORDERED:
4218 *code = reverse_condition_maybe_unordered (*code);
4219 return true;
4221 default:
4222 return false;
4226 /* Convert a comparison into something that can be used in a branch or
4227 conditional move. cmp_operands[0] and cmp_operands[1] are the values
4228 being compared and *CODE is the code used to compare them.
4230 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4231 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4232 otherwise any standard branch condition can be used. The standard branch
4233 conditions are:
4235 - EQ or NE between two registers.
4236 - any comparison between a register and zero. */
4238 static void
4239 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
4241 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
4243 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
4245 *op0 = cmp_operands[0];
4246 *op1 = cmp_operands[1];
4248 else if (*code == EQ || *code == NE)
4250 if (need_eq_ne_p)
4252 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4253 *op1 = const0_rtx;
4255 else
4257 *op0 = cmp_operands[0];
4258 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
4261 else
4263 /* The comparison needs a separate scc instruction. Store the
4264 result of the scc in *OP0 and compare it against zero. */
4265 bool invert = false;
4266 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
4267 mips_emit_int_order_test (*code, &invert, *op0,
4268 cmp_operands[0], cmp_operands[1]);
4269 *code = (invert ? EQ : NE);
4270 *op1 = const0_rtx;
4273 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
4275 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
4276 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
4277 *code = NE;
4278 *op1 = const0_rtx;
4280 else
4282 enum rtx_code cmp_code;
4284 /* Floating-point tests use a separate C.cond.fmt comparison to
4285 set a condition code register. The branch or conditional move
4286 will then compare that register against zero.
4288 Set CMP_CODE to the code of the comparison instruction and
4289 *CODE to the code that the branch or move should use. */
4290 cmp_code = *code;
4291 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4292 *op0 = (ISA_HAS_8CC
4293 ? gen_reg_rtx (CCmode)
4294 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4295 *op1 = const0_rtx;
4296 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4300 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4301 Store the result in TARGET and return true if successful.
4303 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
4305 bool
4306 mips_expand_scc (enum rtx_code code, rtx target)
4308 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4309 return false;
4311 if (code == EQ || code == NE)
4313 if (ISA_HAS_SEQ_SNE
4314 && reg_imm10_operand (cmp_operands[1], GET_MODE (cmp_operands[1])))
4315 mips_emit_binary (code, target, cmp_operands[0], cmp_operands[1]);
4316 else
4318 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4319 mips_emit_binary (code, target, zie, const0_rtx);
4322 else
4323 mips_emit_int_order_test (code, 0, target,
4324 cmp_operands[0], cmp_operands[1]);
4325 return true;
4328 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4329 CODE and jump to OPERANDS[0] if the condition holds. */
4331 void
4332 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4334 rtx op0, op1, condition;
4336 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4337 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4338 emit_jump_insn (gen_condjump (condition, operands[0]));
4341 /* Implement:
4343 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4344 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4346 void
4347 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4348 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4350 rtx cmp_result;
4351 bool reversed_p;
4353 reversed_p = mips_reversed_fp_cond (&cond);
4354 cmp_result = gen_reg_rtx (CCV2mode);
4355 emit_insn (gen_scc_ps (cmp_result,
4356 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4357 if (reversed_p)
4358 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4359 cmp_result));
4360 else
4361 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4362 cmp_result));
4365 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4366 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4367 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4369 void
4370 mips_expand_conditional_move (rtx *operands)
4372 enum rtx_code code;
4373 rtx cond, op0, op1;
4375 code = GET_CODE (operands[1]);
4376 mips_emit_compare (&code, &op0, &op1, true);
4377 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1),
4378 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4379 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4380 operands[2], operands[3])));
4383 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4384 then trap if the condition holds. */
4386 void
4387 mips_expand_conditional_trap (enum rtx_code code)
4389 rtx op0, op1;
4390 enum machine_mode mode;
4392 /* MIPS conditional trap instructions don't have GT or LE flavors,
4393 so we must swap the operands and convert to LT and GE respectively. */
4394 switch (code)
4396 case GT:
4397 case LE:
4398 case GTU:
4399 case LEU:
4400 code = swap_condition (code);
4401 op0 = cmp_operands[1];
4402 op1 = cmp_operands[0];
4403 break;
4405 default:
4406 op0 = cmp_operands[0];
4407 op1 = cmp_operands[1];
4408 break;
4411 mode = GET_MODE (cmp_operands[0]);
4412 op0 = force_reg (mode, op0);
4413 if (!arith_operand (op1, mode))
4414 op1 = force_reg (mode, op1);
4416 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4417 gen_rtx_fmt_ee (code, mode, op0, op1),
4418 const0_rtx));
4421 /* Initialize *CUM for a call to a function of type FNTYPE. */
4423 void
4424 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4426 memset (cum, 0, sizeof (*cum));
4427 cum->prototype = (fntype && prototype_p (fntype));
4428 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4431 /* Fill INFO with information about a single argument. CUM is the
4432 cumulative state for earlier arguments. MODE is the mode of this
4433 argument and TYPE is its type (if known). NAMED is true if this
4434 is a named (fixed) argument rather than a variable one. */
4436 static void
4437 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4438 enum machine_mode mode, tree type, int named)
4440 bool doubleword_aligned_p;
4441 unsigned int num_bytes, num_words, max_regs;
4443 /* Work out the size of the argument. */
4444 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4445 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4447 /* Decide whether it should go in a floating-point register, assuming
4448 one is free. Later code checks for availability.
4450 The checks against UNITS_PER_FPVALUE handle the soft-float and
4451 single-float cases. */
4452 switch (mips_abi)
4454 case ABI_EABI:
4455 /* The EABI conventions have traditionally been defined in terms
4456 of TYPE_MODE, regardless of the actual type. */
4457 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4458 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4459 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4460 break;
4462 case ABI_32:
4463 case ABI_O64:
4464 /* Only leading floating-point scalars are passed in
4465 floating-point registers. We also handle vector floats the same
4466 say, which is OK because they are not covered by the standard ABI. */
4467 info->fpr_p = (!cum->gp_reg_found
4468 && cum->arg_number < 2
4469 && (type == 0
4470 || SCALAR_FLOAT_TYPE_P (type)
4471 || VECTOR_FLOAT_TYPE_P (type))
4472 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4473 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4474 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4475 break;
4477 case ABI_N32:
4478 case ABI_64:
4479 /* Scalar, complex and vector floating-point types are passed in
4480 floating-point registers, as long as this is a named rather
4481 than a variable argument. */
4482 info->fpr_p = (named
4483 && (type == 0 || FLOAT_TYPE_P (type))
4484 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4485 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4486 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4487 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4489 /* ??? According to the ABI documentation, the real and imaginary
4490 parts of complex floats should be passed in individual registers.
4491 The real and imaginary parts of stack arguments are supposed
4492 to be contiguous and there should be an extra word of padding
4493 at the end.
4495 This has two problems. First, it makes it impossible to use a
4496 single "void *" va_list type, since register and stack arguments
4497 are passed differently. (At the time of writing, MIPSpro cannot
4498 handle complex float varargs correctly.) Second, it's unclear
4499 what should happen when there is only one register free.
4501 For now, we assume that named complex floats should go into FPRs
4502 if there are two FPRs free, otherwise they should be passed in the
4503 same way as a struct containing two floats. */
4504 if (info->fpr_p
4505 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4506 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4508 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4509 info->fpr_p = false;
4510 else
4511 num_words = 2;
4513 break;
4515 default:
4516 gcc_unreachable ();
4519 /* See whether the argument has doubleword alignment. */
4520 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4522 /* Set REG_OFFSET to the register count we're interested in.
4523 The EABI allocates the floating-point registers separately,
4524 but the other ABIs allocate them like integer registers. */
4525 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4526 ? cum->num_fprs
4527 : cum->num_gprs);
4529 /* Advance to an even register if the argument is doubleword-aligned. */
4530 if (doubleword_aligned_p)
4531 info->reg_offset += info->reg_offset & 1;
4533 /* Work out the offset of a stack argument. */
4534 info->stack_offset = cum->stack_words;
4535 if (doubleword_aligned_p)
4536 info->stack_offset += info->stack_offset & 1;
4538 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4540 /* Partition the argument between registers and stack. */
4541 info->reg_words = MIN (num_words, max_regs);
4542 info->stack_words = num_words - info->reg_words;
4545 /* INFO describes a register argument that has the normal format for the
4546 argument's mode. Return the register it uses, assuming that FPRs are
4547 available if HARD_FLOAT_P. */
4549 static unsigned int
4550 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4552 if (!info->fpr_p || !hard_float_p)
4553 return GP_ARG_FIRST + info->reg_offset;
4554 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4555 /* In o32, the second argument is always passed in $f14
4556 for TARGET_DOUBLE_FLOAT, regardless of whether the
4557 first argument was a word or doubleword. */
4558 return FP_ARG_FIRST + 2;
4559 else
4560 return FP_ARG_FIRST + info->reg_offset;
4563 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4565 static bool
4566 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4568 return !TARGET_OLDABI;
4571 /* Implement FUNCTION_ARG. */
4574 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4575 tree type, int named)
4577 struct mips_arg_info info;
4579 /* We will be called with a mode of VOIDmode after the last argument
4580 has been seen. Whatever we return will be passed to the call expander.
4581 If we need a MIPS16 fp_code, return a REG with the code stored as
4582 the mode. */
4583 if (mode == VOIDmode)
4585 if (TARGET_MIPS16 && cum->fp_code != 0)
4586 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4587 else
4588 return NULL;
4591 mips_get_arg_info (&info, cum, mode, type, named);
4593 /* Return straight away if the whole argument is passed on the stack. */
4594 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4595 return NULL;
4597 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4598 contains a double in its entirety, then that 64-bit chunk is passed
4599 in a floating-point register. */
4600 if (TARGET_NEWABI
4601 && TARGET_HARD_FLOAT
4602 && named
4603 && type != 0
4604 && TREE_CODE (type) == RECORD_TYPE
4605 && TYPE_SIZE_UNIT (type)
4606 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4608 tree field;
4610 /* First check to see if there is any such field. */
4611 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4612 if (TREE_CODE (field) == FIELD_DECL
4613 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4614 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4615 && host_integerp (bit_position (field), 0)
4616 && int_bit_position (field) % BITS_PER_WORD == 0)
4617 break;
4619 if (field != 0)
4621 /* Now handle the special case by returning a PARALLEL
4622 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4623 chunks are passed in registers. */
4624 unsigned int i;
4625 HOST_WIDE_INT bitpos;
4626 rtx ret;
4628 /* assign_parms checks the mode of ENTRY_PARM, so we must
4629 use the actual mode here. */
4630 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4632 bitpos = 0;
4633 field = TYPE_FIELDS (type);
4634 for (i = 0; i < info.reg_words; i++)
4636 rtx reg;
4638 for (; field; field = TREE_CHAIN (field))
4639 if (TREE_CODE (field) == FIELD_DECL
4640 && int_bit_position (field) >= bitpos)
4641 break;
4643 if (field
4644 && int_bit_position (field) == bitpos
4645 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4646 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4647 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4648 else
4649 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4651 XVECEXP (ret, 0, i)
4652 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4653 GEN_INT (bitpos / BITS_PER_UNIT));
4655 bitpos += BITS_PER_WORD;
4657 return ret;
4661 /* Handle the n32/n64 conventions for passing complex floating-point
4662 arguments in FPR pairs. The real part goes in the lower register
4663 and the imaginary part goes in the upper register. */
4664 if (TARGET_NEWABI
4665 && info.fpr_p
4666 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4668 rtx real, imag;
4669 enum machine_mode inner;
4670 unsigned int regno;
4672 inner = GET_MODE_INNER (mode);
4673 regno = FP_ARG_FIRST + info.reg_offset;
4674 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4676 /* Real part in registers, imaginary part on stack. */
4677 gcc_assert (info.stack_words == info.reg_words);
4678 return gen_rtx_REG (inner, regno);
4680 else
4682 gcc_assert (info.stack_words == 0);
4683 real = gen_rtx_EXPR_LIST (VOIDmode,
4684 gen_rtx_REG (inner, regno),
4685 const0_rtx);
4686 imag = gen_rtx_EXPR_LIST (VOIDmode,
4687 gen_rtx_REG (inner,
4688 regno + info.reg_words / 2),
4689 GEN_INT (GET_MODE_SIZE (inner)));
4690 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4694 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4697 /* Implement FUNCTION_ARG_ADVANCE. */
4699 void
4700 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4701 tree type, int named)
4703 struct mips_arg_info info;
4705 mips_get_arg_info (&info, cum, mode, type, named);
4707 if (!info.fpr_p)
4708 cum->gp_reg_found = true;
4710 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4711 an explanation of what this code does. It assumes that we're using
4712 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4713 in FPRs. */
4714 if (cum->arg_number < 2 && info.fpr_p)
4715 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4717 /* Advance the register count. This has the effect of setting
4718 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4719 argument required us to skip the final GPR and pass the whole
4720 argument on the stack. */
4721 if (mips_abi != ABI_EABI || !info.fpr_p)
4722 cum->num_gprs = info.reg_offset + info.reg_words;
4723 else if (info.reg_words > 0)
4724 cum->num_fprs += MAX_FPRS_PER_FMT;
4726 /* Advance the stack word count. */
4727 if (info.stack_words > 0)
4728 cum->stack_words = info.stack_offset + info.stack_words;
4730 cum->arg_number++;
4733 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4735 static int
4736 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4737 enum machine_mode mode, tree type, bool named)
4739 struct mips_arg_info info;
4741 mips_get_arg_info (&info, cum, mode, type, named);
4742 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4745 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4746 PARM_BOUNDARY bits of alignment, but will be given anything up
4747 to STACK_BOUNDARY bits if the type requires it. */
4750 mips_function_arg_boundary (enum machine_mode mode, tree type)
4752 unsigned int alignment;
4754 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4755 if (alignment < PARM_BOUNDARY)
4756 alignment = PARM_BOUNDARY;
4757 if (alignment > STACK_BOUNDARY)
4758 alignment = STACK_BOUNDARY;
4759 return alignment;
4762 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4763 upward rather than downward. In other words, return true if the
4764 first byte of the stack slot has useful data, false if the last
4765 byte does. */
4767 bool
4768 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4770 /* On little-endian targets, the first byte of every stack argument
4771 is passed in the first byte of the stack slot. */
4772 if (!BYTES_BIG_ENDIAN)
4773 return true;
4775 /* Otherwise, integral types are padded downward: the last byte of a
4776 stack argument is passed in the last byte of the stack slot. */
4777 if (type != 0
4778 ? (INTEGRAL_TYPE_P (type)
4779 || POINTER_TYPE_P (type)
4780 || FIXED_POINT_TYPE_P (type))
4781 : (SCALAR_INT_MODE_P (mode)
4782 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4783 return false;
4785 /* Big-endian o64 pads floating-point arguments downward. */
4786 if (mips_abi == ABI_O64)
4787 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4788 return false;
4790 /* Other types are padded upward for o32, o64, n32 and n64. */
4791 if (mips_abi != ABI_EABI)
4792 return true;
4794 /* Arguments smaller than a stack slot are padded downward. */
4795 if (mode != BLKmode)
4796 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4797 else
4798 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
4801 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4802 if the least significant byte of the register has useful data. Return
4803 the opposite if the most significant byte does. */
4805 bool
4806 mips_pad_reg_upward (enum machine_mode mode, tree type)
4808 /* No shifting is required for floating-point arguments. */
4809 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4810 return !BYTES_BIG_ENDIAN;
4812 /* Otherwise, apply the same padding to register arguments as we do
4813 to stack arguments. */
4814 return mips_pad_arg_upward (mode, type);
4817 /* Return nonzero when an argument must be passed by reference. */
4819 static bool
4820 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4821 enum machine_mode mode, const_tree type,
4822 bool named ATTRIBUTE_UNUSED)
4824 if (mips_abi == ABI_EABI)
4826 int size;
4828 /* ??? How should SCmode be handled? */
4829 if (mode == DImode || mode == DFmode
4830 || mode == DQmode || mode == UDQmode
4831 || mode == DAmode || mode == UDAmode)
4832 return 0;
4834 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4835 return size == -1 || size > UNITS_PER_WORD;
4837 else
4839 /* If we have a variable-sized parameter, we have no choice. */
4840 return targetm.calls.must_pass_in_stack (mode, type);
4844 /* Implement TARGET_CALLEE_COPIES. */
4846 static bool
4847 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4848 enum machine_mode mode ATTRIBUTE_UNUSED,
4849 const_tree type ATTRIBUTE_UNUSED, bool named)
4851 return mips_abi == ABI_EABI && named;
4854 /* See whether VALTYPE is a record whose fields should be returned in
4855 floating-point registers. If so, return the number of fields and
4856 list them in FIELDS (which should have two elements). Return 0
4857 otherwise.
4859 For n32 & n64, a structure with one or two fields is returned in
4860 floating-point registers as long as every field has a floating-point
4861 type. */
4863 static int
4864 mips_fpr_return_fields (const_tree valtype, tree *fields)
4866 tree field;
4867 int i;
4869 if (!TARGET_NEWABI)
4870 return 0;
4872 if (TREE_CODE (valtype) != RECORD_TYPE)
4873 return 0;
4875 i = 0;
4876 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4878 if (TREE_CODE (field) != FIELD_DECL)
4879 continue;
4881 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4882 return 0;
4884 if (i == 2)
4885 return 0;
4887 fields[i++] = field;
4889 return i;
4892 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4893 a value in the most significant part of $2/$3 if:
4895 - the target is big-endian;
4897 - the value has a structure or union type (we generalize this to
4898 cover aggregates from other languages too); and
4900 - the structure is not returned in floating-point registers. */
4902 static bool
4903 mips_return_in_msb (const_tree valtype)
4905 tree fields[2];
4907 return (TARGET_NEWABI
4908 && TARGET_BIG_ENDIAN
4909 && AGGREGATE_TYPE_P (valtype)
4910 && mips_fpr_return_fields (valtype, fields) == 0);
4913 /* Return true if the function return value MODE will get returned in a
4914 floating-point register. */
4916 static bool
4917 mips_return_mode_in_fpr_p (enum machine_mode mode)
4919 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4920 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4921 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4922 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4925 /* Return the representation of an FPR return register when the
4926 value being returned in FP_RETURN has mode VALUE_MODE and the
4927 return type itself has mode TYPE_MODE. On NewABI targets,
4928 the two modes may be different for structures like:
4930 struct __attribute__((packed)) foo { float f; }
4932 where we return the SFmode value of "f" in FP_RETURN, but where
4933 the structure itself has mode BLKmode. */
4935 static rtx
4936 mips_return_fpr_single (enum machine_mode type_mode,
4937 enum machine_mode value_mode)
4939 rtx x;
4941 x = gen_rtx_REG (value_mode, FP_RETURN);
4942 if (type_mode != value_mode)
4944 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4945 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4947 return x;
4950 /* Return a composite value in a pair of floating-point registers.
4951 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4952 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4953 complete value.
4955 For n32 & n64, $f0 always holds the first value and $f2 the second.
4956 Otherwise the values are packed together as closely as possible. */
4958 static rtx
4959 mips_return_fpr_pair (enum machine_mode mode,
4960 enum machine_mode mode1, HOST_WIDE_INT offset1,
4961 enum machine_mode mode2, HOST_WIDE_INT offset2)
4963 int inc;
4965 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4966 return gen_rtx_PARALLEL
4967 (mode,
4968 gen_rtvec (2,
4969 gen_rtx_EXPR_LIST (VOIDmode,
4970 gen_rtx_REG (mode1, FP_RETURN),
4971 GEN_INT (offset1)),
4972 gen_rtx_EXPR_LIST (VOIDmode,
4973 gen_rtx_REG (mode2, FP_RETURN + inc),
4974 GEN_INT (offset2))));
4978 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4979 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4980 VALTYPE is null and MODE is the mode of the return value. */
4983 mips_function_value (const_tree valtype, enum machine_mode mode)
4985 if (valtype)
4987 tree fields[2];
4988 int unsigned_p;
4990 mode = TYPE_MODE (valtype);
4991 unsigned_p = TYPE_UNSIGNED (valtype);
4993 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4994 we must promote the mode just as PROMOTE_MODE does. */
4995 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4997 /* Handle structures whose fields are returned in $f0/$f2. */
4998 switch (mips_fpr_return_fields (valtype, fields))
5000 case 1:
5001 return mips_return_fpr_single (mode,
5002 TYPE_MODE (TREE_TYPE (fields[0])));
5004 case 2:
5005 return mips_return_fpr_pair (mode,
5006 TYPE_MODE (TREE_TYPE (fields[0])),
5007 int_byte_position (fields[0]),
5008 TYPE_MODE (TREE_TYPE (fields[1])),
5009 int_byte_position (fields[1]));
5012 /* If a value is passed in the most significant part of a register, see
5013 whether we have to round the mode up to a whole number of words. */
5014 if (mips_return_in_msb (valtype))
5016 HOST_WIDE_INT size = int_size_in_bytes (valtype);
5017 if (size % UNITS_PER_WORD != 0)
5019 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
5020 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5024 /* For EABI, the class of return register depends entirely on MODE.
5025 For example, "struct { some_type x; }" and "union { some_type x; }"
5026 are returned in the same way as a bare "some_type" would be.
5027 Other ABIs only use FPRs for scalar, complex or vector types. */
5028 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
5029 return gen_rtx_REG (mode, GP_RETURN);
5032 if (!TARGET_MIPS16)
5034 /* Handle long doubles for n32 & n64. */
5035 if (mode == TFmode)
5036 return mips_return_fpr_pair (mode,
5037 DImode, 0,
5038 DImode, GET_MODE_SIZE (mode) / 2);
5040 if (mips_return_mode_in_fpr_p (mode))
5042 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5043 return mips_return_fpr_pair (mode,
5044 GET_MODE_INNER (mode), 0,
5045 GET_MODE_INNER (mode),
5046 GET_MODE_SIZE (mode) / 2);
5047 else
5048 return gen_rtx_REG (mode, FP_RETURN);
5052 return gen_rtx_REG (mode, GP_RETURN);
5055 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
5056 all BLKmode objects are returned in memory. Under the n32, n64
5057 and embedded ABIs, small structures are returned in a register.
5058 Objects with varying size must still be returned in memory, of
5059 course. */
5061 static bool
5062 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5064 return (TARGET_OLDABI
5065 ? TYPE_MODE (type) == BLKmode
5066 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
5069 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
5071 static void
5072 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5073 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5074 int no_rtl)
5076 CUMULATIVE_ARGS local_cum;
5077 int gp_saved, fp_saved;
5079 /* The caller has advanced CUM up to, but not beyond, the last named
5080 argument. Advance a local copy of CUM past the last "real" named
5081 argument, to find out how many registers are left over. */
5082 local_cum = *cum;
5083 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
5085 /* Found out how many registers we need to save. */
5086 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
5087 fp_saved = (EABI_FLOAT_VARARGS_P
5088 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
5089 : 0);
5091 if (!no_rtl)
5093 if (gp_saved > 0)
5095 rtx ptr, mem;
5097 ptr = plus_constant (virtual_incoming_args_rtx,
5098 REG_PARM_STACK_SPACE (cfun->decl)
5099 - gp_saved * UNITS_PER_WORD);
5100 mem = gen_frame_mem (BLKmode, ptr);
5101 set_mem_alias_set (mem, get_varargs_alias_set ());
5103 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
5104 mem, gp_saved);
5106 if (fp_saved > 0)
5108 /* We can't use move_block_from_reg, because it will use
5109 the wrong mode. */
5110 enum machine_mode mode;
5111 int off, i;
5113 /* Set OFF to the offset from virtual_incoming_args_rtx of
5114 the first float register. The FP save area lies below
5115 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
5116 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
5117 off -= fp_saved * UNITS_PER_FPREG;
5119 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
5121 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
5122 i += MAX_FPRS_PER_FMT)
5124 rtx ptr, mem;
5126 ptr = plus_constant (virtual_incoming_args_rtx, off);
5127 mem = gen_frame_mem (mode, ptr);
5128 set_mem_alias_set (mem, get_varargs_alias_set ());
5129 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
5130 off += UNITS_PER_HWFPVALUE;
5134 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
5135 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
5136 + fp_saved * UNITS_PER_FPREG);
5139 /* Implement TARGET_BUILTIN_VA_LIST. */
5141 static tree
5142 mips_build_builtin_va_list (void)
5144 if (EABI_FLOAT_VARARGS_P)
5146 /* We keep 3 pointers, and two offsets.
5148 Two pointers are to the overflow area, which starts at the CFA.
5149 One of these is constant, for addressing into the GPR save area
5150 below it. The other is advanced up the stack through the
5151 overflow region.
5153 The third pointer is to the bottom of the GPR save area.
5154 Since the FPR save area is just below it, we can address
5155 FPR slots off this pointer.
5157 We also keep two one-byte offsets, which are to be subtracted
5158 from the constant pointers to yield addresses in the GPR and
5159 FPR save areas. These are downcounted as float or non-float
5160 arguments are used, and when they get to zero, the argument
5161 must be obtained from the overflow region. */
5162 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5163 tree array, index;
5165 record = lang_hooks.types.make_type (RECORD_TYPE);
5167 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
5168 ptr_type_node);
5169 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5170 ptr_type_node);
5171 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5172 ptr_type_node);
5173 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5174 unsigned_char_type_node);
5175 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5176 unsigned_char_type_node);
5177 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5178 warn on every user file. */
5179 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5180 array = build_array_type (unsigned_char_type_node,
5181 build_index_type (index));
5182 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5184 DECL_FIELD_CONTEXT (f_ovfl) = record;
5185 DECL_FIELD_CONTEXT (f_gtop) = record;
5186 DECL_FIELD_CONTEXT (f_ftop) = record;
5187 DECL_FIELD_CONTEXT (f_goff) = record;
5188 DECL_FIELD_CONTEXT (f_foff) = record;
5189 DECL_FIELD_CONTEXT (f_res) = record;
5191 TYPE_FIELDS (record) = f_ovfl;
5192 TREE_CHAIN (f_ovfl) = f_gtop;
5193 TREE_CHAIN (f_gtop) = f_ftop;
5194 TREE_CHAIN (f_ftop) = f_goff;
5195 TREE_CHAIN (f_goff) = f_foff;
5196 TREE_CHAIN (f_foff) = f_res;
5198 layout_type (record);
5199 return record;
5201 else if (TARGET_IRIX && TARGET_IRIX6)
5202 /* On IRIX 6, this type is 'char *'. */
5203 return build_pointer_type (char_type_node);
5204 else
5205 /* Otherwise, we use 'void *'. */
5206 return ptr_type_node;
5209 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5211 static void
5212 mips_va_start (tree valist, rtx nextarg)
5214 if (EABI_FLOAT_VARARGS_P)
5216 const CUMULATIVE_ARGS *cum;
5217 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5218 tree ovfl, gtop, ftop, goff, foff;
5219 tree t;
5220 int gpr_save_area_size;
5221 int fpr_save_area_size;
5222 int fpr_offset;
5224 cum = &crtl->args.info;
5225 gpr_save_area_size
5226 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5227 fpr_save_area_size
5228 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5230 f_ovfl = TYPE_FIELDS (va_list_type_node);
5231 f_gtop = TREE_CHAIN (f_ovfl);
5232 f_ftop = TREE_CHAIN (f_gtop);
5233 f_goff = TREE_CHAIN (f_ftop);
5234 f_foff = TREE_CHAIN (f_goff);
5236 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5237 NULL_TREE);
5238 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5239 NULL_TREE);
5240 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5241 NULL_TREE);
5242 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5243 NULL_TREE);
5244 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5245 NULL_TREE);
5247 /* Emit code to initialize OVFL, which points to the next varargs
5248 stack argument. CUM->STACK_WORDS gives the number of stack
5249 words used by named arguments. */
5250 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5251 if (cum->stack_words > 0)
5252 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5253 size_int (cum->stack_words * UNITS_PER_WORD));
5254 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5255 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5257 /* Emit code to initialize GTOP, the top of the GPR save area. */
5258 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5259 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
5260 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5262 /* Emit code to initialize FTOP, the top of the FPR save area.
5263 This address is gpr_save_area_bytes below GTOP, rounded
5264 down to the next fp-aligned boundary. */
5265 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5266 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5267 fpr_offset &= -UNITS_PER_FPVALUE;
5268 if (fpr_offset)
5269 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5270 size_int (-fpr_offset));
5271 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
5272 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5274 /* Emit code to initialize GOFF, the offset from GTOP of the
5275 next GPR argument. */
5276 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
5277 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
5278 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5280 /* Likewise emit code to initialize FOFF, the offset from FTOP
5281 of the next FPR argument. */
5282 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
5283 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
5284 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5286 else
5288 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5289 std_expand_builtin_va_start (valist, nextarg);
5293 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5295 static tree
5296 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5297 gimple_seq *post_p)
5299 tree addr;
5300 bool indirect_p;
5302 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5303 if (indirect_p)
5304 type = build_pointer_type (type);
5306 if (!EABI_FLOAT_VARARGS_P)
5307 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5308 else
5310 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5311 tree ovfl, top, off, align;
5312 HOST_WIDE_INT size, rsize, osize;
5313 tree t, u;
5315 f_ovfl = TYPE_FIELDS (va_list_type_node);
5316 f_gtop = TREE_CHAIN (f_ovfl);
5317 f_ftop = TREE_CHAIN (f_gtop);
5318 f_goff = TREE_CHAIN (f_ftop);
5319 f_foff = TREE_CHAIN (f_goff);
5321 /* Let:
5323 TOP be the top of the GPR or FPR save area;
5324 OFF be the offset from TOP of the next register;
5325 ADDR_RTX be the address of the argument;
5326 SIZE be the number of bytes in the argument type;
5327 RSIZE be the number of bytes used to store the argument
5328 when it's in the register save area; and
5329 OSIZE be the number of bytes used to store it when it's
5330 in the stack overflow area.
5332 The code we want is:
5334 1: off &= -rsize; // round down
5335 2: if (off != 0)
5336 3: {
5337 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5338 5: off -= rsize;
5339 6: }
5340 7: else
5341 8: {
5342 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5343 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5344 11: ovfl += osize;
5345 14: }
5347 [1] and [9] can sometimes be optimized away. */
5349 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5350 NULL_TREE);
5351 size = int_size_in_bytes (type);
5353 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5354 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5356 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
5357 unshare_expr (valist), f_ftop, NULL_TREE);
5358 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
5359 unshare_expr (valist), f_foff, NULL_TREE);
5361 /* When va_start saves FPR arguments to the stack, each slot
5362 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5363 argument's precision. */
5364 rsize = UNITS_PER_HWFPVALUE;
5366 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5367 (= PARM_BOUNDARY bits). This can be different from RSIZE
5368 in two cases:
5370 (1) On 32-bit targets when TYPE is a structure such as:
5372 struct s { float f; };
5374 Such structures are passed in paired FPRs, so RSIZE
5375 will be 8 bytes. However, the structure only takes
5376 up 4 bytes of memory, so OSIZE will only be 4.
5378 (2) In combinations such as -mgp64 -msingle-float
5379 -fshort-double. Doubles passed in registers will then take
5380 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5381 stack take up UNITS_PER_WORD bytes. */
5382 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5384 else
5386 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
5387 unshare_expr (valist), f_gtop, NULL_TREE);
5388 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
5389 unshare_expr (valist), f_goff, NULL_TREE);
5390 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5391 if (rsize > UNITS_PER_WORD)
5393 /* [1] Emit code for: off &= -rsize. */
5394 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
5395 build_int_cst (TREE_TYPE (off), -rsize));
5396 gimplify_assign (unshare_expr (off), t, pre_p);
5398 osize = rsize;
5401 /* [2] Emit code to branch if off == 0. */
5402 t = build2 (NE_EXPR, boolean_type_node, off,
5403 build_int_cst (TREE_TYPE (off), 0));
5404 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5406 /* [5] Emit code for: off -= rsize. We do this as a form of
5407 post-decrement not available to C. */
5408 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5409 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5411 /* [4] Emit code for:
5412 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5413 t = fold_convert (sizetype, t);
5414 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5415 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5416 if (BYTES_BIG_ENDIAN && rsize > size)
5418 u = size_int (rsize - size);
5419 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5421 COND_EXPR_THEN (addr) = t;
5423 if (osize > UNITS_PER_WORD)
5425 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5426 u = size_int (osize - 1);
5427 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl),
5428 unshare_expr (ovfl), u);
5429 t = fold_convert (sizetype, t);
5430 u = size_int (-osize);
5431 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5432 t = fold_convert (TREE_TYPE (ovfl), t);
5433 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
5434 unshare_expr (ovfl), t);
5436 else
5437 align = NULL;
5439 /* [10, 11] Emit code for:
5440 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5441 ovfl += osize. */
5442 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5443 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5444 if (BYTES_BIG_ENDIAN && osize > size)
5446 u = size_int (osize - size);
5447 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5450 /* String [9] and [10, 11] together. */
5451 if (align)
5452 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5453 COND_EXPR_ELSE (addr) = t;
5455 addr = fold_convert (build_pointer_type (type), addr);
5456 addr = build_va_arg_indirect_ref (addr);
5459 if (indirect_p)
5460 addr = build_va_arg_indirect_ref (addr);
5462 return addr;
5465 /* Start a definition of function NAME. MIPS16_P indicates whether the
5466 function contains MIPS16 code. */
5468 static void
5469 mips_start_function_definition (const char *name, bool mips16_p)
5471 if (mips16_p)
5472 fprintf (asm_out_file, "\t.set\tmips16\n");
5473 else
5474 fprintf (asm_out_file, "\t.set\tnomips16\n");
5476 if (!flag_inhibit_size_directive)
5478 fputs ("\t.ent\t", asm_out_file);
5479 assemble_name (asm_out_file, name);
5480 fputs ("\n", asm_out_file);
5483 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
5485 /* Start the definition proper. */
5486 assemble_name (asm_out_file, name);
5487 fputs (":\n", asm_out_file);
5490 /* End a function definition started by mips_start_function_definition. */
5492 static void
5493 mips_end_function_definition (const char *name)
5495 if (!flag_inhibit_size_directive)
5497 fputs ("\t.end\t", asm_out_file);
5498 assemble_name (asm_out_file, name);
5499 fputs ("\n", asm_out_file);
5503 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5505 static bool
5506 mips_ok_for_lazy_binding_p (rtx x)
5508 return (TARGET_USE_GOT
5509 && GET_CODE (x) == SYMBOL_REF
5510 && !SYMBOL_REF_BIND_NOW_P (x)
5511 && !mips_symbol_binds_local_p (x));
5514 /* Load function address ADDR into register DEST. TYPE is as for
5515 mips_expand_call. Return true if we used an explicit lazy-binding
5516 sequence. */
5518 static bool
5519 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
5521 /* If we're generating PIC, and this call is to a global function,
5522 try to allow its address to be resolved lazily. This isn't
5523 possible for sibcalls when $gp is call-saved because the value
5524 of $gp on entry to the stub would be our caller's gp, not ours. */
5525 if (TARGET_EXPLICIT_RELOCS
5526 && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
5527 && mips_ok_for_lazy_binding_p (addr))
5529 addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
5530 emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
5531 return true;
5533 else
5535 mips_emit_move (dest, addr);
5536 return false;
5540 /* Each locally-defined hard-float MIPS16 function has a local symbol
5541 associated with it. This hash table maps the function symbol (FUNC)
5542 to the local symbol (LOCAL). */
5543 struct GTY(()) mips16_local_alias {
5544 rtx func;
5545 rtx local;
5547 static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
5549 /* Hash table callbacks for mips16_local_aliases. */
5551 static hashval_t
5552 mips16_local_aliases_hash (const void *entry)
5554 const struct mips16_local_alias *alias;
5556 alias = (const struct mips16_local_alias *) entry;
5557 return htab_hash_string (XSTR (alias->func, 0));
5560 static int
5561 mips16_local_aliases_eq (const void *entry1, const void *entry2)
5563 const struct mips16_local_alias *alias1, *alias2;
5565 alias1 = (const struct mips16_local_alias *) entry1;
5566 alias2 = (const struct mips16_local_alias *) entry2;
5567 return rtx_equal_p (alias1->func, alias2->func);
5570 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5571 Return a local alias for it, creating a new one if necessary. */
5573 static rtx
5574 mips16_local_alias (rtx func)
5576 struct mips16_local_alias *alias, tmp_alias;
5577 void **slot;
5579 /* Create the hash table if this is the first call. */
5580 if (mips16_local_aliases == NULL)
5581 mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
5582 mips16_local_aliases_eq, NULL);
5584 /* Look up the function symbol, creating a new entry if need be. */
5585 tmp_alias.func = func;
5586 slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
5587 gcc_assert (slot != NULL);
5589 alias = (struct mips16_local_alias *) *slot;
5590 if (alias == NULL)
5592 const char *func_name, *local_name;
5593 rtx local;
5595 /* Create a new SYMBOL_REF for the local symbol. The choice of
5596 __fn_local_* is based on the __fn_stub_* names that we've
5597 traditionally used for the non-MIPS16 stub. */
5598 func_name = targetm.strip_name_encoding (XSTR (func, 0));
5599 local_name = ACONCAT (("__fn_local_", func_name, NULL));
5600 local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
5601 SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
5603 /* Create a new structure to represent the mapping. */
5604 alias = GGC_NEW (struct mips16_local_alias);
5605 alias->func = func;
5606 alias->local = local;
5607 *slot = alias;
5609 return alias->local;
5612 /* A chained list of functions for which mips16_build_call_stub has already
5613 generated a stub. NAME is the name of the function and FP_RET_P is true
5614 if the function returns a value in floating-point registers. */
5615 struct mips16_stub {
5616 struct mips16_stub *next;
5617 char *name;
5618 bool fp_ret_p;
5620 static struct mips16_stub *mips16_stubs;
5622 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
5624 static rtx
5625 mips16_stub_function (const char *name)
5627 rtx x;
5629 x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5630 SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
5631 return x;
5634 /* Return the two-character string that identifies floating-point
5635 return mode MODE in the name of a MIPS16 function stub. */
5637 static const char *
5638 mips16_call_stub_mode_suffix (enum machine_mode mode)
5640 if (mode == SFmode)
5641 return "sf";
5642 else if (mode == DFmode)
5643 return "df";
5644 else if (mode == SCmode)
5645 return "sc";
5646 else if (mode == DCmode)
5647 return "dc";
5648 else if (mode == V2SFmode)
5649 return "df";
5650 else
5651 gcc_unreachable ();
5654 /* Write instructions to move a 32-bit value between general register
5655 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5656 from GPREG to FPREG and 'f' to move in the opposite direction. */
5658 static void
5659 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5661 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5662 reg_names[gpreg], reg_names[fpreg]);
5665 /* Likewise for 64-bit values. */
5667 static void
5668 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5670 if (TARGET_64BIT)
5671 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5672 reg_names[gpreg], reg_names[fpreg]);
5673 else if (TARGET_FLOAT64)
5675 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5676 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5677 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5678 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5680 else
5682 /* Move the least-significant word. */
5683 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5684 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5685 /* ...then the most significant word. */
5686 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5687 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
5691 /* Write out code to move floating-point arguments into or out of
5692 general registers. FP_CODE is the code describing which arguments
5693 are present (see the comment above the definition of CUMULATIVE_ARGS
5694 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5696 static void
5697 mips_output_args_xfer (int fp_code, char direction)
5699 unsigned int gparg, fparg, f;
5700 CUMULATIVE_ARGS cum;
5702 /* This code only works for o32 and o64. */
5703 gcc_assert (TARGET_OLDABI);
5705 mips_init_cumulative_args (&cum, NULL);
5707 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5709 enum machine_mode mode;
5710 struct mips_arg_info info;
5712 if ((f & 3) == 1)
5713 mode = SFmode;
5714 else if ((f & 3) == 2)
5715 mode = DFmode;
5716 else
5717 gcc_unreachable ();
5719 mips_get_arg_info (&info, &cum, mode, NULL, true);
5720 gparg = mips_arg_regno (&info, false);
5721 fparg = mips_arg_regno (&info, true);
5723 if (mode == SFmode)
5724 mips_output_32bit_xfer (direction, gparg, fparg);
5725 else
5726 mips_output_64bit_xfer (direction, gparg, fparg);
5728 mips_function_arg_advance (&cum, mode, NULL, true);
5732 /* Write a MIPS16 stub for the current function. This stub is used
5733 for functions which take arguments in the floating-point registers.
5734 It is normal-mode code that moves the floating-point arguments
5735 into the general registers and then jumps to the MIPS16 code. */
5737 static void
5738 mips16_build_function_stub (void)
5740 const char *fnname, *alias_name, *separator;
5741 char *secname, *stubname;
5742 tree stubdecl;
5743 unsigned int f;
5744 rtx symbol, alias;
5746 /* Create the name of the stub, and its unique section. */
5747 symbol = XEXP (DECL_RTL (current_function_decl), 0);
5748 alias = mips16_local_alias (symbol);
5750 fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
5751 alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
5752 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5753 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5755 /* Build a decl for the stub. */
5756 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5757 build_function_type (void_type_node, NULL_TREE));
5758 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5759 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5761 /* Output a comment. */
5762 fprintf (asm_out_file, "\t# Stub function for %s (",
5763 current_function_name ());
5764 separator = "";
5765 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5767 fprintf (asm_out_file, "%s%s", separator,
5768 (f & 3) == 1 ? "float" : "double");
5769 separator = ", ";
5771 fprintf (asm_out_file, ")\n");
5773 /* Start the function definition. */
5774 assemble_start_function (stubdecl, stubname);
5775 mips_start_function_definition (stubname, false);
5777 /* If generating pic2 code, either set up the global pointer or
5778 switch to pic0. */
5779 if (TARGET_ABICALLS_PIC2)
5781 if (TARGET_ABSOLUTE_ABICALLS)
5782 fprintf (asm_out_file, "\t.option\tpic0\n");
5783 else
5785 output_asm_insn ("%(.cpload\t%^%)", NULL);
5786 /* Emit an R_MIPS_NONE relocation to tell the linker what the
5787 target function is. Use a local GOT access when loading the
5788 symbol, to cut down on the number of unnecessary GOT entries
5789 for stubs that aren't needed. */
5790 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
5791 symbol = alias;
5795 /* Load the address of the MIPS16 function into $25. Do this first so
5796 that targets with coprocessor interlocks can use an MFC1 to fill the
5797 delay slot. */
5798 output_asm_insn ("la\t%^,%0", &symbol);
5800 /* Move the arguments from floating-point registers to general registers. */
5801 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5803 /* Jump to the MIPS16 function. */
5804 output_asm_insn ("jr\t%^", NULL);
5806 if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
5807 fprintf (asm_out_file, "\t.option\tpic2\n");
5809 mips_end_function_definition (stubname);
5811 /* If the linker needs to create a dynamic symbol for the target
5812 function, it will associate the symbol with the stub (which,
5813 unlike the target function, follows the proper calling conventions).
5814 It is therefore useful to have a local alias for the target function,
5815 so that it can still be identified as MIPS16 code. As an optimization,
5816 this symbol can also be used for indirect MIPS16 references from
5817 within this file. */
5818 ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
5820 switch_to_section (function_section (current_function_decl));
5823 /* The current function is a MIPS16 function that returns a value in an FPR.
5824 Copy the return value from its soft-float to its hard-float location.
5825 libgcc2 has special non-MIPS16 helper functions for each case. */
5827 static void
5828 mips16_copy_fpr_return_value (void)
5830 rtx fn, insn, retval;
5831 tree return_type;
5832 enum machine_mode return_mode;
5833 const char *name;
5835 return_type = DECL_RESULT (current_function_decl);
5836 return_mode = DECL_MODE (return_type);
5838 name = ACONCAT (("__mips16_ret_",
5839 mips16_call_stub_mode_suffix (return_mode),
5840 NULL));
5841 fn = mips16_stub_function (name);
5843 /* The function takes arguments in $2 (and possibly $3), so calls
5844 to it cannot be lazily bound. */
5845 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
5847 /* Model the call as something that takes the GPR return value as
5848 argument and returns an "updated" value. */
5849 retval = gen_rtx_REG (return_mode, GP_RETURN);
5850 insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
5851 const0_rtx, NULL_RTX, false);
5852 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
5855 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
5856 RETVAL is the location of the return value, or null if this is
5857 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5858 arguments and FP_CODE is the code built by mips_function_arg;
5859 see the comment above CUMULATIVE_ARGS for details.
5861 There are three alternatives:
5863 - If a stub was needed, emit the call and return the call insn itself.
5865 - If we can avoid using a stub by redirecting the call, set *FN_PTR
5866 to the new target and return null.
5868 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
5869 unmodified.
5871 A stub is needed for calls to functions that, in normal mode,
5872 receive arguments in FPRs or return values in FPRs. The stub
5873 copies the arguments from their soft-float positions to their
5874 hard-float positions, calls the real function, then copies the
5875 return value from its hard-float position to its soft-float
5876 position.
5878 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
5879 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
5880 automatically redirects the JAL to the stub, otherwise the JAL
5881 continues to call FN directly. */
5883 static rtx
5884 mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
5886 const char *fnname;
5887 bool fp_ret_p;
5888 struct mips16_stub *l;
5889 rtx insn, fn;
5891 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5892 we were invoked with the -msoft-float option. */
5893 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5894 return NULL_RTX;
5896 /* Figure out whether the value might come back in a floating-point
5897 register. */
5898 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5900 /* We don't need to do anything if there were no floating-point
5901 arguments and the value will not be returned in a floating-point
5902 register. */
5903 if (fp_code == 0 && !fp_ret_p)
5904 return NULL_RTX;
5906 /* We don't need to do anything if this is a call to a special
5907 MIPS16 support function. */
5908 fn = *fn_ptr;
5909 if (mips16_stub_function_p (fn))
5910 return NULL_RTX;
5912 /* This code will only work for o32 and o64 abis. The other ABI's
5913 require more sophisticated support. */
5914 gcc_assert (TARGET_OLDABI);
5916 /* If we're calling via a function pointer, use one of the magic
5917 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5918 Each stub expects the function address to arrive in register $2. */
5919 if (GET_CODE (fn) != SYMBOL_REF
5920 || !call_insn_operand (fn, VOIDmode))
5922 char buf[30];
5923 rtx stub_fn, insn, addr;
5924 bool lazy_p;
5926 /* If this is a locally-defined and locally-binding function,
5927 avoid the stub by calling the local alias directly. */
5928 if (mips16_local_function_p (fn))
5930 *fn_ptr = mips16_local_alias (fn);
5931 return NULL_RTX;
5934 /* Create a SYMBOL_REF for the libgcc.a function. */
5935 if (fp_ret_p)
5936 sprintf (buf, "__mips16_call_stub_%s_%d",
5937 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5938 fp_code);
5939 else
5940 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5941 stub_fn = mips16_stub_function (buf);
5943 /* The function uses $2 as an argument, so calls to it
5944 cannot be lazily bound. */
5945 SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
5947 /* Load the target function into $2. */
5948 addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
5949 lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
5951 /* Emit the call. */
5952 insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
5953 args_size, NULL_RTX, lazy_p);
5955 /* Tell GCC that this call does indeed use the value of $2. */
5956 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
5958 /* If we are handling a floating-point return value, we need to
5959 save $18 in the function prologue. Putting a note on the
5960 call will mean that df_regs_ever_live_p ($18) will be true if the
5961 call is not eliminated, and we can check that in the prologue
5962 code. */
5963 if (fp_ret_p)
5964 CALL_INSN_FUNCTION_USAGE (insn) =
5965 gen_rtx_EXPR_LIST (VOIDmode,
5966 gen_rtx_CLOBBER (VOIDmode,
5967 gen_rtx_REG (word_mode, 18)),
5968 CALL_INSN_FUNCTION_USAGE (insn));
5970 return insn;
5973 /* We know the function we are going to call. If we have already
5974 built a stub, we don't need to do anything further. */
5975 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5976 for (l = mips16_stubs; l != NULL; l = l->next)
5977 if (strcmp (l->name, fnname) == 0)
5978 break;
5980 if (l == NULL)
5982 const char *separator;
5983 char *secname, *stubname;
5984 tree stubid, stubdecl;
5985 unsigned int f;
5987 /* If the function does not return in FPRs, the special stub
5988 section is named
5989 .mips16.call.FNNAME
5991 If the function does return in FPRs, the stub section is named
5992 .mips16.call.fp.FNNAME
5994 Build a decl for the stub. */
5995 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5996 fnname, NULL));
5997 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5998 fnname, NULL));
5999 stubid = get_identifier (stubname);
6000 stubdecl = build_decl (FUNCTION_DECL, stubid,
6001 build_function_type (void_type_node, NULL_TREE));
6002 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6003 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
6004 void_type_node);
6006 /* Output a comment. */
6007 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
6008 (fp_ret_p
6009 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
6010 : ""),
6011 fnname);
6012 separator = "";
6013 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6015 fprintf (asm_out_file, "%s%s", separator,
6016 (f & 3) == 1 ? "float" : "double");
6017 separator = ", ";
6019 fprintf (asm_out_file, ")\n");
6021 /* Start the function definition. */
6022 assemble_start_function (stubdecl, stubname);
6023 mips_start_function_definition (stubname, false);
6025 if (!fp_ret_p)
6027 /* Load the address of the MIPS16 function into $25. Do this
6028 first so that targets with coprocessor interlocks can use
6029 an MFC1 to fill the delay slot. */
6030 if (TARGET_EXPLICIT_RELOCS)
6032 output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
6033 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
6035 else
6036 output_asm_insn ("la\t%^,%0", &fn);
6039 /* Move the arguments from general registers to floating-point
6040 registers. */
6041 mips_output_args_xfer (fp_code, 't');
6043 if (!fp_ret_p)
6045 /* Jump to the previously-loaded address. */
6046 output_asm_insn ("jr\t%^", NULL);
6048 else
6050 /* Save the return address in $18 and call the non-MIPS16 function.
6051 The stub's caller knows that $18 might be clobbered, even though
6052 $18 is usually a call-saved register. */
6053 fprintf (asm_out_file, "\tmove\t%s,%s\n",
6054 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
6055 output_asm_insn (MIPS_CALL ("jal", &fn, 0), &fn);
6057 /* Move the result from floating-point registers to
6058 general registers. */
6059 switch (GET_MODE (retval))
6061 case SCmode:
6062 mips_output_32bit_xfer ('f', GP_RETURN + 1,
6063 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6064 /* Fall though. */
6065 case SFmode:
6066 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6067 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
6069 /* On 64-bit targets, complex floats are returned in
6070 a single GPR, such that "sd" on a suitably-aligned
6071 target would store the value correctly. */
6072 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6073 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
6074 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
6075 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
6076 reg_names[GP_RETURN],
6077 reg_names[GP_RETURN],
6078 reg_names[GP_RETURN + 1]);
6080 break;
6082 case DCmode:
6083 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
6084 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6085 /* Fall though. */
6086 case DFmode:
6087 case V2SFmode:
6088 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6089 break;
6091 default:
6092 gcc_unreachable ();
6094 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
6097 #ifdef ASM_DECLARE_FUNCTION_SIZE
6098 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
6099 #endif
6101 mips_end_function_definition (stubname);
6103 /* Record this stub. */
6104 l = XNEW (struct mips16_stub);
6105 l->name = xstrdup (fnname);
6106 l->fp_ret_p = fp_ret_p;
6107 l->next = mips16_stubs;
6108 mips16_stubs = l;
6111 /* If we expect a floating-point return value, but we've built a
6112 stub which does not expect one, then we're in trouble. We can't
6113 use the existing stub, because it won't handle the floating-point
6114 value. We can't build a new stub, because the linker won't know
6115 which stub to use for the various calls in this object file.
6116 Fortunately, this case is illegal, since it means that a function
6117 was declared in two different ways in a single compilation. */
6118 if (fp_ret_p && !l->fp_ret_p)
6119 error ("cannot handle inconsistent calls to %qs", fnname);
6121 if (retval == NULL_RTX)
6122 insn = gen_call_internal_direct (fn, args_size);
6123 else
6124 insn = gen_call_value_internal_direct (retval, fn, args_size);
6125 insn = mips_emit_call_insn (insn, fn, fn, false);
6127 /* If we are calling a stub which handles a floating-point return
6128 value, we need to arrange to save $18 in the prologue. We do this
6129 by marking the function call as using the register. The prologue
6130 will later see that it is used, and emit code to save it. */
6131 if (fp_ret_p)
6132 CALL_INSN_FUNCTION_USAGE (insn) =
6133 gen_rtx_EXPR_LIST (VOIDmode,
6134 gen_rtx_CLOBBER (VOIDmode,
6135 gen_rtx_REG (word_mode, 18)),
6136 CALL_INSN_FUNCTION_USAGE (insn));
6138 return insn;
6141 /* Expand a call of type TYPE. RESULT is where the result will go (null
6142 for "call"s and "sibcall"s), ADDR is the address of the function,
6143 ARGS_SIZE is the size of the arguments and AUX is the value passed
6144 to us by mips_function_arg. LAZY_P is true if this call already
6145 involves a lazily-bound function address (such as when calling
6146 functions through a MIPS16 hard-float stub).
6148 Return the call itself. */
6151 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6152 rtx args_size, rtx aux, bool lazy_p)
6154 rtx orig_addr, pattern, insn;
6155 int fp_code;
6157 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6158 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6159 if (insn)
6161 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6162 return insn;
6165 orig_addr = addr;
6166 if (!call_insn_operand (addr, VOIDmode))
6168 if (type == MIPS_CALL_EPILOGUE)
6169 addr = MIPS_EPILOGUE_TEMP (Pmode);
6170 else
6171 addr = gen_reg_rtx (Pmode);
6172 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6175 if (result == 0)
6177 rtx (*fn) (rtx, rtx);
6179 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6180 fn = gen_call_split;
6181 else if (type == MIPS_CALL_SIBCALL)
6182 fn = gen_sibcall_internal;
6183 else
6184 fn = gen_call_internal;
6186 pattern = fn (addr, args_size);
6188 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6190 /* Handle return values created by mips_return_fpr_pair. */
6191 rtx (*fn) (rtx, rtx, rtx, rtx);
6192 rtx reg1, reg2;
6194 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6195 fn = gen_call_value_multiple_split;
6196 else if (type == MIPS_CALL_SIBCALL)
6197 fn = gen_sibcall_value_multiple_internal;
6198 else
6199 fn = gen_call_value_multiple_internal;
6201 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6202 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6203 pattern = fn (reg1, addr, args_size, reg2);
6205 else
6207 rtx (*fn) (rtx, rtx, rtx);
6209 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6210 fn = gen_call_value_split;
6211 else if (type == MIPS_CALL_SIBCALL)
6212 fn = gen_sibcall_value_internal;
6213 else
6214 fn = gen_call_value_internal;
6216 /* Handle return values created by mips_return_fpr_single. */
6217 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6218 result = XEXP (XVECEXP (result, 0, 0), 0);
6219 pattern = fn (result, addr, args_size);
6222 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6225 /* Split call instruction INSN into a $gp-clobbering call and
6226 (where necessary) an instruction to restore $gp from its save slot.
6227 CALL_PATTERN is the pattern of the new call. */
6229 void
6230 mips_split_call (rtx insn, rtx call_pattern)
6232 rtx new_insn;
6234 new_insn = emit_call_insn (call_pattern);
6235 CALL_INSN_FUNCTION_USAGE (new_insn)
6236 = copy_rtx (CALL_INSN_FUNCTION_USAGE (insn));
6237 if (!find_reg_note (insn, REG_NORETURN, 0))
6238 /* Pick a temporary register that is suitable for both MIPS16 and
6239 non-MIPS16 code. $4 and $5 are used for returning complex double
6240 values in soft-float code, so $6 is the first suitable candidate. */
6241 mips_restore_gp (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6244 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
6246 static bool
6247 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6249 if (!TARGET_SIBCALLS)
6250 return false;
6252 /* Interrupt handlers need special epilogue code and therefore can't
6253 use sibcalls. */
6254 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
6255 return false;
6257 /* We can't do a sibcall if the called function is a MIPS16 function
6258 because there is no direct "jx" instruction equivalent to "jalx" to
6259 switch the ISA mode. We only care about cases where the sibling
6260 and normal calls would both be direct. */
6261 if (decl
6262 && mips_use_mips16_mode_p (decl)
6263 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6264 return false;
6266 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6267 functions could be MIPS16 ones unless an attribute explicitly tells
6268 us otherwise. */
6269 if (TARGET_INTERLINK_MIPS16
6270 && decl
6271 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6272 && !mips_nomips16_decl_p (decl)
6273 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6274 return false;
6276 /* Otherwise OK. */
6277 return true;
6280 /* Emit code to move general operand SRC into condition-code
6281 register DEST given that SCRATCH is a scratch TFmode FPR.
6282 The sequence is:
6284 FP1 = SRC
6285 FP2 = 0.0f
6286 DEST = FP2 < FP1
6288 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
6290 void
6291 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
6293 rtx fp1, fp2;
6295 /* Change the source to SFmode. */
6296 if (MEM_P (src))
6297 src = adjust_address (src, SFmode, 0);
6298 else if (REG_P (src) || GET_CODE (src) == SUBREG)
6299 src = gen_rtx_REG (SFmode, true_regnum (src));
6301 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
6302 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
6304 mips_emit_move (copy_rtx (fp1), src);
6305 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
6306 emit_insn (gen_slt_sf (dest, fp2, fp1));
6309 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6310 Assume that the areas do not overlap. */
6312 static void
6313 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
6315 HOST_WIDE_INT offset, delta;
6316 unsigned HOST_WIDE_INT bits;
6317 int i;
6318 enum machine_mode mode;
6319 rtx *regs;
6321 /* Work out how many bits to move at a time. If both operands have
6322 half-word alignment, it is usually better to move in half words.
6323 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
6324 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
6325 Otherwise move word-sized chunks. */
6326 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
6327 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
6328 bits = BITS_PER_WORD / 2;
6329 else
6330 bits = BITS_PER_WORD;
6332 mode = mode_for_size (bits, MODE_INT, 0);
6333 delta = bits / BITS_PER_UNIT;
6335 /* Allocate a buffer for the temporary registers. */
6336 regs = XALLOCAVEC (rtx, length / delta);
6338 /* Load as many BITS-sized chunks as possible. Use a normal load if
6339 the source has enough alignment, otherwise use left/right pairs. */
6340 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6342 regs[i] = gen_reg_rtx (mode);
6343 if (MEM_ALIGN (src) >= bits)
6344 mips_emit_move (regs[i], adjust_address (src, mode, offset));
6345 else
6347 rtx part = adjust_address (src, BLKmode, offset);
6348 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
6349 gcc_unreachable ();
6353 /* Copy the chunks to the destination. */
6354 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6355 if (MEM_ALIGN (dest) >= bits)
6356 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
6357 else
6359 rtx part = adjust_address (dest, BLKmode, offset);
6360 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
6361 gcc_unreachable ();
6364 /* Mop up any left-over bytes. */
6365 if (offset < length)
6367 src = adjust_address (src, BLKmode, offset);
6368 dest = adjust_address (dest, BLKmode, offset);
6369 move_by_pieces (dest, src, length - offset,
6370 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
6374 /* Helper function for doing a loop-based block operation on memory
6375 reference MEM. Each iteration of the loop will operate on LENGTH
6376 bytes of MEM.
6378 Create a new base register for use within the loop and point it to
6379 the start of MEM. Create a new memory reference that uses this
6380 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
6382 static void
6383 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
6384 rtx *loop_reg, rtx *loop_mem)
6386 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
6388 /* Although the new mem does not refer to a known location,
6389 it does keep up to LENGTH bytes of alignment. */
6390 *loop_mem = change_address (mem, BLKmode, *loop_reg);
6391 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
6394 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6395 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
6396 the memory regions do not overlap. */
6398 static void
6399 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
6400 HOST_WIDE_INT bytes_per_iter)
6402 rtx label, src_reg, dest_reg, final_src;
6403 HOST_WIDE_INT leftover;
6405 leftover = length % bytes_per_iter;
6406 length -= leftover;
6408 /* Create registers and memory references for use within the loop. */
6409 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
6410 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
6412 /* Calculate the value that SRC_REG should have after the last iteration
6413 of the loop. */
6414 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
6415 0, 0, OPTAB_WIDEN);
6417 /* Emit the start of the loop. */
6418 label = gen_label_rtx ();
6419 emit_label (label);
6421 /* Emit the loop body. */
6422 mips_block_move_straight (dest, src, bytes_per_iter);
6424 /* Move on to the next block. */
6425 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
6426 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
6428 /* Emit the loop condition. */
6429 if (Pmode == DImode)
6430 emit_insn (gen_cmpdi (src_reg, final_src));
6431 else
6432 emit_insn (gen_cmpsi (src_reg, final_src));
6433 emit_jump_insn (gen_bne (label));
6435 /* Mop up any left-over bytes. */
6436 if (leftover)
6437 mips_block_move_straight (dest, src, leftover);
6440 /* Expand a movmemsi instruction, which copies LENGTH bytes from
6441 memory reference SRC to memory reference DEST. */
6443 bool
6444 mips_expand_block_move (rtx dest, rtx src, rtx length)
6446 if (GET_CODE (length) == CONST_INT)
6448 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6450 mips_block_move_straight (dest, src, INTVAL (length));
6451 return true;
6453 else if (optimize)
6455 mips_block_move_loop (dest, src, INTVAL (length),
6456 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
6457 return true;
6460 return false;
6463 /* Expand a loop of synci insns for the address range [BEGIN, END). */
6465 void
6466 mips_expand_synci_loop (rtx begin, rtx end)
6468 rtx inc, label, cmp, cmp_result;
6470 /* Load INC with the cache line size (rdhwr INC,$1). */
6471 inc = gen_reg_rtx (Pmode);
6472 emit_insn (Pmode == SImode
6473 ? gen_rdhwr_synci_step_si (inc)
6474 : gen_rdhwr_synci_step_di (inc));
6476 /* Loop back to here. */
6477 label = gen_label_rtx ();
6478 emit_label (label);
6480 emit_insn (gen_synci (begin));
6482 cmp = mips_force_binary (Pmode, GTU, begin, end);
6484 mips_emit_binary (PLUS, begin, begin, inc);
6486 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6487 emit_jump_insn (gen_condjump (cmp_result, label));
6490 /* Expand a QI or HI mode atomic memory operation.
6492 GENERATOR contains a pointer to the gen_* function that generates
6493 the SI mode underlying atomic operation using masks that we
6494 calculate.
6496 RESULT is the return register for the operation. Its value is NULL
6497 if unused.
6499 MEM is the location of the atomic access.
6501 OLDVAL is the first operand for the operation.
6503 NEWVAL is the optional second operand for the operation. Its value
6504 is NULL if unused. */
6506 void
6507 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6508 rtx result, rtx mem, rtx oldval, rtx newval)
6510 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6511 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6512 rtx res = NULL;
6513 enum machine_mode mode;
6515 mode = GET_MODE (mem);
6517 /* Compute the address of the containing SImode value. */
6518 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6519 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6520 force_reg (Pmode, GEN_INT (-4)));
6522 /* Create a memory reference for it. */
6523 memsi = gen_rtx_MEM (SImode, memsi_addr);
6524 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6525 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6527 /* Work out the byte offset of the QImode or HImode value,
6528 counting from the least significant byte. */
6529 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6530 if (TARGET_BIG_ENDIAN)
6531 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6533 /* Multiply by eight to convert the shift value from bytes to bits. */
6534 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6536 /* Make the final shift an SImode value, so that it can be used in
6537 SImode operations. */
6538 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6540 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6541 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6542 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6543 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6545 /* Compute the equivalent exclusive mask. */
6546 inverted_mask = gen_reg_rtx (SImode);
6547 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6548 gen_rtx_NOT (SImode, mask)));
6550 /* Shift the old value into place. */
6551 if (oldval != const0_rtx)
6553 oldval = convert_modes (SImode, mode, oldval, true);
6554 oldval = force_reg (SImode, oldval);
6555 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6558 /* Do the same for the new value. */
6559 if (newval && newval != const0_rtx)
6561 newval = convert_modes (SImode, mode, newval, true);
6562 newval = force_reg (SImode, newval);
6563 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6566 /* Do the SImode atomic access. */
6567 if (result)
6568 res = gen_reg_rtx (SImode);
6569 if (newval)
6570 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6571 else if (result)
6572 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6573 else
6574 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6576 emit_insn (si_op);
6578 if (result)
6580 /* Shift and convert the result. */
6581 mips_emit_binary (AND, res, res, mask);
6582 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6583 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
6587 /* Return true if it is possible to use left/right accesses for a
6588 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6589 returning true, update *OP, *LEFT and *RIGHT as follows:
6591 *OP is a BLKmode reference to the whole field.
6593 *LEFT is a QImode reference to the first byte if big endian or
6594 the last byte if little endian. This address can be used in the
6595 left-side instructions (LWL, SWL, LDL, SDL).
6597 *RIGHT is a QImode reference to the opposite end of the field and
6598 can be used in the patterning right-side instruction. */
6600 static bool
6601 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6602 rtx *left, rtx *right)
6604 rtx first, last;
6606 /* Check that the operand really is a MEM. Not all the extv and
6607 extzv predicates are checked. */
6608 if (!MEM_P (*op))
6609 return false;
6611 /* Check that the size is valid. */
6612 if (width != 32 && (!TARGET_64BIT || width != 64))
6613 return false;
6615 /* We can only access byte-aligned values. Since we are always passed
6616 a reference to the first byte of the field, it is not necessary to
6617 do anything with BITPOS after this check. */
6618 if (bitpos % BITS_PER_UNIT != 0)
6619 return false;
6621 /* Reject aligned bitfields: we want to use a normal load or store
6622 instead of a left/right pair. */
6623 if (MEM_ALIGN (*op) >= width)
6624 return false;
6626 /* Adjust *OP to refer to the whole field. This also has the effect
6627 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6628 *op = adjust_address (*op, BLKmode, 0);
6629 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6631 /* Get references to both ends of the field. We deliberately don't
6632 use the original QImode *OP for FIRST since the new BLKmode one
6633 might have a simpler address. */
6634 first = adjust_address (*op, QImode, 0);
6635 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6637 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6638 correspond to the MSB and RIGHT to the LSB. */
6639 if (TARGET_BIG_ENDIAN)
6640 *left = first, *right = last;
6641 else
6642 *left = last, *right = first;
6644 return true;
6647 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6648 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6649 the operation is the equivalent of:
6651 (set DEST (*_extract SRC WIDTH BITPOS))
6653 Return true on success. */
6655 bool
6656 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6657 HOST_WIDE_INT bitpos)
6659 rtx left, right, temp;
6661 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
6662 be a paradoxical word_mode subreg. This is the only case in which
6663 we allow the destination to be larger than the source. */
6664 if (GET_CODE (dest) == SUBREG
6665 && GET_MODE (dest) == DImode
6666 && GET_MODE (SUBREG_REG (dest)) == SImode)
6667 dest = SUBREG_REG (dest);
6669 /* After the above adjustment, the destination must be the same
6670 width as the source. */
6671 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6672 return false;
6674 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6675 return false;
6677 temp = gen_reg_rtx (GET_MODE (dest));
6678 if (GET_MODE (dest) == DImode)
6680 emit_insn (gen_mov_ldl (temp, src, left));
6681 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6683 else
6685 emit_insn (gen_mov_lwl (temp, src, left));
6686 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6688 return true;
6691 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6692 BITPOS and SRC are the operands passed to the expander; the operation
6693 is the equivalent of:
6695 (set (zero_extract DEST WIDTH BITPOS) SRC)
6697 Return true on success. */
6699 bool
6700 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6701 HOST_WIDE_INT bitpos)
6703 rtx left, right;
6704 enum machine_mode mode;
6706 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6707 return false;
6709 mode = mode_for_size (width, MODE_INT, 0);
6710 src = gen_lowpart (mode, src);
6711 if (mode == DImode)
6713 emit_insn (gen_mov_sdl (dest, src, left));
6714 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6716 else
6718 emit_insn (gen_mov_swl (dest, src, left));
6719 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6721 return true;
6724 /* Return true if X is a MEM with the same size as MODE. */
6726 bool
6727 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6729 rtx size;
6731 if (!MEM_P (x))
6732 return false;
6734 size = MEM_SIZE (x);
6735 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6738 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6739 source of an "ext" instruction or the destination of an "ins"
6740 instruction. OP must be a register operand and the following
6741 conditions must hold:
6743 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6744 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6745 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6747 Also reject lengths equal to a word as they are better handled
6748 by the move patterns. */
6750 bool
6751 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6753 if (!ISA_HAS_EXT_INS
6754 || !register_operand (op, VOIDmode)
6755 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6756 return false;
6758 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6759 return false;
6761 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6762 return false;
6764 return true;
6767 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
6768 operation if MAXLEN is the maxium length of consecutive bits that
6769 can make up MASK. MODE is the mode of the operation. See
6770 mask_low_and_shift_len for the actual definition. */
6772 bool
6773 mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
6775 return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
6778 /* The canonical form of a mask-low-and-shift-left operation is
6779 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
6780 cleared. Thus we need to shift MASK to the right before checking if it
6781 is a valid mask value. MODE is the mode of the operation. If true
6782 return the length of the mask, otherwise return -1. */
6785 mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
6787 HOST_WIDE_INT shval;
6789 shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
6790 return exact_log2 ((UINTVAL (mask) >> shval) + 1);
6793 /* Return true if -msplit-addresses is selected and should be honored.
6795 -msplit-addresses is a half-way house between explicit relocations
6796 and the traditional assembler macros. It can split absolute 32-bit
6797 symbolic constants into a high/lo_sum pair but uses macros for other
6798 sorts of access.
6800 Like explicit relocation support for REL targets, it relies
6801 on GNU extensions in the assembler and the linker.
6803 Although this code should work for -O0, it has traditionally
6804 been treated as an optimization. */
6806 static bool
6807 mips_split_addresses_p (void)
6809 return (TARGET_SPLIT_ADDRESSES
6810 && optimize
6811 && !TARGET_MIPS16
6812 && !flag_pic
6813 && !ABI_HAS_64BIT_SYMBOLS);
6816 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6818 static void
6819 mips_init_relocs (void)
6821 memset (mips_split_p, '\0', sizeof (mips_split_p));
6822 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
6823 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6824 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6826 if (ABI_HAS_64BIT_SYMBOLS)
6828 if (TARGET_EXPLICIT_RELOCS)
6830 mips_split_p[SYMBOL_64_HIGH] = true;
6831 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6832 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6834 mips_split_p[SYMBOL_64_MID] = true;
6835 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6836 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6838 mips_split_p[SYMBOL_64_LOW] = true;
6839 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6840 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6842 mips_split_p[SYMBOL_ABSOLUTE] = true;
6843 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6846 else
6848 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6850 mips_split_p[SYMBOL_ABSOLUTE] = true;
6851 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6852 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6854 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6858 if (TARGET_MIPS16)
6860 /* The high part is provided by a pseudo copy of $gp. */
6861 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6862 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6864 else if (TARGET_EXPLICIT_RELOCS)
6865 /* Small data constants are kept whole until after reload,
6866 then lowered by mips_rewrite_small_data. */
6867 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6869 if (TARGET_EXPLICIT_RELOCS)
6871 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6872 if (TARGET_NEWABI)
6874 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6875 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6877 else
6879 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6880 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6882 if (TARGET_MIPS16)
6883 /* Expose the use of $28 as soon as possible. */
6884 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
6886 if (TARGET_XGOT)
6888 /* The HIGH and LO_SUM are matched by special .md patterns. */
6889 mips_split_p[SYMBOL_GOT_DISP] = true;
6891 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6892 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6893 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6895 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6896 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6897 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6899 else
6901 if (TARGET_NEWABI)
6902 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6903 else
6904 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6905 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6906 if (TARGET_MIPS16)
6907 /* Expose the use of $28 as soon as possible. */
6908 mips_split_p[SYMBOL_GOT_DISP] = true;
6912 if (TARGET_NEWABI)
6914 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6915 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6916 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6919 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6920 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6922 mips_split_p[SYMBOL_DTPREL] = true;
6923 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6924 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6926 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6928 mips_split_p[SYMBOL_TPREL] = true;
6929 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6930 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6932 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6935 /* If OP is an UNSPEC address, return the address to which it refers,
6936 otherwise return OP itself. */
6938 static rtx
6939 mips_strip_unspec_address (rtx op)
6941 rtx base, offset;
6943 split_const (op, &base, &offset);
6944 if (UNSPEC_ADDRESS_P (base))
6945 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6946 return op;
6949 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6950 in context CONTEXT. RELOCS is the array of relocations to use. */
6952 static void
6953 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6954 const char **relocs)
6956 enum mips_symbol_type symbol_type;
6957 const char *p;
6959 symbol_type = mips_classify_symbolic_expression (op, context);
6960 gcc_assert (relocs[symbol_type]);
6962 fputs (relocs[symbol_type], file);
6963 output_addr_const (file, mips_strip_unspec_address (op));
6964 for (p = relocs[symbol_type]; *p != 0; p++)
6965 if (*p == '(')
6966 fputc (')', file);
6969 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
6970 The punctuation characters are:
6972 '(' Start a nested ".set noreorder" block.
6973 ')' End a nested ".set noreorder" block.
6974 '[' Start a nested ".set noat" block.
6975 ']' End a nested ".set noat" block.
6976 '<' Start a nested ".set nomacro" block.
6977 '>' End a nested ".set nomacro" block.
6978 '*' Behave like %(%< if generating a delayed-branch sequence.
6979 '#' Print a nop if in a ".set noreorder" block.
6980 '/' Like '#', but do nothing within a delayed-branch sequence.
6981 '?' Print "l" if mips_branch_likely is true
6982 '~' Print a nop if mips_branch_likely is true
6983 '.' Print the name of the register with a hard-wired zero (zero or $0).
6984 '@' Print the name of the assembler temporary register (at or $1).
6985 '^' Print the name of the pic call-through register (t9 or $25).
6986 '+' Print the name of the gp register (usually gp or $28).
6987 '$' Print the name of the stack pointer register (sp or $29).
6988 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6989 '-' Print ".set pop" under the same conditions for '|'.
6991 See also mips_init_print_operand_pucnt. */
6993 static void
6994 mips_print_operand_punctuation (FILE *file, int ch)
6996 switch (ch)
6998 case '(':
6999 if (set_noreorder++ == 0)
7000 fputs (".set\tnoreorder\n\t", file);
7001 break;
7003 case ')':
7004 gcc_assert (set_noreorder > 0);
7005 if (--set_noreorder == 0)
7006 fputs ("\n\t.set\treorder", file);
7007 break;
7009 case '[':
7010 if (set_noat++ == 0)
7011 fputs (".set\tnoat\n\t", file);
7012 break;
7014 case ']':
7015 gcc_assert (set_noat > 0);
7016 if (--set_noat == 0)
7017 fputs ("\n\t.set\tat", file);
7018 break;
7020 case '<':
7021 if (set_nomacro++ == 0)
7022 fputs (".set\tnomacro\n\t", file);
7023 break;
7025 case '>':
7026 gcc_assert (set_nomacro > 0);
7027 if (--set_nomacro == 0)
7028 fputs ("\n\t.set\tmacro", file);
7029 break;
7031 case '*':
7032 if (final_sequence != 0)
7034 mips_print_operand_punctuation (file, '(');
7035 mips_print_operand_punctuation (file, '<');
7037 break;
7039 case '#':
7040 if (set_noreorder != 0)
7041 fputs ("\n\tnop", file);
7042 break;
7044 case '/':
7045 /* Print an extra newline so that the delayed insn is separated
7046 from the following ones. This looks neater and is consistent
7047 with non-nop delayed sequences. */
7048 if (set_noreorder != 0 && final_sequence == 0)
7049 fputs ("\n\tnop\n", file);
7050 break;
7052 case '?':
7053 if (mips_branch_likely)
7054 putc ('l', file);
7055 break;
7057 case '~':
7058 if (mips_branch_likely)
7059 fputs ("\n\tnop", file);
7060 break;
7062 case '.':
7063 fputs (reg_names[GP_REG_FIRST + 0], file);
7064 break;
7066 case '@':
7067 fputs (reg_names[GP_REG_FIRST + 1], file);
7068 break;
7070 case '^':
7071 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
7072 break;
7074 case '+':
7075 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
7076 break;
7078 case '$':
7079 fputs (reg_names[STACK_POINTER_REGNUM], file);
7080 break;
7082 case '|':
7083 if (!ISA_HAS_LL_SC)
7084 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
7085 break;
7087 case '-':
7088 if (!ISA_HAS_LL_SC)
7089 fputs ("\n\t.set\tpop", file);
7090 break;
7092 default:
7093 gcc_unreachable ();
7094 break;
7098 /* Initialize mips_print_operand_punct. */
7100 static void
7101 mips_init_print_operand_punct (void)
7103 const char *p;
7105 for (p = "()[]<>*#/?~.@^+$|-"; *p; p++)
7106 mips_print_operand_punct[(unsigned char) *p] = true;
7109 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
7110 associated with condition CODE. Print the condition part of the
7111 opcode to FILE. */
7113 static void
7114 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
7116 switch (code)
7118 case EQ:
7119 case NE:
7120 case GT:
7121 case GE:
7122 case LT:
7123 case LE:
7124 case GTU:
7125 case GEU:
7126 case LTU:
7127 case LEU:
7128 /* Conveniently, the MIPS names for these conditions are the same
7129 as their RTL equivalents. */
7130 fputs (GET_RTX_NAME (code), file);
7131 break;
7133 default:
7134 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7135 break;
7139 /* Likewise floating-point branches. */
7141 static void
7142 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
7144 switch (code)
7146 case EQ:
7147 fputs ("c1f", file);
7148 break;
7150 case NE:
7151 fputs ("c1t", file);
7152 break;
7154 default:
7155 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7156 break;
7160 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
7162 'X' Print CONST_INT OP in hexadecimal format.
7163 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
7164 'd' Print CONST_INT OP in decimal.
7165 'm' Print one less than CONST_INT OP in decimal.
7166 'h' Print the high-part relocation associated with OP, after stripping
7167 any outermost HIGH.
7168 'R' Print the low-part relocation associated with OP.
7169 'C' Print the integer branch condition for comparison OP.
7170 'N' Print the inverse of the integer branch condition for comparison OP.
7171 'F' Print the FPU branch condition for comparison OP.
7172 'W' Print the inverse of the FPU branch condition for comparison OP.
7173 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7174 'z' for (eq:?I ...), 'n' for (ne:?I ...).
7175 't' Like 'T', but with the EQ/NE cases reversed
7176 'Y' Print mips_fp_conditions[INTVAL (OP)]
7177 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7178 'q' Print a DSP accumulator register.
7179 'D' Print the second part of a double-word register or memory operand.
7180 'L' Print the low-order register in a double-word register operand.
7181 'M' Print high-order register in a double-word register operand.
7182 'z' Print $0 if OP is zero, otherwise print OP normally. */
7184 void
7185 mips_print_operand (FILE *file, rtx op, int letter)
7187 enum rtx_code code;
7189 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
7191 mips_print_operand_punctuation (file, letter);
7192 return;
7195 gcc_assert (op);
7196 code = GET_CODE (op);
7198 switch (letter)
7200 case 'X':
7201 if (GET_CODE (op) == CONST_INT)
7202 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
7203 else
7204 output_operand_lossage ("invalid use of '%%%c'", letter);
7205 break;
7207 case 'x':
7208 if (GET_CODE (op) == CONST_INT)
7209 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
7210 else
7211 output_operand_lossage ("invalid use of '%%%c'", letter);
7212 break;
7214 case 'd':
7215 if (GET_CODE (op) == CONST_INT)
7216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
7217 else
7218 output_operand_lossage ("invalid use of '%%%c'", letter);
7219 break;
7221 case 'm':
7222 if (GET_CODE (op) == CONST_INT)
7223 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
7224 else
7225 output_operand_lossage ("invalid use of '%%%c'", letter);
7226 break;
7228 case 'h':
7229 if (code == HIGH)
7230 op = XEXP (op, 0);
7231 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
7232 break;
7234 case 'R':
7235 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
7236 break;
7238 case 'C':
7239 mips_print_int_branch_condition (file, code, letter);
7240 break;
7242 case 'N':
7243 mips_print_int_branch_condition (file, reverse_condition (code), letter);
7244 break;
7246 case 'F':
7247 mips_print_float_branch_condition (file, code, letter);
7248 break;
7250 case 'W':
7251 mips_print_float_branch_condition (file, reverse_condition (code),
7252 letter);
7253 break;
7255 case 'T':
7256 case 't':
7258 int truth = (code == NE) == (letter == 'T');
7259 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
7261 break;
7263 case 'Y':
7264 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
7265 fputs (mips_fp_conditions[UINTVAL (op)], file);
7266 else
7267 output_operand_lossage ("'%%%c' is not a valid operand prefix",
7268 letter);
7269 break;
7271 case 'Z':
7272 if (ISA_HAS_8CC)
7274 mips_print_operand (file, op, 0);
7275 fputc (',', file);
7277 break;
7279 case 'q':
7280 if (code == REG && MD_REG_P (REGNO (op)))
7281 fprintf (file, "$ac0");
7282 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
7283 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
7284 else
7285 output_operand_lossage ("invalid use of '%%%c'", letter);
7286 break;
7288 default:
7289 switch (code)
7291 case REG:
7293 unsigned int regno = REGNO (op);
7294 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
7295 || (letter == 'L' && TARGET_BIG_ENDIAN)
7296 || letter == 'D')
7297 regno++;
7298 /* We need to print $0 .. $31 for COP0 registers. */
7299 if (COP0_REG_P (regno))
7300 fprintf (file, "$%s", &reg_names[regno][4]);
7301 else
7302 fprintf (file, "%s", reg_names[regno]);
7304 break;
7306 case MEM:
7307 if (letter == 'D')
7308 output_address (plus_constant (XEXP (op, 0), 4));
7309 else
7310 output_address (XEXP (op, 0));
7311 break;
7313 default:
7314 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
7315 fputs (reg_names[GP_REG_FIRST], file);
7316 else if (CONST_GP_P (op))
7317 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
7318 else
7319 output_addr_const (file, mips_strip_unspec_address (op));
7320 break;
7325 /* Output address operand X to FILE. */
7327 void
7328 mips_print_operand_address (FILE *file, rtx x)
7330 struct mips_address_info addr;
7332 if (mips_classify_address (&addr, x, word_mode, true))
7333 switch (addr.type)
7335 case ADDRESS_REG:
7336 mips_print_operand (file, addr.offset, 0);
7337 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7338 return;
7340 case ADDRESS_LO_SUM:
7341 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
7342 mips_lo_relocs);
7343 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7344 return;
7346 case ADDRESS_CONST_INT:
7347 output_addr_const (file, x);
7348 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
7349 return;
7351 case ADDRESS_SYMBOLIC:
7352 output_addr_const (file, mips_strip_unspec_address (x));
7353 return;
7355 gcc_unreachable ();
7358 /* Implement TARGET_ENCODE_SECTION_INFO. */
7360 static void
7361 mips_encode_section_info (tree decl, rtx rtl, int first)
7363 default_encode_section_info (decl, rtl, first);
7365 if (TREE_CODE (decl) == FUNCTION_DECL)
7367 rtx symbol = XEXP (rtl, 0);
7368 tree type = TREE_TYPE (decl);
7370 /* Encode whether the symbol is short or long. */
7371 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
7372 || mips_far_type_p (type))
7373 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
7377 /* Implement TARGET_SELECT_RTX_SECTION. */
7379 static section *
7380 mips_select_rtx_section (enum machine_mode mode, rtx x,
7381 unsigned HOST_WIDE_INT align)
7383 /* ??? Consider using mergeable small data sections. */
7384 if (mips_rtx_constant_in_small_data_p (mode))
7385 return get_named_section (NULL, ".sdata", 0);
7387 return default_elf_select_rtx_section (mode, x, align);
7390 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7392 The complication here is that, with the combination TARGET_ABICALLS
7393 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7394 absolute addresses, and should therefore not be included in the
7395 read-only part of a DSO. Handle such cases by selecting a normal
7396 data section instead of a read-only one. The logic apes that in
7397 default_function_rodata_section. */
7399 static section *
7400 mips_function_rodata_section (tree decl)
7402 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
7403 return default_function_rodata_section (decl);
7405 if (decl && DECL_SECTION_NAME (decl))
7407 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7408 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7410 char *rname = ASTRDUP (name);
7411 rname[14] = 'd';
7412 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7414 else if (flag_function_sections
7415 && flag_data_sections
7416 && strncmp (name, ".text.", 6) == 0)
7418 char *rname = ASTRDUP (name);
7419 memcpy (rname + 1, "data", 4);
7420 return get_section (rname, SECTION_WRITE, decl);
7423 return data_section;
7426 /* Implement TARGET_IN_SMALL_DATA_P. */
7428 static bool
7429 mips_in_small_data_p (const_tree decl)
7431 unsigned HOST_WIDE_INT size;
7433 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7434 return false;
7436 /* We don't yet generate small-data references for -mabicalls
7437 or VxWorks RTP code. See the related -G handling in
7438 mips_override_options. */
7439 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
7440 return false;
7442 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7444 const char *name;
7446 /* Reject anything that isn't in a known small-data section. */
7447 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7448 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7449 return false;
7451 /* If a symbol is defined externally, the assembler will use the
7452 usual -G rules when deciding how to implement macros. */
7453 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
7454 return true;
7456 else if (TARGET_EMBEDDED_DATA)
7458 /* Don't put constants into the small data section: we want them
7459 to be in ROM rather than RAM. */
7460 if (TREE_CODE (decl) != VAR_DECL)
7461 return false;
7463 if (TREE_READONLY (decl)
7464 && !TREE_SIDE_EFFECTS (decl)
7465 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7466 return false;
7469 /* Enforce -mlocal-sdata. */
7470 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
7471 return false;
7473 /* Enforce -mextern-sdata. */
7474 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
7476 if (DECL_EXTERNAL (decl))
7477 return false;
7478 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
7479 return false;
7482 /* We have traditionally not treated zero-sized objects as small data,
7483 so this is now effectively part of the ABI. */
7484 size = int_size_in_bytes (TREE_TYPE (decl));
7485 return size > 0 && size <= mips_small_data_threshold;
7488 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7489 anchors for small data: the GP register acts as an anchor in that
7490 case. We also don't want to use them for PC-relative accesses,
7491 where the PC acts as an anchor. */
7493 static bool
7494 mips_use_anchors_for_symbol_p (const_rtx symbol)
7496 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
7498 case SYMBOL_PC_RELATIVE:
7499 case SYMBOL_GP_RELATIVE:
7500 return false;
7502 default:
7503 return default_use_anchors_for_symbol_p (symbol);
7507 /* The MIPS debug format wants all automatic variables and arguments
7508 to be in terms of the virtual frame pointer (stack pointer before
7509 any adjustment in the function), while the MIPS 3.0 linker wants
7510 the frame pointer to be the stack pointer after the initial
7511 adjustment. So, we do the adjustment here. The arg pointer (which
7512 is eliminated) points to the virtual frame pointer, while the frame
7513 pointer (which may be eliminated) points to the stack pointer after
7514 the initial adjustments. */
7516 HOST_WIDE_INT
7517 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
7519 rtx offset2 = const0_rtx;
7520 rtx reg = eliminate_constant_term (addr, &offset2);
7522 if (offset == 0)
7523 offset = INTVAL (offset2);
7525 if (reg == stack_pointer_rtx
7526 || reg == frame_pointer_rtx
7527 || reg == hard_frame_pointer_rtx)
7529 offset -= cfun->machine->frame.total_size;
7530 if (reg == hard_frame_pointer_rtx)
7531 offset += cfun->machine->frame.hard_frame_pointer_offset;
7534 /* sdbout_parms does not want this to crash for unrecognized cases. */
7535 #if 0
7536 else if (reg != arg_pointer_rtx)
7537 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7538 addr);
7539 #endif
7541 return offset;
7544 /* Implement ASM_OUTPUT_EXTERNAL. */
7546 void
7547 mips_output_external (FILE *file, tree decl, const char *name)
7549 default_elf_asm_output_external (file, decl, name);
7551 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7552 set in order to avoid putting out names that are never really
7553 used. */
7554 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7556 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7558 /* When using assembler macros, emit .extern directives for
7559 all small-data externs so that the assembler knows how
7560 big they are.
7562 In most cases it would be safe (though pointless) to emit
7563 .externs for other symbols too. One exception is when an
7564 object is within the -G limit but declared by the user to
7565 be in a section other than .sbss or .sdata. */
7566 fputs ("\t.extern\t", file);
7567 assemble_name (file, name);
7568 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7569 int_size_in_bytes (TREE_TYPE (decl)));
7571 else if (TARGET_IRIX
7572 && mips_abi == ABI_32
7573 && TREE_CODE (decl) == FUNCTION_DECL)
7575 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7576 `.global name .text' directive for every used but
7577 undefined function. If we don't, the linker may perform
7578 an optimization (skipping over the insns that set $gp)
7579 when it is unsafe. */
7580 fputs ("\t.globl ", file);
7581 assemble_name (file, name);
7582 fputs (" .text\n", file);
7587 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7589 void
7590 mips_output_filename (FILE *stream, const char *name)
7592 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7593 directives. */
7594 if (write_symbols == DWARF2_DEBUG)
7595 return;
7596 else if (mips_output_filename_first_time)
7598 mips_output_filename_first_time = 0;
7599 num_source_filenames += 1;
7600 current_function_file = name;
7601 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7602 output_quoted_string (stream, name);
7603 putc ('\n', stream);
7605 /* If we are emitting stabs, let dbxout.c handle this (except for
7606 the mips_output_filename_first_time case). */
7607 else if (write_symbols == DBX_DEBUG)
7608 return;
7609 else if (name != current_function_file
7610 && strcmp (name, current_function_file) != 0)
7612 num_source_filenames += 1;
7613 current_function_file = name;
7614 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7615 output_quoted_string (stream, name);
7616 putc ('\n', stream);
7620 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7622 static void ATTRIBUTE_UNUSED
7623 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7625 switch (size)
7627 case 4:
7628 fputs ("\t.dtprelword\t", file);
7629 break;
7631 case 8:
7632 fputs ("\t.dtpreldword\t", file);
7633 break;
7635 default:
7636 gcc_unreachable ();
7638 output_addr_const (file, x);
7639 fputs ("+0x8000", file);
7642 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7644 static rtx
7645 mips_dwarf_register_span (rtx reg)
7647 rtx high, low;
7648 enum machine_mode mode;
7650 /* By default, GCC maps increasing register numbers to increasing
7651 memory locations, but paired FPRs are always little-endian,
7652 regardless of the prevailing endianness. */
7653 mode = GET_MODE (reg);
7654 if (FP_REG_P (REGNO (reg))
7655 && TARGET_BIG_ENDIAN
7656 && MAX_FPRS_PER_FMT > 1
7657 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7659 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7660 high = mips_subword (reg, true);
7661 low = mips_subword (reg, false);
7662 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7665 return NULL_RTX;
7668 /* Implement ASM_OUTPUT_ASCII. */
7670 void
7671 mips_output_ascii (FILE *stream, const char *string, size_t len)
7673 size_t i;
7674 int cur_pos;
7676 cur_pos = 17;
7677 fprintf (stream, "\t.ascii\t\"");
7678 for (i = 0; i < len; i++)
7680 int c;
7682 c = (unsigned char) string[i];
7683 if (ISPRINT (c))
7685 if (c == '\\' || c == '\"')
7687 putc ('\\', stream);
7688 cur_pos++;
7690 putc (c, stream);
7691 cur_pos++;
7693 else
7695 fprintf (stream, "\\%03o", c);
7696 cur_pos += 4;
7699 if (cur_pos > 72 && i+1 < len)
7701 cur_pos = 17;
7702 fprintf (stream, "\"\n\t.ascii\t\"");
7705 fprintf (stream, "\"\n");
7708 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7709 macros, mark the symbol as written so that mips_asm_output_external
7710 won't emit an .extern for it. STREAM is the output file, NAME is the
7711 name of the symbol, INIT_STRING is the string that should be written
7712 before the symbol and FINAL_STRING is the string that should be
7713 written after it. FINAL_STRING is a printf format that consumes the
7714 remaining arguments. */
7716 void
7717 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7718 const char *final_string, ...)
7720 va_list ap;
7722 fputs (init_string, stream);
7723 assemble_name (stream, name);
7724 va_start (ap, final_string);
7725 vfprintf (stream, final_string, ap);
7726 va_end (ap);
7728 if (!TARGET_EXPLICIT_RELOCS)
7730 tree name_tree = get_identifier (name);
7731 TREE_ASM_WRITTEN (name_tree) = 1;
7735 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7736 NAME is the name of the object and ALIGN is the required alignment
7737 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7738 alignment argument. */
7740 void
7741 mips_declare_common_object (FILE *stream, const char *name,
7742 const char *init_string,
7743 unsigned HOST_WIDE_INT size,
7744 unsigned int align, bool takes_alignment_p)
7746 if (!takes_alignment_p)
7748 size += (align / BITS_PER_UNIT) - 1;
7749 size -= size % (align / BITS_PER_UNIT);
7750 mips_declare_object (stream, name, init_string,
7751 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7753 else
7754 mips_declare_object (stream, name, init_string,
7755 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7756 size, align / BITS_PER_UNIT);
7759 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7760 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7762 void
7763 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7764 unsigned HOST_WIDE_INT size,
7765 unsigned int align)
7767 /* If the target wants uninitialized const declarations in
7768 .rdata then don't put them in .comm. */
7769 if (TARGET_EMBEDDED_DATA
7770 && TARGET_UNINIT_CONST_IN_RODATA
7771 && TREE_CODE (decl) == VAR_DECL
7772 && TREE_READONLY (decl)
7773 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7775 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7776 targetm.asm_out.globalize_label (stream, name);
7778 switch_to_section (readonly_data_section);
7779 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7780 mips_declare_object (stream, name, "",
7781 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7782 size);
7784 else
7785 mips_declare_common_object (stream, name, "\n\t.comm\t",
7786 size, align, true);
7789 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7790 extern int size_directive_output;
7792 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7793 definitions except that it uses mips_declare_object to emit the label. */
7795 void
7796 mips_declare_object_name (FILE *stream, const char *name,
7797 tree decl ATTRIBUTE_UNUSED)
7799 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7800 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7801 #endif
7803 size_directive_output = 0;
7804 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7806 HOST_WIDE_INT size;
7808 size_directive_output = 1;
7809 size = int_size_in_bytes (TREE_TYPE (decl));
7810 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7813 mips_declare_object (stream, name, "", ":\n");
7816 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7818 void
7819 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7821 const char *name;
7823 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7824 if (!flag_inhibit_size_directive
7825 && DECL_SIZE (decl) != 0
7826 && !at_end
7827 && top_level
7828 && DECL_INITIAL (decl) == error_mark_node
7829 && !size_directive_output)
7831 HOST_WIDE_INT size;
7833 size_directive_output = 1;
7834 size = int_size_in_bytes (TREE_TYPE (decl));
7835 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7838 #endif
7840 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7841 with the current ABI. */
7843 static const char *
7844 mips_mdebug_abi_name (void)
7846 switch (mips_abi)
7848 case ABI_32:
7849 return "abi32";
7850 case ABI_O64:
7851 return "abiO64";
7852 case ABI_N32:
7853 return "abiN32";
7854 case ABI_64:
7855 return "abi64";
7856 case ABI_EABI:
7857 return TARGET_64BIT ? "eabi64" : "eabi32";
7858 default:
7859 gcc_unreachable ();
7863 /* Implement TARGET_ASM_FILE_START. */
7865 static void
7866 mips_file_start (void)
7868 default_file_start ();
7870 /* Generate a special section to describe the ABI switches used to
7871 produce the resultant binary. This is unnecessary on IRIX and
7872 causes unwanted warnings from the native linker. */
7873 if (!TARGET_IRIX)
7875 /* Record the ABI itself. Modern versions of binutils encode
7876 this information in the ELF header flags, but GDB needs the
7877 information in order to correctly debug binaries produced by
7878 older binutils. See the function mips_gdbarch_init in
7879 gdb/mips-tdep.c. */
7880 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7881 mips_mdebug_abi_name ());
7883 /* There is no ELF header flag to distinguish long32 forms of the
7884 EABI from long64 forms. Emit a special section to help tools
7885 such as GDB. Do the same for o64, which is sometimes used with
7886 -mlong64. */
7887 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7888 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7889 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7891 #ifdef HAVE_AS_GNU_ATTRIBUTE
7892 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7893 (TARGET_HARD_FLOAT_ABI
7894 ? (TARGET_DOUBLE_FLOAT
7895 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7896 #endif
7899 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7900 if (TARGET_ABICALLS)
7902 fprintf (asm_out_file, "\t.abicalls\n");
7903 if (TARGET_ABICALLS_PIC0)
7904 fprintf (asm_out_file, "\t.option\tpic0\n");
7907 if (flag_verbose_asm)
7908 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7909 ASM_COMMENT_START,
7910 mips_small_data_threshold, mips_arch_info->name, mips_isa);
7913 /* Make the last instruction frame-related and note that it performs
7914 the operation described by FRAME_PATTERN. */
7916 static void
7917 mips_set_frame_expr (rtx frame_pattern)
7919 rtx insn;
7921 insn = get_last_insn ();
7922 RTX_FRAME_RELATED_P (insn) = 1;
7923 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7924 frame_pattern,
7925 REG_NOTES (insn));
7928 /* Return a frame-related rtx that stores REG at MEM.
7929 REG must be a single register. */
7931 static rtx
7932 mips_frame_set (rtx mem, rtx reg)
7934 rtx set;
7936 /* If we're saving the return address register and the DWARF return
7937 address column differs from the hard register number, adjust the
7938 note reg to refer to the former. */
7939 if (REGNO (reg) == GP_REG_FIRST + 31
7940 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7941 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7943 set = gen_rtx_SET (VOIDmode, mem, reg);
7944 RTX_FRAME_RELATED_P (set) = 1;
7946 return set;
7949 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7950 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7951 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7952 static const unsigned char mips16e_s2_s8_regs[] = {
7953 30, 23, 22, 21, 20, 19, 18
7955 static const unsigned char mips16e_a0_a3_regs[] = {
7956 4, 5, 6, 7
7959 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7960 ordered from the uppermost in memory to the lowest in memory. */
7961 static const unsigned char mips16e_save_restore_regs[] = {
7962 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7965 /* Return the index of the lowest X in the range [0, SIZE) for which
7966 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7968 static unsigned int
7969 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7970 unsigned int size)
7972 unsigned int i;
7974 for (i = 0; i < size; i++)
7975 if (BITSET_P (mask, regs[i]))
7976 break;
7978 return i;
7981 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7982 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7983 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7984 is true for all indexes (X, SIZE). */
7986 static void
7987 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7988 unsigned int size, unsigned int *num_regs_ptr)
7990 unsigned int i;
7992 i = mips16e_find_first_register (*mask_ptr, regs, size);
7993 for (i++; i < size; i++)
7994 if (!BITSET_P (*mask_ptr, regs[i]))
7996 *num_regs_ptr += 1;
7997 *mask_ptr |= 1 << regs[i];
8001 /* Return a simplified form of X using the register values in REG_VALUES.
8002 REG_VALUES[R] is the last value assigned to hard register R, or null
8003 if R has not been modified.
8005 This function is rather limited, but is good enough for our purposes. */
8007 static rtx
8008 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8010 x = avoid_constant_pool_reference (x);
8012 if (UNARY_P (x))
8014 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8015 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8016 x0, GET_MODE (XEXP (x, 0)));
8019 if (ARITHMETIC_P (x))
8021 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8022 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8023 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8026 if (REG_P (x)
8027 && reg_values[REGNO (x)]
8028 && !rtx_unstable_p (reg_values[REGNO (x)]))
8029 return reg_values[REGNO (x)];
8031 return x;
8034 /* Return true if (set DEST SRC) stores an argument register into its
8035 caller-allocated save slot, storing the number of that argument
8036 register in *REGNO_PTR if so. REG_VALUES is as for
8037 mips16e_collect_propagate_value. */
8039 static bool
8040 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8041 unsigned int *regno_ptr)
8043 unsigned int argno, regno;
8044 HOST_WIDE_INT offset, required_offset;
8045 rtx addr, base;
8047 /* Check that this is a word-mode store. */
8048 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8049 return false;
8051 /* Check that the register being saved is an unmodified argument
8052 register. */
8053 regno = REGNO (src);
8054 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
8055 return false;
8056 argno = regno - GP_ARG_FIRST;
8058 /* Check whether the address is an appropriate stack-pointer or
8059 frame-pointer access. */
8060 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8061 mips_split_plus (addr, &base, &offset);
8062 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8063 if (base == hard_frame_pointer_rtx)
8064 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
8065 else if (base != stack_pointer_rtx)
8066 return false;
8067 if (offset != required_offset)
8068 return false;
8070 *regno_ptr = regno;
8071 return true;
8074 /* A subroutine of mips_expand_prologue, called only when generating
8075 MIPS16e SAVE instructions. Search the start of the function for any
8076 instructions that save argument registers into their caller-allocated
8077 save slots. Delete such instructions and return a value N such that
8078 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8079 instructions redundant. */
8081 static unsigned int
8082 mips16e_collect_argument_saves (void)
8084 rtx reg_values[FIRST_PSEUDO_REGISTER];
8085 rtx insn, next, set, dest, src;
8086 unsigned int nargs, regno;
8088 push_topmost_sequence ();
8089 nargs = 0;
8090 memset (reg_values, 0, sizeof (reg_values));
8091 for (insn = get_insns (); insn; insn = next)
8093 next = NEXT_INSN (insn);
8094 if (NOTE_P (insn))
8095 continue;
8097 if (!INSN_P (insn))
8098 break;
8100 set = PATTERN (insn);
8101 if (GET_CODE (set) != SET)
8102 break;
8104 dest = SET_DEST (set);
8105 src = SET_SRC (set);
8106 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
8108 if (!BITSET_P (cfun->machine->frame.mask, regno))
8110 delete_insn (insn);
8111 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8114 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8115 reg_values[REGNO (dest)]
8116 = mips16e_collect_propagate_value (src, reg_values);
8117 else
8118 break;
8120 pop_topmost_sequence ();
8122 return nargs;
8125 /* Return a move between register REGNO and memory location SP + OFFSET.
8126 Make the move a load if RESTORE_P, otherwise make it a frame-related
8127 store. */
8129 static rtx
8130 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8131 unsigned int regno)
8133 rtx reg, mem;
8135 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8136 reg = gen_rtx_REG (SImode, regno);
8137 return (restore_p
8138 ? gen_rtx_SET (VOIDmode, reg, mem)
8139 : mips_frame_set (mem, reg));
8142 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8143 The instruction must:
8145 - Allocate or deallocate SIZE bytes in total; SIZE is known
8146 to be nonzero.
8148 - Save or restore as many registers in *MASK_PTR as possible.
8149 The instruction saves the first registers at the top of the
8150 allocated area, with the other registers below it.
8152 - Save NARGS argument registers above the allocated area.
8154 (NARGS is always zero if RESTORE_P.)
8156 The SAVE and RESTORE instructions cannot save and restore all general
8157 registers, so there may be some registers left over for the caller to
8158 handle. Destructively modify *MASK_PTR so that it contains the registers
8159 that still need to be saved or restored. The caller can save these
8160 registers in the memory immediately below *OFFSET_PTR, which is a
8161 byte offset from the bottom of the allocated stack area. */
8163 static rtx
8164 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8165 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8166 HOST_WIDE_INT size)
8168 rtx pattern, set;
8169 HOST_WIDE_INT offset, top_offset;
8170 unsigned int i, regno;
8171 int n;
8173 gcc_assert (cfun->machine->frame.num_fp == 0);
8175 /* Calculate the number of elements in the PARALLEL. We need one element
8176 for the stack adjustment, one for each argument register save, and one
8177 for each additional register move. */
8178 n = 1 + nargs;
8179 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8180 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8181 n++;
8183 /* Create the final PARALLEL. */
8184 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8185 n = 0;
8187 /* Add the stack pointer adjustment. */
8188 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8189 plus_constant (stack_pointer_rtx,
8190 restore_p ? size : -size));
8191 RTX_FRAME_RELATED_P (set) = 1;
8192 XVECEXP (pattern, 0, n++) = set;
8194 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8195 top_offset = restore_p ? size : 0;
8197 /* Save the arguments. */
8198 for (i = 0; i < nargs; i++)
8200 offset = top_offset + i * UNITS_PER_WORD;
8201 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8202 XVECEXP (pattern, 0, n++) = set;
8205 /* Then fill in the other register moves. */
8206 offset = top_offset;
8207 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8209 regno = mips16e_save_restore_regs[i];
8210 if (BITSET_P (*mask_ptr, regno))
8212 offset -= UNITS_PER_WORD;
8213 set = mips16e_save_restore_reg (restore_p, offset, regno);
8214 XVECEXP (pattern, 0, n++) = set;
8215 *mask_ptr &= ~(1 << regno);
8219 /* Tell the caller what offset it should use for the remaining registers. */
8220 *offset_ptr = size + (offset - top_offset);
8222 gcc_assert (n == XVECLEN (pattern, 0));
8224 return pattern;
8227 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8228 pointer. Return true if PATTERN matches the kind of instruction
8229 generated by mips16e_build_save_restore. If INFO is nonnull,
8230 initialize it when returning true. */
8232 bool
8233 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8234 struct mips16e_save_restore_info *info)
8236 unsigned int i, nargs, mask, extra;
8237 HOST_WIDE_INT top_offset, save_offset, offset;
8238 rtx set, reg, mem, base;
8239 int n;
8241 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8242 return false;
8244 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8245 top_offset = adjust > 0 ? adjust : 0;
8247 /* Interpret all other members of the PARALLEL. */
8248 save_offset = top_offset - UNITS_PER_WORD;
8249 mask = 0;
8250 nargs = 0;
8251 i = 0;
8252 for (n = 1; n < XVECLEN (pattern, 0); n++)
8254 /* Check that we have a SET. */
8255 set = XVECEXP (pattern, 0, n);
8256 if (GET_CODE (set) != SET)
8257 return false;
8259 /* Check that the SET is a load (if restoring) or a store
8260 (if saving). */
8261 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8262 if (!MEM_P (mem))
8263 return false;
8265 /* Check that the address is the sum of the stack pointer and a
8266 possibly-zero constant offset. */
8267 mips_split_plus (XEXP (mem, 0), &base, &offset);
8268 if (base != stack_pointer_rtx)
8269 return false;
8271 /* Check that SET's other operand is a register. */
8272 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8273 if (!REG_P (reg))
8274 return false;
8276 /* Check for argument saves. */
8277 if (offset == top_offset + nargs * UNITS_PER_WORD
8278 && REGNO (reg) == GP_ARG_FIRST + nargs)
8279 nargs++;
8280 else if (offset == save_offset)
8282 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8283 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8284 return false;
8286 mask |= 1 << REGNO (reg);
8287 save_offset -= UNITS_PER_WORD;
8289 else
8290 return false;
8293 /* Check that the restrictions on register ranges are met. */
8294 extra = 0;
8295 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8296 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8297 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8298 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8299 if (extra != 0)
8300 return false;
8302 /* Make sure that the topmost argument register is not saved twice.
8303 The checks above ensure that the same is then true for the other
8304 argument registers. */
8305 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8306 return false;
8308 /* Pass back information, if requested. */
8309 if (info)
8311 info->nargs = nargs;
8312 info->mask = mask;
8313 info->size = (adjust > 0 ? adjust : -adjust);
8316 return true;
8319 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8320 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8321 the null terminator. */
8323 static char *
8324 mips16e_add_register_range (char *s, unsigned int min_reg,
8325 unsigned int max_reg)
8327 if (min_reg != max_reg)
8328 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8329 else
8330 s += sprintf (s, ",%s", reg_names[min_reg]);
8331 return s;
8334 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8335 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8337 const char *
8338 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8340 static char buffer[300];
8342 struct mips16e_save_restore_info info;
8343 unsigned int i, end;
8344 char *s;
8346 /* Parse the pattern. */
8347 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8348 gcc_unreachable ();
8350 /* Add the mnemonic. */
8351 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8352 s += strlen (s);
8354 /* Save the arguments. */
8355 if (info.nargs > 1)
8356 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8357 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8358 else if (info.nargs == 1)
8359 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8361 /* Emit the amount of stack space to allocate or deallocate. */
8362 s += sprintf (s, "%d", (int) info.size);
8364 /* Save or restore $16. */
8365 if (BITSET_P (info.mask, 16))
8366 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8368 /* Save or restore $17. */
8369 if (BITSET_P (info.mask, 17))
8370 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8372 /* Save or restore registers in the range $s2...$s8, which
8373 mips16e_s2_s8_regs lists in decreasing order. Note that this
8374 is a software register range; the hardware registers are not
8375 numbered consecutively. */
8376 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8377 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8378 if (i < end)
8379 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8380 mips16e_s2_s8_regs[i]);
8382 /* Save or restore registers in the range $a0...$a3. */
8383 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8384 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8385 if (i < end)
8386 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8387 mips16e_a0_a3_regs[end - 1]);
8389 /* Save or restore $31. */
8390 if (BITSET_P (info.mask, 31))
8391 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8393 return buffer;
8396 /* Return true if the current function has an insn that implicitly
8397 refers to $gp. */
8399 static bool
8400 mips_function_has_gp_insn (void)
8402 /* Don't bother rechecking if we found one last time. */
8403 if (!cfun->machine->has_gp_insn_p)
8405 rtx insn;
8407 push_topmost_sequence ();
8408 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8409 if (USEFUL_INSN_P (insn)
8410 && (get_attr_got (insn) != GOT_UNSET
8411 || mips_small_data_pattern_p (PATTERN (insn))))
8413 cfun->machine->has_gp_insn_p = true;
8414 break;
8416 pop_topmost_sequence ();
8418 return cfun->machine->has_gp_insn_p;
8421 /* Return true if the current function returns its value in a floating-point
8422 register in MIPS16 mode. */
8424 static bool
8425 mips16_cfun_returns_in_fpr_p (void)
8427 tree return_type = DECL_RESULT (current_function_decl);
8428 return (TARGET_MIPS16
8429 && TARGET_HARD_FLOAT_ABI
8430 && !aggregate_value_p (return_type, current_function_decl)
8431 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
8434 /* Return the register that should be used as the global pointer
8435 within this function. Return INVALID_REGNUM if the function
8436 doesn't need a global pointer. */
8438 static unsigned int
8439 mips_global_pointer (void)
8441 unsigned int regno;
8443 /* $gp is always available unless we're using a GOT. */
8444 if (!TARGET_USE_GOT)
8445 return GLOBAL_POINTER_REGNUM;
8447 /* We must always provide $gp when it is used implicitly. */
8448 if (!TARGET_EXPLICIT_RELOCS)
8449 return GLOBAL_POINTER_REGNUM;
8451 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
8452 a valid gp. */
8453 if (crtl->profile)
8454 return GLOBAL_POINTER_REGNUM;
8456 /* If the function has a nonlocal goto, $gp must hold the correct
8457 global pointer for the target function. */
8458 if (crtl->has_nonlocal_goto)
8459 return GLOBAL_POINTER_REGNUM;
8461 /* There's no need to initialize $gp if it isn't referenced now,
8462 and if we can be sure that no new references will be added during
8463 or after reload. */
8464 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
8465 && !mips_function_has_gp_insn ())
8467 /* The function doesn't use $gp at the moment. If we're generating
8468 -call_nonpic code, no new uses will be introduced during or after
8469 reload. */
8470 if (TARGET_ABICALLS_PIC0)
8471 return INVALID_REGNUM;
8473 /* We need to handle the following implicit gp references:
8475 - Reload can sometimes introduce constant pool references
8476 into a function that otherwise didn't need them. For example,
8477 suppose we have an instruction like:
8479 (set (reg:DF R1) (float:DF (reg:SI R2)))
8481 If R2 turns out to be constant such as 1, the instruction may
8482 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
8483 the option of using this constant if R2 doesn't get allocated
8484 to a register.
8486 In cases like these, reload will have added the constant to the
8487 pool but no instruction will yet refer to it.
8489 - MIPS16 functions that return in FPRs need to call an
8490 external libgcc routine. */
8491 if (!crtl->uses_const_pool
8492 && !mips16_cfun_returns_in_fpr_p ())
8493 return INVALID_REGNUM;
8496 /* We need a global pointer, but perhaps we can use a call-clobbered
8497 register instead of $gp. */
8498 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
8499 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8500 if (!df_regs_ever_live_p (regno)
8501 && call_really_used_regs[regno]
8502 && !fixed_regs[regno]
8503 && regno != PIC_FUNCTION_ADDR_REGNUM)
8504 return regno;
8506 return GLOBAL_POINTER_REGNUM;
8509 /* Return true if REGNO is a register that is ordinarily call-clobbered
8510 but must nevertheless be preserved by an interrupt handler. */
8512 static bool
8513 mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
8515 if (MD_REG_P (regno))
8516 return true;
8518 if (TARGET_DSP && DSP_ACC_REG_P (regno))
8519 return true;
8521 if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
8523 /* $0 is hard-wired. */
8524 if (regno == GP_REG_FIRST)
8525 return false;
8527 /* The interrupt handler can treat kernel registers as
8528 scratch registers. */
8529 if (KERNEL_REG_P (regno))
8530 return false;
8532 /* The function will return the stack pointer to its original value
8533 anyway. */
8534 if (regno == STACK_POINTER_REGNUM)
8535 return false;
8537 /* Otherwise, return true for registers that aren't ordinarily
8538 call-clobbered. */
8539 return call_really_used_regs[regno];
8542 return false;
8545 /* Return true if the current function should treat register REGNO
8546 as call-saved. */
8548 static bool
8549 mips_cfun_call_saved_reg_p (unsigned int regno)
8551 /* Interrupt handlers need to save extra registers. */
8552 if (cfun->machine->interrupt_handler_p
8553 && mips_interrupt_extra_call_saved_reg_p (regno))
8554 return true;
8556 /* call_insns preserve $28 unless they explicitly say otherwise,
8557 so call_really_used_regs[] treats $28 as call-saved. However,
8558 we want the ABI property rather than the default call_insn
8559 property here. */
8560 return (regno == GLOBAL_POINTER_REGNUM
8561 ? TARGET_CALL_SAVED_GP
8562 : !call_really_used_regs[regno]);
8565 /* Return true if the function body might clobber register REGNO.
8566 We know that REGNO is call-saved. */
8568 static bool
8569 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
8571 /* Some functions should be treated as clobbering all call-saved
8572 registers. */
8573 if (crtl->saves_all_registers)
8574 return true;
8576 /* DF handles cases where a register is explicitly referenced in
8577 the rtl. Incoming values are passed in call-clobbered registers,
8578 so we can assume that any live call-saved register is set within
8579 the function. */
8580 if (df_regs_ever_live_p (regno))
8581 return true;
8583 /* Check for registers that are clobbered by FUNCTION_PROFILER.
8584 These clobbers are not explicit in the rtl. */
8585 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
8586 return true;
8588 /* If we're using a call-saved global pointer, the function's
8589 prologue will need to set it up. */
8590 if (cfun->machine->global_pointer == regno)
8591 return true;
8593 /* The function's prologue will need to set the frame pointer if
8594 frame_pointer_needed. */
8595 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
8596 return true;
8598 /* If a MIPS16 function returns a value in FPRs, its epilogue
8599 will need to call an external libgcc routine. This yet-to-be
8600 generated call_insn will clobber $31. */
8601 if (regno == GP_REG_FIRST + 31 && mips16_cfun_returns_in_fpr_p ())
8602 return true;
8604 /* If REGNO is ordinarily call-clobbered, we must assume that any
8605 called function could modify it. */
8606 if (cfun->machine->interrupt_handler_p
8607 && !current_function_is_leaf
8608 && mips_interrupt_extra_call_saved_reg_p (regno))
8609 return true;
8611 return false;
8614 /* Return true if the current function must save register REGNO. */
8616 static bool
8617 mips_save_reg_p (unsigned int regno)
8619 if (mips_cfun_call_saved_reg_p (regno))
8621 if (mips_cfun_might_clobber_call_saved_reg_p (regno))
8622 return true;
8624 /* Save both registers in an FPR pair if either one is used. This is
8625 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
8626 register to be used without the even register. */
8627 if (FP_REG_P (regno)
8628 && MAX_FPRS_PER_FMT == 2
8629 && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
8630 return true;
8633 /* We need to save the incoming return address if __builtin_eh_return
8634 is being used to set a different return address. */
8635 if (regno == GP_REG_FIRST + 31 && crtl->calls_eh_return)
8636 return true;
8638 return false;
8641 /* Populate the current function's mips_frame_info structure.
8643 MIPS stack frames look like:
8645 +-------------------------------+
8647 | incoming stack arguments |
8649 +-------------------------------+
8651 | caller-allocated save area |
8652 A | for register arguments |
8654 +-------------------------------+ <-- incoming stack pointer
8656 | callee-allocated save area |
8657 B | for arguments that are |
8658 | split between registers and |
8659 | the stack |
8661 +-------------------------------+ <-- arg_pointer_rtx
8663 C | callee-allocated save area |
8664 | for register varargs |
8666 +-------------------------------+ <-- frame_pointer_rtx
8667 | | + cop0_sp_offset
8668 | COP0 reg save area | + UNITS_PER_WORD
8670 +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
8671 | | + UNITS_PER_WORD
8672 | accumulator save area |
8674 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
8675 | | + UNITS_PER_HWFPVALUE
8676 | FPR save area |
8678 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8679 | | + UNITS_PER_WORD
8680 | GPR save area |
8682 +-------------------------------+
8683 | | \
8684 | local variables | | var_size
8685 | | /
8686 +-------------------------------+
8687 | | \
8688 | $gp save area | | cprestore_size
8689 | | /
8690 P +-------------------------------+ <-- hard_frame_pointer_rtx for
8691 | | MIPS16 code
8692 | outgoing stack arguments |
8694 +-------------------------------+
8696 | caller-allocated save area |
8697 | for register arguments |
8699 +-------------------------------+ <-- stack_pointer_rtx
8700 frame_pointer_rtx
8701 hard_frame_pointer_rtx for
8702 non-MIPS16 code.
8704 At least two of A, B and C will be empty.
8706 Dynamic stack allocations such as alloca insert data at point P.
8707 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8708 hard_frame_pointer_rtx unchanged. */
8710 static void
8711 mips_compute_frame_info (void)
8713 struct mips_frame_info *frame;
8714 HOST_WIDE_INT offset, size;
8715 unsigned int regno, i;
8717 /* Set this function's interrupt properties. */
8718 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
8720 if (!ISA_MIPS32R2)
8721 error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
8722 else if (TARGET_HARD_FLOAT)
8723 error ("the %<interrupt%> attribute requires %<-msoft-float%>");
8724 else if (TARGET_MIPS16)
8725 error ("interrupt handlers cannot be MIPS16 functions");
8726 else
8728 cfun->machine->interrupt_handler_p = true;
8729 cfun->machine->use_shadow_register_set_p =
8730 mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
8731 cfun->machine->keep_interrupts_masked_p =
8732 mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
8733 cfun->machine->use_debug_exception_return_p =
8734 mips_use_debug_exception_return_p (TREE_TYPE
8735 (current_function_decl));
8739 frame = &cfun->machine->frame;
8740 memset (frame, 0, sizeof (*frame));
8741 size = get_frame_size ();
8743 cfun->machine->global_pointer = mips_global_pointer ();
8745 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8746 area and the $gp save slot. This area isn't needed in leaf functions,
8747 but if the target-independent frame size is nonzero, we're committed
8748 to allocating it anyway. */
8749 if (size == 0 && current_function_is_leaf)
8751 /* The MIPS 3.0 linker does not like functions that dynamically
8752 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8753 looks like we are trying to create a second frame pointer to the
8754 function, so allocate some stack space to make it happy. */
8755 if (cfun->calls_alloca)
8756 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8757 else
8758 frame->args_size = 0;
8759 frame->cprestore_size = 0;
8761 else
8763 frame->args_size = crtl->outgoing_args_size;
8764 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8766 offset = frame->args_size + frame->cprestore_size;
8768 /* Move above the local variables. */
8769 frame->var_size = MIPS_STACK_ALIGN (size);
8770 offset += frame->var_size;
8772 /* Find out which GPRs we need to save. */
8773 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8774 if (mips_save_reg_p (regno))
8776 frame->num_gp++;
8777 frame->mask |= 1 << (regno - GP_REG_FIRST);
8780 /* If this function calls eh_return, we must also save and restore the
8781 EH data registers. */
8782 if (crtl->calls_eh_return)
8783 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8785 frame->num_gp++;
8786 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8789 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8790 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8791 save all later registers too. */
8792 if (GENERATE_MIPS16E_SAVE_RESTORE)
8794 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8795 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8796 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8797 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8800 /* Move above the GPR save area. */
8801 if (frame->num_gp > 0)
8803 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8804 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8807 /* Find out which FPRs we need to save. This loop must iterate over
8808 the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
8809 if (TARGET_HARD_FLOAT)
8810 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8811 if (mips_save_reg_p (regno))
8813 frame->num_fp += MAX_FPRS_PER_FMT;
8814 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8817 /* Move above the FPR save area. */
8818 if (frame->num_fp > 0)
8820 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8821 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8824 /* Add in space for the interrupt context information. */
8825 if (cfun->machine->interrupt_handler_p)
8827 /* Check HI/LO. */
8828 if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
8830 frame->num_acc++;
8831 frame->acc_mask |= (1 << 0);
8834 /* Check accumulators 1, 2, 3. */
8835 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
8836 if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
8838 frame->num_acc++;
8839 frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
8842 /* All interrupt context functions need space to preserve STATUS. */
8843 frame->num_cop0_regs++;
8845 /* If we don't keep interrupts masked, we need to save EPC. */
8846 if (!cfun->machine->keep_interrupts_masked_p)
8847 frame->num_cop0_regs++;
8850 /* Move above the accumulator save area. */
8851 if (frame->num_acc > 0)
8853 /* Each accumulator needs 2 words. */
8854 offset += frame->num_acc * 2 * UNITS_PER_WORD;
8855 frame->acc_sp_offset = offset - UNITS_PER_WORD;
8858 /* Move above the COP0 register save area. */
8859 if (frame->num_cop0_regs > 0)
8861 offset += frame->num_cop0_regs * UNITS_PER_WORD;
8862 frame->cop0_sp_offset = offset - UNITS_PER_WORD;
8865 /* Move above the callee-allocated varargs save area. */
8866 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8867 frame->arg_pointer_offset = offset;
8869 /* Move above the callee-allocated area for pretend stack arguments. */
8870 offset += crtl->args.pretend_args_size;
8871 frame->total_size = offset;
8873 /* Work out the offsets of the save areas from the top of the frame. */
8874 if (frame->gp_sp_offset > 0)
8875 frame->gp_save_offset = frame->gp_sp_offset - offset;
8876 if (frame->fp_sp_offset > 0)
8877 frame->fp_save_offset = frame->fp_sp_offset - offset;
8878 if (frame->acc_sp_offset > 0)
8879 frame->acc_save_offset = frame->acc_sp_offset - offset;
8880 if (frame->num_cop0_regs > 0)
8881 frame->cop0_save_offset = frame->cop0_sp_offset - offset;
8883 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8884 arguments. This tends to increase the chances of using unextended
8885 instructions for local variables and incoming arguments. */
8886 if (TARGET_MIPS16)
8887 frame->hard_frame_pointer_offset = frame->args_size;
8890 /* Return the style of GP load sequence that is being used for the
8891 current function. */
8893 enum mips_loadgp_style
8894 mips_current_loadgp_style (void)
8896 if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
8897 return LOADGP_NONE;
8899 if (TARGET_RTP_PIC)
8900 return LOADGP_RTP;
8902 if (TARGET_ABSOLUTE_ABICALLS)
8903 return LOADGP_ABSOLUTE;
8905 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8908 /* Implement FRAME_POINTER_REQUIRED. */
8910 bool
8911 mips_frame_pointer_required (void)
8913 /* If the function contains dynamic stack allocations, we need to
8914 use the frame pointer to access the static parts of the frame. */
8915 if (cfun->calls_alloca)
8916 return true;
8918 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8919 reload may be unable to compute the address of a local variable,
8920 since there is no way to add a large constant to the stack pointer
8921 without using a second temporary register. */
8922 if (TARGET_MIPS16)
8924 mips_compute_frame_info ();
8925 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8926 return true;
8929 return false;
8932 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8933 or argument pointer. TO is either the stack pointer or hard frame
8934 pointer. */
8936 HOST_WIDE_INT
8937 mips_initial_elimination_offset (int from, int to)
8939 HOST_WIDE_INT offset;
8941 mips_compute_frame_info ();
8943 /* Set OFFSET to the offset from the soft frame pointer, which is also
8944 the offset from the end-of-prologue stack pointer. */
8945 switch (from)
8947 case FRAME_POINTER_REGNUM:
8948 offset = 0;
8949 break;
8951 case ARG_POINTER_REGNUM:
8952 offset = cfun->machine->frame.arg_pointer_offset;
8953 break;
8955 default:
8956 gcc_unreachable ();
8959 if (to == HARD_FRAME_POINTER_REGNUM)
8960 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8962 return offset;
8965 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8967 static void
8968 mips_extra_live_on_entry (bitmap regs)
8970 if (TARGET_USE_GOT)
8972 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8973 the global pointer. */
8974 if (!TARGET_ABSOLUTE_ABICALLS)
8975 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8977 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
8978 the global pointer. */
8979 if (TARGET_MIPS16)
8980 bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
8982 /* See the comment above load_call<mode> for details. */
8983 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8987 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8988 previous frame. */
8991 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8993 if (count != 0)
8994 return const0_rtx;
8996 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8999 /* Emit code to change the current function's return address to
9000 ADDRESS. SCRATCH is available as a scratch register, if needed.
9001 ADDRESS and SCRATCH are both word-mode GPRs. */
9003 void
9004 mips_set_return_address (rtx address, rtx scratch)
9006 rtx slot_address;
9008 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
9009 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
9010 cfun->machine->frame.gp_sp_offset);
9011 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
9014 /* Return a MEM rtx for the cprestore slot, using TEMP as a temporary base
9015 register if need be. */
9017 static rtx
9018 mips_cprestore_slot (rtx temp)
9020 const struct mips_frame_info *frame;
9021 rtx base;
9022 HOST_WIDE_INT offset;
9024 frame = &cfun->machine->frame;
9025 if (frame_pointer_needed)
9027 base = hard_frame_pointer_rtx;
9028 offset = frame->args_size - frame->hard_frame_pointer_offset;
9030 else
9032 base = stack_pointer_rtx;
9033 offset = frame->args_size;
9035 return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
9038 /* Restore $gp from its save slot, using TEMP as a temporary base register
9039 if need be. This function is for o32 and o64 abicalls only. */
9041 void
9042 mips_restore_gp (rtx temp)
9044 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
9046 if (cfun->machine->global_pointer == INVALID_REGNUM)
9047 return;
9049 if (TARGET_MIPS16)
9051 mips_emit_move (temp, mips_cprestore_slot (temp));
9052 mips_emit_move (pic_offset_table_rtx, temp);
9054 else
9055 mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp));
9056 if (!TARGET_EXPLICIT_RELOCS)
9057 emit_insn (gen_blockage ());
9060 /* A function to save or store a register. The first argument is the
9061 register and the second is the stack slot. */
9062 typedef void (*mips_save_restore_fn) (rtx, rtx);
9064 /* Use FN to save or restore register REGNO. MODE is the register's
9065 mode and OFFSET is the offset of its save slot from the current
9066 stack pointer. */
9068 static void
9069 mips_save_restore_reg (enum machine_mode mode, int regno,
9070 HOST_WIDE_INT offset, mips_save_restore_fn fn)
9072 rtx mem;
9074 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
9075 fn (gen_rtx_REG (mode, regno), mem);
9078 /* Call FN for each accumlator that is saved by the current function.
9079 SP_OFFSET is the offset of the current stack pointer from the start
9080 of the frame. */
9082 static void
9083 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
9085 HOST_WIDE_INT offset;
9086 int regno;
9088 offset = cfun->machine->frame.acc_sp_offset - sp_offset;
9089 if (BITSET_P (cfun->machine->frame.acc_mask, 0))
9091 mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
9092 offset -= UNITS_PER_WORD;
9093 mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
9094 offset -= UNITS_PER_WORD;
9097 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
9098 if (BITSET_P (cfun->machine->frame.acc_mask,
9099 ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
9101 mips_save_restore_reg (word_mode, regno, offset, fn);
9102 offset -= UNITS_PER_WORD;
9106 /* Call FN for each register that is saved by the current function.
9107 SP_OFFSET is the offset of the current stack pointer from the start
9108 of the frame. */
9110 static void
9111 mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
9112 mips_save_restore_fn fn)
9114 enum machine_mode fpr_mode;
9115 HOST_WIDE_INT offset;
9116 int regno;
9118 /* Save registers starting from high to low. The debuggers prefer at least
9119 the return register be stored at func+4, and also it allows us not to
9120 need a nop in the epilogue if at least one register is reloaded in
9121 addition to return address. */
9122 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
9123 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
9124 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
9126 mips_save_restore_reg (word_mode, regno, offset, fn);
9127 offset -= UNITS_PER_WORD;
9130 /* This loop must iterate over the same space as its companion in
9131 mips_compute_frame_info. */
9132 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
9133 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
9134 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
9135 regno >= FP_REG_FIRST;
9136 regno -= MAX_FPRS_PER_FMT)
9137 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
9139 mips_save_restore_reg (fpr_mode, regno, offset, fn);
9140 offset -= GET_MODE_SIZE (fpr_mode);
9144 /* If we're generating n32 or n64 abicalls, and the current function
9145 does not use $28 as its global pointer, emit a cplocal directive.
9146 Use pic_offset_table_rtx as the argument to the directive. */
9148 static void
9149 mips_output_cplocal (void)
9151 if (!TARGET_EXPLICIT_RELOCS
9152 && cfun->machine->global_pointer != INVALID_REGNUM
9153 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
9154 output_asm_insn (".cplocal %+", 0);
9157 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
9159 static void
9160 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9162 const char *fnname;
9164 #ifdef SDB_DEBUGGING_INFO
9165 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
9166 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
9167 #endif
9169 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
9170 floating-point arguments. */
9171 if (TARGET_MIPS16
9172 && TARGET_HARD_FLOAT_ABI
9173 && crtl->args.info.fp_code != 0)
9174 mips16_build_function_stub ();
9176 /* Get the function name the same way that toplev.c does before calling
9177 assemble_start_function. This is needed so that the name used here
9178 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
9179 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9180 mips_start_function_definition (fnname, TARGET_MIPS16);
9182 /* Stop mips_file_end from treating this function as external. */
9183 if (TARGET_IRIX && mips_abi == ABI_32)
9184 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
9186 /* Output MIPS-specific frame information. */
9187 if (!flag_inhibit_size_directive)
9189 const struct mips_frame_info *frame;
9191 frame = &cfun->machine->frame;
9193 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
9194 fprintf (file,
9195 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
9196 "# vars= " HOST_WIDE_INT_PRINT_DEC
9197 ", regs= %d/%d"
9198 ", args= " HOST_WIDE_INT_PRINT_DEC
9199 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
9200 reg_names[frame_pointer_needed
9201 ? HARD_FRAME_POINTER_REGNUM
9202 : STACK_POINTER_REGNUM],
9203 (frame_pointer_needed
9204 ? frame->total_size - frame->hard_frame_pointer_offset
9205 : frame->total_size),
9206 reg_names[GP_REG_FIRST + 31],
9207 frame->var_size,
9208 frame->num_gp, frame->num_fp,
9209 frame->args_size,
9210 frame->cprestore_size);
9212 /* .mask MASK, OFFSET. */
9213 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
9214 frame->mask, frame->gp_save_offset);
9216 /* .fmask MASK, OFFSET. */
9217 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
9218 frame->fmask, frame->fp_save_offset);
9221 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
9222 Also emit the ".set noreorder; .set nomacro" sequence for functions
9223 that need it. */
9224 if (mips_current_loadgp_style () == LOADGP_OLDABI)
9226 if (TARGET_MIPS16)
9228 /* This is a fixed-form sequence. The position of the
9229 first two instructions is important because of the
9230 way _gp_disp is defined. */
9231 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
9232 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
9233 output_asm_insn ("sll\t$2,16", 0);
9234 output_asm_insn ("addu\t$2,$3", 0);
9236 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
9237 else if (!cfun->machine->all_noreorder_p)
9238 output_asm_insn ("%(.cpload\t%^%)", 0);
9239 else
9240 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
9242 else if (cfun->machine->all_noreorder_p)
9243 output_asm_insn ("%(%<", 0);
9245 /* Tell the assembler which register we're using as the global
9246 pointer. This is needed for thunks, since they can use either
9247 explicit relocs or assembler macros. */
9248 mips_output_cplocal ();
9251 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
9253 static void
9254 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9255 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9257 const char *fnname;
9259 /* Reinstate the normal $gp. */
9260 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
9261 mips_output_cplocal ();
9263 if (cfun->machine->all_noreorder_p)
9265 /* Avoid using %>%) since it adds excess whitespace. */
9266 output_asm_insn (".set\tmacro", 0);
9267 output_asm_insn (".set\treorder", 0);
9268 set_noreorder = set_nomacro = 0;
9271 /* Get the function name the same way that toplev.c does before calling
9272 assemble_start_function. This is needed so that the name used here
9273 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
9274 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9275 mips_end_function_definition (fnname);
9278 /* Save register REG to MEM. Make the instruction frame-related. */
9280 static void
9281 mips_save_reg (rtx reg, rtx mem)
9283 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
9285 rtx x1, x2;
9287 if (mips_split_64bit_move_p (mem, reg))
9288 mips_split_doubleword_move (mem, reg);
9289 else
9290 mips_emit_move (mem, reg);
9292 x1 = mips_frame_set (mips_subword (mem, false),
9293 mips_subword (reg, false));
9294 x2 = mips_frame_set (mips_subword (mem, true),
9295 mips_subword (reg, true));
9296 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
9298 else
9300 if (REGNO (reg) == HI_REGNUM)
9302 if (TARGET_64BIT)
9303 emit_insn (gen_mfhidi_ti (MIPS_PROLOGUE_TEMP (DImode),
9304 gen_rtx_REG (TImode, MD_REG_FIRST)));
9305 else
9306 emit_insn (gen_mfhisi_di (MIPS_PROLOGUE_TEMP (SImode),
9307 gen_rtx_REG (DImode, MD_REG_FIRST)));
9308 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
9310 else if ((TARGET_MIPS16
9311 && REGNO (reg) != GP_REG_FIRST + 31
9312 && !M16_REG_P (REGNO (reg)))
9313 || ACC_REG_P (REGNO (reg)))
9315 /* If the register has no direct store instruction, move it
9316 through a temporary. Note that there's a special MIPS16
9317 instruction to save $31. */
9318 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
9319 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
9321 else
9322 mips_emit_move (mem, reg);
9324 mips_set_frame_expr (mips_frame_set (mem, reg));
9328 /* The __gnu_local_gp symbol. */
9330 static GTY(()) rtx mips_gnu_local_gp;
9332 /* If we're generating n32 or n64 abicalls, emit instructions
9333 to set up the global pointer. */
9335 static void
9336 mips_emit_loadgp (void)
9338 rtx addr, offset, incoming_address, base, index, pic_reg;
9340 pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
9341 switch (mips_current_loadgp_style ())
9343 case LOADGP_ABSOLUTE:
9344 if (mips_gnu_local_gp == NULL)
9346 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
9347 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
9349 emit_insn (Pmode == SImode
9350 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
9351 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
9352 break;
9354 case LOADGP_OLDABI:
9355 /* Added by mips_output_function_prologue. */
9356 break;
9358 case LOADGP_NEWABI:
9359 addr = XEXP (DECL_RTL (current_function_decl), 0);
9360 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
9361 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
9362 emit_insn (Pmode == SImode
9363 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
9364 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
9365 break;
9367 case LOADGP_RTP:
9368 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
9369 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
9370 emit_insn (Pmode == SImode
9371 ? gen_loadgp_rtp_si (pic_reg, base, index)
9372 : gen_loadgp_rtp_di (pic_reg, base, index));
9373 break;
9375 default:
9376 return;
9379 if (TARGET_MIPS16)
9380 emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));
9382 /* Emit a blockage if there are implicit uses of the GP register.
9383 This includes profiled functions, because FUNCTION_PROFILE uses
9384 a jal macro. */
9385 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
9386 emit_insn (gen_loadgp_blockage ());
9389 /* A for_each_rtx callback. Stop the search if *X is a kernel register. */
9391 static int
9392 mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
9394 return GET_CODE (*x) == REG && KERNEL_REG_P (REGNO (*x));
9397 /* Expand the "prologue" pattern. */
9399 void
9400 mips_expand_prologue (void)
9402 const struct mips_frame_info *frame;
9403 HOST_WIDE_INT size;
9404 unsigned int nargs;
9405 rtx insn;
9407 if (cfun->machine->global_pointer != INVALID_REGNUM)
9408 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
9410 frame = &cfun->machine->frame;
9411 size = frame->total_size;
9413 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
9414 bytes beforehand; this is enough to cover the register save area
9415 without going out of range. */
9416 if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
9417 || frame->num_cop0_regs > 0)
9419 HOST_WIDE_INT step1;
9421 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
9422 if (GENERATE_MIPS16E_SAVE_RESTORE)
9424 HOST_WIDE_INT offset;
9425 unsigned int mask, regno;
9427 /* Try to merge argument stores into the save instruction. */
9428 nargs = mips16e_collect_argument_saves ();
9430 /* Build the save instruction. */
9431 mask = frame->mask;
9432 insn = mips16e_build_save_restore (false, &mask, &offset,
9433 nargs, step1);
9434 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9435 size -= step1;
9437 /* Check if we need to save other registers. */
9438 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9439 if (BITSET_P (mask, regno - GP_REG_FIRST))
9441 offset -= UNITS_PER_WORD;
9442 mips_save_restore_reg (word_mode, regno,
9443 offset, mips_save_reg);
9446 else
9448 if (cfun->machine->interrupt_handler_p)
9450 HOST_WIDE_INT offset;
9451 rtx mem;
9453 /* If this interrupt is using a shadow register set, we need to
9454 get the stack pointer from the previous register set. */
9455 if (cfun->machine->use_shadow_register_set_p)
9456 emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
9457 stack_pointer_rtx));
9459 if (!cfun->machine->keep_interrupts_masked_p)
9461 /* Move from COP0 Cause to K0. */
9462 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
9463 gen_rtx_REG (SImode,
9464 COP0_CAUSE_REG_NUM)));
9465 /* Move from COP0 EPC to K1. */
9466 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
9467 gen_rtx_REG (SImode,
9468 COP0_EPC_REG_NUM)));
9471 /* Allocate the first part of the frame. */
9472 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
9473 GEN_INT (-step1));
9474 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9475 size -= step1;
9477 /* Start at the uppermost location for saving. */
9478 offset = frame->cop0_sp_offset - size;
9479 if (!cfun->machine->keep_interrupts_masked_p)
9481 /* Push EPC into its stack slot. */
9482 mem = gen_frame_mem (word_mode,
9483 plus_constant (stack_pointer_rtx,
9484 offset));
9485 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
9486 offset -= UNITS_PER_WORD;
9489 /* Move from COP0 Status to K1. */
9490 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
9491 gen_rtx_REG (SImode,
9492 COP0_STATUS_REG_NUM)));
9494 /* Right justify the RIPL in k0. */
9495 if (!cfun->machine->keep_interrupts_masked_p)
9496 emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
9497 gen_rtx_REG (SImode, K0_REG_NUM),
9498 GEN_INT (CAUSE_IPL)));
9500 /* Push Status into its stack slot. */
9501 mem = gen_frame_mem (word_mode,
9502 plus_constant (stack_pointer_rtx, offset));
9503 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
9504 offset -= UNITS_PER_WORD;
9506 /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
9507 if (!cfun->machine->keep_interrupts_masked_p)
9508 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
9509 GEN_INT (6),
9510 GEN_INT (SR_IPL),
9511 gen_rtx_REG (SImode, K0_REG_NUM)));
9513 if (!cfun->machine->keep_interrupts_masked_p)
9514 /* Enable interrupts by clearing the KSU ERL and EXL bits.
9515 IE is already the correct value, so we don't have to do
9516 anything explicit. */
9517 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
9518 GEN_INT (4),
9519 GEN_INT (SR_EXL),
9520 gen_rtx_REG (SImode, GP_REG_FIRST)));
9521 else
9522 /* Disable interrupts by clearing the KSU, ERL, EXL,
9523 and IE bits. */
9524 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
9525 GEN_INT (5),
9526 GEN_INT (SR_IE),
9527 gen_rtx_REG (SImode, GP_REG_FIRST)));
9529 else
9531 insn = gen_add3_insn (stack_pointer_rtx,
9532 stack_pointer_rtx,
9533 GEN_INT (-step1));
9534 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9535 size -= step1;
9537 mips_for_each_saved_acc (size, mips_save_reg);
9538 mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
9542 /* Allocate the rest of the frame. */
9543 if (size > 0)
9545 if (SMALL_OPERAND (-size))
9546 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
9547 stack_pointer_rtx,
9548 GEN_INT (-size)))) = 1;
9549 else
9551 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
9552 if (TARGET_MIPS16)
9554 /* There are no instructions to add or subtract registers
9555 from the stack pointer, so use the frame pointer as a
9556 temporary. We should always be using a frame pointer
9557 in this case anyway. */
9558 gcc_assert (frame_pointer_needed);
9559 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9560 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
9561 hard_frame_pointer_rtx,
9562 MIPS_PROLOGUE_TEMP (Pmode)));
9563 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
9565 else
9566 emit_insn (gen_sub3_insn (stack_pointer_rtx,
9567 stack_pointer_rtx,
9568 MIPS_PROLOGUE_TEMP (Pmode)));
9570 /* Describe the combined effect of the previous instructions. */
9571 mips_set_frame_expr
9572 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9573 plus_constant (stack_pointer_rtx, -size)));
9577 /* Set up the frame pointer, if we're using one. */
9578 if (frame_pointer_needed)
9580 HOST_WIDE_INT offset;
9582 offset = frame->hard_frame_pointer_offset;
9583 if (offset == 0)
9585 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9586 RTX_FRAME_RELATED_P (insn) = 1;
9588 else if (SMALL_OPERAND (offset))
9590 insn = gen_add3_insn (hard_frame_pointer_rtx,
9591 stack_pointer_rtx, GEN_INT (offset));
9592 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9594 else
9596 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
9597 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9598 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
9599 hard_frame_pointer_rtx,
9600 MIPS_PROLOGUE_TEMP (Pmode)));
9601 mips_set_frame_expr
9602 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
9603 plus_constant (stack_pointer_rtx, offset)));
9607 mips_emit_loadgp ();
9609 /* Initialize the $gp save slot. */
9610 if (frame->cprestore_size > 0
9611 && cfun->machine->global_pointer != INVALID_REGNUM)
9613 if (TARGET_MIPS16)
9614 mips_emit_move (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9615 MIPS16_PIC_TEMP);
9616 else if (TARGET_ABICALLS_PIC2)
9617 emit_insn (gen_cprestore (GEN_INT (frame->args_size)));
9618 else
9619 emit_move_insn (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9620 pic_offset_table_rtx);
9623 /* We need to search back to the last use of K0 or K1. */
9624 if (cfun->machine->interrupt_handler_p)
9626 for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
9627 if (INSN_P (insn)
9628 && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
9629 break;
9630 /* Emit a move from K1 to COP0 Status after insn. */
9631 gcc_assert (insn != NULL_RTX);
9632 emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
9633 gen_rtx_REG (SImode, K1_REG_NUM)),
9634 insn);
9637 /* If we are profiling, make sure no instructions are scheduled before
9638 the call to mcount. */
9639 if (crtl->profile)
9640 emit_insn (gen_blockage ());
9643 /* Emit instructions to restore register REG from slot MEM. */
9645 static void
9646 mips_restore_reg (rtx reg, rtx mem)
9648 /* There's no MIPS16 instruction to load $31 directly. Load into
9649 $7 instead and adjust the return insn appropriately. */
9650 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
9651 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
9653 if (REGNO (reg) == HI_REGNUM)
9655 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
9656 if (TARGET_64BIT)
9657 emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
9658 MIPS_EPILOGUE_TEMP (DImode),
9659 gen_rtx_REG (DImode, LO_REGNUM)));
9660 else
9661 emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
9662 MIPS_EPILOGUE_TEMP (SImode),
9663 gen_rtx_REG (SImode, LO_REGNUM)));
9665 else if ((TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
9666 || ACC_REG_P (REGNO (reg)))
9668 /* Can't restore directly; move through a temporary. */
9669 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
9670 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
9672 else
9673 mips_emit_move (reg, mem);
9676 /* Emit any instructions needed before a return. */
9678 void
9679 mips_expand_before_return (void)
9681 /* When using a call-clobbered gp, we start out with unified call
9682 insns that include instructions to restore the gp. We then split
9683 these unified calls after reload. These split calls explicitly
9684 clobber gp, so there is no need to define
9685 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
9687 For consistency, we should also insert an explicit clobber of $28
9688 before return insns, so that the post-reload optimizers know that
9689 the register is not live on exit. */
9690 if (TARGET_CALL_CLOBBERED_GP)
9691 emit_clobber (pic_offset_table_rtx);
9694 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
9695 says which. */
9697 void
9698 mips_expand_epilogue (bool sibcall_p)
9700 const struct mips_frame_info *frame;
9701 HOST_WIDE_INT step1, step2;
9702 rtx base, target, insn;
9704 if (!sibcall_p && mips_can_use_return_insn ())
9706 emit_jump_insn (gen_return ());
9707 return;
9710 /* In MIPS16 mode, if the return value should go into a floating-point
9711 register, we need to call a helper routine to copy it over. */
9712 if (mips16_cfun_returns_in_fpr_p ())
9713 mips16_copy_fpr_return_value ();
9715 /* Split the frame into two. STEP1 is the amount of stack we should
9716 deallocate before restoring the registers. STEP2 is the amount we
9717 should deallocate afterwards.
9719 Start off by assuming that no registers need to be restored. */
9720 frame = &cfun->machine->frame;
9721 step1 = frame->total_size;
9722 step2 = 0;
9724 /* Work out which register holds the frame address. */
9725 if (!frame_pointer_needed)
9726 base = stack_pointer_rtx;
9727 else
9729 base = hard_frame_pointer_rtx;
9730 step1 -= frame->hard_frame_pointer_offset;
9733 /* If we need to restore registers, deallocate as much stack as
9734 possible in the second step without going out of range. */
9735 if ((frame->mask | frame->fmask | frame->acc_mask) != 0
9736 || frame->num_cop0_regs > 0)
9738 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
9739 step1 -= step2;
9742 /* Set TARGET to BASE + STEP1. */
9743 target = base;
9744 if (step1 > 0)
9746 rtx adjust;
9748 /* Get an rtx for STEP1 that we can add to BASE. */
9749 adjust = GEN_INT (step1);
9750 if (!SMALL_OPERAND (step1))
9752 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
9753 adjust = MIPS_EPILOGUE_TEMP (Pmode);
9756 /* Normal mode code can copy the result straight into $sp. */
9757 if (!TARGET_MIPS16)
9758 target = stack_pointer_rtx;
9760 emit_insn (gen_add3_insn (target, base, adjust));
9763 /* Copy TARGET into the stack pointer. */
9764 if (target != stack_pointer_rtx)
9765 mips_emit_move (stack_pointer_rtx, target);
9767 /* If we're using addressing macros, $gp is implicitly used by all
9768 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
9769 from the stack. */
9770 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
9771 emit_insn (gen_blockage ());
9773 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
9775 unsigned int regno, mask;
9776 HOST_WIDE_INT offset;
9777 rtx restore;
9779 /* Generate the restore instruction. */
9780 mask = frame->mask;
9781 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
9783 /* Restore any other registers manually. */
9784 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9785 if (BITSET_P (mask, regno - GP_REG_FIRST))
9787 offset -= UNITS_PER_WORD;
9788 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
9791 /* Restore the remaining registers and deallocate the final bit
9792 of the frame. */
9793 emit_insn (restore);
9795 else
9797 /* Restore the registers. */
9798 mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
9799 mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
9800 mips_restore_reg);
9802 if (cfun->machine->interrupt_handler_p)
9804 HOST_WIDE_INT offset;
9805 rtx mem;
9807 offset = frame->cop0_sp_offset - (frame->total_size - step2);
9808 if (!cfun->machine->keep_interrupts_masked_p)
9810 /* Restore the original EPC. */
9811 mem = gen_frame_mem (word_mode,
9812 plus_constant (stack_pointer_rtx, offset));
9813 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
9814 offset -= UNITS_PER_WORD;
9816 /* Move to COP0 EPC. */
9817 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
9818 gen_rtx_REG (SImode, K0_REG_NUM)));
9821 /* Restore the original Status. */
9822 mem = gen_frame_mem (word_mode,
9823 plus_constant (stack_pointer_rtx, offset));
9824 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
9825 offset -= UNITS_PER_WORD;
9827 /* If we don't use shoadow register set, we need to update SP. */
9828 if (!cfun->machine->use_shadow_register_set_p && step2 > 0)
9829 emit_insn (gen_add3_insn (stack_pointer_rtx,
9830 stack_pointer_rtx,
9831 GEN_INT (step2)));
9833 /* Move to COP0 Status. */
9834 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
9835 gen_rtx_REG (SImode, K0_REG_NUM)));
9837 else
9839 /* Deallocate the final bit of the frame. */
9840 if (step2 > 0)
9841 emit_insn (gen_add3_insn (stack_pointer_rtx,
9842 stack_pointer_rtx,
9843 GEN_INT (step2)));
9847 /* Add in the __builtin_eh_return stack adjustment. We need to
9848 use a temporary in MIPS16 code. */
9849 if (crtl->calls_eh_return)
9851 if (TARGET_MIPS16)
9853 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
9854 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
9855 MIPS_EPILOGUE_TEMP (Pmode),
9856 EH_RETURN_STACKADJ_RTX));
9857 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
9859 else
9860 emit_insn (gen_add3_insn (stack_pointer_rtx,
9861 stack_pointer_rtx,
9862 EH_RETURN_STACKADJ_RTX));
9865 if (!sibcall_p)
9867 mips_expand_before_return ();
9868 if (cfun->machine->interrupt_handler_p)
9870 /* Interrupt handlers generate eret or deret. */
9871 if (cfun->machine->use_debug_exception_return_p)
9872 emit_jump_insn (gen_mips_deret ());
9873 else
9874 emit_jump_insn (gen_mips_eret ());
9876 else
9878 unsigned int regno;
9880 /* When generating MIPS16 code, the normal
9881 mips_for_each_saved_gpr_and_fpr path will restore the return
9882 address into $7 rather than $31. */
9883 if (TARGET_MIPS16
9884 && !GENERATE_MIPS16E_SAVE_RESTORE
9885 && BITSET_P (frame->mask, 31))
9886 regno = GP_REG_FIRST + 7;
9887 else
9888 regno = GP_REG_FIRST + 31;
9889 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
9893 /* Search from the beginning to the first use of K0 or K1. */
9894 if (cfun->machine->interrupt_handler_p
9895 && !cfun->machine->keep_interrupts_masked_p)
9897 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
9898 if (INSN_P (insn)
9899 && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
9900 break;
9901 gcc_assert (insn != NULL_RTX);
9902 /* Insert disable interrupts before the first use of K0 or K1. */
9903 emit_insn_before (gen_mips_di (), insn);
9904 emit_insn_before (gen_mips_ehb (), insn);
9908 /* Return nonzero if this function is known to have a null epilogue.
9909 This allows the optimizer to omit jumps to jumps if no stack
9910 was created. */
9912 bool
9913 mips_can_use_return_insn (void)
9915 /* Interrupt handlers need to go through the epilogue. */
9916 if (cfun->machine->interrupt_handler_p)
9917 return false;
9919 if (!reload_completed)
9920 return false;
9922 if (crtl->profile)
9923 return false;
9925 /* In MIPS16 mode, a function that returns a floating-point value
9926 needs to arrange to copy the return value into the floating-point
9927 registers. */
9928 if (mips16_cfun_returns_in_fpr_p ())
9929 return false;
9931 return cfun->machine->frame.total_size == 0;
9934 /* Return true if register REGNO can store a value of mode MODE.
9935 The result of this function is cached in mips_hard_regno_mode_ok. */
9937 static bool
9938 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
9940 unsigned int size;
9941 enum mode_class mclass;
9943 if (mode == CCV2mode)
9944 return (ISA_HAS_8CC
9945 && ST_REG_P (regno)
9946 && (regno - ST_REG_FIRST) % 2 == 0);
9948 if (mode == CCV4mode)
9949 return (ISA_HAS_8CC
9950 && ST_REG_P (regno)
9951 && (regno - ST_REG_FIRST) % 4 == 0);
9953 if (mode == CCmode)
9955 if (!ISA_HAS_8CC)
9956 return regno == FPSW_REGNUM;
9958 return (ST_REG_P (regno)
9959 || GP_REG_P (regno)
9960 || FP_REG_P (regno));
9963 size = GET_MODE_SIZE (mode);
9964 mclass = GET_MODE_CLASS (mode);
9966 if (GP_REG_P (regno))
9967 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
9969 if (FP_REG_P (regno)
9970 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
9971 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
9973 /* Allow TFmode for CCmode reloads. */
9974 if (mode == TFmode && ISA_HAS_8CC)
9975 return true;
9977 /* Allow 64-bit vector modes for Loongson-2E/2F. */
9978 if (TARGET_LOONGSON_VECTORS
9979 && (mode == V2SImode
9980 || mode == V4HImode
9981 || mode == V8QImode
9982 || mode == DImode))
9983 return true;
9985 if (mclass == MODE_FLOAT
9986 || mclass == MODE_COMPLEX_FLOAT
9987 || mclass == MODE_VECTOR_FLOAT)
9988 return size <= UNITS_PER_FPVALUE;
9990 /* Allow integer modes that fit into a single register. We need
9991 to put integers into FPRs when using instructions like CVT
9992 and TRUNC. There's no point allowing sizes smaller than a word,
9993 because the FPU has no appropriate load/store instructions. */
9994 if (mclass == MODE_INT)
9995 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
9998 if (ACC_REG_P (regno)
9999 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
10001 if (MD_REG_P (regno))
10003 /* After a multiplication or division, clobbering HI makes
10004 the value of LO unpredictable, and vice versa. This means
10005 that, for all interesting cases, HI and LO are effectively
10006 a single register.
10008 We model this by requiring that any value that uses HI
10009 also uses LO. */
10010 if (size <= UNITS_PER_WORD * 2)
10011 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
10013 else
10015 /* DSP accumulators do not have the same restrictions as
10016 HI and LO, so we can treat them as normal doubleword
10017 registers. */
10018 if (size <= UNITS_PER_WORD)
10019 return true;
10021 if (size <= UNITS_PER_WORD * 2
10022 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
10023 return true;
10027 if (ALL_COP_REG_P (regno))
10028 return mclass == MODE_INT && size <= UNITS_PER_WORD;
10030 if (regno == GOT_VERSION_REGNUM)
10031 return mode == SImode;
10033 return false;
10036 /* Implement HARD_REGNO_NREGS. */
10038 unsigned int
10039 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10041 if (ST_REG_P (regno))
10042 /* The size of FP status registers is always 4, because they only hold
10043 CCmode values, and CCmode is always considered to be 4 bytes wide. */
10044 return (GET_MODE_SIZE (mode) + 3) / 4;
10046 if (FP_REG_P (regno))
10047 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
10049 /* All other registers are word-sized. */
10050 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
10053 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
10054 in mips_hard_regno_nregs. */
10057 mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
10059 int size;
10060 HARD_REG_SET left;
10062 size = 0x8000;
10063 COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
10064 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
10066 size = MIN (size, 4);
10067 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
10069 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
10071 size = MIN (size, UNITS_PER_FPREG);
10072 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
10074 if (!hard_reg_set_empty_p (left))
10075 size = MIN (size, UNITS_PER_WORD);
10076 return (GET_MODE_SIZE (mode) + size - 1) / size;
10079 /* Implement CANNOT_CHANGE_MODE_CLASS. */
10081 bool
10082 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
10083 enum machine_mode to ATTRIBUTE_UNUSED,
10084 enum reg_class rclass)
10086 /* There are several problems with changing the modes of values
10087 in floating-point registers:
10089 - When a multi-word value is stored in paired floating-point
10090 registers, the first register always holds the low word.
10091 We therefore can't allow FPRs to change between single-word
10092 and multi-word modes on big-endian targets.
10094 - GCC assumes that each word of a multiword register can be accessed
10095 individually using SUBREGs. This is not true for floating-point
10096 registers if they are bigger than a word.
10098 - Loading a 32-bit value into a 64-bit floating-point register
10099 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
10100 We can't allow FPRs to change from SImode to to a wider mode on
10101 64-bit targets.
10103 - If the FPU has already interpreted a value in one format, we must
10104 not ask it to treat the value as having a different format.
10106 We therefore disallow all mode changes involving FPRs. */
10107 return reg_classes_intersect_p (FP_REGS, rclass);
10110 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
10112 static bool
10113 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
10115 switch (mode)
10117 case SFmode:
10118 return TARGET_HARD_FLOAT;
10120 case DFmode:
10121 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
10123 case V2SFmode:
10124 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
10126 default:
10127 return false;
10131 /* Implement MODES_TIEABLE_P. */
10133 bool
10134 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
10136 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
10137 prefer to put one of them in FPRs. */
10138 return (mode1 == mode2
10139 || (!mips_mode_ok_for_mov_fmt_p (mode1)
10140 && !mips_mode_ok_for_mov_fmt_p (mode2)));
10143 /* Implement PREFERRED_RELOAD_CLASS. */
10145 enum reg_class
10146 mips_preferred_reload_class (rtx x, enum reg_class rclass)
10148 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
10149 return LEA_REGS;
10151 if (reg_class_subset_p (FP_REGS, rclass)
10152 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
10153 return FP_REGS;
10155 if (reg_class_subset_p (GR_REGS, rclass))
10156 rclass = GR_REGS;
10158 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
10159 rclass = M16_REGS;
10161 return rclass;
10164 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
10165 Return a "canonical" class to represent it in later calculations. */
10167 static enum reg_class
10168 mips_canonicalize_move_class (enum reg_class rclass)
10170 /* All moves involving accumulator registers have the same cost. */
10171 if (reg_class_subset_p (rclass, ACC_REGS))
10172 rclass = ACC_REGS;
10174 /* Likewise promote subclasses of general registers to the most
10175 interesting containing class. */
10176 if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
10177 rclass = M16_REGS;
10178 else if (reg_class_subset_p (rclass, GENERAL_REGS))
10179 rclass = GENERAL_REGS;
10181 return rclass;
10184 /* Return the cost of moving a value of mode MODE from a register of
10185 class FROM to a GPR. Return 0 for classes that are unions of other
10186 classes handled by this function. */
10188 static int
10189 mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10190 enum reg_class from)
10192 switch (from)
10194 case GENERAL_REGS:
10195 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
10196 return 2;
10198 case ACC_REGS:
10199 /* MFLO and MFHI. */
10200 return 6;
10202 case FP_REGS:
10203 /* MFC1, etc. */
10204 return 4;
10206 case ST_REGS:
10207 /* LUI followed by MOVF. */
10208 return 4;
10210 case COP0_REGS:
10211 case COP2_REGS:
10212 case COP3_REGS:
10213 /* This choice of value is historical. */
10214 return 5;
10216 default:
10217 return 0;
10221 /* Return the cost of moving a value of mode MODE from a GPR to a
10222 register of class TO. Return 0 for classes that are unions of
10223 other classes handled by this function. */
10225 static int
10226 mips_move_from_gpr_cost (enum machine_mode mode, enum reg_class to)
10228 switch (to)
10230 case GENERAL_REGS:
10231 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
10232 return 2;
10234 case ACC_REGS:
10235 /* MTLO and MTHI. */
10236 return 6;
10238 case FP_REGS:
10239 /* MTC1, etc. */
10240 return 4;
10242 case ST_REGS:
10243 /* A secondary reload through an FPR scratch. */
10244 return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
10245 + mips_register_move_cost (mode, FP_REGS, ST_REGS));
10247 case COP0_REGS:
10248 case COP2_REGS:
10249 case COP3_REGS:
10250 /* This choice of value is historical. */
10251 return 5;
10253 default:
10254 return 0;
10258 /* Implement REGISTER_MOVE_COST. Return 0 for classes that are the
10259 maximum of the move costs for subclasses; regclass will work out
10260 the maximum for us. */
10263 mips_register_move_cost (enum machine_mode mode,
10264 enum reg_class from, enum reg_class to)
10266 enum reg_class dregs;
10267 int cost1, cost2;
10269 from = mips_canonicalize_move_class (from);
10270 to = mips_canonicalize_move_class (to);
10272 /* Handle moves that can be done without using general-purpose registers. */
10273 if (from == FP_REGS)
10275 if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
10276 /* MOV.FMT. */
10277 return 4;
10278 if (to == ST_REGS)
10279 /* The sequence generated by mips_expand_fcc_reload. */
10280 return 8;
10283 /* Handle cases in which only one class deviates from the ideal. */
10284 dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
10285 if (from == dregs)
10286 return mips_move_from_gpr_cost (mode, to);
10287 if (to == dregs)
10288 return mips_move_to_gpr_cost (mode, from);
10290 /* Handles cases that require a GPR temporary. */
10291 cost1 = mips_move_to_gpr_cost (mode, from);
10292 if (cost1 != 0)
10294 cost2 = mips_move_from_gpr_cost (mode, to);
10295 if (cost2 != 0)
10296 return cost1 + cost2;
10299 return 0;
10302 /* Implement TARGET_IRA_COVER_CLASSES. */
10304 static const enum reg_class *
10305 mips_ira_cover_classes (void)
10307 static const enum reg_class acc_classes[] = {
10308 GR_AND_ACC_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
10309 ST_REGS, LIM_REG_CLASSES
10311 static const enum reg_class no_acc_classes[] = {
10312 GR_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
10313 ST_REGS, LIM_REG_CLASSES
10316 /* Don't allow the register allocators to use LO and HI in MIPS16 mode,
10317 which has no MTLO or MTHI instructions. Also, using GR_AND_ACC_REGS
10318 as a cover class only works well when we keep per-register costs.
10319 Using it when not optimizing can cause us to think accumulators
10320 have the same cost as GPRs in cases where GPRs are actually much
10321 cheaper. */
10322 return TARGET_MIPS16 || !optimize ? no_acc_classes : acc_classes;
10325 /* Return the register class required for a secondary register when
10326 copying between one of the registers in RCLASS and value X, which
10327 has mode MODE. X is the source of the move if IN_P, otherwise it
10328 is the destination. Return NO_REGS if no secondary register is
10329 needed. */
10331 enum reg_class
10332 mips_secondary_reload_class (enum reg_class rclass,
10333 enum machine_mode mode, rtx x, bool in_p)
10335 int regno;
10337 /* If X is a constant that cannot be loaded into $25, it must be loaded
10338 into some other GPR. No other register class allows a direct move. */
10339 if (mips_dangerous_for_la25_p (x))
10340 return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
10342 regno = true_regnum (x);
10343 if (TARGET_MIPS16)
10345 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
10346 if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
10347 return M16_REGS;
10349 return NO_REGS;
10352 /* Copying from accumulator registers to anywhere other than a general
10353 register requires a temporary general register. */
10354 if (reg_class_subset_p (rclass, ACC_REGS))
10355 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
10356 if (ACC_REG_P (regno))
10357 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
10359 /* We can only copy a value to a condition code register from a
10360 floating-point register, and even then we require a scratch
10361 floating-point register. We can only copy a value out of a
10362 condition-code register into a general register. */
10363 if (reg_class_subset_p (rclass, ST_REGS))
10365 if (in_p)
10366 return FP_REGS;
10367 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
10369 if (ST_REG_P (regno))
10371 if (!in_p)
10372 return FP_REGS;
10373 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
10376 if (reg_class_subset_p (rclass, FP_REGS))
10378 if (MEM_P (x)
10379 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
10380 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
10381 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
10382 return NO_REGS;
10384 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
10385 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
10386 return NO_REGS;
10388 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
10389 /* We can force the constant to memory and use lwc1
10390 and ldc1. As above, we will use pairs of lwc1s if
10391 ldc1 is not supported. */
10392 return NO_REGS;
10394 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
10395 /* In this case we can use mov.fmt. */
10396 return NO_REGS;
10398 /* Otherwise, we need to reload through an integer register. */
10399 return GR_REGS;
10401 if (FP_REG_P (regno))
10402 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
10404 return NO_REGS;
10407 /* Implement TARGET_MODE_REP_EXTENDED. */
10409 static int
10410 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10412 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
10413 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10414 return SIGN_EXTEND;
10416 return UNKNOWN;
10419 /* Implement TARGET_VALID_POINTER_MODE. */
10421 static bool
10422 mips_valid_pointer_mode (enum machine_mode mode)
10424 return mode == SImode || (TARGET_64BIT && mode == DImode);
10427 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
10429 static bool
10430 mips_vector_mode_supported_p (enum machine_mode mode)
10432 switch (mode)
10434 case V2SFmode:
10435 return TARGET_PAIRED_SINGLE_FLOAT;
10437 case V2HImode:
10438 case V4QImode:
10439 case V2HQmode:
10440 case V2UHQmode:
10441 case V2HAmode:
10442 case V2UHAmode:
10443 case V4QQmode:
10444 case V4UQQmode:
10445 return TARGET_DSP;
10447 case V2SImode:
10448 case V4HImode:
10449 case V8QImode:
10450 return TARGET_LOONGSON_VECTORS;
10452 default:
10453 return false;
10457 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
10459 static bool
10460 mips_scalar_mode_supported_p (enum machine_mode mode)
10462 if (ALL_FIXED_POINT_MODE_P (mode)
10463 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
10464 return true;
10466 return default_scalar_mode_supported_p (mode);
10469 /* Implement TARGET_INIT_LIBFUNCS. */
10471 #include "config/gofast.h"
10473 static void
10474 mips_init_libfuncs (void)
10476 if (TARGET_FIX_VR4120)
10478 /* Register the special divsi3 and modsi3 functions needed to work
10479 around VR4120 division errata. */
10480 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10481 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10484 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10486 /* Register the MIPS16 -mhard-float stubs. */
10487 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10488 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10489 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10490 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10492 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10493 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10494 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10495 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10496 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10497 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10498 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10500 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10501 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10502 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10504 if (TARGET_DOUBLE_FLOAT)
10506 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10507 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10508 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10509 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10511 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10512 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10513 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10514 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10515 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10516 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10517 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10519 set_conv_libfunc (sext_optab, DFmode, SFmode,
10520 "__mips16_extendsfdf2");
10521 set_conv_libfunc (trunc_optab, SFmode, DFmode,
10522 "__mips16_truncdfsf2");
10523 set_conv_libfunc (sfix_optab, SImode, DFmode,
10524 "__mips16_fix_truncdfsi");
10525 set_conv_libfunc (sfloat_optab, DFmode, SImode,
10526 "__mips16_floatsidf");
10527 set_conv_libfunc (ufloat_optab, DFmode, SImode,
10528 "__mips16_floatunsidf");
10531 else
10532 /* Register the gofast functions if selected using --enable-gofast. */
10533 gofast_maybe_init_libfuncs ();
10535 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
10536 on an external non-MIPS16 routine to implement __sync_synchronize. */
10537 if (TARGET_MIPS16)
10538 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
10541 /* Return the length of INSN. LENGTH is the initial length computed by
10542 attributes in the machine-description file. */
10545 mips_adjust_insn_length (rtx insn, int length)
10547 /* A unconditional jump has an unfilled delay slot if it is not part
10548 of a sequence. A conditional jump normally has a delay slot, but
10549 does not on MIPS16. */
10550 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10551 length += 4;
10553 /* See how many nops might be needed to avoid hardware hazards. */
10554 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10555 switch (get_attr_hazard (insn))
10557 case HAZARD_NONE:
10558 break;
10560 case HAZARD_DELAY:
10561 length += 4;
10562 break;
10564 case HAZARD_HILO:
10565 length += 8;
10566 break;
10569 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
10570 the .md file length attributes are 4-based for both modes.
10571 Adjust the MIPS16 ones here. */
10572 if (TARGET_MIPS16)
10573 length /= 2;
10575 return length;
10578 /* Return an asm sequence to start a noat block and load the address
10579 of a label into $1. */
10581 const char *
10582 mips_output_load_label (void)
10584 if (TARGET_EXPLICIT_RELOCS)
10585 switch (mips_abi)
10587 case ABI_N32:
10588 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10590 case ABI_64:
10591 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10593 default:
10594 if (ISA_HAS_LOAD_DELAY)
10595 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10596 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10598 else
10600 if (Pmode == DImode)
10601 return "%[dla\t%@,%0";
10602 else
10603 return "%[la\t%@,%0";
10607 /* Return the assembly code for INSN, which has the operands given by
10608 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10609 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10610 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10611 version of BRANCH_IF_TRUE. */
10613 const char *
10614 mips_output_conditional_branch (rtx insn, rtx *operands,
10615 const char *branch_if_true,
10616 const char *branch_if_false)
10618 unsigned int length;
10619 rtx taken, not_taken;
10621 gcc_assert (LABEL_P (operands[1]));
10623 length = get_attr_length (insn);
10624 if (length <= 8)
10626 /* Just a simple conditional branch. */
10627 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10628 return branch_if_true;
10631 /* Generate a reversed branch around a direct jump. This fallback does
10632 not use branch-likely instructions. */
10633 mips_branch_likely = false;
10634 not_taken = gen_label_rtx ();
10635 taken = operands[1];
10637 /* Generate the reversed branch to NOT_TAKEN. */
10638 operands[1] = not_taken;
10639 output_asm_insn (branch_if_false, operands);
10641 /* If INSN has a delay slot, we must provide delay slots for both the
10642 branch to NOT_TAKEN and the conditional jump. We must also ensure
10643 that INSN's delay slot is executed in the appropriate cases. */
10644 if (final_sequence)
10646 /* This first delay slot will always be executed, so use INSN's
10647 delay slot if is not annulled. */
10648 if (!INSN_ANNULLED_BRANCH_P (insn))
10650 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10651 asm_out_file, optimize, 1, NULL);
10652 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10654 else
10655 output_asm_insn ("nop", 0);
10656 fprintf (asm_out_file, "\n");
10659 /* Output the unconditional branch to TAKEN. */
10660 if (length <= 16)
10661 output_asm_insn ("j\t%0%/", &taken);
10662 else
10664 output_asm_insn (mips_output_load_label (), &taken);
10665 output_asm_insn ("jr\t%@%]%/", 0);
10668 /* Now deal with its delay slot; see above. */
10669 if (final_sequence)
10671 /* This delay slot will only be executed if the branch is taken.
10672 Use INSN's delay slot if is annulled. */
10673 if (INSN_ANNULLED_BRANCH_P (insn))
10675 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10676 asm_out_file, optimize, 1, NULL);
10677 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10679 else
10680 output_asm_insn ("nop", 0);
10681 fprintf (asm_out_file, "\n");
10684 /* Output NOT_TAKEN. */
10685 targetm.asm_out.internal_label (asm_out_file, "L",
10686 CODE_LABEL_NUMBER (not_taken));
10687 return "";
10690 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10691 if some ordering condition is true. The condition is given by
10692 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10693 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10694 its second is always zero. */
10696 const char *
10697 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10699 const char *branch[2];
10701 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10702 Make BRANCH[0] branch on the inverse condition. */
10703 switch (GET_CODE (operands[0]))
10705 /* These cases are equivalent to comparisons against zero. */
10706 case LEU:
10707 inverted_p = !inverted_p;
10708 /* Fall through. */
10709 case GTU:
10710 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10711 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10712 break;
10714 /* These cases are always true or always false. */
10715 case LTU:
10716 inverted_p = !inverted_p;
10717 /* Fall through. */
10718 case GEU:
10719 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10720 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10721 break;
10723 default:
10724 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10725 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10726 break;
10728 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10731 /* Return the assembly code for __sync_*() loop LOOP. The loop should support
10732 both normal and likely branches, using %? and %~ where appropriate. */
10734 const char *
10735 mips_output_sync_loop (const char *loop)
10737 /* Use branch-likely instructions to work around the LL/SC R10000 errata. */
10738 mips_branch_likely = TARGET_FIX_R10000;
10739 return loop;
10742 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
10743 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
10745 When working around R4000 and R4400 errata, we need to make sure that
10746 the division is not immediately followed by a shift[1][2]. We also
10747 need to stop the division from being put into a branch delay slot[3].
10748 The easiest way to avoid both problems is to add a nop after the
10749 division. When a divide-by-zero check is needed, this nop can be
10750 used to fill the branch delay slot.
10752 [1] If a double-word or a variable shift executes immediately
10753 after starting an integer division, the shift may give an
10754 incorrect result. See quotations of errata #16 and #28 from
10755 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10756 in mips.md for details.
10758 [2] A similar bug to [1] exists for all revisions of the
10759 R4000 and the R4400 when run in an MC configuration.
10760 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10762 "19. In this following sequence:
10764 ddiv (or ddivu or div or divu)
10765 dsll32 (or dsrl32, dsra32)
10767 if an MPT stall occurs, while the divide is slipping the cpu
10768 pipeline, then the following double shift would end up with an
10769 incorrect result.
10771 Workaround: The compiler needs to avoid generating any
10772 sequence with divide followed by extended double shift."
10774 This erratum is also present in "MIPS R4400MC Errata, Processor
10775 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10776 & 3.0" as errata #10 and #4, respectively.
10778 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10779 (also valid for MIPS R4000MC processors):
10781 "52. R4000SC: This bug does not apply for the R4000PC.
10783 There are two flavors of this bug:
10785 1) If the instruction just after divide takes an RF exception
10786 (tlb-refill, tlb-invalid) and gets an instruction cache
10787 miss (both primary and secondary) and the line which is
10788 currently in secondary cache at this index had the first
10789 data word, where the bits 5..2 are set, then R4000 would
10790 get a wrong result for the div.
10794 div r8, r9
10795 ------------------- # end-of page. -tlb-refill
10799 div r8, r9
10800 ------------------- # end-of page. -tlb-invalid
10803 2) If the divide is in the taken branch delay slot, where the
10804 target takes RF exception and gets an I-cache miss for the
10805 exception vector or where I-cache miss occurs for the
10806 target address, under the above mentioned scenarios, the
10807 div would get wrong results.
10810 j r2 # to next page mapped or unmapped
10811 div r8,r9 # this bug would be there as long
10812 # as there is an ICache miss and
10813 nop # the "data pattern" is present
10816 beq r0, r0, NextPage # to Next page
10817 div r8,r9
10820 This bug is present for div, divu, ddiv, and ddivu
10821 instructions.
10823 Workaround: For item 1), OS could make sure that the next page
10824 after the divide instruction is also mapped. For item 2), the
10825 compiler could make sure that the divide instruction is not in
10826 the branch delay slot."
10828 These processors have PRId values of 0x00004220 and 0x00004300 for
10829 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10831 const char *
10832 mips_output_division (const char *division, rtx *operands)
10834 const char *s;
10836 s = division;
10837 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10839 output_asm_insn (s, operands);
10840 s = "nop";
10842 if (TARGET_CHECK_ZERO_DIV)
10844 if (TARGET_MIPS16)
10846 output_asm_insn (s, operands);
10847 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10849 else if (GENERATE_DIVIDE_TRAPS)
10851 output_asm_insn (s, operands);
10852 s = "teq\t%2,%.,7";
10854 else
10856 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10857 output_asm_insn (s, operands);
10858 s = "break\t7%)\n1:";
10861 return s;
10864 /* Return true if IN_INSN is a multiply-add or multiply-subtract
10865 instruction and if OUT_INSN assigns to the accumulator operand. */
10867 bool
10868 mips_linked_madd_p (rtx out_insn, rtx in_insn)
10870 rtx x;
10872 x = single_set (in_insn);
10873 if (x == 0)
10874 return false;
10876 x = SET_SRC (x);
10878 if (GET_CODE (x) == PLUS
10879 && GET_CODE (XEXP (x, 0)) == MULT
10880 && reg_set_p (XEXP (x, 1), out_insn))
10881 return true;
10883 if (GET_CODE (x) == MINUS
10884 && GET_CODE (XEXP (x, 1)) == MULT
10885 && reg_set_p (XEXP (x, 0), out_insn))
10886 return true;
10888 return false;
10891 /* True if the dependency between OUT_INSN and IN_INSN is on the store
10892 data rather than the address. We need this because the cprestore
10893 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
10894 which causes the default routine to abort. We just return false
10895 for that case. */
10897 bool
10898 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10900 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10901 return false;
10903 return !store_data_bypass_p (out_insn, in_insn);
10907 /* Variables and flags used in scheduler hooks when tuning for
10908 Loongson 2E/2F. */
10909 static struct
10911 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
10912 strategy. */
10914 /* If true, then next ALU1/2 instruction will go to ALU1. */
10915 bool alu1_turn_p;
10917 /* If true, then next FALU1/2 unstruction will go to FALU1. */
10918 bool falu1_turn_p;
10920 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
10921 int alu1_core_unit_code;
10922 int alu2_core_unit_code;
10923 int falu1_core_unit_code;
10924 int falu2_core_unit_code;
10926 /* True if current cycle has a multi instruction.
10927 This flag is used in mips_ls2_dfa_post_advance_cycle. */
10928 bool cycle_has_multi_p;
10930 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
10931 These are used in mips_ls2_dfa_post_advance_cycle to initialize
10932 DFA state.
10933 E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
10934 instruction to go ALU1. */
10935 rtx alu1_turn_enabled_insn;
10936 rtx alu2_turn_enabled_insn;
10937 rtx falu1_turn_enabled_insn;
10938 rtx falu2_turn_enabled_insn;
10939 } mips_ls2;
10941 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10942 dependencies have no cost, except on the 20Kc where output-dependence
10943 is treated like input-dependence. */
10945 static int
10946 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10947 rtx dep ATTRIBUTE_UNUSED, int cost)
10949 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10950 && TUNE_20KC)
10951 return cost;
10952 if (REG_NOTE_KIND (link) != 0)
10953 return 0;
10954 return cost;
10957 /* Return the number of instructions that can be issued per cycle. */
10959 static int
10960 mips_issue_rate (void)
10962 switch (mips_tune)
10964 case PROCESSOR_74KC:
10965 case PROCESSOR_74KF2_1:
10966 case PROCESSOR_74KF1_1:
10967 case PROCESSOR_74KF3_2:
10968 /* The 74k is not strictly quad-issue cpu, but can be seen as one
10969 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10970 but in reality only a maximum of 3 insns can be issued as
10971 floating-point loads and stores also require a slot in the
10972 AGEN pipe. */
10973 case PROCESSOR_R10000:
10974 /* All R10K Processors are quad-issue (being the first MIPS
10975 processors to support this feature). */
10976 return 4;
10978 case PROCESSOR_20KC:
10979 case PROCESSOR_R4130:
10980 case PROCESSOR_R5400:
10981 case PROCESSOR_R5500:
10982 case PROCESSOR_R7000:
10983 case PROCESSOR_R9000:
10984 case PROCESSOR_OCTEON:
10985 return 2;
10987 case PROCESSOR_SB1:
10988 case PROCESSOR_SB1A:
10989 /* This is actually 4, but we get better performance if we claim 3.
10990 This is partly because of unwanted speculative code motion with the
10991 larger number, and partly because in most common cases we can't
10992 reach the theoretical max of 4. */
10993 return 3;
10995 case PROCESSOR_LOONGSON_2E:
10996 case PROCESSOR_LOONGSON_2F:
10997 return 4;
10999 default:
11000 return 1;
11004 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
11006 static void
11007 mips_ls2_init_dfa_post_cycle_insn (void)
11009 start_sequence ();
11010 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
11011 mips_ls2.alu1_turn_enabled_insn = get_insns ();
11012 end_sequence ();
11014 start_sequence ();
11015 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
11016 mips_ls2.alu2_turn_enabled_insn = get_insns ();
11017 end_sequence ();
11019 start_sequence ();
11020 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
11021 mips_ls2.falu1_turn_enabled_insn = get_insns ();
11022 end_sequence ();
11024 start_sequence ();
11025 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
11026 mips_ls2.falu2_turn_enabled_insn = get_insns ();
11027 end_sequence ();
11029 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
11030 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
11031 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
11032 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
11035 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
11036 Init data used in mips_dfa_post_advance_cycle. */
11038 static void
11039 mips_init_dfa_post_cycle_insn (void)
11041 if (TUNE_LOONGSON_2EF)
11042 mips_ls2_init_dfa_post_cycle_insn ();
11045 /* Initialize STATE when scheduling for Loongson 2E/2F.
11046 Support round-robin dispatch scheme by enabling only one of
11047 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
11048 respectively. */
11050 static void
11051 mips_ls2_dfa_post_advance_cycle (state_t state)
11053 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
11055 /* Though there are no non-pipelined ALU1 insns,
11056 we can get an instruction of type 'multi' before reload. */
11057 gcc_assert (mips_ls2.cycle_has_multi_p);
11058 mips_ls2.alu1_turn_p = false;
11061 mips_ls2.cycle_has_multi_p = false;
11063 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
11064 /* We have a non-pipelined alu instruction in the core,
11065 adjust round-robin counter. */
11066 mips_ls2.alu1_turn_p = true;
11068 if (mips_ls2.alu1_turn_p)
11070 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
11071 gcc_unreachable ();
11073 else
11075 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
11076 gcc_unreachable ();
11079 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
11081 /* There are no non-pipelined FALU1 insns. */
11082 gcc_unreachable ();
11083 mips_ls2.falu1_turn_p = false;
11086 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
11087 /* We have a non-pipelined falu instruction in the core,
11088 adjust round-robin counter. */
11089 mips_ls2.falu1_turn_p = true;
11091 if (mips_ls2.falu1_turn_p)
11093 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
11094 gcc_unreachable ();
11096 else
11098 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
11099 gcc_unreachable ();
11103 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
11104 This hook is being called at the start of each cycle. */
11106 static void
11107 mips_dfa_post_advance_cycle (void)
11109 if (TUNE_LOONGSON_2EF)
11110 mips_ls2_dfa_post_advance_cycle (curr_state);
11113 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11114 be as wide as the scheduling freedom in the DFA. */
11116 static int
11117 mips_multipass_dfa_lookahead (void)
11119 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11120 if (TUNE_SB1)
11121 return 4;
11123 if (TUNE_LOONGSON_2EF)
11124 return 4;
11126 if (TUNE_OCTEON)
11127 return 2;
11129 return 0;
11132 /* Remove the instruction at index LOWER from ready queue READY and
11133 reinsert it in front of the instruction at index HIGHER. LOWER must
11134 be <= HIGHER. */
11136 static void
11137 mips_promote_ready (rtx *ready, int lower, int higher)
11139 rtx new_head;
11140 int i;
11142 new_head = ready[lower];
11143 for (i = lower; i < higher; i++)
11144 ready[i] = ready[i + 1];
11145 ready[i] = new_head;
11148 /* If the priority of the instruction at POS2 in the ready queue READY
11149 is within LIMIT units of that of the instruction at POS1, swap the
11150 instructions if POS2 is not already less than POS1. */
11152 static void
11153 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11155 if (pos1 < pos2
11156 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11158 rtx temp;
11160 temp = ready[pos1];
11161 ready[pos1] = ready[pos2];
11162 ready[pos2] = temp;
11166 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11167 that may clobber hi or lo. */
11168 static rtx mips_macc_chains_last_hilo;
11170 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11171 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11173 static void
11174 mips_macc_chains_record (rtx insn)
11176 if (get_attr_may_clobber_hilo (insn))
11177 mips_macc_chains_last_hilo = insn;
11180 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11181 has NREADY elements, looking for a multiply-add or multiply-subtract
11182 instruction that is cumulative with mips_macc_chains_last_hilo.
11183 If there is one, promote it ahead of anything else that might
11184 clobber hi or lo. */
11186 static void
11187 mips_macc_chains_reorder (rtx *ready, int nready)
11189 int i, j;
11191 if (mips_macc_chains_last_hilo != 0)
11192 for (i = nready - 1; i >= 0; i--)
11193 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11195 for (j = nready - 1; j > i; j--)
11196 if (recog_memoized (ready[j]) >= 0
11197 && get_attr_may_clobber_hilo (ready[j]))
11199 mips_promote_ready (ready, i, j);
11200 break;
11202 break;
11206 /* The last instruction to be scheduled. */
11207 static rtx vr4130_last_insn;
11209 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11210 points to an rtx that is initially an instruction. Nullify the rtx
11211 if the instruction uses the value of register X. */
11213 static void
11214 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
11215 void *data)
11217 rtx *insn_ptr;
11219 insn_ptr = (rtx *) data;
11220 if (REG_P (x)
11221 && *insn_ptr != 0
11222 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11223 *insn_ptr = 0;
11226 /* Return true if there is true register dependence between vr4130_last_insn
11227 and INSN. */
11229 static bool
11230 vr4130_true_reg_dependence_p (rtx insn)
11232 note_stores (PATTERN (vr4130_last_insn),
11233 vr4130_true_reg_dependence_p_1, &insn);
11234 return insn == 0;
11237 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11238 the ready queue and that INSN2 is the instruction after it, return
11239 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11240 in which INSN1 and INSN2 can probably issue in parallel, but for
11241 which (INSN2, INSN1) should be less sensitive to instruction
11242 alignment than (INSN1, INSN2). See 4130.md for more details. */
11244 static bool
11245 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11247 sd_iterator_def sd_it;
11248 dep_t dep;
11250 /* Check for the following case:
11252 1) there is some other instruction X with an anti dependence on INSN1;
11253 2) X has a higher priority than INSN2; and
11254 3) X is an arithmetic instruction (and thus has no unit restrictions).
11256 If INSN1 is the last instruction blocking X, it would better to
11257 choose (INSN1, X) over (INSN2, INSN1). */
11258 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11259 if (DEP_TYPE (dep) == REG_DEP_ANTI
11260 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11261 && recog_memoized (DEP_CON (dep)) >= 0
11262 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11263 return false;
11265 if (vr4130_last_insn != 0
11266 && recog_memoized (insn1) >= 0
11267 && recog_memoized (insn2) >= 0)
11269 /* See whether INSN1 and INSN2 use different execution units,
11270 or if they are both ALU-type instructions. If so, they can
11271 probably execute in parallel. */
11272 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11273 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11274 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11276 /* If only one of the instructions has a dependence on
11277 vr4130_last_insn, prefer to schedule the other one first. */
11278 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
11279 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
11280 if (dep1_p != dep2_p)
11281 return dep1_p;
11283 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11284 is not an ALU-type instruction and if INSN1 uses the same
11285 execution unit. (Note that if this condition holds, we already
11286 know that INSN2 uses a different execution unit.) */
11287 if (class1 != VR4130_CLASS_ALU
11288 && recog_memoized (vr4130_last_insn) >= 0
11289 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11290 return true;
11293 return false;
11296 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11297 queue with at least two instructions. Swap the first two if
11298 vr4130_swap_insns_p says that it could be worthwhile. */
11300 static void
11301 vr4130_reorder (rtx *ready, int nready)
11303 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11304 mips_promote_ready (ready, nready - 2, nready - 1);
11307 /* Record whether last 74k AGEN instruction was a load or store. */
11308 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11310 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11311 resets to TYPE_UNKNOWN state. */
11313 static void
11314 mips_74k_agen_init (rtx insn)
11316 if (!insn || !NONJUMP_INSN_P (insn))
11317 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11318 else
11320 enum attr_type type = get_attr_type (insn);
11321 if (type == TYPE_LOAD || type == TYPE_STORE)
11322 mips_last_74k_agen_insn = type;
11326 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11327 loads to be grouped together, and multiple stores to be grouped
11328 together. Swap things around in the ready queue to make this happen. */
11330 static void
11331 mips_74k_agen_reorder (rtx *ready, int nready)
11333 int i;
11334 int store_pos, load_pos;
11336 store_pos = -1;
11337 load_pos = -1;
11339 for (i = nready - 1; i >= 0; i--)
11341 rtx insn = ready[i];
11342 if (USEFUL_INSN_P (insn))
11343 switch (get_attr_type (insn))
11345 case TYPE_STORE:
11346 if (store_pos == -1)
11347 store_pos = i;
11348 break;
11350 case TYPE_LOAD:
11351 if (load_pos == -1)
11352 load_pos = i;
11353 break;
11355 default:
11356 break;
11360 if (load_pos == -1 || store_pos == -1)
11361 return;
11363 switch (mips_last_74k_agen_insn)
11365 case TYPE_UNKNOWN:
11366 /* Prefer to schedule loads since they have a higher latency. */
11367 case TYPE_LOAD:
11368 /* Swap loads to the front of the queue. */
11369 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11370 break;
11371 case TYPE_STORE:
11372 /* Swap stores to the front of the queue. */
11373 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11374 break;
11375 default:
11376 break;
11380 /* Implement TARGET_SCHED_INIT. */
11382 static void
11383 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11384 int max_ready ATTRIBUTE_UNUSED)
11386 mips_macc_chains_last_hilo = 0;
11387 vr4130_last_insn = 0;
11388 mips_74k_agen_init (NULL_RTX);
11390 /* When scheduling for Loongson2, branch instructions go to ALU1,
11391 therefore basic block is most likely to start with round-robin counter
11392 pointed to ALU2. */
11393 mips_ls2.alu1_turn_p = false;
11394 mips_ls2.falu1_turn_p = true;
11397 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
11399 static int
11400 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11401 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11403 if (!reload_completed
11404 && TUNE_MACC_CHAINS
11405 && *nreadyp > 0)
11406 mips_macc_chains_reorder (ready, *nreadyp);
11408 if (reload_completed
11409 && TUNE_MIPS4130
11410 && !TARGET_VR4130_ALIGN
11411 && *nreadyp > 1)
11412 vr4130_reorder (ready, *nreadyp);
11414 if (TUNE_74K)
11415 mips_74k_agen_reorder (ready, *nreadyp);
11417 return mips_issue_rate ();
11420 /* Update round-robin counters for ALU1/2 and FALU1/2. */
11422 static void
11423 mips_ls2_variable_issue (rtx insn)
11425 if (mips_ls2.alu1_turn_p)
11427 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
11428 mips_ls2.alu1_turn_p = false;
11430 else
11432 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
11433 mips_ls2.alu1_turn_p = true;
11436 if (mips_ls2.falu1_turn_p)
11438 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
11439 mips_ls2.falu1_turn_p = false;
11441 else
11443 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
11444 mips_ls2.falu1_turn_p = true;
11447 if (recog_memoized (insn) >= 0)
11448 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
11451 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11453 static int
11454 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11455 rtx insn, int more)
11457 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
11458 if (USEFUL_INSN_P (insn))
11460 more--;
11461 if (!reload_completed && TUNE_MACC_CHAINS)
11462 mips_macc_chains_record (insn);
11463 vr4130_last_insn = insn;
11464 if (TUNE_74K)
11465 mips_74k_agen_init (insn);
11466 else if (TUNE_LOONGSON_2EF)
11467 mips_ls2_variable_issue (insn);
11470 /* Instructions of type 'multi' should all be split before
11471 the second scheduling pass. */
11472 gcc_assert (!reload_completed
11473 || recog_memoized (insn) < 0
11474 || get_attr_type (insn) != TYPE_MULTI);
11476 return more;
11479 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11480 return the first operand of the associated PREF or PREFX insn. */
11483 mips_prefetch_cookie (rtx write, rtx locality)
11485 /* store_streamed / load_streamed. */
11486 if (INTVAL (locality) <= 0)
11487 return GEN_INT (INTVAL (write) + 4);
11489 /* store / load. */
11490 if (INTVAL (locality) <= 2)
11491 return write;
11493 /* store_retained / load_retained. */
11494 return GEN_INT (INTVAL (write) + 6);
11497 /* Flags that indicate when a built-in function is available.
11499 BUILTIN_AVAIL_NON_MIPS16
11500 The function is available on the current target, but only
11501 in non-MIPS16 mode. */
11502 #define BUILTIN_AVAIL_NON_MIPS16 1
11504 /* Declare an availability predicate for built-in functions that
11505 require non-MIPS16 mode and also require COND to be true.
11506 NAME is the main part of the predicate's name. */
11507 #define AVAIL_NON_MIPS16(NAME, COND) \
11508 static unsigned int \
11509 mips_builtin_avail_##NAME (void) \
11511 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
11514 /* This structure describes a single built-in function. */
11515 struct mips_builtin_description {
11516 /* The code of the main .md file instruction. See mips_builtin_type
11517 for more information. */
11518 enum insn_code icode;
11520 /* The floating-point comparison code to use with ICODE, if any. */
11521 enum mips_fp_condition cond;
11523 /* The name of the built-in function. */
11524 const char *name;
11526 /* Specifies how the function should be expanded. */
11527 enum mips_builtin_type builtin_type;
11529 /* The function's prototype. */
11530 enum mips_function_type function_type;
11532 /* Whether the function is available. */
11533 unsigned int (*avail) (void);
11536 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
11537 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
11538 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
11539 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
11540 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
11541 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
11542 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
11543 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
11544 AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
11546 /* Construct a mips_builtin_description from the given arguments.
11548 INSN is the name of the associated instruction pattern, without the
11549 leading CODE_FOR_mips_.
11551 CODE is the floating-point condition code associated with the
11552 function. It can be 'f' if the field is not applicable.
11554 NAME is the name of the function itself, without the leading
11555 "__builtin_mips_".
11557 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
11559 AVAIL is the name of the availability predicate, without the leading
11560 mips_builtin_avail_. */
11561 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
11562 FUNCTION_TYPE, AVAIL) \
11563 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
11564 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
11565 mips_builtin_avail_ ## AVAIL }
11567 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
11568 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
11569 are as for MIPS_BUILTIN. */
11570 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
11571 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
11573 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
11574 are subject to mips_builtin_avail_<AVAIL>. */
11575 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
11576 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
11577 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
11578 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
11579 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
11581 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11582 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
11583 while the any and all forms are subject to mips_builtin_avail_mips3d. */
11584 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
11585 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
11586 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
11587 mips3d), \
11588 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
11589 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
11590 mips3d), \
11591 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
11592 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
11593 AVAIL), \
11594 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
11595 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
11596 AVAIL)
11598 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11599 are subject to mips_builtin_avail_mips3d. */
11600 #define CMP_4S_BUILTINS(INSN, COND) \
11601 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
11602 MIPS_BUILTIN_CMP_ANY, \
11603 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
11604 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
11605 MIPS_BUILTIN_CMP_ALL, \
11606 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
11608 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11609 instruction requires mips_builtin_avail_<AVAIL>. */
11610 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
11611 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
11612 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11613 AVAIL), \
11614 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
11615 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11616 AVAIL)
11618 /* Define all the built-in functions related to C.cond.fmt condition COND. */
11619 #define CMP_BUILTINS(COND) \
11620 MOVTF_BUILTINS (c, COND, paired_single), \
11621 MOVTF_BUILTINS (cabs, COND, mips3d), \
11622 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
11623 CMP_PS_BUILTINS (c, COND, paired_single), \
11624 CMP_PS_BUILTINS (cabs, COND, mips3d), \
11625 CMP_4S_BUILTINS (c, COND), \
11626 CMP_4S_BUILTINS (cabs, COND)
11628 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
11629 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
11630 and AVAIL are as for MIPS_BUILTIN. */
11631 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
11632 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
11633 FUNCTION_TYPE, AVAIL)
11635 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11636 branch instruction. AVAIL is as for MIPS_BUILTIN. */
11637 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
11638 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
11639 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
11641 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
11642 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11643 builtin_description field. */
11644 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
11645 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
11646 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
11648 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
11649 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
11650 builtin_description field. */
11651 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
11652 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
11654 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
11655 We use functions of this form when the same insn can be usefully applied
11656 to more than one datatype. */
11657 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
11658 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
11660 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11661 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11662 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11663 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11664 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11665 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11667 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
11668 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
11669 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
11670 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
11671 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
11672 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
11673 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
11674 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
11675 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
11676 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
11677 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
11678 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
11679 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
11680 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
11681 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
11682 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
11683 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
11684 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
11685 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
11686 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
11687 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
11688 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
11689 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
11690 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
11691 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
11692 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
11693 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
11694 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
11695 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
11697 static const struct mips_builtin_description mips_builtins[] = {
11698 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11699 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11700 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11701 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11702 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
11703 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
11704 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
11705 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
11707 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
11708 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11709 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11710 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11711 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
11713 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
11714 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
11715 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11716 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11717 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11718 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11720 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
11721 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
11722 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11723 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11724 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11725 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11727 MIPS_FP_CONDITIONS (CMP_BUILTINS),
11729 /* Built-in functions for the SB-1 processor. */
11730 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
11732 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
11733 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11734 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11735 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11736 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11737 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11738 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11739 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11740 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11741 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11742 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11743 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
11744 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
11745 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
11746 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
11747 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
11748 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
11749 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11750 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11751 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11752 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11753 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
11754 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
11755 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11756 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11757 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11758 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11759 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11760 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11761 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11762 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11763 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11764 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11765 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11766 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11767 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11768 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11769 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11770 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
11771 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11772 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11773 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11774 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11775 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11776 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
11777 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
11778 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
11779 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
11780 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11781 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11782 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11783 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11784 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11785 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11786 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11787 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11788 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11789 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11790 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11791 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11792 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
11793 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
11794 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
11795 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11796 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11797 BPOSGE_BUILTIN (32, dsp),
11799 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
11800 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
11801 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11802 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11803 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11804 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11805 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11806 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11807 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11808 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11809 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11810 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11811 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11812 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11813 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11814 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11815 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
11816 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11817 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11818 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11819 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11820 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11821 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
11822 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11823 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11824 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11825 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11826 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11827 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11828 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11829 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11830 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11831 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11832 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11833 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11835 /* Built-in functions for the DSP ASE (32-bit only). */
11836 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11837 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11838 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11839 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11840 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11841 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11842 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11843 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11844 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11845 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11846 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11847 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11848 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11849 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11850 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11851 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11852 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
11853 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11854 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11855 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
11856 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
11858 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
11859 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11860 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11861 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11862 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11863 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11864 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11865 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11866 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
11867 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
11868 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11869 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11870 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11871 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11872 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11873 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11875 /* Builtin functions for ST Microelectronics Loongson-2E/2F cores. */
11876 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
11877 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
11878 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
11879 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11880 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11881 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11882 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11883 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11884 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11885 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
11886 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
11887 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11888 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11889 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11890 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11891 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
11892 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11893 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11894 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11895 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
11896 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
11897 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11898 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11899 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11900 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11901 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11902 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11903 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11904 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11905 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11906 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11907 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11908 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11909 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11910 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11911 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11912 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11913 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
11914 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
11915 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11916 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11917 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11918 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11919 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11920 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11921 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11922 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11923 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
11924 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11925 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11926 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11927 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11928 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
11929 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
11930 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11931 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11932 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11933 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
11934 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11935 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
11936 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
11937 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
11938 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
11939 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11940 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11941 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11942 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11943 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11944 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11945 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11946 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11947 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11948 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11949 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11950 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11951 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11952 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11953 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11954 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11955 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11956 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11957 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
11958 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
11959 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11960 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11961 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11962 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11963 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11964 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11965 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11966 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11967 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11968 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11969 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11970 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11971 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11972 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11973 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11974 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11976 /* Sundry other built-in functions. */
11977 DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
11980 /* MODE is a vector mode whose elements have type TYPE. Return the type
11981 of the vector itself. */
11983 static tree
11984 mips_builtin_vector_type (tree type, enum machine_mode mode)
11986 static tree types[2 * (int) MAX_MACHINE_MODE];
11987 int mode_index;
11989 mode_index = (int) mode;
11991 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
11992 mode_index += MAX_MACHINE_MODE;
11994 if (types[mode_index] == NULL_TREE)
11995 types[mode_index] = build_vector_type_for_mode (type, mode);
11996 return types[mode_index];
11999 /* Return a type for 'const volatile void *'. */
12001 static tree
12002 mips_build_cvpointer_type (void)
12004 static tree cache;
12006 if (cache == NULL_TREE)
12007 cache = build_pointer_type (build_qualified_type
12008 (void_type_node,
12009 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
12010 return cache;
12013 /* Source-level argument types. */
12014 #define MIPS_ATYPE_VOID void_type_node
12015 #define MIPS_ATYPE_INT integer_type_node
12016 #define MIPS_ATYPE_POINTER ptr_type_node
12017 #define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()
12019 /* Standard mode-based argument types. */
12020 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
12021 #define MIPS_ATYPE_SI intSI_type_node
12022 #define MIPS_ATYPE_USI unsigned_intSI_type_node
12023 #define MIPS_ATYPE_DI intDI_type_node
12024 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
12025 #define MIPS_ATYPE_SF float_type_node
12026 #define MIPS_ATYPE_DF double_type_node
12028 /* Vector argument types. */
12029 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
12030 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
12031 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
12032 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
12033 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
12034 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
12035 #define MIPS_ATYPE_UV2SI \
12036 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
12037 #define MIPS_ATYPE_UV4HI \
12038 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
12039 #define MIPS_ATYPE_UV8QI \
12040 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
12042 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
12043 their associated MIPS_ATYPEs. */
12044 #define MIPS_FTYPE_ATYPES1(A, B) \
12045 MIPS_ATYPE_##A, MIPS_ATYPE_##B
12047 #define MIPS_FTYPE_ATYPES2(A, B, C) \
12048 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
12050 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
12051 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
12053 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
12054 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
12055 MIPS_ATYPE_##E
12057 /* Return the function type associated with function prototype TYPE. */
12059 static tree
12060 mips_build_function_type (enum mips_function_type type)
12062 static tree types[(int) MIPS_MAX_FTYPE_MAX];
12064 if (types[(int) type] == NULL_TREE)
12065 switch (type)
12067 #define DEF_MIPS_FTYPE(NUM, ARGS) \
12068 case MIPS_FTYPE_NAME##NUM ARGS: \
12069 types[(int) type] \
12070 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
12071 NULL_TREE); \
12072 break;
12073 #include "config/mips/mips-ftypes.def"
12074 #undef DEF_MIPS_FTYPE
12075 default:
12076 gcc_unreachable ();
12079 return types[(int) type];
12082 /* Implement TARGET_INIT_BUILTINS. */
12084 static void
12085 mips_init_builtins (void)
12087 const struct mips_builtin_description *d;
12088 unsigned int i;
12090 /* Iterate through all of the bdesc arrays, initializing all of the
12091 builtin functions. */
12092 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
12094 d = &mips_builtins[i];
12095 if (d->avail ())
12096 add_builtin_function (d->name,
12097 mips_build_function_type (d->function_type),
12098 i, BUILT_IN_MD, NULL, NULL);
12102 /* Take argument ARGNO from EXP's argument list and convert it into a
12103 form suitable for input operand OPNO of instruction ICODE. Return the
12104 value. */
12106 static rtx
12107 mips_prepare_builtin_arg (enum insn_code icode,
12108 unsigned int opno, tree exp, unsigned int argno)
12110 tree arg;
12111 rtx value;
12112 enum machine_mode mode;
12114 arg = CALL_EXPR_ARG (exp, argno);
12115 value = expand_normal (arg);
12116 mode = insn_data[icode].operand[opno].mode;
12117 if (!insn_data[icode].operand[opno].predicate (value, mode))
12119 /* We need to get the mode from ARG for two reasons:
12121 - to cope with address operands, where MODE is the mode of the
12122 memory, rather than of VALUE itself.
12124 - to cope with special predicates like pmode_register_operand,
12125 where MODE is VOIDmode. */
12126 value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
12128 /* Check the predicate again. */
12129 if (!insn_data[icode].operand[opno].predicate (value, mode))
12131 error ("invalid argument to built-in function");
12132 return const0_rtx;
12136 return value;
12139 /* Return an rtx suitable for output operand OP of instruction ICODE.
12140 If TARGET is non-null, try to use it where possible. */
12142 static rtx
12143 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12145 enum machine_mode mode;
12147 mode = insn_data[icode].operand[op].mode;
12148 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12149 target = gen_reg_rtx (mode);
12151 return target;
12154 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
12155 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
12156 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
12157 suggests a good place to put the result. */
12159 static rtx
12160 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12161 bool has_target_p)
12163 rtx ops[MAX_RECOG_OPERANDS];
12164 int opno, argno;
12166 /* Map any target to operand 0. */
12167 opno = 0;
12168 if (has_target_p)
12170 target = mips_prepare_builtin_target (icode, opno, target);
12171 ops[opno] = target;
12172 opno++;
12175 /* Map the arguments to the other operands. The n_operands value
12176 for an expander includes match_dups and match_scratches as well as
12177 match_operands, so n_operands is only an upper bound on the number
12178 of arguments to the expander function. */
12179 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
12180 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
12181 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
12183 switch (opno)
12185 case 2:
12186 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12187 break;
12189 case 3:
12190 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12191 break;
12193 case 4:
12194 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12195 break;
12197 default:
12198 gcc_unreachable ();
12200 return target;
12203 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
12204 function; TYPE says which. EXP is the CALL_EXPR that calls the
12205 function, ICODE is the instruction that should be used to compare
12206 the first two arguments, and COND is the condition it should test.
12207 TARGET, if nonnull, suggests a good place to put the result. */
12209 static rtx
12210 mips_expand_builtin_movtf (enum mips_builtin_type type,
12211 enum insn_code icode, enum mips_fp_condition cond,
12212 rtx target, tree exp)
12214 rtx cmp_result, op0, op1;
12216 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12217 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12218 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12219 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12221 icode = CODE_FOR_mips_cond_move_tf_ps;
12222 target = mips_prepare_builtin_target (icode, 0, target);
12223 if (type == MIPS_BUILTIN_MOVT)
12225 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12226 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12228 else
12230 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12231 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12233 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12234 return target;
12237 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12238 into TARGET otherwise. Return TARGET. */
12240 static rtx
12241 mips_builtin_branch_and_move (rtx condition, rtx target,
12242 rtx value_if_true, rtx value_if_false)
12244 rtx true_label, done_label;
12246 true_label = gen_label_rtx ();
12247 done_label = gen_label_rtx ();
12249 /* First assume that CONDITION is false. */
12250 mips_emit_move (target, value_if_false);
12252 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12253 emit_jump_insn (gen_condjump (condition, true_label));
12254 emit_jump_insn (gen_jump (done_label));
12255 emit_barrier ();
12257 /* Fix TARGET if CONDITION is true. */
12258 emit_label (true_label);
12259 mips_emit_move (target, value_if_true);
12261 emit_label (done_label);
12262 return target;
12265 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
12266 the CALL_EXPR that calls the function, ICODE is the code of the
12267 comparison instruction, and COND is the condition it should test.
12268 TARGET, if nonnull, suggests a good place to put the boolean result. */
12270 static rtx
12271 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12272 enum insn_code icode, enum mips_fp_condition cond,
12273 rtx target, tree exp)
12275 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
12276 int argno;
12278 if (target == 0 || GET_MODE (target) != SImode)
12279 target = gen_reg_rtx (SImode);
12281 /* The instruction should have a target operand, an operand for each
12282 argument, and an operand for COND. */
12283 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
12285 /* Prepare the operands to the comparison. */
12286 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12287 for (argno = 0; argno < call_expr_nargs (exp); argno++)
12288 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
12290 switch (insn_data[icode].n_operands)
12292 case 4:
12293 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
12294 GEN_INT (cond)));
12295 break;
12297 case 6:
12298 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
12299 args[2], args[3], GEN_INT (cond)));
12300 break;
12302 default:
12303 gcc_unreachable ();
12306 /* If the comparison sets more than one register, we define the result
12307 to be 0 if all registers are false and -1 if all registers are true.
12308 The value of the complete result is indeterminate otherwise. */
12309 switch (builtin_type)
12311 case MIPS_BUILTIN_CMP_ALL:
12312 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12313 return mips_builtin_branch_and_move (condition, target,
12314 const0_rtx, const1_rtx);
12316 case MIPS_BUILTIN_CMP_UPPER:
12317 case MIPS_BUILTIN_CMP_LOWER:
12318 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12319 condition = gen_single_cc (cmp_result, offset);
12320 return mips_builtin_branch_and_move (condition, target,
12321 const1_rtx, const0_rtx);
12323 default:
12324 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12325 return mips_builtin_branch_and_move (condition, target,
12326 const1_rtx, const0_rtx);
12330 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
12331 if nonnull, suggests a good place to put the boolean result. */
12333 static rtx
12334 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12336 rtx condition, cmp_result;
12337 int cmp_value;
12339 if (target == 0 || GET_MODE (target) != SImode)
12340 target = gen_reg_rtx (SImode);
12342 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12344 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12345 cmp_value = 32;
12346 else
12347 gcc_assert (0);
12349 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12350 return mips_builtin_branch_and_move (condition, target,
12351 const1_rtx, const0_rtx);
12354 /* Implement TARGET_EXPAND_BUILTIN. */
12356 static rtx
12357 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12358 enum machine_mode mode, int ignore)
12360 tree fndecl;
12361 unsigned int fcode, avail;
12362 const struct mips_builtin_description *d;
12364 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12365 fcode = DECL_FUNCTION_CODE (fndecl);
12366 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
12367 d = &mips_builtins[fcode];
12368 avail = d->avail ();
12369 gcc_assert (avail != 0);
12370 if (TARGET_MIPS16)
12372 error ("built-in function %qs not supported for MIPS16",
12373 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12374 return ignore ? const0_rtx : CONST0_RTX (mode);
12376 switch (d->builtin_type)
12378 case MIPS_BUILTIN_DIRECT:
12379 return mips_expand_builtin_direct (d->icode, target, exp, true);
12381 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12382 return mips_expand_builtin_direct (d->icode, target, exp, false);
12384 case MIPS_BUILTIN_MOVT:
12385 case MIPS_BUILTIN_MOVF:
12386 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
12387 d->cond, target, exp);
12389 case MIPS_BUILTIN_CMP_ANY:
12390 case MIPS_BUILTIN_CMP_ALL:
12391 case MIPS_BUILTIN_CMP_UPPER:
12392 case MIPS_BUILTIN_CMP_LOWER:
12393 case MIPS_BUILTIN_CMP_SINGLE:
12394 return mips_expand_builtin_compare (d->builtin_type, d->icode,
12395 d->cond, target, exp);
12397 case MIPS_BUILTIN_BPOSGE32:
12398 return mips_expand_builtin_bposge (d->builtin_type, target);
12400 gcc_unreachable ();
12403 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
12404 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
12405 struct mips16_constant {
12406 struct mips16_constant *next;
12407 rtx value;
12408 rtx label;
12409 enum machine_mode mode;
12412 /* Information about an incomplete MIPS16 constant pool. FIRST is the
12413 first constant, HIGHEST_ADDRESS is the highest address that the first
12414 byte of the pool can have, and INSN_ADDRESS is the current instruction
12415 address. */
12416 struct mips16_constant_pool {
12417 struct mips16_constant *first;
12418 int highest_address;
12419 int insn_address;
12422 /* Add constant VALUE to POOL and return its label. MODE is the
12423 value's mode (used for CONST_INTs, etc.). */
12425 static rtx
12426 mips16_add_constant (struct mips16_constant_pool *pool,
12427 rtx value, enum machine_mode mode)
12429 struct mips16_constant **p, *c;
12430 bool first_of_size_p;
12432 /* See whether the constant is already in the pool. If so, return the
12433 existing label, otherwise leave P pointing to the place where the
12434 constant should be added.
12436 Keep the pool sorted in increasing order of mode size so that we can
12437 reduce the number of alignments needed. */
12438 first_of_size_p = true;
12439 for (p = &pool->first; *p != 0; p = &(*p)->next)
12441 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
12442 return (*p)->label;
12443 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
12444 break;
12445 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
12446 first_of_size_p = false;
12449 /* In the worst case, the constant needed by the earliest instruction
12450 will end up at the end of the pool. The entire pool must then be
12451 accessible from that instruction.
12453 When adding the first constant, set the pool's highest address to
12454 the address of the first out-of-range byte. Adjust this address
12455 downwards each time a new constant is added. */
12456 if (pool->first == 0)
12457 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
12458 of the instruction with the lowest two bits clear. The base PC
12459 value for LDPC has the lowest three bits clear. Assume the worst
12460 case here; namely that the PC-relative instruction occupies the
12461 last 2 bytes in an aligned word. */
12462 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
12463 pool->highest_address -= GET_MODE_SIZE (mode);
12464 if (first_of_size_p)
12465 /* Take into account the worst possible padding due to alignment. */
12466 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
12468 /* Create a new entry. */
12469 c = XNEW (struct mips16_constant);
12470 c->value = value;
12471 c->mode = mode;
12472 c->label = gen_label_rtx ();
12473 c->next = *p;
12474 *p = c;
12476 return c->label;
12479 /* Output constant VALUE after instruction INSN and return the last
12480 instruction emitted. MODE is the mode of the constant. */
12482 static rtx
12483 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
12485 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
12487 rtx size = GEN_INT (GET_MODE_SIZE (mode));
12488 return emit_insn_after (gen_consttable_int (value, size), insn);
12491 if (SCALAR_FLOAT_MODE_P (mode))
12492 return emit_insn_after (gen_consttable_float (value), insn);
12494 if (VECTOR_MODE_P (mode))
12496 int i;
12498 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
12499 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
12500 CONST_VECTOR_ELT (value, i), insn);
12501 return insn;
12504 gcc_unreachable ();
12507 /* Dump out the constants in CONSTANTS after INSN. */
12509 static void
12510 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
12512 struct mips16_constant *c, *next;
12513 int align;
12515 align = 0;
12516 for (c = constants; c != NULL; c = next)
12518 /* If necessary, increase the alignment of PC. */
12519 if (align < GET_MODE_SIZE (c->mode))
12521 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
12522 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
12524 align = GET_MODE_SIZE (c->mode);
12526 insn = emit_label_after (c->label, insn);
12527 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
12529 next = c->next;
12530 free (c);
12533 emit_barrier_after (insn);
12536 /* Return the length of instruction INSN. */
12538 static int
12539 mips16_insn_length (rtx insn)
12541 if (JUMP_P (insn))
12543 rtx body = PATTERN (insn);
12544 if (GET_CODE (body) == ADDR_VEC)
12545 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
12546 if (GET_CODE (body) == ADDR_DIFF_VEC)
12547 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
12549 return get_attr_length (insn);
12552 /* If *X is a symbolic constant that refers to the constant pool, add
12553 the constant to POOL and rewrite *X to use the constant's label. */
12555 static void
12556 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
12558 rtx base, offset, label;
12560 split_const (*x, &base, &offset);
12561 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
12563 label = mips16_add_constant (pool, get_pool_constant (base),
12564 get_pool_mode (base));
12565 base = gen_rtx_LABEL_REF (Pmode, label);
12566 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
12570 /* This structure is used to communicate with mips16_rewrite_pool_refs.
12571 INSN is the instruction we're rewriting and POOL points to the current
12572 constant pool. */
12573 struct mips16_rewrite_pool_refs_info {
12574 rtx insn;
12575 struct mips16_constant_pool *pool;
12578 /* Rewrite *X so that constant pool references refer to the constant's
12579 label instead. DATA points to a mips16_rewrite_pool_refs_info
12580 structure. */
12582 static int
12583 mips16_rewrite_pool_refs (rtx *x, void *data)
12585 struct mips16_rewrite_pool_refs_info *info =
12586 (struct mips16_rewrite_pool_refs_info *) data;
12588 if (force_to_mem_operand (*x, Pmode))
12590 rtx mem = force_const_mem (GET_MODE (*x), *x);
12591 validate_change (info->insn, x, mem, false);
12594 if (MEM_P (*x))
12596 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
12597 return -1;
12600 if (TARGET_MIPS16_TEXT_LOADS)
12601 mips16_rewrite_pool_constant (info->pool, x);
12603 return GET_CODE (*x) == CONST ? -1 : 0;
12606 /* Build MIPS16 constant pools. */
12608 static void
12609 mips16_lay_out_constants (void)
12611 struct mips16_constant_pool pool;
12612 struct mips16_rewrite_pool_refs_info info;
12613 rtx insn, barrier;
12615 if (!TARGET_MIPS16_PCREL_LOADS)
12616 return;
12618 split_all_insns_noflow ();
12619 barrier = 0;
12620 memset (&pool, 0, sizeof (pool));
12621 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12623 /* Rewrite constant pool references in INSN. */
12624 if (INSN_P (insn))
12626 info.insn = insn;
12627 info.pool = &pool;
12628 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
12631 pool.insn_address += mips16_insn_length (insn);
12633 if (pool.first != NULL)
12635 /* If there are no natural barriers between the first user of
12636 the pool and the highest acceptable address, we'll need to
12637 create a new instruction to jump around the constant pool.
12638 In the worst case, this instruction will be 4 bytes long.
12640 If it's too late to do this transformation after INSN,
12641 do it immediately before INSN. */
12642 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
12644 rtx label, jump;
12646 label = gen_label_rtx ();
12648 jump = emit_jump_insn_before (gen_jump (label), insn);
12649 JUMP_LABEL (jump) = label;
12650 LABEL_NUSES (label) = 1;
12651 barrier = emit_barrier_after (jump);
12653 emit_label_after (label, barrier);
12654 pool.insn_address += 4;
12657 /* See whether the constant pool is now out of range of the first
12658 user. If so, output the constants after the previous barrier.
12659 Note that any instructions between BARRIER and INSN (inclusive)
12660 will use negative offsets to refer to the pool. */
12661 if (pool.insn_address > pool.highest_address)
12663 mips16_emit_constants (pool.first, barrier);
12664 pool.first = NULL;
12665 barrier = 0;
12667 else if (BARRIER_P (insn))
12668 barrier = insn;
12671 mips16_emit_constants (pool.first, get_last_insn ());
12674 /* Return true if it is worth r10k_simplify_address's while replacing
12675 an address with X. We are looking for constants, and for addresses
12676 at a known offset from the incoming stack pointer. */
12678 static bool
12679 r10k_simplified_address_p (rtx x)
12681 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
12682 x = XEXP (x, 0);
12683 return x == virtual_incoming_args_rtx || CONSTANT_P (x);
12686 /* X is an expression that appears in INSN. Try to use the UD chains
12687 to simplify it, returning the simplified form on success and the
12688 original form otherwise. Replace the incoming value of $sp with
12689 virtual_incoming_args_rtx (which should never occur in X otherwise). */
12691 static rtx
12692 r10k_simplify_address (rtx x, rtx insn)
12694 rtx newx, op0, op1, set, def_insn, note;
12695 df_ref use, def;
12696 struct df_link *defs;
12698 newx = NULL_RTX;
12699 if (UNARY_P (x))
12701 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12702 if (op0 != XEXP (x, 0))
12703 newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
12704 op0, GET_MODE (XEXP (x, 0)));
12706 else if (BINARY_P (x))
12708 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12709 op1 = r10k_simplify_address (XEXP (x, 1), insn);
12710 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
12711 newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
12713 else if (GET_CODE (x) == LO_SUM)
12715 /* LO_SUMs can be offset from HIGHs, if we know they won't
12716 overflow. See mips_classify_address for the rationale behind
12717 the lax check. */
12718 op0 = r10k_simplify_address (XEXP (x, 0), insn);
12719 if (GET_CODE (op0) == HIGH)
12720 newx = XEXP (x, 1);
12722 else if (REG_P (x))
12724 /* Uses are recorded by regno_reg_rtx, not X itself. */
12725 use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
12726 gcc_assert (use);
12727 defs = DF_REF_CHAIN (use);
12729 /* Require a single definition. */
12730 if (defs && defs->next == NULL)
12732 def = defs->ref;
12733 if (DF_REF_IS_ARTIFICIAL (def))
12735 /* Replace the incoming value of $sp with
12736 virtual_incoming_args_rtx. */
12737 if (x == stack_pointer_rtx
12738 && DF_REF_BB (def) == ENTRY_BLOCK_PTR)
12739 newx = virtual_incoming_args_rtx;
12741 else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
12742 DF_REF_BB (def)))
12744 /* Make sure that DEF_INSN is a single set of REG. */
12745 def_insn = DF_REF_INSN (def);
12746 if (NONJUMP_INSN_P (def_insn))
12748 set = single_set (def_insn);
12749 if (set && rtx_equal_p (SET_DEST (set), x))
12751 /* Prefer to use notes, since the def-use chains
12752 are often shorter. */
12753 note = find_reg_equal_equiv_note (def_insn);
12754 if (note)
12755 newx = XEXP (note, 0);
12756 else
12757 newx = SET_SRC (set);
12758 newx = r10k_simplify_address (newx, def_insn);
12764 if (newx && r10k_simplified_address_p (newx))
12765 return newx;
12766 return x;
12769 /* Return true if ADDRESS is known to be an uncached address
12770 on R10K systems. */
12772 static bool
12773 r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
12775 unsigned HOST_WIDE_INT upper;
12777 /* Check for KSEG1. */
12778 if (address + 0x60000000 < 0x20000000)
12779 return true;
12781 /* Check for uncached XKPHYS addresses. */
12782 if (Pmode == DImode)
12784 upper = (address >> 40) & 0xf9ffff;
12785 if (upper == 0x900000 || upper == 0xb80000)
12786 return true;
12788 return false;
12791 /* Return true if we can prove that an access to address X in instruction
12792 INSN would be safe from R10K speculation. This X is a general
12793 expression; it might not be a legitimate address. */
12795 static bool
12796 r10k_safe_address_p (rtx x, rtx insn)
12798 rtx base, offset;
12799 HOST_WIDE_INT offset_val;
12801 x = r10k_simplify_address (x, insn);
12803 /* Check for references to the stack frame. It doesn't really matter
12804 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
12805 allows us to assume that accesses to any part of the eventual frame
12806 is safe from speculation at any point in the function. */
12807 mips_split_plus (x, &base, &offset_val);
12808 if (base == virtual_incoming_args_rtx
12809 && offset_val >= -cfun->machine->frame.total_size
12810 && offset_val < cfun->machine->frame.args_size)
12811 return true;
12813 /* Check for uncached addresses. */
12814 if (CONST_INT_P (x))
12815 return r10k_uncached_address_p (INTVAL (x));
12817 /* Check for accesses to a static object. */
12818 split_const (x, &base, &offset);
12819 return offset_within_block_p (base, INTVAL (offset));
12822 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
12823 an in-range access to an automatic variable, or to an object with
12824 a link-time-constant address. */
12826 static bool
12827 r10k_safe_mem_expr_p (tree expr, rtx offset)
12829 if (expr == NULL_TREE
12830 || offset == NULL_RTX
12831 || !CONST_INT_P (offset)
12832 || INTVAL (offset) < 0
12833 || INTVAL (offset) >= int_size_in_bytes (TREE_TYPE (expr)))
12834 return false;
12836 while (TREE_CODE (expr) == COMPONENT_REF)
12838 expr = TREE_OPERAND (expr, 0);
12839 if (expr == NULL_TREE)
12840 return false;
12843 return DECL_P (expr);
12846 /* A for_each_rtx callback for which DATA points to the instruction
12847 containing *X. Stop the search if we find a MEM that is not safe
12848 from R10K speculation. */
12850 static int
12851 r10k_needs_protection_p_1 (rtx *loc, void *data)
12853 rtx mem;
12855 mem = *loc;
12856 if (!MEM_P (mem))
12857 return 0;
12859 if (r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
12860 return -1;
12862 if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
12863 return -1;
12865 return 1;
12868 /* A note_stores callback for which DATA points to an instruction pointer.
12869 If *DATA is nonnull, make it null if it X contains a MEM that is not
12870 safe from R10K speculation. */
12872 static void
12873 r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
12874 void *data)
12876 rtx *insn_ptr;
12878 insn_ptr = (rtx *) data;
12879 if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
12880 *insn_ptr = NULL_RTX;
12883 /* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
12884 Return nonzero if the call is not to a declared function. */
12886 static int
12887 r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
12889 rtx x;
12891 x = *loc;
12892 if (!MEM_P (x))
12893 return 0;
12895 x = XEXP (x, 0);
12896 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
12897 return -1;
12899 return 1;
12902 /* Return true if instruction INSN needs to be protected by an R10K
12903 cache barrier. */
12905 static bool
12906 r10k_needs_protection_p (rtx insn)
12908 if (CALL_P (insn))
12909 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
12911 if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
12913 note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
12914 return insn == NULL_RTX;
12917 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
12920 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
12921 edge is unconditional. */
12923 static bool
12924 r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
12926 edge_iterator ei;
12927 edge e;
12929 FOR_EACH_EDGE (e, ei, bb->preds)
12930 if (!single_succ_p (e->src)
12931 || !TEST_BIT (protected_bbs, e->src->index)
12932 || (e->flags & EDGE_COMPLEX) != 0)
12933 return false;
12934 return true;
12937 /* Implement -mr10k-cache-barrier= for the current function. */
12939 static void
12940 r10k_insert_cache_barriers (void)
12942 int *rev_post_order;
12943 unsigned int i, n;
12944 basic_block bb;
12945 sbitmap protected_bbs;
12946 rtx insn, end, unprotected_region;
12948 if (TARGET_MIPS16)
12950 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
12951 return;
12954 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. */
12955 compute_bb_for_insn ();
12957 /* Create def-use chains. */
12958 df_set_flags (DF_EQ_NOTES);
12959 df_chain_add_problem (DF_UD_CHAIN);
12960 df_analyze ();
12962 /* Calculate dominators. */
12963 calculate_dominance_info (CDI_DOMINATORS);
12965 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
12966 X is protected by a cache barrier. */
12967 protected_bbs = sbitmap_alloc (last_basic_block);
12968 sbitmap_zero (protected_bbs);
12970 /* Iterate over the basic blocks in reverse post-order. */
12971 rev_post_order = XNEWVEC (int, last_basic_block);
12972 n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
12973 for (i = 0; i < n; i++)
12975 bb = BASIC_BLOCK (rev_post_order[i]);
12977 /* If this block is only reached by unconditional edges, and if the
12978 source of every edge is protected, the beginning of the block is
12979 also protected. */
12980 if (r10k_protected_bb_p (bb, protected_bbs))
12981 unprotected_region = NULL_RTX;
12982 else
12983 unprotected_region = pc_rtx;
12984 end = NEXT_INSN (BB_END (bb));
12986 /* UNPROTECTED_REGION is:
12988 - null if we are processing a protected region,
12989 - pc_rtx if we are processing an unprotected region but have
12990 not yet found the first instruction in it
12991 - the first instruction in an unprotected region otherwise. */
12992 for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
12994 if (unprotected_region && INSN_P (insn))
12996 if (recog_memoized (insn) == CODE_FOR_mips_cache)
12997 /* This CACHE instruction protects the following code. */
12998 unprotected_region = NULL_RTX;
12999 else
13001 /* See if INSN is the first instruction in this
13002 unprotected region. */
13003 if (unprotected_region == pc_rtx)
13004 unprotected_region = insn;
13006 /* See if INSN needs to be protected. If so,
13007 we must insert a cache barrier somewhere between
13008 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
13009 clear which position is better performance-wise,
13010 but as a tie-breaker, we assume that it is better
13011 to allow delay slots to be back-filled where
13012 possible, and that it is better not to insert
13013 barriers in the middle of already-scheduled code.
13014 We therefore insert the barrier at the beginning
13015 of the region. */
13016 if (r10k_needs_protection_p (insn))
13018 emit_insn_before (gen_r10k_cache_barrier (),
13019 unprotected_region);
13020 unprotected_region = NULL_RTX;
13025 if (CALL_P (insn))
13026 /* The called function is not required to protect the exit path.
13027 The code that follows a call is therefore unprotected. */
13028 unprotected_region = pc_rtx;
13031 /* Record whether the end of this block is protected. */
13032 if (unprotected_region == NULL_RTX)
13033 SET_BIT (protected_bbs, bb->index);
13035 XDELETEVEC (rev_post_order);
13037 sbitmap_free (protected_bbs);
13039 free_dominance_info (CDI_DOMINATORS);
13041 df_finish_pass (false);
13043 free_bb_for_insn ();
13046 /* A temporary variable used by for_each_rtx callbacks, etc. */
13047 static rtx mips_sim_insn;
13049 /* A structure representing the state of the processor pipeline.
13050 Used by the mips_sim_* family of functions. */
13051 struct mips_sim {
13052 /* The maximum number of instructions that can be issued in a cycle.
13053 (Caches mips_issue_rate.) */
13054 unsigned int issue_rate;
13056 /* The current simulation time. */
13057 unsigned int time;
13059 /* How many more instructions can be issued in the current cycle. */
13060 unsigned int insns_left;
13062 /* LAST_SET[X].INSN is the last instruction to set register X.
13063 LAST_SET[X].TIME is the time at which that instruction was issued.
13064 INSN is null if no instruction has yet set register X. */
13065 struct {
13066 rtx insn;
13067 unsigned int time;
13068 } last_set[FIRST_PSEUDO_REGISTER];
13070 /* The pipeline's current DFA state. */
13071 state_t dfa_state;
13074 /* Reset STATE to the initial simulation state. */
13076 static void
13077 mips_sim_reset (struct mips_sim *state)
13079 state->time = 0;
13080 state->insns_left = state->issue_rate;
13081 memset (&state->last_set, 0, sizeof (state->last_set));
13082 state_reset (state->dfa_state);
13085 /* Initialize STATE before its first use. DFA_STATE points to an
13086 allocated but uninitialized DFA state. */
13088 static void
13089 mips_sim_init (struct mips_sim *state, state_t dfa_state)
13091 state->issue_rate = mips_issue_rate ();
13092 state->dfa_state = dfa_state;
13093 mips_sim_reset (state);
13096 /* Advance STATE by one clock cycle. */
13098 static void
13099 mips_sim_next_cycle (struct mips_sim *state)
13101 state->time++;
13102 state->insns_left = state->issue_rate;
13103 state_transition (state->dfa_state, 0);
13106 /* Advance simulation state STATE until instruction INSN can read
13107 register REG. */
13109 static void
13110 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
13112 unsigned int regno, end_regno;
13114 end_regno = END_REGNO (reg);
13115 for (regno = REGNO (reg); regno < end_regno; regno++)
13116 if (state->last_set[regno].insn != 0)
13118 unsigned int t;
13120 t = (state->last_set[regno].time
13121 + insn_latency (state->last_set[regno].insn, insn));
13122 while (state->time < t)
13123 mips_sim_next_cycle (state);
13127 /* A for_each_rtx callback. If *X is a register, advance simulation state
13128 DATA until mips_sim_insn can read the register's value. */
13130 static int
13131 mips_sim_wait_regs_2 (rtx *x, void *data)
13133 if (REG_P (*x))
13134 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
13135 return 0;
13138 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
13140 static void
13141 mips_sim_wait_regs_1 (rtx *x, void *data)
13143 for_each_rtx (x, mips_sim_wait_regs_2, data);
13146 /* Advance simulation state STATE until all of INSN's register
13147 dependencies are satisfied. */
13149 static void
13150 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
13152 mips_sim_insn = insn;
13153 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
13156 /* Advance simulation state STATE until the units required by
13157 instruction INSN are available. */
13159 static void
13160 mips_sim_wait_units (struct mips_sim *state, rtx insn)
13162 state_t tmp_state;
13164 tmp_state = alloca (state_size ());
13165 while (state->insns_left == 0
13166 || (memcpy (tmp_state, state->dfa_state, state_size ()),
13167 state_transition (tmp_state, insn) >= 0))
13168 mips_sim_next_cycle (state);
13171 /* Advance simulation state STATE until INSN is ready to issue. */
13173 static void
13174 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
13176 mips_sim_wait_regs (state, insn);
13177 mips_sim_wait_units (state, insn);
13180 /* mips_sim_insn has just set X. Update the LAST_SET array
13181 in simulation state DATA. */
13183 static void
13184 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
13186 struct mips_sim *state;
13188 state = (struct mips_sim *) data;
13189 if (REG_P (x))
13191 unsigned int regno, end_regno;
13193 end_regno = END_REGNO (x);
13194 for (regno = REGNO (x); regno < end_regno; regno++)
13196 state->last_set[regno].insn = mips_sim_insn;
13197 state->last_set[regno].time = state->time;
13202 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
13203 can issue immediately (i.e., that mips_sim_wait_insn has already
13204 been called). */
13206 static void
13207 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
13209 state_transition (state->dfa_state, insn);
13210 state->insns_left--;
13212 mips_sim_insn = insn;
13213 note_stores (PATTERN (insn), mips_sim_record_set, state);
13216 /* Simulate issuing a NOP in state STATE. */
13218 static void
13219 mips_sim_issue_nop (struct mips_sim *state)
13221 if (state->insns_left == 0)
13222 mips_sim_next_cycle (state);
13223 state->insns_left--;
13226 /* Update simulation state STATE so that it's ready to accept the instruction
13227 after INSN. INSN should be part of the main rtl chain, not a member of a
13228 SEQUENCE. */
13230 static void
13231 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
13233 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
13234 if (JUMP_P (insn))
13235 mips_sim_issue_nop (state);
13237 switch (GET_CODE (SEQ_BEGIN (insn)))
13239 case CODE_LABEL:
13240 case CALL_INSN:
13241 /* We can't predict the processor state after a call or label. */
13242 mips_sim_reset (state);
13243 break;
13245 case JUMP_INSN:
13246 /* The delay slots of branch likely instructions are only executed
13247 when the branch is taken. Therefore, if the caller has simulated
13248 the delay slot instruction, STATE does not really reflect the state
13249 of the pipeline for the instruction after the delay slot. Also,
13250 branch likely instructions tend to incur a penalty when not taken,
13251 so there will probably be an extra delay between the branch and
13252 the instruction after the delay slot. */
13253 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
13254 mips_sim_reset (state);
13255 break;
13257 default:
13258 break;
13262 /* The VR4130 pipeline issues aligned pairs of instructions together,
13263 but it stalls the second instruction if it depends on the first.
13264 In order to cut down the amount of logic required, this dependence
13265 check is not based on a full instruction decode. Instead, any non-SPECIAL
13266 instruction is assumed to modify the register specified by bits 20-16
13267 (which is usually the "rt" field).
13269 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
13270 input, so we can end up with a false dependence between the branch
13271 and its delay slot. If this situation occurs in instruction INSN,
13272 try to avoid it by swapping rs and rt. */
13274 static void
13275 vr4130_avoid_branch_rt_conflict (rtx insn)
13277 rtx first, second;
13279 first = SEQ_BEGIN (insn);
13280 second = SEQ_END (insn);
13281 if (JUMP_P (first)
13282 && NONJUMP_INSN_P (second)
13283 && GET_CODE (PATTERN (first)) == SET
13284 && GET_CODE (SET_DEST (PATTERN (first))) == PC
13285 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
13287 /* Check for the right kind of condition. */
13288 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
13289 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
13290 && REG_P (XEXP (cond, 0))
13291 && REG_P (XEXP (cond, 1))
13292 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
13293 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
13295 /* SECOND mentions the rt register but not the rs register. */
13296 rtx tmp = XEXP (cond, 0);
13297 XEXP (cond, 0) = XEXP (cond, 1);
13298 XEXP (cond, 1) = tmp;
13303 /* Implement -mvr4130-align. Go through each basic block and simulate the
13304 processor pipeline. If we find that a pair of instructions could execute
13305 in parallel, and the first of those instructions is not 8-byte aligned,
13306 insert a nop to make it aligned. */
13308 static void
13309 vr4130_align_insns (void)
13311 struct mips_sim state;
13312 rtx insn, subinsn, last, last2, next;
13313 bool aligned_p;
13315 dfa_start ();
13317 /* LAST is the last instruction before INSN to have a nonzero length.
13318 LAST2 is the last such instruction before LAST. */
13319 last = 0;
13320 last2 = 0;
13322 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
13323 aligned_p = true;
13325 mips_sim_init (&state, alloca (state_size ()));
13326 for (insn = get_insns (); insn != 0; insn = next)
13328 unsigned int length;
13330 next = NEXT_INSN (insn);
13332 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
13333 This isn't really related to the alignment pass, but we do it on
13334 the fly to avoid a separate instruction walk. */
13335 vr4130_avoid_branch_rt_conflict (insn);
13337 if (USEFUL_INSN_P (insn))
13338 FOR_EACH_SUBINSN (subinsn, insn)
13340 mips_sim_wait_insn (&state, subinsn);
13342 /* If we want this instruction to issue in parallel with the
13343 previous one, make sure that the previous instruction is
13344 aligned. There are several reasons why this isn't worthwhile
13345 when the second instruction is a call:
13347 - Calls are less likely to be performance critical,
13348 - There's a good chance that the delay slot can execute
13349 in parallel with the call.
13350 - The return address would then be unaligned.
13352 In general, if we're going to insert a nop between instructions
13353 X and Y, it's better to insert it immediately after X. That
13354 way, if the nop makes Y aligned, it will also align any labels
13355 between X and Y. */
13356 if (state.insns_left != state.issue_rate
13357 && !CALL_P (subinsn))
13359 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
13361 /* SUBINSN is the first instruction in INSN and INSN is
13362 aligned. We want to align the previous instruction
13363 instead, so insert a nop between LAST2 and LAST.
13365 Note that LAST could be either a single instruction
13366 or a branch with a delay slot. In the latter case,
13367 LAST, like INSN, is already aligned, but the delay
13368 slot must have some extra delay that stops it from
13369 issuing at the same time as the branch. We therefore
13370 insert a nop before the branch in order to align its
13371 delay slot. */
13372 emit_insn_after (gen_nop (), last2);
13373 aligned_p = false;
13375 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
13377 /* SUBINSN is the delay slot of INSN, but INSN is
13378 currently unaligned. Insert a nop between
13379 LAST and INSN to align it. */
13380 emit_insn_after (gen_nop (), last);
13381 aligned_p = true;
13384 mips_sim_issue_insn (&state, subinsn);
13386 mips_sim_finish_insn (&state, insn);
13388 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
13389 length = get_attr_length (insn);
13390 if (length > 0)
13392 /* If the instruction is an asm statement or multi-instruction
13393 mips.md patern, the length is only an estimate. Insert an
13394 8 byte alignment after it so that the following instructions
13395 can be handled correctly. */
13396 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
13397 && (recog_memoized (insn) < 0 || length >= 8))
13399 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
13400 next = NEXT_INSN (next);
13401 mips_sim_next_cycle (&state);
13402 aligned_p = true;
13404 else if (length & 4)
13405 aligned_p = !aligned_p;
13406 last2 = last;
13407 last = insn;
13410 /* See whether INSN is an aligned label. */
13411 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
13412 aligned_p = true;
13414 dfa_finish ();
13417 /* This structure records that the current function has a LO_SUM
13418 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
13419 the largest offset applied to BASE by all such LO_SUMs. */
13420 struct mips_lo_sum_offset {
13421 rtx base;
13422 HOST_WIDE_INT offset;
13425 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
13427 static hashval_t
13428 mips_hash_base (rtx base)
13430 int do_not_record_p;
13432 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
13435 /* Hash-table callbacks for mips_lo_sum_offsets. */
13437 static hashval_t
13438 mips_lo_sum_offset_hash (const void *entry)
13440 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
13443 static int
13444 mips_lo_sum_offset_eq (const void *entry, const void *value)
13446 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
13447 (const_rtx) value);
13450 /* Look up symbolic constant X in HTAB, which is a hash table of
13451 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
13452 paired with a recorded LO_SUM, otherwise record X in the table. */
13454 static bool
13455 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
13457 rtx base, offset;
13458 void **slot;
13459 struct mips_lo_sum_offset *entry;
13461 /* Split X into a base and offset. */
13462 split_const (x, &base, &offset);
13463 if (UNSPEC_ADDRESS_P (base))
13464 base = UNSPEC_ADDRESS (base);
13466 /* Look up the base in the hash table. */
13467 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
13468 if (slot == NULL)
13469 return false;
13471 entry = (struct mips_lo_sum_offset *) *slot;
13472 if (option == INSERT)
13474 if (entry == NULL)
13476 entry = XNEW (struct mips_lo_sum_offset);
13477 entry->base = base;
13478 entry->offset = INTVAL (offset);
13479 *slot = entry;
13481 else
13483 if (INTVAL (offset) > entry->offset)
13484 entry->offset = INTVAL (offset);
13487 return INTVAL (offset) <= entry->offset;
13490 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
13491 Record every LO_SUM in *LOC. */
13493 static int
13494 mips_record_lo_sum (rtx *loc, void *data)
13496 if (GET_CODE (*loc) == LO_SUM)
13497 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
13498 return 0;
13501 /* Return true if INSN is a SET of an orphaned high-part relocation.
13502 HTAB is a hash table of mips_lo_sum_offsets that describes all the
13503 LO_SUMs in the current function. */
13505 static bool
13506 mips_orphaned_high_part_p (htab_t htab, rtx insn)
13508 enum mips_symbol_type type;
13509 rtx x, set;
13511 set = single_set (insn);
13512 if (set)
13514 /* Check for %his. */
13515 x = SET_SRC (set);
13516 if (GET_CODE (x) == HIGH
13517 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
13518 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
13520 /* Check for local %gots (and %got_pages, which is redundant but OK). */
13521 if (GET_CODE (x) == UNSPEC
13522 && XINT (x, 1) == UNSPEC_LOAD_GOT
13523 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
13524 SYMBOL_CONTEXT_LEA, &type)
13525 && type == SYMBOL_GOTOFF_PAGE)
13526 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
13528 return false;
13531 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
13532 INSN and a previous instruction, avoid it by inserting nops after
13533 instruction AFTER.
13535 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
13536 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
13537 before using the value of that register. *HILO_DELAY counts the
13538 number of instructions since the last hilo hazard (that is,
13539 the number of instructions since the last MFLO or MFHI).
13541 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
13542 for the next instruction.
13544 LO_REG is an rtx for the LO register, used in dependence checking. */
13546 static void
13547 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
13548 rtx *delayed_reg, rtx lo_reg)
13550 rtx pattern, set;
13551 int nops, ninsns;
13553 pattern = PATTERN (insn);
13555 /* Do not put the whole function in .set noreorder if it contains
13556 an asm statement. We don't know whether there will be hazards
13557 between the asm statement and the gcc-generated code. */
13558 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
13559 cfun->machine->all_noreorder_p = false;
13561 /* Ignore zero-length instructions (barriers and the like). */
13562 ninsns = get_attr_length (insn) / 4;
13563 if (ninsns == 0)
13564 return;
13566 /* Work out how many nops are needed. Note that we only care about
13567 registers that are explicitly mentioned in the instruction's pattern.
13568 It doesn't matter that calls use the argument registers or that they
13569 clobber hi and lo. */
13570 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
13571 nops = 2 - *hilo_delay;
13572 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
13573 nops = 1;
13574 else
13575 nops = 0;
13577 /* Insert the nops between this instruction and the previous one.
13578 Each new nop takes us further from the last hilo hazard. */
13579 *hilo_delay += nops;
13580 while (nops-- > 0)
13581 emit_insn_after (gen_hazard_nop (), after);
13583 /* Set up the state for the next instruction. */
13584 *hilo_delay += ninsns;
13585 *delayed_reg = 0;
13586 if (INSN_CODE (insn) >= 0)
13587 switch (get_attr_hazard (insn))
13589 case HAZARD_NONE:
13590 break;
13592 case HAZARD_HILO:
13593 *hilo_delay = 0;
13594 break;
13596 case HAZARD_DELAY:
13597 set = single_set (insn);
13598 gcc_assert (set);
13599 *delayed_reg = SET_DEST (set);
13600 break;
13604 /* Go through the instruction stream and insert nops where necessary.
13605 Also delete any high-part relocations whose partnering low parts
13606 are now all dead. See if the whole function can then be put into
13607 .set noreorder and .set nomacro. */
13609 static void
13610 mips_reorg_process_insns (void)
13612 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
13613 int hilo_delay;
13614 htab_t htab;
13616 /* Force all instructions to be split into their final form. */
13617 split_all_insns_noflow ();
13619 /* Recalculate instruction lengths without taking nops into account. */
13620 cfun->machine->ignore_hazard_length_p = true;
13621 shorten_branches (get_insns ());
13623 cfun->machine->all_noreorder_p = true;
13625 /* We don't track MIPS16 PC-relative offsets closely enough to make
13626 a good job of "set .noreorder" code in MIPS16 mode. */
13627 if (TARGET_MIPS16)
13628 cfun->machine->all_noreorder_p = false;
13630 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
13631 if (!TARGET_EXPLICIT_RELOCS)
13632 cfun->machine->all_noreorder_p = false;
13634 /* Profiled functions can't be all noreorder because the profiler
13635 support uses assembler macros. */
13636 if (crtl->profile)
13637 cfun->machine->all_noreorder_p = false;
13639 /* Code compiled with -mfix-vr4120 can't be all noreorder because
13640 we rely on the assembler to work around some errata. */
13641 if (TARGET_FIX_VR4120)
13642 cfun->machine->all_noreorder_p = false;
13644 /* The same is true for -mfix-vr4130 if we might generate MFLO or
13645 MFHI instructions. Note that we avoid using MFLO and MFHI if
13646 the VR4130 MACC and DMACC instructions are available instead;
13647 see the *mfhilo_{si,di}_macc patterns. */
13648 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
13649 cfun->machine->all_noreorder_p = false;
13651 htab = htab_create (37, mips_lo_sum_offset_hash,
13652 mips_lo_sum_offset_eq, free);
13654 /* Make a first pass over the instructions, recording all the LO_SUMs. */
13655 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
13656 FOR_EACH_SUBINSN (subinsn, insn)
13657 if (INSN_P (subinsn))
13658 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
13660 last_insn = 0;
13661 hilo_delay = 2;
13662 delayed_reg = 0;
13663 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
13665 /* Make a second pass over the instructions. Delete orphaned
13666 high-part relocations or turn them into NOPs. Avoid hazards
13667 by inserting NOPs. */
13668 for (insn = get_insns (); insn != 0; insn = next_insn)
13670 next_insn = NEXT_INSN (insn);
13671 if (INSN_P (insn))
13673 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
13675 /* If we find an orphaned high-part relocation in a delay
13676 slot, it's easier to turn that instruction into a NOP than
13677 to delete it. The delay slot will be a NOP either way. */
13678 FOR_EACH_SUBINSN (subinsn, insn)
13679 if (INSN_P (subinsn))
13681 if (mips_orphaned_high_part_p (htab, subinsn))
13683 PATTERN (subinsn) = gen_nop ();
13684 INSN_CODE (subinsn) = CODE_FOR_nop;
13686 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
13687 &delayed_reg, lo_reg);
13689 last_insn = insn;
13691 else
13693 /* INSN is a single instruction. Delete it if it's an
13694 orphaned high-part relocation. */
13695 if (mips_orphaned_high_part_p (htab, insn))
13696 delete_insn (insn);
13697 /* Also delete cache barriers if the last instruction
13698 was an annulled branch. INSN will not be speculatively
13699 executed. */
13700 else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
13701 && last_insn
13702 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
13703 delete_insn (insn);
13704 else
13706 mips_avoid_hazard (last_insn, insn, &hilo_delay,
13707 &delayed_reg, lo_reg);
13708 last_insn = insn;
13714 htab_delete (htab);
13717 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
13719 static void
13720 mips_reorg (void)
13722 mips16_lay_out_constants ();
13723 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
13724 r10k_insert_cache_barriers ();
13725 if (optimize > 0 && flag_delayed_branch)
13726 dbr_schedule (get_insns ());
13727 mips_reorg_process_insns ();
13728 if (!TARGET_MIPS16
13729 && TARGET_EXPLICIT_RELOCS
13730 && TUNE_MIPS4130
13731 && TARGET_VR4130_ALIGN)
13732 vr4130_align_insns ();
13735 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
13736 in order to avoid duplicating too much logic from elsewhere. */
13738 static void
13739 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
13740 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13741 tree function)
13743 rtx this_rtx, temp1, temp2, insn, fnaddr;
13744 bool use_sibcall_p;
13746 /* Pretend to be a post-reload pass while generating rtl. */
13747 reload_completed = 1;
13749 /* Mark the end of the (empty) prologue. */
13750 emit_note (NOTE_INSN_PROLOGUE_END);
13752 /* Determine if we can use a sibcall to call FUNCTION directly. */
13753 fnaddr = XEXP (DECL_RTL (function), 0);
13754 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
13755 && const_call_insn_operand (fnaddr, Pmode));
13757 /* Determine if we need to load FNADDR from the GOT. */
13758 if (!use_sibcall_p
13759 && (mips_got_symbol_type_p
13760 (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
13762 /* Pick a global pointer. Use a call-clobbered register if
13763 TARGET_CALL_SAVED_GP. */
13764 cfun->machine->global_pointer
13765 = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
13766 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
13768 /* Set up the global pointer for n32 or n64 abicalls. */
13769 mips_emit_loadgp ();
13772 /* We need two temporary registers in some cases. */
13773 temp1 = gen_rtx_REG (Pmode, 2);
13774 temp2 = gen_rtx_REG (Pmode, 3);
13776 /* Find out which register contains the "this" pointer. */
13777 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13778 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
13779 else
13780 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
13782 /* Add DELTA to THIS_RTX. */
13783 if (delta != 0)
13785 rtx offset = GEN_INT (delta);
13786 if (!SMALL_OPERAND (delta))
13788 mips_emit_move (temp1, offset);
13789 offset = temp1;
13791 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
13794 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
13795 if (vcall_offset != 0)
13797 rtx addr;
13799 /* Set TEMP1 to *THIS_RTX. */
13800 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
13802 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
13803 addr = mips_add_offset (temp2, temp1, vcall_offset);
13805 /* Load the offset and add it to THIS_RTX. */
13806 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
13807 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
13810 /* Jump to the target function. Use a sibcall if direct jumps are
13811 allowed, otherwise load the address into a register first. */
13812 if (use_sibcall_p)
13814 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
13815 SIBLING_CALL_P (insn) = 1;
13817 else
13819 /* This is messy. GAS treats "la $25,foo" as part of a call
13820 sequence and may allow a global "foo" to be lazily bound.
13821 The general move patterns therefore reject this combination.
13823 In this context, lazy binding would actually be OK
13824 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
13825 TARGET_CALL_SAVED_GP; see mips_load_call_address.
13826 We must therefore load the address via a temporary
13827 register if mips_dangerous_for_la25_p.
13829 If we jump to the temporary register rather than $25,
13830 the assembler can use the move insn to fill the jump's
13831 delay slot.
13833 We can use the same technique for MIPS16 code, where $25
13834 is not a valid JR register. */
13835 if (TARGET_USE_PIC_FN_ADDR_REG
13836 && !TARGET_MIPS16
13837 && !mips_dangerous_for_la25_p (fnaddr))
13838 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
13839 mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);
13841 if (TARGET_USE_PIC_FN_ADDR_REG
13842 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
13843 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
13844 emit_jump_insn (gen_indirect_jump (temp1));
13847 /* Run just enough of rest_of_compilation. This sequence was
13848 "borrowed" from alpha.c. */
13849 insn = get_insns ();
13850 insn_locators_alloc ();
13851 split_all_insns_noflow ();
13852 mips16_lay_out_constants ();
13853 shorten_branches (insn);
13854 final_start_function (insn, file, 1);
13855 final (insn, file, 1);
13856 final_end_function ();
13857 free_after_compilation (cfun);
13859 /* Clean up the vars set above. Note that final_end_function resets
13860 the global pointer for us. */
13861 reload_completed = 0;
13864 /* The last argument passed to mips_set_mips16_mode, or negative if the
13865 function hasn't been called yet.
13867 There are two copies of this information. One is saved and restored
13868 by the PCH process while the other is specific to this compiler
13869 invocation. The information calculated by mips_set_mips16_mode
13870 is invalid unless the two variables are the same. */
13871 static int was_mips16_p = -1;
13872 static GTY(()) int was_mips16_pch_p = -1;
13874 /* Set up the target-dependent global state so that it matches the
13875 current function's ISA mode. */
13877 static void
13878 mips_set_mips16_mode (int mips16_p)
13880 if (mips16_p == was_mips16_p
13881 && mips16_p == was_mips16_pch_p)
13882 return;
13884 /* Restore base settings of various flags. */
13885 target_flags = mips_base_target_flags;
13886 flag_schedule_insns = mips_base_schedule_insns;
13887 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
13888 flag_move_loop_invariants = mips_base_move_loop_invariants;
13889 align_loops = mips_base_align_loops;
13890 align_jumps = mips_base_align_jumps;
13891 align_functions = mips_base_align_functions;
13893 if (mips16_p)
13895 /* Switch to MIPS16 mode. */
13896 target_flags |= MASK_MIPS16;
13898 /* Don't run the scheduler before reload, since it tends to
13899 increase register pressure. */
13900 flag_schedule_insns = 0;
13902 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
13903 the whole function to be in a single section. */
13904 flag_reorder_blocks_and_partition = 0;
13906 /* Don't move loop invariants, because it tends to increase
13907 register pressure. It also introduces an extra move in cases
13908 where the constant is the first operand in a two-operand binary
13909 instruction, or when it forms a register argument to a functon
13910 call. */
13911 flag_move_loop_invariants = 0;
13913 target_flags |= MASK_EXPLICIT_RELOCS;
13915 /* Experiments suggest we get the best overall section-anchor
13916 results from using the range of an unextended LW or SW. Code
13917 that makes heavy use of byte or short accesses can do better
13918 with ranges of 0...31 and 0...63 respectively, but most code is
13919 sensitive to the range of LW and SW instead. */
13920 targetm.min_anchor_offset = 0;
13921 targetm.max_anchor_offset = 127;
13923 if (flag_pic && !TARGET_OLDABI)
13924 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
13926 if (TARGET_XGOT)
13927 sorry ("MIPS16 -mxgot code");
13929 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
13930 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
13932 else
13934 /* Switch to normal (non-MIPS16) mode. */
13935 target_flags &= ~MASK_MIPS16;
13937 /* Provide default values for align_* for 64-bit targets. */
13938 if (TARGET_64BIT)
13940 if (align_loops == 0)
13941 align_loops = 8;
13942 if (align_jumps == 0)
13943 align_jumps = 8;
13944 if (align_functions == 0)
13945 align_functions = 8;
13948 targetm.min_anchor_offset = -32768;
13949 targetm.max_anchor_offset = 32767;
13952 /* (Re)initialize MIPS target internals for new ISA. */
13953 mips_init_relocs ();
13955 if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
13956 /* Reinitialize target-dependent state. */
13957 target_reinit ();
13959 was_mips16_p = mips16_p;
13960 was_mips16_pch_p = mips16_p;
13963 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
13964 function should use the MIPS16 ISA and switch modes accordingly. */
13966 static void
13967 mips_set_current_function (tree fndecl)
13969 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
13972 /* Allocate a chunk of memory for per-function machine-dependent data. */
13974 static struct machine_function *
13975 mips_init_machine_status (void)
13977 return ((struct machine_function *)
13978 ggc_alloc_cleared (sizeof (struct machine_function)));
13981 /* Return the processor associated with the given ISA level, or null
13982 if the ISA isn't valid. */
13984 static const struct mips_cpu_info *
13985 mips_cpu_info_from_isa (int isa)
13987 unsigned int i;
13989 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
13990 if (mips_cpu_info_table[i].isa == isa)
13991 return mips_cpu_info_table + i;
13993 return NULL;
13996 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
13997 with a final "000" replaced by "k". Ignore case.
13999 Note: this function is shared between GCC and GAS. */
14001 static bool
14002 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
14004 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
14005 given++, canonical++;
14007 return ((*given == 0 && *canonical == 0)
14008 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
14011 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
14012 CPU name. We've traditionally allowed a lot of variation here.
14014 Note: this function is shared between GCC and GAS. */
14016 static bool
14017 mips_matching_cpu_name_p (const char *canonical, const char *given)
14019 /* First see if the name matches exactly, or with a final "000"
14020 turned into "k". */
14021 if (mips_strict_matching_cpu_name_p (canonical, given))
14022 return true;
14024 /* If not, try comparing based on numerical designation alone.
14025 See if GIVEN is an unadorned number, or 'r' followed by a number. */
14026 if (TOLOWER (*given) == 'r')
14027 given++;
14028 if (!ISDIGIT (*given))
14029 return false;
14031 /* Skip over some well-known prefixes in the canonical name,
14032 hoping to find a number there too. */
14033 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
14034 canonical += 2;
14035 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
14036 canonical += 2;
14037 else if (TOLOWER (canonical[0]) == 'r')
14038 canonical += 1;
14040 return mips_strict_matching_cpu_name_p (canonical, given);
14043 /* Return the mips_cpu_info entry for the processor or ISA given
14044 by CPU_STRING. Return null if the string isn't recognized.
14046 A similar function exists in GAS. */
14048 static const struct mips_cpu_info *
14049 mips_parse_cpu (const char *cpu_string)
14051 unsigned int i;
14052 const char *s;
14054 /* In the past, we allowed upper-case CPU names, but it doesn't
14055 work well with the multilib machinery. */
14056 for (s = cpu_string; *s != 0; s++)
14057 if (ISUPPER (*s))
14059 warning (0, "CPU names must be lower case");
14060 break;
14063 /* 'from-abi' selects the most compatible architecture for the given
14064 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
14065 EABIs, we have to decide whether we're using the 32-bit or 64-bit
14066 version. */
14067 if (strcasecmp (cpu_string, "from-abi") == 0)
14068 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
14069 : ABI_NEEDS_64BIT_REGS ? 3
14070 : (TARGET_64BIT ? 3 : 1));
14072 /* 'default' has traditionally been a no-op. Probably not very useful. */
14073 if (strcasecmp (cpu_string, "default") == 0)
14074 return NULL;
14076 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
14077 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
14078 return mips_cpu_info_table + i;
14080 return NULL;
14083 /* Set up globals to generate code for the ISA or processor
14084 described by INFO. */
14086 static void
14087 mips_set_architecture (const struct mips_cpu_info *info)
14089 if (info != 0)
14091 mips_arch_info = info;
14092 mips_arch = info->cpu;
14093 mips_isa = info->isa;
14097 /* Likewise for tuning. */
14099 static void
14100 mips_set_tune (const struct mips_cpu_info *info)
14102 if (info != 0)
14104 mips_tune_info = info;
14105 mips_tune = info->cpu;
14109 /* Implement TARGET_HANDLE_OPTION. */
14111 static bool
14112 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
14114 switch (code)
14116 case OPT_mabi_:
14117 if (strcmp (arg, "32") == 0)
14118 mips_abi = ABI_32;
14119 else if (strcmp (arg, "o64") == 0)
14120 mips_abi = ABI_O64;
14121 else if (strcmp (arg, "n32") == 0)
14122 mips_abi = ABI_N32;
14123 else if (strcmp (arg, "64") == 0)
14124 mips_abi = ABI_64;
14125 else if (strcmp (arg, "eabi") == 0)
14126 mips_abi = ABI_EABI;
14127 else
14128 return false;
14129 return true;
14131 case OPT_march_:
14132 case OPT_mtune_:
14133 return mips_parse_cpu (arg) != 0;
14135 case OPT_mips:
14136 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
14137 return mips_isa_option_info != 0;
14139 case OPT_mno_flush_func:
14140 mips_cache_flush_func = NULL;
14141 return true;
14143 case OPT_mcode_readable_:
14144 if (strcmp (arg, "yes") == 0)
14145 mips_code_readable = CODE_READABLE_YES;
14146 else if (strcmp (arg, "pcrel") == 0)
14147 mips_code_readable = CODE_READABLE_PCREL;
14148 else if (strcmp (arg, "no") == 0)
14149 mips_code_readable = CODE_READABLE_NO;
14150 else
14151 return false;
14152 return true;
14154 case OPT_mr10k_cache_barrier_:
14155 if (strcmp (arg, "load-store") == 0)
14156 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_LOAD_STORE;
14157 else if (strcmp (arg, "store") == 0)
14158 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_STORE;
14159 else if (strcmp (arg, "none") == 0)
14160 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
14161 else
14162 return false;
14163 return true;
14165 default:
14166 return true;
14170 /* Implement OVERRIDE_OPTIONS. */
14172 void
14173 mips_override_options (void)
14175 int i, start, regno, mode;
14177 /* Process flags as though we were generating non-MIPS16 code. */
14178 mips_base_mips16 = TARGET_MIPS16;
14179 target_flags &= ~MASK_MIPS16;
14181 #ifdef SUBTARGET_OVERRIDE_OPTIONS
14182 SUBTARGET_OVERRIDE_OPTIONS;
14183 #endif
14185 /* Set the small data limit. */
14186 mips_small_data_threshold = (g_switch_set
14187 ? g_switch_value
14188 : MIPS_DEFAULT_GVALUE);
14190 /* The following code determines the architecture and register size.
14191 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
14192 The GAS and GCC code should be kept in sync as much as possible. */
14194 if (mips_arch_string != 0)
14195 mips_set_architecture (mips_parse_cpu (mips_arch_string));
14197 if (mips_isa_option_info != 0)
14199 if (mips_arch_info == 0)
14200 mips_set_architecture (mips_isa_option_info);
14201 else if (mips_arch_info->isa != mips_isa_option_info->isa)
14202 error ("%<-%s%> conflicts with the other architecture options, "
14203 "which specify a %s processor",
14204 mips_isa_option_info->name,
14205 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
14208 if (mips_arch_info == 0)
14210 #ifdef MIPS_CPU_STRING_DEFAULT
14211 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
14212 #else
14213 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
14214 #endif
14217 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
14218 error ("%<-march=%s%> is not compatible with the selected ABI",
14219 mips_arch_info->name);
14221 /* Optimize for mips_arch, unless -mtune selects a different processor. */
14222 if (mips_tune_string != 0)
14223 mips_set_tune (mips_parse_cpu (mips_tune_string));
14225 if (mips_tune_info == 0)
14226 mips_set_tune (mips_arch_info);
14228 if ((target_flags_explicit & MASK_64BIT) != 0)
14230 /* The user specified the size of the integer registers. Make sure
14231 it agrees with the ABI and ISA. */
14232 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
14233 error ("%<-mgp64%> used with a 32-bit processor");
14234 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
14235 error ("%<-mgp32%> used with a 64-bit ABI");
14236 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
14237 error ("%<-mgp64%> used with a 32-bit ABI");
14239 else
14241 /* Infer the integer register size from the ABI and processor.
14242 Restrict ourselves to 32-bit registers if that's all the
14243 processor has, or if the ABI cannot handle 64-bit registers. */
14244 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
14245 target_flags &= ~MASK_64BIT;
14246 else
14247 target_flags |= MASK_64BIT;
14250 if ((target_flags_explicit & MASK_FLOAT64) != 0)
14252 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
14253 error ("unsupported combination: %s", "-mfp64 -msingle-float");
14254 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
14255 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
14256 else if (!TARGET_64BIT && TARGET_FLOAT64)
14258 if (!ISA_HAS_MXHC1)
14259 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
14260 " the target supports the mfhc1 and mthc1 instructions");
14261 else if (mips_abi != ABI_32)
14262 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
14263 " the o32 ABI");
14266 else
14268 /* -msingle-float selects 32-bit float registers. Otherwise the
14269 float registers should be the same size as the integer ones. */
14270 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
14271 target_flags |= MASK_FLOAT64;
14272 else
14273 target_flags &= ~MASK_FLOAT64;
14276 /* End of code shared with GAS. */
14278 /* If no -mlong* option was given, infer it from the other options. */
14279 if ((target_flags_explicit & MASK_LONG64) == 0)
14281 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
14282 target_flags |= MASK_LONG64;
14283 else
14284 target_flags &= ~MASK_LONG64;
14287 if (!TARGET_OLDABI)
14288 flag_pcc_struct_return = 0;
14290 /* Decide which rtx_costs structure to use. */
14291 if (optimize_size)
14292 mips_cost = &mips_rtx_cost_optimize_size;
14293 else
14294 mips_cost = &mips_rtx_cost_data[mips_tune];
14296 /* If the user hasn't specified a branch cost, use the processor's
14297 default. */
14298 if (mips_branch_cost == 0)
14299 mips_branch_cost = mips_cost->branch_cost;
14301 /* If neither -mbranch-likely nor -mno-branch-likely was given
14302 on the command line, set MASK_BRANCHLIKELY based on the target
14303 architecture and tuning flags. Annulled delay slots are a
14304 size win, so we only consider the processor-specific tuning
14305 for !optimize_size. */
14306 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
14308 if (ISA_HAS_BRANCHLIKELY
14309 && (optimize_size
14310 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
14311 target_flags |= MASK_BRANCHLIKELY;
14312 else
14313 target_flags &= ~MASK_BRANCHLIKELY;
14315 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
14316 warning (0, "the %qs architecture does not support branch-likely"
14317 " instructions", mips_arch_info->name);
14319 /* The effect of -mabicalls isn't defined for the EABI. */
14320 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
14322 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
14323 target_flags &= ~MASK_ABICALLS;
14326 if (TARGET_ABICALLS_PIC2)
14327 /* We need to set flag_pic for executables as well as DSOs
14328 because we may reference symbols that are not defined in
14329 the final executable. (MIPS does not use things like
14330 copy relocs, for example.)
14332 There is a body of code that uses __PIC__ to distinguish
14333 between -mabicalls and -mno-abicalls code. The non-__PIC__
14334 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
14335 long as any indirect jumps use $25. */
14336 flag_pic = 1;
14338 /* -mvr4130-align is a "speed over size" optimization: it usually produces
14339 faster code, but at the expense of more nops. Enable it at -O3 and
14340 above. */
14341 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
14342 target_flags |= MASK_VR4130_ALIGN;
14344 /* Prefer a call to memcpy over inline code when optimizing for size,
14345 though see MOVE_RATIO in mips.h. */
14346 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
14347 target_flags |= MASK_MEMCPY;
14349 /* If we have a nonzero small-data limit, check that the -mgpopt
14350 setting is consistent with the other target flags. */
14351 if (mips_small_data_threshold > 0)
14353 if (!TARGET_GPOPT)
14355 if (!TARGET_EXPLICIT_RELOCS)
14356 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
14358 TARGET_LOCAL_SDATA = false;
14359 TARGET_EXTERN_SDATA = false;
14361 else
14363 if (TARGET_VXWORKS_RTP)
14364 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
14366 if (TARGET_ABICALLS)
14367 warning (0, "cannot use small-data accesses for %qs",
14368 "-mabicalls");
14372 #ifdef MIPS_TFMODE_FORMAT
14373 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
14374 #endif
14376 /* Make sure that the user didn't turn off paired single support when
14377 MIPS-3D support is requested. */
14378 if (TARGET_MIPS3D
14379 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
14380 && !TARGET_PAIRED_SINGLE_FLOAT)
14381 error ("%<-mips3d%> requires %<-mpaired-single%>");
14383 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
14384 if (TARGET_MIPS3D)
14385 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
14387 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
14388 and TARGET_HARD_FLOAT_ABI are both true. */
14389 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
14390 error ("%qs must be used with %qs",
14391 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
14392 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
14394 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
14395 enabled. */
14396 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
14397 warning (0, "the %qs architecture does not support paired-single"
14398 " instructions", mips_arch_info->name);
14400 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
14401 && !TARGET_CACHE_BUILTIN)
14403 error ("%qs requires a target that provides the %qs instruction",
14404 "-mr10k-cache-barrier", "cache");
14405 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
14408 /* If TARGET_DSPR2, enable MASK_DSP. */
14409 if (TARGET_DSPR2)
14410 target_flags |= MASK_DSP;
14412 /* .eh_frame addresses should be the same width as a C pointer.
14413 Most MIPS ABIs support only one pointer size, so the assembler
14414 will usually know exactly how big an .eh_frame address is.
14416 Unfortunately, this is not true of the 64-bit EABI. The ABI was
14417 originally defined to use 64-bit pointers (i.e. it is LP64), and
14418 this is still the default mode. However, we also support an n32-like
14419 ILP32 mode, which is selected by -mlong32. The problem is that the
14420 assembler has traditionally not had an -mlong option, so it has
14421 traditionally not known whether we're using the ILP32 or LP64 form.
14423 As it happens, gas versions up to and including 2.19 use _32-bit_
14424 addresses for EABI64 .cfi_* directives. This is wrong for the
14425 default LP64 mode, so we can't use the directives by default.
14426 Moreover, since gas's current behavior is at odds with gcc's
14427 default behavior, it seems unwise to rely on future versions
14428 of gas behaving the same way. We therefore avoid using .cfi
14429 directives for -mlong32 as well. */
14430 if (mips_abi == ABI_EABI && TARGET_64BIT)
14431 flag_dwarf2_cfi_asm = 0;
14433 mips_init_print_operand_punct ();
14435 /* Set up array to map GCC register number to debug register number.
14436 Ignore the special purpose register numbers. */
14438 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14440 mips_dbx_regno[i] = INVALID_REGNUM;
14441 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
14442 mips_dwarf_regno[i] = i;
14443 else
14444 mips_dwarf_regno[i] = INVALID_REGNUM;
14447 start = GP_DBX_FIRST - GP_REG_FIRST;
14448 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
14449 mips_dbx_regno[i] = i + start;
14451 start = FP_DBX_FIRST - FP_REG_FIRST;
14452 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
14453 mips_dbx_regno[i] = i + start;
14455 /* Accumulator debug registers use big-endian ordering. */
14456 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
14457 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
14458 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
14459 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
14460 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
14462 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
14463 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
14466 /* Set up mips_hard_regno_mode_ok. */
14467 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
14468 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
14469 mips_hard_regno_mode_ok[mode][regno]
14470 = mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
14472 /* Function to allocate machine-dependent function status. */
14473 init_machine_status = &mips_init_machine_status;
14475 /* Default to working around R4000 errata only if the processor
14476 was selected explicitly. */
14477 if ((target_flags_explicit & MASK_FIX_R4000) == 0
14478 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
14479 target_flags |= MASK_FIX_R4000;
14481 /* Default to working around R4400 errata only if the processor
14482 was selected explicitly. */
14483 if ((target_flags_explicit & MASK_FIX_R4400) == 0
14484 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
14485 target_flags |= MASK_FIX_R4400;
14487 /* Default to working around R10000 errata only if the processor
14488 was selected explicitly. */
14489 if ((target_flags_explicit & MASK_FIX_R10000) == 0
14490 && mips_matching_cpu_name_p (mips_arch_info->name, "r10000"))
14491 target_flags |= MASK_FIX_R10000;
14493 /* Make sure that branch-likely instructions available when using
14494 -mfix-r10000. The instructions are not available if either:
14496 1. -mno-branch-likely was passed.
14497 2. The selected ISA does not support branch-likely and
14498 the command line does not include -mbranch-likely. */
14499 if (TARGET_FIX_R10000
14500 && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
14501 ? !ISA_HAS_BRANCHLIKELY
14502 : !TARGET_BRANCHLIKELY))
14503 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
14505 /* Save base state of options. */
14506 mips_base_target_flags = target_flags;
14507 mips_base_schedule_insns = flag_schedule_insns;
14508 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
14509 mips_base_move_loop_invariants = flag_move_loop_invariants;
14510 mips_base_align_loops = align_loops;
14511 mips_base_align_jumps = align_jumps;
14512 mips_base_align_functions = align_functions;
14514 /* Now select the ISA mode.
14516 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
14517 MIPS16 mode afterwards if need be. */
14518 mips_set_mips16_mode (false);
14521 /* Swap the register information for registers I and I + 1, which
14522 currently have the wrong endianness. Note that the registers'
14523 fixedness and call-clobberedness might have been set on the
14524 command line. */
14526 static void
14527 mips_swap_registers (unsigned int i)
14529 int tmpi;
14530 const char *tmps;
14532 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
14533 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
14535 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
14536 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
14537 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
14538 SWAP_STRING (reg_names[i], reg_names[i + 1]);
14540 #undef SWAP_STRING
14541 #undef SWAP_INT
14544 /* Implement CONDITIONAL_REGISTER_USAGE. */
14546 void
14547 mips_conditional_register_usage (void)
14550 if (ISA_HAS_DSP)
14552 /* These DSP control register fields are global. */
14553 global_regs[CCDSP_PO_REGNUM] = 1;
14554 global_regs[CCDSP_SC_REGNUM] = 1;
14556 else
14558 int regno;
14560 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
14561 fixed_regs[regno] = call_used_regs[regno] = 1;
14563 if (!TARGET_HARD_FLOAT)
14565 int regno;
14567 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
14568 fixed_regs[regno] = call_used_regs[regno] = 1;
14569 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
14570 fixed_regs[regno] = call_used_regs[regno] = 1;
14572 else if (! ISA_HAS_8CC)
14574 int regno;
14576 /* We only have a single condition-code register. We implement
14577 this by fixing all the condition-code registers and generating
14578 RTL that refers directly to ST_REG_FIRST. */
14579 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
14580 fixed_regs[regno] = call_used_regs[regno] = 1;
14582 /* In MIPS16 mode, we permit the $t temporary registers to be used
14583 for reload. We prohibit the unused $s registers, since they
14584 are call-saved, and saving them via a MIPS16 register would
14585 probably waste more time than just reloading the value. */
14586 if (TARGET_MIPS16)
14588 fixed_regs[18] = call_used_regs[18] = 1;
14589 fixed_regs[19] = call_used_regs[19] = 1;
14590 fixed_regs[20] = call_used_regs[20] = 1;
14591 fixed_regs[21] = call_used_regs[21] = 1;
14592 fixed_regs[22] = call_used_regs[22] = 1;
14593 fixed_regs[23] = call_used_regs[23] = 1;
14594 fixed_regs[26] = call_used_regs[26] = 1;
14595 fixed_regs[27] = call_used_regs[27] = 1;
14596 fixed_regs[30] = call_used_regs[30] = 1;
14598 /* $f20-$f23 are call-clobbered for n64. */
14599 if (mips_abi == ABI_64)
14601 int regno;
14602 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
14603 call_really_used_regs[regno] = call_used_regs[regno] = 1;
14605 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
14606 for n32. */
14607 if (mips_abi == ABI_N32)
14609 int regno;
14610 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
14611 call_really_used_regs[regno] = call_used_regs[regno] = 1;
14613 /* Make sure that double-register accumulator values are correctly
14614 ordered for the current endianness. */
14615 if (TARGET_LITTLE_ENDIAN)
14617 unsigned int regno;
14619 mips_swap_registers (MD_REG_FIRST);
14620 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
14621 mips_swap_registers (regno);
14625 /* Initialize vector TARGET to VALS. */
14627 void
14628 mips_expand_vector_init (rtx target, rtx vals)
14630 enum machine_mode mode;
14631 enum machine_mode inner;
14632 unsigned int i, n_elts;
14633 rtx mem;
14635 mode = GET_MODE (target);
14636 inner = GET_MODE_INNER (mode);
14637 n_elts = GET_MODE_NUNITS (mode);
14639 gcc_assert (VECTOR_MODE_P (mode));
14641 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
14642 for (i = 0; i < n_elts; i++)
14643 emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
14644 XVECEXP (vals, 0, i));
14646 emit_move_insn (target, mem);
14649 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
14650 other registers for instructions for which it is possible. This
14651 encourages the compiler to use CMP in cases where an XOR would
14652 require some register shuffling. */
14654 void
14655 mips_order_regs_for_local_alloc (void)
14657 int i;
14659 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14660 reg_alloc_order[i] = i;
14662 if (TARGET_MIPS16)
14664 /* It really doesn't matter where we put register 0, since it is
14665 a fixed register anyhow. */
14666 reg_alloc_order[0] = 24;
14667 reg_alloc_order[24] = 0;
14671 /* Implement EPILOGUE_USES. */
14673 bool
14674 mips_epilogue_uses (unsigned int regno)
14676 /* Say that the epilogue uses the return address register. Note that
14677 in the case of sibcalls, the values "used by the epilogue" are
14678 considered live at the start of the called function. */
14679 if (regno == 31)
14680 return true;
14682 /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
14683 See the comment above load_call<mode> for details. */
14684 if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
14685 return true;
14687 /* An interrupt handler must preserve some registers that are
14688 ordinarily call-clobbered. */
14689 if (cfun->machine->interrupt_handler_p
14690 && mips_interrupt_extra_call_saved_reg_p (regno))
14691 return true;
14693 return false;
14696 /* A for_each_rtx callback. Stop the search if *X is an AT register. */
14698 static int
14699 mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
14701 return GET_CODE (*x) == REG && REGNO (*x) == AT_REGNUM;
14705 /* Implement FINAL_PRESCAN_INSN. */
14707 void
14708 mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
14710 int i;
14712 /* We need to emit ".set noat" before an instruction that accesses
14713 $1 (AT). */
14714 if (recog_memoized (insn) >= 0)
14715 for (i = 0; i < noperands; i++)
14716 if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
14717 if (set_noat++ == 0)
14718 fprintf (asm_out_file, "\t.set\tnoat\n");
14721 /* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
14723 static void
14724 mips_final_postscan_insn (FILE *file, rtx insn, rtx *opvec, int noperands)
14726 int i;
14728 /* Close any ".set noat" block opened by mips_final_prescan_insn. */
14729 if (recog_memoized (insn) >= 0)
14730 for (i = 0; i < noperands; i++)
14731 if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
14732 if (--set_noat == 0)
14733 fprintf (file, "\t.set\tat\n");
14736 /* Initialize the GCC target structure. */
14737 #undef TARGET_ASM_ALIGNED_HI_OP
14738 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
14739 #undef TARGET_ASM_ALIGNED_SI_OP
14740 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
14741 #undef TARGET_ASM_ALIGNED_DI_OP
14742 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
14744 #undef TARGET_ASM_FUNCTION_PROLOGUE
14745 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
14746 #undef TARGET_ASM_FUNCTION_EPILOGUE
14747 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
14748 #undef TARGET_ASM_SELECT_RTX_SECTION
14749 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
14750 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
14751 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
14753 #undef TARGET_SCHED_INIT
14754 #define TARGET_SCHED_INIT mips_sched_init
14755 #undef TARGET_SCHED_REORDER
14756 #define TARGET_SCHED_REORDER mips_sched_reorder
14757 #undef TARGET_SCHED_REORDER2
14758 #define TARGET_SCHED_REORDER2 mips_sched_reorder
14759 #undef TARGET_SCHED_VARIABLE_ISSUE
14760 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
14761 #undef TARGET_SCHED_ADJUST_COST
14762 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
14763 #undef TARGET_SCHED_ISSUE_RATE
14764 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
14765 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
14766 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
14767 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
14768 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
14769 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14770 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
14771 mips_multipass_dfa_lookahead
14773 #undef TARGET_DEFAULT_TARGET_FLAGS
14774 #define TARGET_DEFAULT_TARGET_FLAGS \
14775 (TARGET_DEFAULT \
14776 | TARGET_CPU_DEFAULT \
14777 | TARGET_ENDIAN_DEFAULT \
14778 | TARGET_FP_EXCEPTIONS_DEFAULT \
14779 | MASK_CHECK_ZERO_DIV \
14780 | MASK_FUSED_MADD)
14781 #undef TARGET_HANDLE_OPTION
14782 #define TARGET_HANDLE_OPTION mips_handle_option
14784 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
14785 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
14787 #undef TARGET_INSERT_ATTRIBUTES
14788 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
14789 #undef TARGET_MERGE_DECL_ATTRIBUTES
14790 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
14791 #undef TARGET_SET_CURRENT_FUNCTION
14792 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
14794 #undef TARGET_VALID_POINTER_MODE
14795 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
14796 #undef TARGET_RTX_COSTS
14797 #define TARGET_RTX_COSTS mips_rtx_costs
14798 #undef TARGET_ADDRESS_COST
14799 #define TARGET_ADDRESS_COST mips_address_cost
14801 #undef TARGET_IN_SMALL_DATA_P
14802 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
14804 #undef TARGET_MACHINE_DEPENDENT_REORG
14805 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
14807 #undef TARGET_ASM_FILE_START
14808 #define TARGET_ASM_FILE_START mips_file_start
14809 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
14810 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
14812 #undef TARGET_INIT_LIBFUNCS
14813 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
14815 #undef TARGET_BUILD_BUILTIN_VA_LIST
14816 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
14817 #undef TARGET_EXPAND_BUILTIN_VA_START
14818 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
14819 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
14820 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
14822 #undef TARGET_PROMOTE_FUNCTION_ARGS
14823 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
14824 #undef TARGET_PROMOTE_FUNCTION_RETURN
14825 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
14826 #undef TARGET_PROMOTE_PROTOTYPES
14827 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
14829 #undef TARGET_RETURN_IN_MEMORY
14830 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
14831 #undef TARGET_RETURN_IN_MSB
14832 #define TARGET_RETURN_IN_MSB mips_return_in_msb
14834 #undef TARGET_ASM_OUTPUT_MI_THUNK
14835 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
14836 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14837 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
14839 #undef TARGET_SETUP_INCOMING_VARARGS
14840 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
14841 #undef TARGET_STRICT_ARGUMENT_NAMING
14842 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
14843 #undef TARGET_MUST_PASS_IN_STACK
14844 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
14845 #undef TARGET_PASS_BY_REFERENCE
14846 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
14847 #undef TARGET_CALLEE_COPIES
14848 #define TARGET_CALLEE_COPIES mips_callee_copies
14849 #undef TARGET_ARG_PARTIAL_BYTES
14850 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
14852 #undef TARGET_MODE_REP_EXTENDED
14853 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
14855 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14856 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
14858 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14859 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
14861 #undef TARGET_INIT_BUILTINS
14862 #define TARGET_INIT_BUILTINS mips_init_builtins
14863 #undef TARGET_EXPAND_BUILTIN
14864 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
14866 #undef TARGET_HAVE_TLS
14867 #define TARGET_HAVE_TLS HAVE_AS_TLS
14869 #undef TARGET_CANNOT_FORCE_CONST_MEM
14870 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
14872 #undef TARGET_ENCODE_SECTION_INFO
14873 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
14875 #undef TARGET_ATTRIBUTE_TABLE
14876 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
14877 /* All our function attributes are related to how out-of-line copies should
14878 be compiled or called. They don't in themselves prevent inlining. */
14879 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14880 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14882 #undef TARGET_EXTRA_LIVE_ON_ENTRY
14883 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
14885 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
14886 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
14887 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
14888 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
14890 #undef TARGET_COMP_TYPE_ATTRIBUTES
14891 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
14893 #ifdef HAVE_AS_DTPRELWORD
14894 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14895 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
14896 #endif
14897 #undef TARGET_DWARF_REGISTER_SPAN
14898 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
14900 #undef TARGET_IRA_COVER_CLASSES
14901 #define TARGET_IRA_COVER_CLASSES mips_ira_cover_classes
14903 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
14904 #define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
14906 struct gcc_target targetm = TARGET_INITIALIZER;
14908 #include "gt-mips.h"