merge with trunk @ 139506
[official-gcc.git] / gcc / config / mips / mips.c
blobe9eb20cc0eec58bb7ef5440ef53775fff17c20b7
1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "libfuncs.h"
46 #include "flags.h"
47 #include "reload.h"
48 #include "tm_p.h"
49 #include "ggc.h"
50 #include "gstab.h"
51 #include "hashtab.h"
52 #include "debug.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "gimple.h"
60 #include "bitmap.h"
61 #include "diagnostic.h"
63 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
64 #define UNSPEC_ADDRESS_P(X) \
65 (GET_CODE (X) == UNSPEC \
66 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
67 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
69 /* Extract the symbol or label from UNSPEC wrapper X. */
70 #define UNSPEC_ADDRESS(X) \
71 XVECEXP (X, 0, 0)
73 /* Extract the symbol type from UNSPEC wrapper X. */
74 #define UNSPEC_ADDRESS_TYPE(X) \
75 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
77 /* The maximum distance between the top of the stack frame and the
78 value $sp has when we save and restore registers.
80 The value for normal-mode code must be a SMALL_OPERAND and must
81 preserve the maximum stack alignment. We therefore use a value
82 of 0x7ff0 in this case.
84 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
85 up to 0x7f8 bytes and can usually save or restore all the registers
86 that we need to save or restore. (Note that we can only use these
87 instructions for o32, for which the stack alignment is 8 bytes.)
89 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
90 RESTORE are not available. We can then use unextended instructions
91 to save and restore registers, and to allocate and deallocate the top
92 part of the frame. */
93 #define MIPS_MAX_FIRST_STACK_STEP \
94 (!TARGET_MIPS16 ? 0x7ff0 \
95 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
96 : TARGET_64BIT ? 0x100 : 0x400)
98 /* True if INSN is a mips.md pattern or asm statement. */
99 #define USEFUL_INSN_P(INSN) \
100 (INSN_P (INSN) \
101 && GET_CODE (PATTERN (INSN)) != USE \
102 && GET_CODE (PATTERN (INSN)) != CLOBBER \
103 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
104 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
106 /* If INSN is a delayed branch sequence, return the first instruction
107 in the sequence, otherwise return INSN itself. */
108 #define SEQ_BEGIN(INSN) \
109 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
110 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 : (INSN))
113 /* Likewise for the last instruction in a delayed branch sequence. */
114 #define SEQ_END(INSN) \
115 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
116 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
117 : (INSN))
119 /* Execute the following loop body with SUBINSN set to each instruction
120 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
121 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
122 for ((SUBINSN) = SEQ_BEGIN (INSN); \
123 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
124 (SUBINSN) = NEXT_INSN (SUBINSN))
126 /* True if bit BIT is set in VALUE. */
127 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
129 /* Classifies an address.
131 ADDRESS_REG
132 A natural register + offset address. The register satisfies
133 mips_valid_base_register_p and the offset is a const_arith_operand.
135 ADDRESS_LO_SUM
136 A LO_SUM rtx. The first operand is a valid base register and
137 the second operand is a symbolic address.
139 ADDRESS_CONST_INT
140 A signed 16-bit constant address.
142 ADDRESS_SYMBOLIC:
143 A constant symbolic address. */
144 enum mips_address_type {
145 ADDRESS_REG,
146 ADDRESS_LO_SUM,
147 ADDRESS_CONST_INT,
148 ADDRESS_SYMBOLIC
151 /* Macros to create an enumeration identifier for a function prototype. */
152 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
153 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
154 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
155 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
157 /* Classifies the prototype of a built-in function. */
158 enum mips_function_type {
159 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
160 #include "config/mips/mips-ftypes.def"
161 #undef DEF_MIPS_FTYPE
162 MIPS_MAX_FTYPE_MAX
165 /* Specifies how a built-in function should be converted into rtl. */
166 enum mips_builtin_type {
167 /* The function corresponds directly to an .md pattern. The return
168 value is mapped to operand 0 and the arguments are mapped to
169 operands 1 and above. */
170 MIPS_BUILTIN_DIRECT,
172 /* The function corresponds directly to an .md pattern. There is no return
173 value and the arguments are mapped to operands 0 and above. */
174 MIPS_BUILTIN_DIRECT_NO_TARGET,
176 /* The function corresponds to a comparison instruction followed by
177 a mips_cond_move_tf_ps pattern. The first two arguments are the
178 values to compare and the second two arguments are the vector
179 operands for the movt.ps or movf.ps instruction (in assembly order). */
180 MIPS_BUILTIN_MOVF,
181 MIPS_BUILTIN_MOVT,
183 /* The function corresponds to a V2SF comparison instruction. Operand 0
184 of this instruction is the result of the comparison, which has mode
185 CCV2 or CCV4. The function arguments are mapped to operands 1 and
186 above. The function's return value is an SImode boolean that is
187 true under the following conditions:
189 MIPS_BUILTIN_CMP_ANY: one of the registers is true
190 MIPS_BUILTIN_CMP_ALL: all of the registers are true
191 MIPS_BUILTIN_CMP_LOWER: the first register is true
192 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
193 MIPS_BUILTIN_CMP_ANY,
194 MIPS_BUILTIN_CMP_ALL,
195 MIPS_BUILTIN_CMP_UPPER,
196 MIPS_BUILTIN_CMP_LOWER,
198 /* As above, but the instruction only sets a single $fcc register. */
199 MIPS_BUILTIN_CMP_SINGLE,
201 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
202 MIPS_BUILTIN_BPOSGE32
205 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
206 #define MIPS_FP_CONDITIONS(MACRO) \
207 MACRO (f), \
208 MACRO (un), \
209 MACRO (eq), \
210 MACRO (ueq), \
211 MACRO (olt), \
212 MACRO (ult), \
213 MACRO (ole), \
214 MACRO (ule), \
215 MACRO (sf), \
216 MACRO (ngle), \
217 MACRO (seq), \
218 MACRO (ngl), \
219 MACRO (lt), \
220 MACRO (nge), \
221 MACRO (le), \
222 MACRO (ngt)
224 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
225 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
226 enum mips_fp_condition {
227 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
230 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
231 #define STRINGIFY(X) #X
232 static const char *const mips_fp_conditions[] = {
233 MIPS_FP_CONDITIONS (STRINGIFY)
236 /* Information about a function's frame layout. */
237 struct mips_frame_info GTY(()) {
238 /* The size of the frame in bytes. */
239 HOST_WIDE_INT total_size;
241 /* The number of bytes allocated to variables. */
242 HOST_WIDE_INT var_size;
244 /* The number of bytes allocated to outgoing function arguments. */
245 HOST_WIDE_INT args_size;
247 /* The number of bytes allocated to the .cprestore slot, or 0 if there
248 is no such slot. */
249 HOST_WIDE_INT cprestore_size;
251 /* Bit X is set if the function saves or restores GPR X. */
252 unsigned int mask;
254 /* Likewise FPR X. */
255 unsigned int fmask;
257 /* The number of GPRs and FPRs saved. */
258 unsigned int num_gp;
259 unsigned int num_fp;
261 /* The offset of the topmost GPR and FPR save slots from the top of
262 the frame, or zero if no such slots are needed. */
263 HOST_WIDE_INT gp_save_offset;
264 HOST_WIDE_INT fp_save_offset;
266 /* Likewise, but giving offsets from the bottom of the frame. */
267 HOST_WIDE_INT gp_sp_offset;
268 HOST_WIDE_INT fp_sp_offset;
270 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
271 HOST_WIDE_INT arg_pointer_offset;
273 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
274 HOST_WIDE_INT hard_frame_pointer_offset;
277 struct machine_function GTY(()) {
278 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
279 rtx mips16_gp_pseudo_rtx;
281 /* The number of extra stack bytes taken up by register varargs.
282 This area is allocated by the callee at the very top of the frame. */
283 int varargs_size;
285 /* The current frame information, calculated by mips_compute_frame_info. */
286 struct mips_frame_info frame;
288 /* The register to use as the function's global pointer. */
289 unsigned int global_pointer;
291 /* True if mips_adjust_insn_length should ignore an instruction's
292 hazard attribute. */
293 bool ignore_hazard_length_p;
295 /* True if the whole function is suitable for .set noreorder and
296 .set nomacro. */
297 bool all_noreorder_p;
299 /* True if the function is known to have an instruction that needs $gp. */
300 bool has_gp_insn_p;
302 /* True if we have emitted an instruction to initialize
303 mips16_gp_pseudo_rtx. */
304 bool initialized_mips16_gp_pseudo_p;
307 /* Information about a single argument. */
308 struct mips_arg_info {
309 /* True if the argument is passed in a floating-point register, or
310 would have been if we hadn't run out of registers. */
311 bool fpr_p;
313 /* The number of words passed in registers, rounded up. */
314 unsigned int reg_words;
316 /* For EABI, the offset of the first register from GP_ARG_FIRST or
317 FP_ARG_FIRST. For other ABIs, the offset of the first register from
318 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
319 comment for details).
321 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
322 on the stack. */
323 unsigned int reg_offset;
325 /* The number of words that must be passed on the stack, rounded up. */
326 unsigned int stack_words;
328 /* The offset from the start of the stack overflow area of the argument's
329 first stack word. Only meaningful when STACK_WORDS is nonzero. */
330 unsigned int stack_offset;
333 /* Information about an address described by mips_address_type.
335 ADDRESS_CONST_INT
336 No fields are used.
338 ADDRESS_REG
339 REG is the base register and OFFSET is the constant offset.
341 ADDRESS_LO_SUM
342 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
343 is the type of symbol it references.
345 ADDRESS_SYMBOLIC
346 SYMBOL_TYPE is the type of symbol that the address references. */
347 struct mips_address_info {
348 enum mips_address_type type;
349 rtx reg;
350 rtx offset;
351 enum mips_symbol_type symbol_type;
354 /* One stage in a constant building sequence. These sequences have
355 the form:
357 A = VALUE[0]
358 A = A CODE[1] VALUE[1]
359 A = A CODE[2] VALUE[2]
362 where A is an accumulator, each CODE[i] is a binary rtl operation
363 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
364 struct mips_integer_op {
365 enum rtx_code code;
366 unsigned HOST_WIDE_INT value;
369 /* The largest number of operations needed to load an integer constant.
370 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
371 When the lowest bit is clear, we can try, but reject a sequence with
372 an extra SLL at the end. */
373 #define MIPS_MAX_INTEGER_OPS 7
375 /* Information about a MIPS16e SAVE or RESTORE instruction. */
376 struct mips16e_save_restore_info {
377 /* The number of argument registers saved by a SAVE instruction.
378 0 for RESTORE instructions. */
379 unsigned int nargs;
381 /* Bit X is set if the instruction saves or restores GPR X. */
382 unsigned int mask;
384 /* The total number of bytes to allocate. */
385 HOST_WIDE_INT size;
388 /* Global variables for machine-dependent things. */
390 /* The -G setting, or the configuration's default small-data limit if
391 no -G option is given. */
392 static unsigned int mips_small_data_threshold;
394 /* The number of file directives written by mips_output_filename. */
395 int num_source_filenames;
397 /* The name that appeared in the last .file directive written by
398 mips_output_filename, or "" if mips_output_filename hasn't
399 written anything yet. */
400 const char *current_function_file = "";
402 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
403 int sdb_label_count;
405 /* Arrays that map GCC register numbers to debugger register numbers. */
406 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
407 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
409 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
410 int set_noreorder;
411 int set_nomacro;
412 static int set_noat;
414 /* True if we're writing out a branch-likely instruction rather than a
415 normal branch. */
416 static bool mips_branch_likely;
418 /* The operands passed to the last cmpMM expander. */
419 rtx cmp_operands[2];
421 /* The current instruction-set architecture. */
422 enum processor_type mips_arch;
423 const struct mips_cpu_info *mips_arch_info;
425 /* The processor that we should tune the code for. */
426 enum processor_type mips_tune;
427 const struct mips_cpu_info *mips_tune_info;
429 /* The ISA level associated with mips_arch. */
430 int mips_isa;
432 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
433 static const struct mips_cpu_info *mips_isa_option_info;
435 /* Which ABI to use. */
436 int mips_abi = MIPS_ABI_DEFAULT;
438 /* Which cost information to use. */
439 const struct mips_rtx_cost_data *mips_cost;
441 /* The ambient target flags, excluding MASK_MIPS16. */
442 static int mips_base_target_flags;
444 /* True if MIPS16 is the default mode. */
445 bool mips_base_mips16;
447 /* The ambient values of other global variables. */
448 static int mips_base_delayed_branch; /* flag_delayed_branch */
449 static int mips_base_schedule_insns; /* flag_schedule_insns */
450 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
451 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
452 static int mips_base_align_loops; /* align_loops */
453 static int mips_base_align_jumps; /* align_jumps */
454 static int mips_base_align_functions; /* align_functions */
456 /* The -mcode-readable setting. */
457 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
459 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
460 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
462 /* Index C is true if character C is a valid PRINT_OPERAND punctation
463 character. */
464 bool mips_print_operand_punct[256];
466 static GTY (()) int mips_output_filename_first_time = 1;
468 /* mips_split_p[X] is true if symbols of type X can be split by
469 mips_split_symbol. */
470 bool mips_split_p[NUM_SYMBOL_TYPES];
472 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
473 can be split by mips_split_symbol. */
474 bool mips_split_hi_p[NUM_SYMBOL_TYPES];
476 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
477 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
478 if they are matched by a special .md file pattern. */
479 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
481 /* Likewise for HIGHs. */
482 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
484 /* Index R is the smallest register class that contains register R. */
485 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
486 LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
487 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
488 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
489 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
490 M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
491 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
492 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
493 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
494 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
495 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
496 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
497 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
498 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
499 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
500 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
501 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
502 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
503 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
504 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
505 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
506 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
507 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
508 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
509 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
510 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
511 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
512 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
513 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
514 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
515 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
516 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
517 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
518 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
519 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
520 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
521 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
522 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
523 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
524 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
525 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
526 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
527 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
528 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
529 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
530 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
531 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
532 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
535 /* The value of TARGET_ATTRIBUTE_TABLE. */
536 const struct attribute_spec mips_attribute_table[] = {
537 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
538 { "long_call", 0, 0, false, true, true, NULL },
539 { "far", 0, 0, false, true, true, NULL },
540 { "near", 0, 0, false, true, true, NULL },
541 /* We would really like to treat "mips16" and "nomips16" as type
542 attributes, but GCC doesn't provide the hooks we need to support
543 the right conversion rules. As declaration attributes, they affect
544 code generation but don't carry other semantics. */
545 { "mips16", 0, 0, true, false, false, NULL },
546 { "nomips16", 0, 0, true, false, false, NULL },
547 { NULL, 0, 0, false, false, false, NULL }
550 /* A table describing all the processors GCC knows about. Names are
551 matched in the order listed. The first mention of an ISA level is
552 taken as the canonical name for that ISA.
554 To ease comparison, please keep this table in the same order
555 as GAS's mips_cpu_info_table. Please also make sure that
556 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
557 options correctly. */
558 static const struct mips_cpu_info mips_cpu_info_table[] = {
559 /* Entries for generic ISAs. */
560 { "mips1", PROCESSOR_R3000, 1, 0 },
561 { "mips2", PROCESSOR_R6000, 2, 0 },
562 { "mips3", PROCESSOR_R4000, 3, 0 },
563 { "mips4", PROCESSOR_R8000, 4, 0 },
564 /* Prefer not to use branch-likely instructions for generic MIPS32rX
565 and MIPS64rX code. The instructions were officially deprecated
566 in revisions 2 and earlier, but revision 3 is likely to downgrade
567 that to a recommendation to avoid the instructions in code that
568 isn't tuned to a specific processor. */
569 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
570 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
571 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
572 /* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
573 { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },
575 /* MIPS I processors. */
576 { "r3000", PROCESSOR_R3000, 1, 0 },
577 { "r2000", PROCESSOR_R3000, 1, 0 },
578 { "r3900", PROCESSOR_R3900, 1, 0 },
580 /* MIPS II processors. */
581 { "r6000", PROCESSOR_R6000, 2, 0 },
583 /* MIPS III processors. */
584 { "r4000", PROCESSOR_R4000, 3, 0 },
585 { "vr4100", PROCESSOR_R4100, 3, 0 },
586 { "vr4111", PROCESSOR_R4111, 3, 0 },
587 { "vr4120", PROCESSOR_R4120, 3, 0 },
588 { "vr4130", PROCESSOR_R4130, 3, 0 },
589 { "vr4300", PROCESSOR_R4300, 3, 0 },
590 { "r4400", PROCESSOR_R4000, 3, 0 },
591 { "r4600", PROCESSOR_R4600, 3, 0 },
592 { "orion", PROCESSOR_R4600, 3, 0 },
593 { "r4650", PROCESSOR_R4650, 3, 0 },
594 /* ST Loongson 2E/2F processors. */
595 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
596 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
598 /* MIPS IV processors. */
599 { "r8000", PROCESSOR_R8000, 4, 0 },
600 { "vr5000", PROCESSOR_R5000, 4, 0 },
601 { "vr5400", PROCESSOR_R5400, 4, 0 },
602 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
603 { "rm7000", PROCESSOR_R7000, 4, 0 },
604 { "rm9000", PROCESSOR_R9000, 4, 0 },
606 /* MIPS32 processors. */
607 { "4kc", PROCESSOR_4KC, 32, 0 },
608 { "4km", PROCESSOR_4KC, 32, 0 },
609 { "4kp", PROCESSOR_4KP, 32, 0 },
610 { "4ksc", PROCESSOR_4KC, 32, 0 },
612 /* MIPS32 Release 2 processors. */
613 { "m4k", PROCESSOR_M4K, 33, 0 },
614 { "4kec", PROCESSOR_4KC, 33, 0 },
615 { "4kem", PROCESSOR_4KC, 33, 0 },
616 { "4kep", PROCESSOR_4KP, 33, 0 },
617 { "4ksd", PROCESSOR_4KC, 33, 0 },
619 { "24kc", PROCESSOR_24KC, 33, 0 },
620 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
621 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
622 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
623 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
624 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
626 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
627 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
628 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
629 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
630 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
631 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
633 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
634 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
635 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
636 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
637 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
638 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
640 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
641 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
642 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
643 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
644 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
645 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
646 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
648 /* MIPS64 processors. */
649 { "5kc", PROCESSOR_5KC, 64, 0 },
650 { "5kf", PROCESSOR_5KF, 64, 0 },
651 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
652 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
653 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
654 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
655 { "xlr", PROCESSOR_XLR, 64, 0 }
658 /* Default costs. If these are used for a processor we should look
659 up the actual costs. */
660 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
661 COSTS_N_INSNS (7), /* fp_mult_sf */ \
662 COSTS_N_INSNS (8), /* fp_mult_df */ \
663 COSTS_N_INSNS (23), /* fp_div_sf */ \
664 COSTS_N_INSNS (36), /* fp_div_df */ \
665 COSTS_N_INSNS (10), /* int_mult_si */ \
666 COSTS_N_INSNS (10), /* int_mult_di */ \
667 COSTS_N_INSNS (69), /* int_div_si */ \
668 COSTS_N_INSNS (69), /* int_div_di */ \
669 2, /* branch_cost */ \
670 4 /* memory_latency */
672 /* Floating-point costs for processors without an FPU. Just assume that
673 all floating-point libcalls are very expensive. */
674 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
675 COSTS_N_INSNS (256), /* fp_mult_sf */ \
676 COSTS_N_INSNS (256), /* fp_mult_df */ \
677 COSTS_N_INSNS (256), /* fp_div_sf */ \
678 COSTS_N_INSNS (256) /* fp_div_df */
680 /* Costs to use when optimizing for size. */
681 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
682 COSTS_N_INSNS (1), /* fp_add */
683 COSTS_N_INSNS (1), /* fp_mult_sf */
684 COSTS_N_INSNS (1), /* fp_mult_df */
685 COSTS_N_INSNS (1), /* fp_div_sf */
686 COSTS_N_INSNS (1), /* fp_div_df */
687 COSTS_N_INSNS (1), /* int_mult_si */
688 COSTS_N_INSNS (1), /* int_mult_di */
689 COSTS_N_INSNS (1), /* int_div_si */
690 COSTS_N_INSNS (1), /* int_div_di */
691 2, /* branch_cost */
692 4 /* memory_latency */
695 /* Costs to use when optimizing for speed, indexed by processor. */
696 static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
697 { /* R3000 */
698 COSTS_N_INSNS (2), /* fp_add */
699 COSTS_N_INSNS (4), /* fp_mult_sf */
700 COSTS_N_INSNS (5), /* fp_mult_df */
701 COSTS_N_INSNS (12), /* fp_div_sf */
702 COSTS_N_INSNS (19), /* fp_div_df */
703 COSTS_N_INSNS (12), /* int_mult_si */
704 COSTS_N_INSNS (12), /* int_mult_di */
705 COSTS_N_INSNS (35), /* int_div_si */
706 COSTS_N_INSNS (35), /* int_div_di */
707 1, /* branch_cost */
708 4 /* memory_latency */
710 { /* 4KC */
711 SOFT_FP_COSTS,
712 COSTS_N_INSNS (6), /* int_mult_si */
713 COSTS_N_INSNS (6), /* int_mult_di */
714 COSTS_N_INSNS (36), /* int_div_si */
715 COSTS_N_INSNS (36), /* int_div_di */
716 1, /* branch_cost */
717 4 /* memory_latency */
719 { /* 4KP */
720 SOFT_FP_COSTS,
721 COSTS_N_INSNS (36), /* int_mult_si */
722 COSTS_N_INSNS (36), /* int_mult_di */
723 COSTS_N_INSNS (37), /* int_div_si */
724 COSTS_N_INSNS (37), /* int_div_di */
725 1, /* branch_cost */
726 4 /* memory_latency */
728 { /* 5KC */
729 SOFT_FP_COSTS,
730 COSTS_N_INSNS (4), /* int_mult_si */
731 COSTS_N_INSNS (11), /* int_mult_di */
732 COSTS_N_INSNS (36), /* int_div_si */
733 COSTS_N_INSNS (68), /* int_div_di */
734 1, /* branch_cost */
735 4 /* memory_latency */
737 { /* 5KF */
738 COSTS_N_INSNS (4), /* fp_add */
739 COSTS_N_INSNS (4), /* fp_mult_sf */
740 COSTS_N_INSNS (5), /* fp_mult_df */
741 COSTS_N_INSNS (17), /* fp_div_sf */
742 COSTS_N_INSNS (32), /* fp_div_df */
743 COSTS_N_INSNS (4), /* int_mult_si */
744 COSTS_N_INSNS (11), /* int_mult_di */
745 COSTS_N_INSNS (36), /* int_div_si */
746 COSTS_N_INSNS (68), /* int_div_di */
747 1, /* branch_cost */
748 4 /* memory_latency */
750 { /* 20KC */
751 COSTS_N_INSNS (4), /* fp_add */
752 COSTS_N_INSNS (4), /* fp_mult_sf */
753 COSTS_N_INSNS (5), /* fp_mult_df */
754 COSTS_N_INSNS (17), /* fp_div_sf */
755 COSTS_N_INSNS (32), /* fp_div_df */
756 COSTS_N_INSNS (4), /* int_mult_si */
757 COSTS_N_INSNS (7), /* int_mult_di */
758 COSTS_N_INSNS (42), /* int_div_si */
759 COSTS_N_INSNS (72), /* int_div_di */
760 1, /* branch_cost */
761 4 /* memory_latency */
763 { /* 24KC */
764 SOFT_FP_COSTS,
765 COSTS_N_INSNS (5), /* int_mult_si */
766 COSTS_N_INSNS (5), /* int_mult_di */
767 COSTS_N_INSNS (41), /* int_div_si */
768 COSTS_N_INSNS (41), /* int_div_di */
769 1, /* branch_cost */
770 4 /* memory_latency */
772 { /* 24KF2_1 */
773 COSTS_N_INSNS (8), /* fp_add */
774 COSTS_N_INSNS (8), /* fp_mult_sf */
775 COSTS_N_INSNS (10), /* fp_mult_df */
776 COSTS_N_INSNS (34), /* fp_div_sf */
777 COSTS_N_INSNS (64), /* fp_div_df */
778 COSTS_N_INSNS (5), /* int_mult_si */
779 COSTS_N_INSNS (5), /* int_mult_di */
780 COSTS_N_INSNS (41), /* int_div_si */
781 COSTS_N_INSNS (41), /* int_div_di */
782 1, /* branch_cost */
783 4 /* memory_latency */
785 { /* 24KF1_1 */
786 COSTS_N_INSNS (4), /* fp_add */
787 COSTS_N_INSNS (4), /* fp_mult_sf */
788 COSTS_N_INSNS (5), /* fp_mult_df */
789 COSTS_N_INSNS (17), /* fp_div_sf */
790 COSTS_N_INSNS (32), /* fp_div_df */
791 COSTS_N_INSNS (5), /* int_mult_si */
792 COSTS_N_INSNS (5), /* int_mult_di */
793 COSTS_N_INSNS (41), /* int_div_si */
794 COSTS_N_INSNS (41), /* int_div_di */
795 1, /* branch_cost */
796 4 /* memory_latency */
798 { /* 74KC */
799 SOFT_FP_COSTS,
800 COSTS_N_INSNS (5), /* int_mult_si */
801 COSTS_N_INSNS (5), /* int_mult_di */
802 COSTS_N_INSNS (41), /* int_div_si */
803 COSTS_N_INSNS (41), /* int_div_di */
804 1, /* branch_cost */
805 4 /* memory_latency */
807 { /* 74KF2_1 */
808 COSTS_N_INSNS (8), /* fp_add */
809 COSTS_N_INSNS (8), /* fp_mult_sf */
810 COSTS_N_INSNS (10), /* fp_mult_df */
811 COSTS_N_INSNS (34), /* fp_div_sf */
812 COSTS_N_INSNS (64), /* fp_div_df */
813 COSTS_N_INSNS (5), /* int_mult_si */
814 COSTS_N_INSNS (5), /* int_mult_di */
815 COSTS_N_INSNS (41), /* int_div_si */
816 COSTS_N_INSNS (41), /* int_div_di */
817 1, /* branch_cost */
818 4 /* memory_latency */
820 { /* 74KF1_1 */
821 COSTS_N_INSNS (4), /* fp_add */
822 COSTS_N_INSNS (4), /* fp_mult_sf */
823 COSTS_N_INSNS (5), /* fp_mult_df */
824 COSTS_N_INSNS (17), /* fp_div_sf */
825 COSTS_N_INSNS (32), /* fp_div_df */
826 COSTS_N_INSNS (5), /* int_mult_si */
827 COSTS_N_INSNS (5), /* int_mult_di */
828 COSTS_N_INSNS (41), /* int_div_si */
829 COSTS_N_INSNS (41), /* int_div_di */
830 1, /* branch_cost */
831 4 /* memory_latency */
833 { /* 74KF3_2 */
834 COSTS_N_INSNS (6), /* fp_add */
835 COSTS_N_INSNS (6), /* fp_mult_sf */
836 COSTS_N_INSNS (7), /* fp_mult_df */
837 COSTS_N_INSNS (25), /* fp_div_sf */
838 COSTS_N_INSNS (48), /* fp_div_df */
839 COSTS_N_INSNS (5), /* int_mult_si */
840 COSTS_N_INSNS (5), /* int_mult_di */
841 COSTS_N_INSNS (41), /* int_div_si */
842 COSTS_N_INSNS (41), /* int_div_di */
843 1, /* branch_cost */
844 4 /* memory_latency */
846 { /* Loongson-2E */
847 DEFAULT_COSTS
849 { /* Loongson-2F */
850 DEFAULT_COSTS
852 { /* M4k */
853 DEFAULT_COSTS
855 { /* R3900 */
856 COSTS_N_INSNS (2), /* fp_add */
857 COSTS_N_INSNS (4), /* fp_mult_sf */
858 COSTS_N_INSNS (5), /* fp_mult_df */
859 COSTS_N_INSNS (12), /* fp_div_sf */
860 COSTS_N_INSNS (19), /* fp_div_df */
861 COSTS_N_INSNS (2), /* int_mult_si */
862 COSTS_N_INSNS (2), /* int_mult_di */
863 COSTS_N_INSNS (35), /* int_div_si */
864 COSTS_N_INSNS (35), /* int_div_di */
865 1, /* branch_cost */
866 4 /* memory_latency */
868 { /* R6000 */
869 COSTS_N_INSNS (3), /* fp_add */
870 COSTS_N_INSNS (5), /* fp_mult_sf */
871 COSTS_N_INSNS (6), /* fp_mult_df */
872 COSTS_N_INSNS (15), /* fp_div_sf */
873 COSTS_N_INSNS (16), /* fp_div_df */
874 COSTS_N_INSNS (17), /* int_mult_si */
875 COSTS_N_INSNS (17), /* int_mult_di */
876 COSTS_N_INSNS (38), /* int_div_si */
877 COSTS_N_INSNS (38), /* int_div_di */
878 2, /* branch_cost */
879 6 /* memory_latency */
881 { /* R4000 */
882 COSTS_N_INSNS (6), /* fp_add */
883 COSTS_N_INSNS (7), /* fp_mult_sf */
884 COSTS_N_INSNS (8), /* fp_mult_df */
885 COSTS_N_INSNS (23), /* fp_div_sf */
886 COSTS_N_INSNS (36), /* fp_div_df */
887 COSTS_N_INSNS (10), /* int_mult_si */
888 COSTS_N_INSNS (10), /* int_mult_di */
889 COSTS_N_INSNS (69), /* int_div_si */
890 COSTS_N_INSNS (69), /* int_div_di */
891 2, /* branch_cost */
892 6 /* memory_latency */
894 { /* R4100 */
895 DEFAULT_COSTS
897 { /* R4111 */
898 DEFAULT_COSTS
900 { /* R4120 */
901 DEFAULT_COSTS
903 { /* R4130 */
904 /* The only costs that appear to be updated here are
905 integer multiplication. */
906 SOFT_FP_COSTS,
907 COSTS_N_INSNS (4), /* int_mult_si */
908 COSTS_N_INSNS (6), /* int_mult_di */
909 COSTS_N_INSNS (69), /* int_div_si */
910 COSTS_N_INSNS (69), /* int_div_di */
911 1, /* branch_cost */
912 4 /* memory_latency */
914 { /* R4300 */
915 DEFAULT_COSTS
917 { /* R4600 */
918 DEFAULT_COSTS
920 { /* R4650 */
921 DEFAULT_COSTS
923 { /* R5000 */
924 COSTS_N_INSNS (6), /* fp_add */
925 COSTS_N_INSNS (4), /* fp_mult_sf */
926 COSTS_N_INSNS (5), /* fp_mult_df */
927 COSTS_N_INSNS (23), /* fp_div_sf */
928 COSTS_N_INSNS (36), /* fp_div_df */
929 COSTS_N_INSNS (5), /* int_mult_si */
930 COSTS_N_INSNS (5), /* int_mult_di */
931 COSTS_N_INSNS (36), /* int_div_si */
932 COSTS_N_INSNS (36), /* int_div_di */
933 1, /* branch_cost */
934 4 /* memory_latency */
936 { /* R5400 */
937 COSTS_N_INSNS (6), /* fp_add */
938 COSTS_N_INSNS (5), /* fp_mult_sf */
939 COSTS_N_INSNS (6), /* fp_mult_df */
940 COSTS_N_INSNS (30), /* fp_div_sf */
941 COSTS_N_INSNS (59), /* fp_div_df */
942 COSTS_N_INSNS (3), /* int_mult_si */
943 COSTS_N_INSNS (4), /* int_mult_di */
944 COSTS_N_INSNS (42), /* int_div_si */
945 COSTS_N_INSNS (74), /* int_div_di */
946 1, /* branch_cost */
947 4 /* memory_latency */
949 { /* R5500 */
950 COSTS_N_INSNS (6), /* fp_add */
951 COSTS_N_INSNS (5), /* fp_mult_sf */
952 COSTS_N_INSNS (6), /* fp_mult_df */
953 COSTS_N_INSNS (30), /* fp_div_sf */
954 COSTS_N_INSNS (59), /* fp_div_df */
955 COSTS_N_INSNS (5), /* int_mult_si */
956 COSTS_N_INSNS (9), /* int_mult_di */
957 COSTS_N_INSNS (42), /* int_div_si */
958 COSTS_N_INSNS (74), /* int_div_di */
959 1, /* branch_cost */
960 4 /* memory_latency */
962 { /* R7000 */
963 /* The only costs that are changed here are
964 integer multiplication. */
965 COSTS_N_INSNS (6), /* fp_add */
966 COSTS_N_INSNS (7), /* fp_mult_sf */
967 COSTS_N_INSNS (8), /* fp_mult_df */
968 COSTS_N_INSNS (23), /* fp_div_sf */
969 COSTS_N_INSNS (36), /* fp_div_df */
970 COSTS_N_INSNS (5), /* int_mult_si */
971 COSTS_N_INSNS (9), /* int_mult_di */
972 COSTS_N_INSNS (69), /* int_div_si */
973 COSTS_N_INSNS (69), /* int_div_di */
974 1, /* branch_cost */
975 4 /* memory_latency */
977 { /* R8000 */
978 DEFAULT_COSTS
980 { /* R9000 */
981 /* The only costs that are changed here are
982 integer multiplication. */
983 COSTS_N_INSNS (6), /* fp_add */
984 COSTS_N_INSNS (7), /* fp_mult_sf */
985 COSTS_N_INSNS (8), /* fp_mult_df */
986 COSTS_N_INSNS (23), /* fp_div_sf */
987 COSTS_N_INSNS (36), /* fp_div_df */
988 COSTS_N_INSNS (3), /* int_mult_si */
989 COSTS_N_INSNS (8), /* int_mult_di */
990 COSTS_N_INSNS (69), /* int_div_si */
991 COSTS_N_INSNS (69), /* int_div_di */
992 1, /* branch_cost */
993 4 /* memory_latency */
995 { /* SB1 */
996 /* These costs are the same as the SB-1A below. */
997 COSTS_N_INSNS (4), /* fp_add */
998 COSTS_N_INSNS (4), /* fp_mult_sf */
999 COSTS_N_INSNS (4), /* fp_mult_df */
1000 COSTS_N_INSNS (24), /* fp_div_sf */
1001 COSTS_N_INSNS (32), /* fp_div_df */
1002 COSTS_N_INSNS (3), /* int_mult_si */
1003 COSTS_N_INSNS (4), /* int_mult_di */
1004 COSTS_N_INSNS (36), /* int_div_si */
1005 COSTS_N_INSNS (68), /* int_div_di */
1006 1, /* branch_cost */
1007 4 /* memory_latency */
1009 { /* SB1-A */
1010 /* These costs are the same as the SB-1 above. */
1011 COSTS_N_INSNS (4), /* fp_add */
1012 COSTS_N_INSNS (4), /* fp_mult_sf */
1013 COSTS_N_INSNS (4), /* fp_mult_df */
1014 COSTS_N_INSNS (24), /* fp_div_sf */
1015 COSTS_N_INSNS (32), /* fp_div_df */
1016 COSTS_N_INSNS (3), /* int_mult_si */
1017 COSTS_N_INSNS (4), /* int_mult_di */
1018 COSTS_N_INSNS (36), /* int_div_si */
1019 COSTS_N_INSNS (68), /* int_div_di */
1020 1, /* branch_cost */
1021 4 /* memory_latency */
1023 { /* SR71000 */
1024 DEFAULT_COSTS
1026 { /* XLR */
1027 /* Need to replace first five with the costs of calling the appropriate
1028 libgcc routine. */
1029 COSTS_N_INSNS (256), /* fp_add */
1030 COSTS_N_INSNS (256), /* fp_mult_sf */
1031 COSTS_N_INSNS (256), /* fp_mult_df */
1032 COSTS_N_INSNS (256), /* fp_div_sf */
1033 COSTS_N_INSNS (256), /* fp_div_df */
1034 COSTS_N_INSNS (8), /* int_mult_si */
1035 COSTS_N_INSNS (8), /* int_mult_di */
1036 COSTS_N_INSNS (72), /* int_div_si */
1037 COSTS_N_INSNS (72), /* int_div_di */
1038 1, /* branch_cost */
1039 4 /* memory_latency */
1043 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1044 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1045 struct mflip_mips16_entry GTY (()) {
1046 const char *name;
1047 bool mips16_p;
1049 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1051 /* Hash table callbacks for mflip_mips16_htab. */
1053 static hashval_t
1054 mflip_mips16_htab_hash (const void *entry)
1056 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1059 static int
1060 mflip_mips16_htab_eq (const void *entry, const void *name)
1062 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1063 (const char *) name) == 0;
1066 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1067 mode, false if it should next add an attribute for the opposite mode. */
1068 static GTY(()) bool mips16_flipper;
1070 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1071 for -mflip-mips16. Return true if it should use "mips16" and false if
1072 it should use "nomips16". */
1074 static bool
1075 mflip_mips16_use_mips16_p (tree decl)
1077 struct mflip_mips16_entry *entry;
1078 const char *name;
1079 hashval_t hash;
1080 void **slot;
1082 /* Use the opposite of the command-line setting for anonymous decls. */
1083 if (!DECL_NAME (decl))
1084 return !mips_base_mips16;
1086 if (!mflip_mips16_htab)
1087 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1088 mflip_mips16_htab_eq, NULL);
1090 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1091 hash = htab_hash_string (name);
1092 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1093 entry = (struct mflip_mips16_entry *) *slot;
1094 if (!entry)
1096 mips16_flipper = !mips16_flipper;
1097 entry = GGC_NEW (struct mflip_mips16_entry);
1098 entry->name = name;
1099 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1100 *slot = entry;
1102 return entry->mips16_p;
1105 /* Predicates to test for presence of "near" and "far"/"long_call"
1106 attributes on the given TYPE. */
1108 static bool
1109 mips_near_type_p (const_tree type)
1111 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1114 static bool
1115 mips_far_type_p (const_tree type)
1117 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1118 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1121 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1123 static bool
1124 mips_mips16_decl_p (const_tree decl)
1126 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1129 static bool
1130 mips_nomips16_decl_p (const_tree decl)
1132 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1135 /* Return true if function DECL is a MIPS16 function. Return the ambient
1136 setting if DECL is null. */
1138 static bool
1139 mips_use_mips16_mode_p (tree decl)
1141 if (decl)
1143 /* Nested functions must use the same frame pointer as their
1144 parent and must therefore use the same ISA mode. */
1145 tree parent = decl_function_context (decl);
1146 if (parent)
1147 decl = parent;
1148 if (mips_mips16_decl_p (decl))
1149 return true;
1150 if (mips_nomips16_decl_p (decl))
1151 return false;
1153 return mips_base_mips16;
1156 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1158 static int
1159 mips_comp_type_attributes (const_tree type1, const_tree type2)
1161 /* Disallow mixed near/far attributes. */
1162 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1163 return 0;
1164 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1165 return 0;
1166 return 1;
1169 /* Implement TARGET_INSERT_ATTRIBUTES. */
1171 static void
1172 mips_insert_attributes (tree decl, tree *attributes)
1174 const char *name;
1175 bool mips16_p, nomips16_p;
1177 /* Check for "mips16" and "nomips16" attributes. */
1178 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1179 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1180 if (TREE_CODE (decl) != FUNCTION_DECL)
1182 if (mips16_p)
1183 error ("%qs attribute only applies to functions", "mips16");
1184 if (nomips16_p)
1185 error ("%qs attribute only applies to functions", "nomips16");
1187 else
1189 mips16_p |= mips_mips16_decl_p (decl);
1190 nomips16_p |= mips_nomips16_decl_p (decl);
1191 if (mips16_p || nomips16_p)
1193 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1194 if (mips16_p && nomips16_p)
1195 error ("%qs cannot have both %<mips16%> and "
1196 "%<nomips16%> attributes",
1197 IDENTIFIER_POINTER (DECL_NAME (decl)));
1199 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1201 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1202 "mips16" attribute, arbitrarily pick one. We must pick the same
1203 setting for duplicate declarations of a function. */
1204 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1205 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1210 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1212 static tree
1213 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1215 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1216 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1217 error ("%qs redeclared with conflicting %qs attributes",
1218 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1219 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1220 error ("%qs redeclared with conflicting %qs attributes",
1221 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1223 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1224 DECL_ATTRIBUTES (newdecl));
1227 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1228 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1230 static void
1231 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1233 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1235 *base_ptr = XEXP (x, 0);
1236 *offset_ptr = INTVAL (XEXP (x, 1));
1238 else
1240 *base_ptr = x;
1241 *offset_ptr = 0;
1245 static unsigned int mips_build_integer (struct mips_integer_op *,
1246 unsigned HOST_WIDE_INT);
1248 /* A subroutine of mips_build_integer, with the same interface.
1249 Assume that the final action in the sequence should be a left shift. */
1251 static unsigned int
1252 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1254 unsigned int i, shift;
1256 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1257 since signed numbers are easier to load than unsigned ones. */
1258 shift = 0;
1259 while ((value & 1) == 0)
1260 value /= 2, shift++;
1262 i = mips_build_integer (codes, value);
1263 codes[i].code = ASHIFT;
1264 codes[i].value = shift;
1265 return i + 1;
1268 /* As for mips_build_shift, but assume that the final action will be
1269 an IOR or PLUS operation. */
1271 static unsigned int
1272 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1274 unsigned HOST_WIDE_INT high;
1275 unsigned int i;
1277 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1278 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1280 /* The constant is too complex to load with a simple LUI/ORI pair,
1281 so we want to give the recursive call as many trailing zeros as
1282 possible. In this case, we know bit 16 is set and that the
1283 low 16 bits form a negative number. If we subtract that number
1284 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1285 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1286 codes[i].code = PLUS;
1287 codes[i].value = CONST_LOW_PART (value);
1289 else
1291 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1292 bits gives a value with at least 17 trailing zeros. */
1293 i = mips_build_integer (codes, high);
1294 codes[i].code = IOR;
1295 codes[i].value = value & 0xffff;
1297 return i + 1;
1300 /* Fill CODES with a sequence of rtl operations to load VALUE.
1301 Return the number of operations needed. */
1303 static unsigned int
1304 mips_build_integer (struct mips_integer_op *codes,
1305 unsigned HOST_WIDE_INT value)
1307 if (SMALL_OPERAND (value)
1308 || SMALL_OPERAND_UNSIGNED (value)
1309 || LUI_OPERAND (value))
1311 /* The value can be loaded with a single instruction. */
1312 codes[0].code = UNKNOWN;
1313 codes[0].value = value;
1314 return 1;
1316 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1318 /* Either the constant is a simple LUI/ORI combination or its
1319 lowest bit is set. We don't want to shift in this case. */
1320 return mips_build_lower (codes, value);
1322 else if ((value & 0xffff) == 0)
1324 /* The constant will need at least three actions. The lowest
1325 16 bits are clear, so the final action will be a shift. */
1326 return mips_build_shift (codes, value);
1328 else
1330 /* The final action could be a shift, add or inclusive OR.
1331 Rather than use a complex condition to select the best
1332 approach, try both mips_build_shift and mips_build_lower
1333 and pick the one that gives the shortest sequence.
1334 Note that this case is only used once per constant. */
1335 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1336 unsigned int cost, alt_cost;
1338 cost = mips_build_shift (codes, value);
1339 alt_cost = mips_build_lower (alt_codes, value);
1340 if (alt_cost < cost)
1342 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1343 cost = alt_cost;
1345 return cost;
1349 /* Return true if symbols of type TYPE require a GOT access. */
1351 static bool
1352 mips_got_symbol_type_p (enum mips_symbol_type type)
1354 switch (type)
1356 case SYMBOL_GOT_PAGE_OFST:
1357 case SYMBOL_GOT_DISP:
1358 return true;
1360 default:
1361 return false;
1365 /* Return true if X is a thread-local symbol. */
1367 static bool
1368 mips_tls_symbol_p (rtx x)
1370 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1373 /* Return true if SYMBOL_REF X is associated with a global symbol
1374 (in the STB_GLOBAL sense). */
1376 static bool
1377 mips_global_symbol_p (const_rtx x)
1379 const_tree decl = SYMBOL_REF_DECL (x);
1381 if (!decl)
1382 return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1384 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1385 or weak symbols. Relocations in the object file will be against
1386 the target symbol, so it's that symbol's binding that matters here. */
1387 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1390 /* Return true if function X is a libgcc MIPS16 stub function. */
1392 static bool
1393 mips16_stub_function_p (const_rtx x)
1395 return (GET_CODE (x) == SYMBOL_REF
1396 && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1399 /* Return true if function X is a locally-defined and locally-binding
1400 MIPS16 function. */
1402 static bool
1403 mips16_local_function_p (const_rtx x)
1405 return (GET_CODE (x) == SYMBOL_REF
1406 && SYMBOL_REF_LOCAL_P (x)
1407 && !SYMBOL_REF_EXTERNAL_P (x)
1408 && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
1411 /* Return true if SYMBOL_REF X binds locally. */
1413 static bool
1414 mips_symbol_binds_local_p (const_rtx x)
1416 return (SYMBOL_REF_DECL (x)
1417 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1418 : SYMBOL_REF_LOCAL_P (x));
1421 /* Return true if rtx constants of mode MODE should be put into a small
1422 data section. */
1424 static bool
1425 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1427 return (!TARGET_EMBEDDED_DATA
1428 && TARGET_LOCAL_SDATA
1429 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1432 /* Return true if X should not be moved directly into register $25.
1433 We need this because many versions of GAS will treat "la $25,foo" as
1434 part of a call sequence and so allow a global "foo" to be lazily bound. */
1436 bool
1437 mips_dangerous_for_la25_p (rtx x)
1439 return (!TARGET_EXPLICIT_RELOCS
1440 && TARGET_USE_GOT
1441 && GET_CODE (x) == SYMBOL_REF
1442 && mips_global_symbol_p (x));
1445 /* Return true if calls to X might need $25 to be valid on entry. */
1447 bool
1448 mips_use_pic_fn_addr_reg_p (const_rtx x)
1450 if (!TARGET_USE_PIC_FN_ADDR_REG)
1451 return false;
1453 /* MIPS16 stub functions are guaranteed not to use $25. */
1454 if (mips16_stub_function_p (x))
1455 return false;
1457 if (GET_CODE (x) == SYMBOL_REF)
1459 /* If PLTs and copy relocations are available, the static linker
1460 will make sure that $25 is valid on entry to the target function. */
1461 if (TARGET_ABICALLS_PIC0)
1462 return false;
1464 /* Locally-defined functions use absolute accesses to set up
1465 the global pointer. */
1466 if (TARGET_ABSOLUTE_ABICALLS
1467 && mips_symbol_binds_local_p (x)
1468 && !SYMBOL_REF_EXTERNAL_P (x))
1469 return false;
1472 return true;
1475 /* Return the method that should be used to access SYMBOL_REF or
1476 LABEL_REF X in context CONTEXT. */
1478 static enum mips_symbol_type
1479 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1481 if (TARGET_RTP_PIC)
1482 return SYMBOL_GOT_DISP;
1484 if (GET_CODE (x) == LABEL_REF)
1486 /* LABEL_REFs are used for jump tables as well as text labels.
1487 Only return SYMBOL_PC_RELATIVE if we know the label is in
1488 the text section. */
1489 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1490 return SYMBOL_PC_RELATIVE;
1492 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1493 return SYMBOL_GOT_PAGE_OFST;
1495 return SYMBOL_ABSOLUTE;
1498 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1500 if (SYMBOL_REF_TLS_MODEL (x))
1501 return SYMBOL_TLS;
1503 if (CONSTANT_POOL_ADDRESS_P (x))
1505 if (TARGET_MIPS16_TEXT_LOADS)
1506 return SYMBOL_PC_RELATIVE;
1508 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1509 return SYMBOL_PC_RELATIVE;
1511 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1512 return SYMBOL_GP_RELATIVE;
1515 /* Do not use small-data accesses for weak symbols; they may end up
1516 being zero. */
1517 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1518 return SYMBOL_GP_RELATIVE;
1520 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1521 is in effect. */
1522 if (TARGET_ABICALLS_PIC2
1523 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1525 /* There are three cases to consider:
1527 - o32 PIC (either with or without explicit relocs)
1528 - n32/n64 PIC without explicit relocs
1529 - n32/n64 PIC with explicit relocs
1531 In the first case, both local and global accesses will use an
1532 R_MIPS_GOT16 relocation. We must correctly predict which of
1533 the two semantics (local or global) the assembler and linker
1534 will apply. The choice depends on the symbol's binding rather
1535 than its visibility.
1537 In the second case, the assembler will not use R_MIPS_GOT16
1538 relocations, but it chooses between local and global accesses
1539 in the same way as for o32 PIC.
1541 In the third case we have more freedom since both forms of
1542 access will work for any kind of symbol. However, there seems
1543 little point in doing things differently. */
1544 if (mips_global_symbol_p (x))
1545 return SYMBOL_GOT_DISP;
1547 return SYMBOL_GOT_PAGE_OFST;
1550 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1551 return SYMBOL_FORCE_TO_MEM;
1553 return SYMBOL_ABSOLUTE;
1556 /* Classify the base of symbolic expression X, given that X appears in
1557 context CONTEXT. */
1559 static enum mips_symbol_type
1560 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1562 rtx offset;
1564 split_const (x, &x, &offset);
1565 if (UNSPEC_ADDRESS_P (x))
1566 return UNSPEC_ADDRESS_TYPE (x);
1568 return mips_classify_symbol (x, context);
1571 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1572 is the alignment in bytes of SYMBOL_REF X. */
1574 static bool
1575 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1577 HOST_WIDE_INT align;
1579 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1580 return IN_RANGE (offset, 0, align - 1);
1583 /* Return true if X is a symbolic constant that can be used in context
1584 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1586 bool
1587 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1588 enum mips_symbol_type *symbol_type)
1590 rtx offset;
1592 split_const (x, &x, &offset);
1593 if (UNSPEC_ADDRESS_P (x))
1595 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1596 x = UNSPEC_ADDRESS (x);
1598 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1600 *symbol_type = mips_classify_symbol (x, context);
1601 if (*symbol_type == SYMBOL_TLS)
1602 return false;
1604 else
1605 return false;
1607 if (offset == const0_rtx)
1608 return true;
1610 /* Check whether a nonzero offset is valid for the underlying
1611 relocations. */
1612 switch (*symbol_type)
1614 case SYMBOL_ABSOLUTE:
1615 case SYMBOL_FORCE_TO_MEM:
1616 case SYMBOL_32_HIGH:
1617 case SYMBOL_64_HIGH:
1618 case SYMBOL_64_MID:
1619 case SYMBOL_64_LOW:
1620 /* If the target has 64-bit pointers and the object file only
1621 supports 32-bit symbols, the values of those symbols will be
1622 sign-extended. In this case we can't allow an arbitrary offset
1623 in case the 32-bit value X + OFFSET has a different sign from X. */
1624 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1625 return offset_within_block_p (x, INTVAL (offset));
1627 /* In other cases the relocations can handle any offset. */
1628 return true;
1630 case SYMBOL_PC_RELATIVE:
1631 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1632 In this case, we no longer have access to the underlying constant,
1633 but the original symbol-based access was known to be valid. */
1634 if (GET_CODE (x) == LABEL_REF)
1635 return true;
1637 /* Fall through. */
1639 case SYMBOL_GP_RELATIVE:
1640 /* Make sure that the offset refers to something within the
1641 same object block. This should guarantee that the final
1642 PC- or GP-relative offset is within the 16-bit limit. */
1643 return offset_within_block_p (x, INTVAL (offset));
1645 case SYMBOL_GOT_PAGE_OFST:
1646 case SYMBOL_GOTOFF_PAGE:
1647 /* If the symbol is global, the GOT entry will contain the symbol's
1648 address, and we will apply a 16-bit offset after loading it.
1649 If the symbol is local, the linker should provide enough local
1650 GOT entries for a 16-bit offset, but larger offsets may lead
1651 to GOT overflow. */
1652 return SMALL_INT (offset);
1654 case SYMBOL_TPREL:
1655 case SYMBOL_DTPREL:
1656 /* There is no carry between the HI and LO REL relocations, so the
1657 offset is only valid if we know it won't lead to such a carry. */
1658 return mips_offset_within_alignment_p (x, INTVAL (offset));
1660 case SYMBOL_GOT_DISP:
1661 case SYMBOL_GOTOFF_DISP:
1662 case SYMBOL_GOTOFF_CALL:
1663 case SYMBOL_GOTOFF_LOADGP:
1664 case SYMBOL_TLSGD:
1665 case SYMBOL_TLSLDM:
1666 case SYMBOL_GOTTPREL:
1667 case SYMBOL_TLS:
1668 case SYMBOL_HALF:
1669 return false;
1671 gcc_unreachable ();
1674 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1675 single instruction. We rely on the fact that, in the worst case,
1676 all instructions involved in a MIPS16 address calculation are usually
1677 extended ones. */
1679 static int
1680 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1682 switch (type)
1684 case SYMBOL_ABSOLUTE:
1685 /* When using 64-bit symbols, we need 5 preparatory instructions,
1686 such as:
1688 lui $at,%highest(symbol)
1689 daddiu $at,$at,%higher(symbol)
1690 dsll $at,$at,16
1691 daddiu $at,$at,%hi(symbol)
1692 dsll $at,$at,16
1694 The final address is then $at + %lo(symbol). With 32-bit
1695 symbols we just need a preparatory LUI for normal mode and
1696 a preparatory LI and SLL for MIPS16. */
1697 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1699 case SYMBOL_GP_RELATIVE:
1700 /* Treat GP-relative accesses as taking a single instruction on
1701 MIPS16 too; the copy of $gp can often be shared. */
1702 return 1;
1704 case SYMBOL_PC_RELATIVE:
1705 /* PC-relative constants can be only be used with ADDIUPC,
1706 DADDIUPC, LWPC and LDPC. */
1707 if (mode == MAX_MACHINE_MODE
1708 || GET_MODE_SIZE (mode) == 4
1709 || GET_MODE_SIZE (mode) == 8)
1710 return 1;
1712 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1713 return 0;
1715 case SYMBOL_FORCE_TO_MEM:
1716 /* LEAs will be converted into constant-pool references by
1717 mips_reorg. */
1718 if (mode == MAX_MACHINE_MODE)
1719 return 1;
1721 /* The constant must be loaded and then dereferenced. */
1722 return 0;
1724 case SYMBOL_GOT_DISP:
1725 /* The constant will have to be loaded from the GOT before it
1726 is used in an address. */
1727 if (mode != MAX_MACHINE_MODE)
1728 return 0;
1730 /* Fall through. */
1732 case SYMBOL_GOT_PAGE_OFST:
1733 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1734 local/global classification is accurate. The worst cases are:
1736 (1) For local symbols when generating o32 or o64 code. The assembler
1737 will use:
1739 lw $at,%got(symbol)
1742 ...and the final address will be $at + %lo(symbol).
1744 (2) For global symbols when -mxgot. The assembler will use:
1746 lui $at,%got_hi(symbol)
1747 (d)addu $at,$at,$gp
1749 ...and the final address will be $at + %got_lo(symbol). */
1750 return 3;
1752 case SYMBOL_GOTOFF_PAGE:
1753 case SYMBOL_GOTOFF_DISP:
1754 case SYMBOL_GOTOFF_CALL:
1755 case SYMBOL_GOTOFF_LOADGP:
1756 case SYMBOL_32_HIGH:
1757 case SYMBOL_64_HIGH:
1758 case SYMBOL_64_MID:
1759 case SYMBOL_64_LOW:
1760 case SYMBOL_TLSGD:
1761 case SYMBOL_TLSLDM:
1762 case SYMBOL_DTPREL:
1763 case SYMBOL_GOTTPREL:
1764 case SYMBOL_TPREL:
1765 case SYMBOL_HALF:
1766 /* A 16-bit constant formed by a single relocation, or a 32-bit
1767 constant formed from a high 16-bit relocation and a low 16-bit
1768 relocation. Use mips_split_p to determine which. 32-bit
1769 constants need an "lui; addiu" sequence for normal mode and
1770 an "li; sll; addiu" sequence for MIPS16 mode. */
1771 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1773 case SYMBOL_TLS:
1774 /* We don't treat a bare TLS symbol as a constant. */
1775 return 0;
1777 gcc_unreachable ();
1780 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1781 to load symbols of type TYPE into a register. Return 0 if the given
1782 type of symbol cannot be used as an immediate operand.
1784 Otherwise, return the number of instructions needed to load or store
1785 values of mode MODE to or from addresses of type TYPE. Return 0 if
1786 the given type of symbol is not valid in addresses.
1788 In both cases, treat extended MIPS16 instructions as two instructions. */
1790 static int
1791 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1793 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
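/* An illustration of the counts above, assuming 32-bit symbols:
   mips_symbol_insns_1 (SYMBOL_ABSOLUTE, ...) is 3 for MIPS16 (LI, SLL
   and the final access), so mips_symbol_insns returns 6 on the
   worst-case assumption that every instruction is extended.  A
   SYMBOL_GP_RELATIVE access likewise doubles from 1 to 2 in MIPS16.  */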
1796 /* A for_each_rtx callback. Stop the search if *X references a
1797 thread-local symbol. */
1799 static int
1800 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1802 return mips_tls_symbol_p (*x);
1805 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1807 static bool
1808 mips_cannot_force_const_mem (rtx x)
1810 enum mips_symbol_type type;
1811 rtx base, offset;
1813 /* There is no assembler syntax for expressing an address-sized
1814 high part. */
1815 if (GET_CODE (x) == HIGH)
1816 return true;
1818 /* As an optimization, reject constants that mips_legitimize_move
1819 can expand inline.
1821 Suppose we have a multi-instruction sequence that loads constant C
1822 into register R. If R does not get allocated a hard register, and
1823 R is used in an operand that allows both registers and memory
1824 references, reload will consider forcing C into memory and using
1825 one of the instruction's memory alternatives. Returning true
1826 here will force it to use an input reload instead. */
1827 if (GET_CODE (x) == CONST_INT && LEGITIMATE_CONSTANT_P (x))
1828 return true;
1830 split_const (x, &base, &offset);
1831 if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
1832 && type != SYMBOL_FORCE_TO_MEM)
1834 /* The same optimization as for CONST_INT. */
1835 if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
1836 return true;
1838 /* If MIPS16 constant pools live in the text section, they should
1839 not refer to anything that might need run-time relocation. */
1840 if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
1841 return true;
1844 /* TLS symbols must be computed by mips_legitimize_move. */
1845 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1846 return true;
1848 return false;
1851 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1852 constants when we're using a per-function constant pool. */
1854 static bool
1855 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1856 const_rtx x ATTRIBUTE_UNUSED)
1858 return !TARGET_MIPS16_PCREL_LOADS;
1861 /* Return true if register REGNO is a valid base register for mode MODE.
1862 STRICT_P is true if REG_OK_STRICT is in effect. */
1865 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1866 bool strict_p)
1868 if (!HARD_REGISTER_NUM_P (regno))
1870 if (!strict_p)
1871 return true;
1872 regno = reg_renumber[regno];
1875 /* These fake registers will be eliminated to either the stack or
1876 hard frame pointer, both of which are usually valid base registers.
1877 Reload deals with the cases where the eliminated form isn't valid. */
1878 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1879 return true;
1881 /* In MIPS16 mode, the stack pointer can only address word and doubleword
1882 values, nothing smaller. There are two problems here:
1884 (a) Instantiating virtual registers can introduce new uses of the
1885 stack pointer. If these virtual registers are valid addresses,
1886 the stack pointer should be too.
1888 (b) Most uses of the stack pointer are not made explicit until
1889 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1890 We don't know until that stage whether we'll be eliminating to the
1891 stack pointer (which needs the restriction) or the hard frame
1892 pointer (which doesn't).
1894 All in all, it seems more consistent to only enforce this restriction
1895 during and after reload. */
1896 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1897 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1899 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1902 /* Return true if X is a valid base register for mode MODE.
1903 STRICT_P is true if REG_OK_STRICT is in effect. */
1905 static bool
1906 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1908 if (!strict_p && GET_CODE (x) == SUBREG)
1909 x = SUBREG_REG (x);
1911 return (REG_P (x)
1912 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1915 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1916 can address a value of mode MODE. */
1918 static bool
1919 mips_valid_offset_p (rtx x, enum machine_mode mode)
1921 /* Check that X is a signed 16-bit number. */
1922 if (!const_arith_operand (x, Pmode))
1923 return false;
1925 /* We may need to split multiword moves, so make sure that every word
1926 is accessible. */
1927 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1928 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1929 return false;
1931 return true;
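/* For example, assuming a 32-bit target (UNITS_PER_WORD == 4), a DImode
   access at offset 0x7ffc is rejected: the offset itself is a signed
   16-bit value, but the second word would be at offset 0x8000, which
   SMALL_OPERAND does not accept.  */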
1934 /* Return true if a LO_SUM can address a value of mode MODE when the
1935 LO_SUM symbol has type SYMBOL_TYPE. */
1937 static bool
1938 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
1940 /* Check that symbols of type SYMBOL_TYPE can be used to access values
1941 of mode MODE. */
1942 if (mips_symbol_insns (symbol_type, mode) == 0)
1943 return false;
1945 /* Check that there is a known low-part relocation. */
1946 if (mips_lo_relocs[symbol_type] == NULL)
1947 return false;
1949 /* We may need to split multiword moves, so make sure that each word
1950 can be accessed without inducing a carry. This is mainly needed
1951 for o64, which has historically only guaranteed 64-bit alignment
1952 for 128-bit types. */
1953 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1954 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1955 return false;
1957 return true;
1960 /* Return true if X is a valid address for machine mode MODE. If it is,
1961 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
1962 effect. */
1964 static bool
1965 mips_classify_address (struct mips_address_info *info, rtx x,
1966 enum machine_mode mode, bool strict_p)
1968 switch (GET_CODE (x))
1970 case REG:
1971 case SUBREG:
1972 info->type = ADDRESS_REG;
1973 info->reg = x;
1974 info->offset = const0_rtx;
1975 return mips_valid_base_register_p (info->reg, mode, strict_p);
1977 case PLUS:
1978 info->type = ADDRESS_REG;
1979 info->reg = XEXP (x, 0);
1980 info->offset = XEXP (x, 1);
1981 return (mips_valid_base_register_p (info->reg, mode, strict_p)
1982 && mips_valid_offset_p (info->offset, mode));
1984 case LO_SUM:
1985 info->type = ADDRESS_LO_SUM;
1986 info->reg = XEXP (x, 0);
1987 info->offset = XEXP (x, 1);
1988 /* We have to trust the creator of the LO_SUM to do something vaguely
1989 sane. Target-independent code that creates a LO_SUM should also
1990 create and verify the matching HIGH. Target-independent code that
1991 adds an offset to a LO_SUM must prove that the offset will not
1992 induce a carry. Failure to do either of these things would be
1993 a bug, and we are not required to check for it here. The MIPS
1994 backend itself should only create LO_SUMs for valid symbolic
1995 constants, with the high part being either a HIGH or a copy
1996 of _gp. */
1997 info->symbol_type
1998 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1999 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2000 && mips_valid_lo_sum_p (info->symbol_type, mode));
2002 case CONST_INT:
2003 /* Small-integer addresses don't occur very often, but they
2004 are legitimate if $0 is a valid base register. */
2005 info->type = ADDRESS_CONST_INT;
2006 return !TARGET_MIPS16 && SMALL_INT (x);
2008 case CONST:
2009 case LABEL_REF:
2010 case SYMBOL_REF:
2011 info->type = ADDRESS_SYMBOLIC;
2012 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2013 &info->symbol_type)
2014 && mips_symbol_insns (info->symbol_type, mode) > 0
2015 && !mips_split_p[info->symbol_type]);
2017 default:
2018 return false;
2022 /* Return true if X is a legitimate address for a memory operand of mode
2023 MODE. STRICT_P is true if REG_OK_STRICT is in effect. */
2025 bool
2026 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2028 struct mips_address_info addr;
2030 return mips_classify_address (&addr, x, mode, strict_p);
2033 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2035 bool
2036 mips_stack_address_p (rtx x, enum machine_mode mode)
2038 struct mips_address_info addr;
2040 return (mips_classify_address (&addr, x, mode, false)
2041 && addr.type == ADDRESS_REG
2042 && addr.reg == stack_pointer_rtx);
2045 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2046 address instruction. Note that such addresses are not considered
2047 legitimate in the GO_IF_LEGITIMATE_ADDRESS sense, because their use
2048 is so restricted. */
2050 static bool
2051 mips_lwxs_address_p (rtx addr)
2053 if (ISA_HAS_LWXS
2054 && GET_CODE (addr) == PLUS
2055 && REG_P (XEXP (addr, 1)))
2057 rtx offset = XEXP (addr, 0);
2058 if (GET_CODE (offset) == MULT
2059 && REG_P (XEXP (offset, 0))
2060 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2061 && INTVAL (XEXP (offset, 1)) == 4)
2062 return true;
2064 return false;
2067 /* Return true if a value at OFFSET bytes from base register BASE can be
2068 accessed using an unextended MIPS16 instruction. MODE is the mode of
2069 the value.
2071 Usually the offset in an unextended instruction is a 5-bit field.
2072 The offset is unsigned and shifted left once for LH and SH, twice
2073 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2074 an 8-bit immediate field that's shifted left twice. */
2076 static bool
2077 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2078 unsigned HOST_WIDE_INT offset)
2080 if (offset % GET_MODE_SIZE (mode) == 0)
2082 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2083 return offset < 256U * GET_MODE_SIZE (mode);
2084 return offset < 32U * GET_MODE_SIZE (mode);
2086 return false;
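/* Rough examples of the ranges accepted above: an SImode access from an
   ordinary base register may use offsets 0, 4, ..., 124, or 0, 4, ...,
   1020 when the base is $sp (the 8-bit LWSP/SWSP field); an HImode
   access may use offsets 0, 2, ..., 62.  */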
2089 /* Return the number of instructions needed to load or store a value
2090 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2091 Assume that multiword moves may need to be split into word moves
2092 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2093 enough.
2095 For MIPS16 code, count extended instructions as two instructions. */
2098 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2100 struct mips_address_info addr;
2101 int factor;
2103 /* BLKmode is used for single unaligned loads and stores and should
2104 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2105 meaningless, so we have to single it out as a special case one way
2106 or the other.) */
2107 if (mode != BLKmode && might_split_p)
2108 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2109 else
2110 factor = 1;
2112 if (mips_classify_address (&addr, x, mode, false))
2113 switch (addr.type)
2115 case ADDRESS_REG:
2116 if (TARGET_MIPS16
2117 && !mips16_unextended_reference_p (mode, addr.reg,
2118 UINTVAL (addr.offset)))
2119 return factor * 2;
2120 return factor;
2122 case ADDRESS_LO_SUM:
2123 return TARGET_MIPS16 ? factor * 2 : factor;
2125 case ADDRESS_CONST_INT:
2126 return factor;
2128 case ADDRESS_SYMBOLIC:
2129 return factor * mips_symbol_insns (addr.symbol_type, mode);
2131 return 0;
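/* For example, assuming UNITS_PER_WORD == 4, a DImode access through a
   valid REG+OFFSET address counts as 2 when MIGHT_SPLIT_P (one access
   per word), or 4 for MIPS16 code if the offset needs an extended
   encoding.  */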
2134 /* Return the number of instructions needed to load constant X.
2135 Return 0 if X isn't a valid constant. */
2138 mips_const_insns (rtx x)
2140 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2141 enum mips_symbol_type symbol_type;
2142 rtx offset;
2144 switch (GET_CODE (x))
2146 case HIGH:
2147 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2148 &symbol_type)
2149 || !mips_split_p[symbol_type])
2150 return 0;
2152 /* This is simply an LUI for normal mode. It is an extended
2153 LI followed by an extended SLL for MIPS16. */
2154 return TARGET_MIPS16 ? 4 : 1;
2156 case CONST_INT:
2157 if (TARGET_MIPS16)
2158 /* Unsigned 8-bit constants can be loaded using an unextended
2159 LI instruction. Unsigned 16-bit constants can be loaded
2160 using an extended LI. Negative constants must be loaded
2161 using LI and then negated. */
2162 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2163 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2164 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2165 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2166 : 0);
2168 return mips_build_integer (codes, INTVAL (x));
2170 case CONST_DOUBLE:
2171 case CONST_VECTOR:
2172 /* Allow zeros for normal mode, where we can use $0. */
2173 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2175 case CONST:
2176 if (CONST_GP_P (x))
2177 return 1;
2179 /* See if we can refer to X directly. */
2180 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2181 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2183 /* Otherwise try splitting the constant into a base and offset.
2184 If the offset is a 16-bit value, we can load the base address
2185 into a register and then use (D)ADDIU to add in the offset.
2186 If the offset is larger, we can load the base and offset
2187 into separate registers and add them together with (D)ADDU.
2188 However, the latter is only possible before reload; during
2189 and after reload, we must have the option of forcing the
2190 constant into the pool instead. */
2191 split_const (x, &x, &offset);
2192 if (offset != 0)
2194 int n = mips_const_insns (x);
2195 if (n != 0)
2197 if (SMALL_INT (offset))
2198 return n + 1;
2199 else if (!targetm.cannot_force_const_mem (x))
2200 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2203 return 0;
2205 case SYMBOL_REF:
2206 case LABEL_REF:
2207 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2208 MAX_MACHINE_MODE);
2210 default:
2211 return 0;
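/* Some illustrative MIPS16 CONST_INT results from the code above:
   200 counts as 1 (unextended LI), 0x1234 as 2 (one extended LI),
   -5 as 2 (LI 5 followed by a negation), while 0x123456 returns 0
   and must be loaded from the constant pool.  */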
2215 /* X is a doubleword constant that can be handled by splitting it into
2216 two words and loading each word separately. Return the number of
2217 instructions required to do this. */
2220 mips_split_const_insns (rtx x)
2222 unsigned int low, high;
2224 low = mips_const_insns (mips_subword (x, false));
2225 high = mips_const_insns (mips_subword (x, true));
2226 gcc_assert (low > 0 && high > 0);
2227 return low + high;
2230 /* Return the number of instructions needed to implement INSN,
2231 given that it loads from or stores to MEM. Count extended
2232 MIPS16 instructions as two instructions. */
2235 mips_load_store_insns (rtx mem, rtx insn)
2237 enum machine_mode mode;
2238 bool might_split_p;
2239 rtx set;
2241 gcc_assert (MEM_P (mem));
2242 mode = GET_MODE (mem);
2244 /* Try to prove that INSN does not need to be split. */
2245 might_split_p = true;
2246 if (GET_MODE_BITSIZE (mode) == 64)
2248 set = single_set (insn);
2249 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2250 might_split_p = false;
2253 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2256 /* Return the number of instructions needed for an integer division. */
2259 mips_idiv_insns (void)
2261 int count;
2263 count = 1;
2264 if (TARGET_CHECK_ZERO_DIV)
2266 if (GENERATE_DIVIDE_TRAPS)
2267 count++;
2268 else
2269 count += 2;
2272 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2273 count++;
2274 return count;
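/* For example, when TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS
   both hold, the division is a DIV plus a conditional trap, so the
   count is 2; without divide traps the explicit branch-and-break
   check adds two instructions instead, giving 3.  The R4000/R4400
   workarounds add one more in either case.  */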
2277 /* Emit a move from SRC to DEST. Assume that the move expanders can
2278 handle all moves if !can_create_pseudo_p (). The distinction is
2279 important because, unlike emit_move_insn, the move expanders know
2280 how to force Pmode objects into the constant pool even when the
2281 constant pool address is not itself legitimate. */
2284 mips_emit_move (rtx dest, rtx src)
2286 return (can_create_pseudo_p ()
2287 ? emit_move_insn (dest, src)
2288 : emit_move_insn_1 (dest, src));
2291 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2293 static void
2294 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2296 emit_insn (gen_rtx_SET (VOIDmode, target,
2297 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2300 /* Compute (CODE OP0 OP1) and store the result in a new register
2301 of mode MODE. Return that new register. */
2303 static rtx
2304 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2306 rtx reg;
2308 reg = gen_reg_rtx (mode);
2309 mips_emit_binary (code, reg, op0, op1);
2310 return reg;
2313 /* Copy VALUE to a register and return that register. If new pseudos
2314 are allowed, copy it into a new register, otherwise use DEST. */
2316 static rtx
2317 mips_force_temporary (rtx dest, rtx value)
2319 if (can_create_pseudo_p ())
2320 return force_reg (Pmode, value);
2321 else
2323 mips_emit_move (dest, value);
2324 return dest;
2328 /* Emit a call sequence with call pattern PATTERN and return the call
2329 instruction itself (which is not necessarily the last instruction
2330 emitted). ORIG_ADDR is the original, unlegitimized address,
2331 ADDR is the legitimized form, and LAZY_P is true if the call
2332 address is lazily-bound. */
2334 static rtx
2335 mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
2337 rtx insn, reg;
2339 insn = emit_call_insn (pattern);
2341 if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
2343 /* MIPS16 JALRs only take MIPS16 registers. If the target
2344 function requires $25 to be valid on entry, we must copy it
2345 there separately. The move instruction can be put in the
2346 call's delay slot. */
2347 reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
2348 emit_insn_before (gen_move_insn (reg, addr), insn);
2349 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
2352 if (lazy_p)
2353 /* Lazy-binding stubs require $gp to be valid on entry. */
2354 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2356 if (TARGET_USE_GOT)
2358 /* See the comment above load_call<mode> for details. */
2359 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2360 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2361 emit_insn (gen_update_got_version ());
2363 return insn;
2366 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2367 then add CONST_INT OFFSET to the result. */
2369 static rtx
2370 mips_unspec_address_offset (rtx base, rtx offset,
2371 enum mips_symbol_type symbol_type)
2373 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2374 UNSPEC_ADDRESS_FIRST + symbol_type);
2375 if (offset != const0_rtx)
2376 base = gen_rtx_PLUS (Pmode, base, offset);
2377 return gen_rtx_CONST (Pmode, base);
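/* Schematically, wrapping symbol "foo" with offset 4 as
   SYMBOL_GP_RELATIVE produces something like:

	(const (plus (unspec [(symbol_ref "foo")] <address unspec>)
		     (const_int 4)))

   which the output routines later print using a relocation operator
   such as %gp_rel(foo+4).  */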
2380 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2381 type SYMBOL_TYPE. */
2384 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2386 rtx base, offset;
2388 split_const (address, &base, &offset);
2389 return mips_unspec_address_offset (base, offset, symbol_type);
2392 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2393 high part to BASE and return the result. Just return BASE otherwise.
2394 TEMP is as for mips_force_temporary.
2396 The returned expression can be used as the first operand to a LO_SUM. */
2398 static rtx
2399 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2400 enum mips_symbol_type symbol_type)
2402 if (mips_split_p[symbol_type])
2404 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2405 addr = mips_force_temporary (temp, addr);
2406 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2408 return base;
2411 /* Return an instruction that copies $gp into register REG. We want
2412 GCC to treat the register's value as constant, so that its value
2413 can be rematerialized on demand. */
2415 static rtx
2416 gen_load_const_gp (rtx reg)
2418 return (Pmode == SImode
2419 ? gen_load_const_gp_si (reg)
2420 : gen_load_const_gp_di (reg));
2423 /* Return a pseudo register that contains the value of $gp throughout
2424 the current function. Such registers are needed by MIPS16 functions,
2425 for which $gp itself is not a valid base register or addition operand. */
2427 static rtx
2428 mips16_gp_pseudo_reg (void)
2430 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2431 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2433 /* Don't emit an instruction to initialize the pseudo register if
2434 we are being called from the tree optimizers' cost-calculation
2435 routines. */
2436 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2437 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2439 rtx insn, scan;
2441 push_topmost_sequence ();
2443 scan = get_insns ();
2444 while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
2445 scan = NEXT_INSN (scan);
2447 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2448 emit_insn_after (insn, scan);
2450 pop_topmost_sequence ();
2452 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2455 return cfun->machine->mips16_gp_pseudo_rtx;
2458 /* Return a base register that holds pic_offset_table_rtx.
2459 TEMP, if nonnull, is a scratch Pmode base register. */
2462 mips_pic_base_register (rtx temp)
2464 if (!TARGET_MIPS16)
2465 return pic_offset_table_rtx;
2467 if (can_create_pseudo_p ())
2468 return mips16_gp_pseudo_reg ();
2470 if (TARGET_USE_GOT)
2471 /* The first post-reload split exposes all references to $gp
2472 (both uses and definitions). All references must remain
2473 explicit after that point.
2475 It is safe to introduce uses of $gp at any time, so for
2476 simplicity, we do that before the split too. */
2477 mips_emit_move (temp, pic_offset_table_rtx);
2478 else
2479 emit_insn (gen_load_const_gp (temp));
2480 return temp;
2483 /* Create and return a GOT reference of type TYPE for address ADDR.
2484 TEMP, if nonnull, is a scratch Pmode base register. */
2487 mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
2489 rtx base, high, lo_sum_symbol;
2491 base = mips_pic_base_register (temp);
2493 /* If we used the temporary register to load $gp, we can't use
2494 it for the high part as well. */
2495 if (temp != NULL && reg_overlap_mentioned_p (base, temp))
2496 temp = NULL;
2498 high = mips_unspec_offset_high (temp, base, addr, type);
2499 lo_sum_symbol = mips_unspec_address (addr, type);
2501 if (type == SYMBOL_GOTOFF_CALL)
2502 return (Pmode == SImode
2503 ? gen_unspec_callsi (high, lo_sum_symbol)
2504 : gen_unspec_calldi (high, lo_sum_symbol));
2505 else
2506 return (Pmode == SImode
2507 ? gen_unspec_gotsi (high, lo_sum_symbol)
2508 : gen_unspec_gotdi (high, lo_sum_symbol));
2511 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2512 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2513 constant in that context and can be split into high and low parts.
2514 If so, and if LOW_OUT is nonnull, emit the high part and store the
2515 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2517 TEMP is as for mips_force_temporary and is used to load the high
2518 part into a register.
2520 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2521 a legitimate SET_SRC for an .md pattern, otherwise the low part
2522 is guaranteed to be a legitimate address for mode MODE. */
2524 bool
2525 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2527 enum mips_symbol_context context;
2528 enum mips_symbol_type symbol_type;
2529 rtx high;
2531 context = (mode == MAX_MACHINE_MODE
2532 ? SYMBOL_CONTEXT_LEA
2533 : SYMBOL_CONTEXT_MEM);
2534 if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
2536 addr = XEXP (addr, 0);
2537 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2538 && mips_symbol_insns (symbol_type, mode) > 0
2539 && mips_split_hi_p[symbol_type])
2541 if (low_out)
2542 switch (symbol_type)
2544 case SYMBOL_GOT_PAGE_OFST:
2545 /* The high part of a page/ofst pair is loaded from the GOT. */
2546 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
2547 break;
2549 default:
2550 gcc_unreachable ();
2552 return true;
2555 else
2557 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2558 && mips_symbol_insns (symbol_type, mode) > 0
2559 && mips_split_p[symbol_type])
2561 if (low_out)
2562 switch (symbol_type)
2564 case SYMBOL_GOT_DISP:
2565 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
2566 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
2567 break;
2569 case SYMBOL_GP_RELATIVE:
2570 high = mips_pic_base_register (temp);
2571 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2572 break;
2574 default:
2575 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2576 high = mips_force_temporary (temp, high);
2577 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2578 break;
2580 return true;
2583 return false;
2586 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2587 mips_force_temporary; it is only needed when OFFSET is not a
2588 SMALL_OPERAND. */
2590 static rtx
2591 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2593 if (!SMALL_OPERAND (offset))
2595 rtx high;
2597 if (TARGET_MIPS16)
2599 /* Load the full offset into a register so that we can use
2600 an unextended instruction for the address itself. */
2601 high = GEN_INT (offset);
2602 offset = 0;
2604 else
2606 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2607 high = GEN_INT (CONST_HIGH_PART (offset));
2608 offset = CONST_LOW_PART (offset);
2610 high = mips_force_temporary (temp, high);
2611 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2613 return plus_constant (reg, offset);
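/* A worked example for the non-MIPS16 path: with OFFSET == 0x12348,
   CONST_HIGH_PART gives 0x10000 and CONST_LOW_PART gives 0x2348, so we
   emit "lui TEMP,0x1; (d)addu TEMP,TEMP,REG" and return TEMP + 0x2348,
   which fits in the 16-bit field of the final load or store.  */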
2616 /* The __tls_get_addr symbol. */
2617 static GTY(()) rtx mips_tls_symbol;
2619 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2620 the TLS symbol we are referencing and TYPE is the symbol type to use
2621 (either global dynamic or local dynamic). V0 is an RTX for the
2622 return value location. */
2624 static rtx
2625 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2627 rtx insn, loc, a0;
2629 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2631 if (!mips_tls_symbol)
2632 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2634 loc = mips_unspec_address (sym, type);
2636 start_sequence ();
2638 emit_insn (gen_rtx_SET (Pmode, a0,
2639 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2640 insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
2641 const0_rtx, NULL_RTX, false);
2642 RTL_CONST_CALL_P (insn) = 1;
2643 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2644 insn = get_insns ();
2646 end_sequence ();
2648 return insn;
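/* The sequence emitted above is roughly, for the global-dynamic case:

	addiu $4,$gp,%tlsgd(symbol)
	jal __tls_get_addr

   (or %tlsldm for local dynamic), with the result coming back in $2.
   The exact form depends on the ABI and on how the call is expanded.  */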
2651 /* Return a pseudo register that contains the current thread pointer. */
2653 static rtx
2654 mips_get_tp (void)
2656 rtx tp;
2658 tp = gen_reg_rtx (Pmode);
2659 if (Pmode == DImode)
2660 emit_insn (gen_tls_get_tp_di (tp));
2661 else
2662 emit_insn (gen_tls_get_tp_si (tp));
2663 return tp;
2666 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2667 its address. The return value will be both a valid address and a valid
2668 SET_SRC (either a REG or a LO_SUM). */
2670 static rtx
2671 mips_legitimize_tls_address (rtx loc)
2673 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2674 enum tls_model model;
2676 if (TARGET_MIPS16)
2678 sorry ("MIPS16 TLS");
2679 return gen_reg_rtx (Pmode);
2682 model = SYMBOL_REF_TLS_MODEL (loc);
2683 /* Only TARGET_ABICALLS code can have more than one module; other
2684 code must be static and should not use a GOT. All TLS models
2685 reduce to local exec in this situation. */
2686 if (!TARGET_ABICALLS)
2687 model = TLS_MODEL_LOCAL_EXEC;
2689 switch (model)
2691 case TLS_MODEL_GLOBAL_DYNAMIC:
2692 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2693 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2694 dest = gen_reg_rtx (Pmode);
2695 emit_libcall_block (insn, dest, v0, loc);
2696 break;
2698 case TLS_MODEL_LOCAL_DYNAMIC:
2699 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2700 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2701 tmp1 = gen_reg_rtx (Pmode);
2703 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2704 share the LDM result with other LD model accesses. */
2705 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2706 UNSPEC_TLS_LDM);
2707 emit_libcall_block (insn, tmp1, v0, eqv);
2709 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2710 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2711 mips_unspec_address (loc, SYMBOL_DTPREL));
2712 break;
2714 case TLS_MODEL_INITIAL_EXEC:
2715 tp = mips_get_tp ();
2716 tmp1 = gen_reg_rtx (Pmode);
2717 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2718 if (Pmode == DImode)
2719 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2720 else
2721 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2722 dest = gen_reg_rtx (Pmode);
2723 emit_insn (gen_add3_insn (dest, tmp1, tp));
2724 break;
2726 case TLS_MODEL_LOCAL_EXEC:
2727 tp = mips_get_tp ();
2728 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2729 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2730 mips_unspec_address (loc, SYMBOL_TPREL));
2731 break;
2733 default:
2734 gcc_unreachable ();
2736 return dest;
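/* As a rough guide to the local-exec case above: it typically assembles
   to something like "rdhwr TP,$29; lui TMP,%tprel_hi(symbol);
   addu TMP,TMP,TP", with %tprel_lo(symbol) folded into the final
   access through the LO_SUM that is returned.  Register choices are
   left to the allocator.  */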
2739 /* If X is not a valid address for mode MODE, force it into a register. */
2741 static rtx
2742 mips_force_address (rtx x, enum machine_mode mode)
2744 if (!mips_legitimate_address_p (mode, x, false))
2745 x = force_reg (Pmode, x);
2746 return x;
2749 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2750 be legitimized in a way that the generic machinery might not expect,
2751 put the new address in *XLOC and return true. MODE is the mode of
2752 the memory being accessed. */
2754 bool
2755 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2757 rtx base, addr;
2758 HOST_WIDE_INT offset;
2760 if (mips_tls_symbol_p (*xloc))
2762 *xloc = mips_legitimize_tls_address (*xloc);
2763 return true;
2766 /* See if the address can split into a high part and a LO_SUM. */
2767 if (mips_split_symbol (NULL, *xloc, mode, &addr))
2769 *xloc = mips_force_address (addr, mode);
2770 return true;
2773 /* Handle BASE + OFFSET using mips_add_offset. */
2774 mips_split_plus (*xloc, &base, &offset);
2775 if (offset != 0)
2777 if (!mips_valid_base_register_p (base, mode, false))
2778 base = copy_to_mode_reg (Pmode, base);
2779 addr = mips_add_offset (NULL, base, offset);
2780 *xloc = mips_force_address (addr, mode);
2781 return true;
2783 return false;
2786 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
2788 void
2789 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
2791 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2792 enum machine_mode mode;
2793 unsigned int i, num_ops;
2794 rtx x;
2796 mode = GET_MODE (dest);
2797 num_ops = mips_build_integer (codes, value);
2799 /* Apply each binary operation to X. Invariant: X is a legitimate
2800 source operand for a SET pattern. */
2801 x = GEN_INT (codes[0].value);
2802 for (i = 1; i < num_ops; i++)
2804 if (!can_create_pseudo_p ())
2806 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2807 x = temp;
2809 else
2810 x = force_reg (mode, x);
2811 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2814 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
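/* For example, loading 0x12345678 on a 32-bit target typically yields
   two operations from mips_build_integer: a SET of 0x12340000 followed
   by an IOR with 0x5678, i.e. "lui DEST,0x1234; ori DEST,DEST,0x5678".  */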
2817 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2818 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2819 move_operand. */
2821 static void
2822 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2824 rtx base, offset;
2826 /* Split moves of big integers into smaller pieces. */
2827 if (splittable_const_int_operand (src, mode))
2829 mips_move_integer (dest, dest, INTVAL (src));
2830 return;
2833 /* Split moves of symbolic constants into high/low pairs. */
2834 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2836 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2837 return;
2840 /* Generate the appropriate access sequences for TLS symbols. */
2841 if (mips_tls_symbol_p (src))
2843 mips_emit_move (dest, mips_legitimize_tls_address (src));
2844 return;
2847 /* If we have (const (plus symbol offset)), and that expression cannot
2848 be forced into memory, load the symbol first and add in the offset.
2849 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2850 forced into memory, as it usually produces better code. */
2851 split_const (src, &base, &offset);
2852 if (offset != const0_rtx
2853 && (targetm.cannot_force_const_mem (src)
2854 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2856 base = mips_force_temporary (dest, base);
2857 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
2858 return;
2861 src = force_const_mem (mode, src);
2863 /* When using explicit relocs, constant pool references are sometimes
2864 not legitimate addresses. */
2865 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2866 mips_emit_move (dest, src);
2869 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2870 sequence that is valid. */
2872 bool
2873 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2875 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2877 mips_emit_move (dest, force_reg (mode, src));
2878 return true;
2881 /* We need to deal with constants that would be legitimate
2882 immediate_operands but aren't legitimate move_operands. */
2883 if (CONSTANT_P (src) && !move_operand (src, mode))
2885 mips_legitimize_const_move (mode, dest, src);
2886 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2887 return true;
2889 return false;
2892 /* Return true if value X in context CONTEXT is a small-data address
2893 that can be rewritten as a LO_SUM. */
2895 static bool
2896 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2898 enum mips_symbol_type symbol_type;
2900 return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
2901 && !mips_split_p[SYMBOL_GP_RELATIVE]
2902 && mips_symbolic_constant_p (x, context, &symbol_type)
2903 && symbol_type == SYMBOL_GP_RELATIVE);
2906 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2907 containing MEM, or null if none. */
2909 static int
2910 mips_small_data_pattern_1 (rtx *loc, void *data)
2912 enum mips_symbol_context context;
2914 if (GET_CODE (*loc) == LO_SUM)
2915 return -1;
2917 if (MEM_P (*loc))
2919 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2920 return 1;
2921 return -1;
2924 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2925 return mips_rewrite_small_data_p (*loc, context);
2928 /* Return true if OP refers to small data symbols directly, not through
2929 a LO_SUM. */
2931 bool
2932 mips_small_data_pattern_p (rtx op)
2934 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
2937 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2938 DATA is the containing MEM, or null if none. */
2940 static int
2941 mips_rewrite_small_data_1 (rtx *loc, void *data)
2943 enum mips_symbol_context context;
2945 if (MEM_P (*loc))
2947 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2948 return -1;
2951 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2952 if (mips_rewrite_small_data_p (*loc, context))
2953 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2955 if (GET_CODE (*loc) == LO_SUM)
2956 return -1;
2958 return 0;
2961 /* Rewrite instruction pattern PATTERN so that it refers to small data
2962 using explicit relocations. */
2965 mips_rewrite_small_data (rtx pattern)
2967 pattern = copy_insn (pattern);
2968 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
2969 return pattern;
2972 /* We need a lot of little routines to check the range of MIPS16 immediate
2973 operands. */
2975 static int
2976 m16_check_op (rtx op, int low, int high, int mask)
2978 return (GET_CODE (op) == CONST_INT
2979 && IN_RANGE (INTVAL (op), low, high)
2980 && (INTVAL (op) & mask) == 0);
2984 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2986 return m16_check_op (op, 0x1, 0x8, 0);
2990 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2992 return m16_check_op (op, -0x8, 0x7, 0);
2996 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2998 return m16_check_op (op, -0x7, 0x8, 0);
3002 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3004 return m16_check_op (op, -0x10, 0xf, 0);
3008 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3010 return m16_check_op (op, -0xf, 0x10, 0);
3014 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3016 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
3020 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3022 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
3026 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3028 return m16_check_op (op, -0x80, 0x7f, 0);
3032 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3034 return m16_check_op (op, -0x7f, 0x80, 0);
3038 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3040 return m16_check_op (op, 0x0, 0xff, 0);
3044 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3046 return m16_check_op (op, -0xff, 0x0, 0);
3050 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3052 return m16_check_op (op, -0x1, 0xfe, 0);
3056 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3058 return m16_check_op (op, 0x0, 0xff << 2, 3);
3062 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3064 return m16_check_op (op, -0xff << 2, 0x0, 3);
3068 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3070 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
3074 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3076 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
3079 /* The cost of loading values from the constant pool. It should be
3080 larger than the cost of any constant we want to synthesize inline. */
3081 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3083 /* Return the cost of X when used as an operand to the MIPS16 instruction
3084 that implements CODE. Return -1 if there is no such instruction, or if
3085 X is not a valid immediate operand for it. */
3087 static int
3088 mips16_constant_cost (int code, HOST_WIDE_INT x)
3090 switch (code)
3092 case ASHIFT:
3093 case ASHIFTRT:
3094 case LSHIFTRT:
3095 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3096 other shifts are extended. The shift patterns truncate the shift
3097 count to the right size, so there are no out-of-range values. */
3098 if (IN_RANGE (x, 1, 8))
3099 return 0;
3100 return COSTS_N_INSNS (1);
3102 case PLUS:
3103 if (IN_RANGE (x, -128, 127))
3104 return 0;
3105 if (SMALL_OPERAND (x))
3106 return COSTS_N_INSNS (1);
3107 return -1;
3109 case LEU:
3110 /* Like LE, but reject the always-true case. */
3111 if (x == -1)
3112 return -1;
3113 case LE:
3114 /* We add 1 to the immediate and use SLT. */
3115 x += 1;
3116 case XOR:
3117 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3118 case LT:
3119 case LTU:
3120 if (IN_RANGE (x, 0, 255))
3121 return 0;
3122 if (SMALL_OPERAND_UNSIGNED (x))
3123 return COSTS_N_INSNS (1);
3124 return -1;
3126 case EQ:
3127 case NE:
3128 /* Equality comparisons with 0 are cheap. */
3129 if (x == 0)
3130 return 0;
3131 return -1;
3133 default:
3134 return -1;
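/* Illustrative results from the code above: a PLUS of 100 costs
   nothing extra, a PLUS of 0x1000 costs COSTS_N_INSNS (1), and an LE
   comparison against 7 is treated as LT against 8, which falls in the
   free 0..255 range.  */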
3138 /* Return true if there is a non-MIPS16 instruction that implements CODE
3139 and if that instruction accepts X as an immediate operand. */
3141 static int
3142 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
3144 switch (code)
3146 case ASHIFT:
3147 case ASHIFTRT:
3148 case LSHIFTRT:
3149 /* All shift counts are truncated to a valid constant. */
3150 return true;
3152 case ROTATE:
3153 case ROTATERT:
3154 /* Likewise rotates, if the target supports rotates at all. */
3155 return ISA_HAS_ROR;
3157 case AND:
3158 case IOR:
3159 case XOR:
3160 /* These instructions take 16-bit unsigned immediates. */
3161 return SMALL_OPERAND_UNSIGNED (x);
3163 case PLUS:
3164 case LT:
3165 case LTU:
3166 /* These instructions take 16-bit signed immediates. */
3167 return SMALL_OPERAND (x);
3169 case EQ:
3170 case NE:
3171 case GT:
3172 case GTU:
3173 /* The "immediate" forms of these instructions are really
3174 implemented as comparisons with register 0. */
3175 return x == 0;
3177 case GE:
3178 case GEU:
3179 /* Likewise, meaning that the only valid immediate operand is 1. */
3180 return x == 1;
3182 case LE:
3183 /* We add 1 to the immediate and use SLT. */
3184 return SMALL_OPERAND (x + 1);
3186 case LEU:
3187 /* Likewise SLTU, but reject the always-true case. */
3188 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3190 case SIGN_EXTRACT:
3191 case ZERO_EXTRACT:
3192 /* The bit position and size are immediate operands. */
3193 return ISA_HAS_EXT_INS;
3195 default:
3196 /* By default assume that $0 can be used for 0. */
3197 return x == 0;
3201 /* Return the cost of binary operation X, given that the instruction
3202 sequence for a word-sized or smaller operation has cost SINGLE_COST
3203 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3205 static int
3206 mips_binary_cost (rtx x, int single_cost, int double_cost)
3208 int cost;
3210 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3211 cost = double_cost;
3212 else
3213 cost = single_cost;
3214 return (cost
3215 + rtx_cost (XEXP (x, 0), 0)
3216 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3219 /* Return the cost of floating-point multiplications of mode MODE. */
3221 static int
3222 mips_fp_mult_cost (enum machine_mode mode)
3224 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3227 /* Return the cost of floating-point divisions of mode MODE. */
3229 static int
3230 mips_fp_div_cost (enum machine_mode mode)
3232 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3235 /* Return the cost of sign-extending OP to mode MODE, not including the
3236 cost of OP itself. */
3238 static int
3239 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3241 if (MEM_P (op))
3242 /* Extended loads are as cheap as unextended ones. */
3243 return 0;
3245 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3246 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3247 return 0;
3249 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3250 /* We can use SEB or SEH. */
3251 return COSTS_N_INSNS (1);
3253 /* We need to use a shift left and a shift right. */
3254 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3257 /* Return the cost of zero-extending OP to mode MODE, not including the
3258 cost of OP itself. */
3260 static int
3261 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3263 if (MEM_P (op))
3264 /* Extended loads are as cheap as unextended ones. */
3265 return 0;
3267 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3268 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3269 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3271 if (GENERATE_MIPS16E)
3272 /* We can use ZEB or ZEH. */
3273 return COSTS_N_INSNS (1);
3275 if (TARGET_MIPS16)
3276 /* We need to load 0xff or 0xffff into a register and use AND. */
3277 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3279 /* We can use ANDI. */
3280 return COSTS_N_INSNS (1);
3283 /* Implement TARGET_RTX_COSTS. */
3285 static bool
3286 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3288 enum machine_mode mode = GET_MODE (x);
3289 bool float_mode_p = FLOAT_MODE_P (mode);
3290 int cost;
3291 rtx addr;
3293 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3294 appear in the instruction stream, and the cost of a comparison is
3295 really the cost of the branch or scc condition. At the time of
3296 writing, GCC only uses an explicit outer COMPARE code when optabs
3297 is testing whether a constant is expensive enough to force into a
3298 register. We want optabs to pass such constants through the MIPS
3299 expanders instead, so make all constants very cheap here. */
3300 if (outer_code == COMPARE)
3302 gcc_assert (CONSTANT_P (x));
3303 *total = 0;
3304 return true;
3307 switch (code)
3309 case CONST_INT:
3310 /* Treat *clear_upper32-style ANDs as having zero cost in the
3311 second operand. The cost is entirely in the first operand.
3313 ??? This is needed because we would otherwise try to CSE
3314 the constant operand. Although that's the right thing for
3315 instructions that continue to be a register operation throughout
3316 compilation, it is disastrous for instructions that could
3317 later be converted into a memory operation. */
3318 if (TARGET_64BIT
3319 && outer_code == AND
3320 && UINTVAL (x) == 0xffffffff)
3322 *total = 0;
3323 return true;
3326 if (TARGET_MIPS16)
3328 cost = mips16_constant_cost (outer_code, INTVAL (x));
3329 if (cost >= 0)
3331 *total = cost;
3332 return true;
3335 else
3337 /* When not optimizing for size, we care more about the cost
3338 of hot code, and hot code is often in a loop. If a constant
3339 operand needs to be forced into a register, we will often be
3340 able to hoist the constant load out of the loop, so the load
3341 should not contribute to the cost. */
3342 if (!optimize_size
3343 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3345 *total = 0;
3346 return true;
3349 /* Fall through. */
3351 case CONST:
3352 case SYMBOL_REF:
3353 case LABEL_REF:
3354 case CONST_DOUBLE:
3355 if (force_to_mem_operand (x, VOIDmode))
3357 *total = COSTS_N_INSNS (1);
3358 return true;
3360 cost = mips_const_insns (x);
3361 if (cost > 0)
3363 /* If the constant is likely to be stored in a GPR, SETs of
3364 single-insn constants are as cheap as register sets; we
3365 never want to CSE them.
3367 Don't reduce the cost of storing a floating-point zero in
3368 FPRs. If we have a zero in an FPR for other reasons, we
3369 can get better cfg-cleanup and delayed-branch results by
3370 using it consistently, rather than using $0 sometimes and
3371 an FPR at other times. Also, moves between floating-point
3372 registers are sometimes cheaper than (D)MTC1 $0. */
3373 if (cost == 1
3374 && outer_code == SET
3375 && !(float_mode_p && TARGET_HARD_FLOAT))
3376 cost = 0;
3377 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3378 want to CSE the constant itself. It is usually better to
3379 have N copies of the last operation in the sequence and one
3380 shared copy of the other operations. (Note that this is
3381 not true for MIPS16 code, where the final operation in the
3382 sequence is often an extended instruction.)
3384 Also, if we have a CONST_INT, we don't know whether it is
3385 for a word or doubleword operation, so we cannot rely on
3386 the result of mips_build_integer. */
3387 else if (!TARGET_MIPS16
3388 && (outer_code == SET || mode == VOIDmode))
3389 cost = 1;
3390 *total = COSTS_N_INSNS (cost);
3391 return true;
3393 /* The value will need to be fetched from the constant pool. */
3394 *total = CONSTANT_POOL_COST;
3395 return true;
3397 case MEM:
3398 /* If the address is legitimate, return the number of
3399 instructions it needs. */
3400 addr = XEXP (x, 0);
3401 cost = mips_address_insns (addr, mode, true);
3402 if (cost > 0)
3404 *total = COSTS_N_INSNS (cost + 1);
3405 return true;
3407 /* Check for a scaled indexed address. */
3408 if (mips_lwxs_address_p (addr))
3410 *total = COSTS_N_INSNS (2);
3411 return true;
3413 /* Otherwise use the default handling. */
3414 return false;
3416 case FFS:
3417 *total = COSTS_N_INSNS (6);
3418 return false;
3420 case NOT:
3421 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3422 return false;
3424 case AND:
3425 /* Check for a *clear_upper32 pattern and treat it like a zero
3426 extension. See the pattern's comment for details. */
3427 if (TARGET_64BIT
3428 && mode == DImode
3429 && CONST_INT_P (XEXP (x, 1))
3430 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3432 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3433 + rtx_cost (XEXP (x, 0), 0));
3434 return true;
3436 /* Fall through. */
3438 case IOR:
3439 case XOR:
3440 /* Double-word operations use two single-word operations. */
3441 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3442 return true;
3444 case ASHIFT:
3445 case ASHIFTRT:
3446 case LSHIFTRT:
3447 case ROTATE:
3448 case ROTATERT:
3449 if (CONSTANT_P (XEXP (x, 1)))
3450 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3451 else
3452 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3453 return true;
3455 case ABS:
3456 if (float_mode_p)
3457 *total = mips_cost->fp_add;
3458 else
3459 *total = COSTS_N_INSNS (4);
3460 return false;
3462 case LO_SUM:
3463 /* Low-part immediates need an extended MIPS16 instruction. */
3464 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3465 + rtx_cost (XEXP (x, 0), 0));
3466 return true;
3468 case LT:
3469 case LTU:
3470 case LE:
3471 case LEU:
3472 case GT:
3473 case GTU:
3474 case GE:
3475 case GEU:
3476 case EQ:
3477 case NE:
3478 case UNORDERED:
3479 case LTGT:
3480 /* Branch comparisons have VOIDmode, so use the first operand's
3481 mode instead. */
3482 mode = GET_MODE (XEXP (x, 0));
3483 if (FLOAT_MODE_P (mode))
3485 *total = mips_cost->fp_add;
3486 return false;
3488 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3489 return true;
3491 case MINUS:
3492 if (float_mode_p
3493 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3494 && TARGET_FUSED_MADD
3495 && !HONOR_NANS (mode)
3496 && !HONOR_SIGNED_ZEROS (mode))
3498 /* See if we can use NMADD or NMSUB. See mips.md for the
3499 associated patterns. */
3500 rtx op0 = XEXP (x, 0);
3501 rtx op1 = XEXP (x, 1);
3502 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3504 *total = (mips_fp_mult_cost (mode)
3505 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3506 + rtx_cost (XEXP (op0, 1), 0)
3507 + rtx_cost (op1, 0));
3508 return true;
3510 if (GET_CODE (op1) == MULT)
3512 *total = (mips_fp_mult_cost (mode)
3513 + rtx_cost (op0, 0)
3514 + rtx_cost (XEXP (op1, 0), 0)
3515 + rtx_cost (XEXP (op1, 1), 0));
3516 return true;
3519 /* Fall through. */
3521 case PLUS:
3522 if (float_mode_p)
3524 /* If this is part of a MADD or MSUB, treat the PLUS as
3525 being free. */
3526 if (ISA_HAS_FP4
3527 && TARGET_FUSED_MADD
3528 && GET_CODE (XEXP (x, 0)) == MULT)
3529 *total = 0;
3530 else
3531 *total = mips_cost->fp_add;
3532 return false;
3535 /* Double-word operations require three single-word operations and
3536 an SLTU. The MIPS16 version then needs to move the result of
3537 the SLTU from $24 to a MIPS16 register. */
3538 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3539 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3540 return true;
3542 case NEG:
3543 if (float_mode_p
3544 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3545 && TARGET_FUSED_MADD
3546 && !HONOR_NANS (mode)
3547 && !HONOR_SIGNED_ZEROS (mode))
3549 /* See if we can use NMADD or NMSUB. See mips.md for the
3550 associated patterns. */
3551 rtx op = XEXP (x, 0);
3552 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3553 && GET_CODE (XEXP (op, 0)) == MULT)
3555 *total = (mips_fp_mult_cost (mode)
3556 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3557 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3558 + rtx_cost (XEXP (op, 1), 0));
3559 return true;
3563 if (float_mode_p)
3564 *total = mips_cost->fp_add;
3565 else
3566 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3567 return false;
3569 case MULT:
3570 if (float_mode_p)
3571 *total = mips_fp_mult_cost (mode);
3572 else if (mode == DImode && !TARGET_64BIT)
3573 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3574 where the mulsidi3 always includes an MFHI and an MFLO. */
3575 *total = (optimize_size
3576 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3577 : mips_cost->int_mult_si * 3 + 6);
3578 else if (optimize_size)
3579 *total = (ISA_HAS_MUL3 ? 1 : 2);
3580 else if (mode == DImode)
3581 *total = mips_cost->int_mult_di;
3582 else
3583 *total = mips_cost->int_mult_si;
3584 return false;
3586 case DIV:
3587 /* Check for a reciprocal. */
3588 if (float_mode_p
3589 && ISA_HAS_FP4
3590 && flag_unsafe_math_optimizations
3591 && XEXP (x, 0) == CONST1_RTX (mode))
3593 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3594 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3595 division as being free. */
3596 *total = rtx_cost (XEXP (x, 1), 0);
3597 else
3598 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3599 return true;
3601 /* Fall through. */
3603 case SQRT:
3604 case MOD:
3605 if (float_mode_p)
3607 *total = mips_fp_div_cost (mode);
3608 return false;
3610 /* Fall through. */
3612 case UDIV:
3613 case UMOD:
3614 if (optimize_size)
3616 /* It is our responsibility to make division by a power of 2
3617 as cheap as 2 register additions if we want the division
3618 expanders to be used for such operations; see the setting
3619 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3620 should always produce shorter code than using
3621 expand_sdiv2_pow2. */
3622 if (TARGET_MIPS16
3623 && CONST_INT_P (XEXP (x, 1))
3624 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3626 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3627 return true;
3629 *total = COSTS_N_INSNS (mips_idiv_insns ());
3631 else if (mode == DImode)
3632 *total = mips_cost->int_div_di;
3633 else
3634 *total = mips_cost->int_div_si;
3635 return false;
3637 case SIGN_EXTEND:
3638 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3639 return false;
3641 case ZERO_EXTEND:
3642 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3643 return false;
3645 case FLOAT:
3646 case UNSIGNED_FLOAT:
3647 case FIX:
3648 case FLOAT_EXTEND:
3649 case FLOAT_TRUNCATE:
3650 *total = mips_cost->fp_add;
3651 return false;
3653 default:
3654 return false;
3658 /* Implement TARGET_ADDRESS_COST. */
3660 static int
3661 mips_address_cost (rtx addr)
3663 return mips_address_insns (addr, SImode, false);
3666 /* Return one word of double-word value OP, taking into account the fixed
3667 endianness of certain registers. HIGH_P is true to select the high part,
3668 false to select the low part. */
3671 mips_subword (rtx op, bool high_p)
3673 unsigned int byte, offset;
3674 enum machine_mode mode;
3676 mode = GET_MODE (op);
3677 if (mode == VOIDmode)
3678 mode = TARGET_64BIT ? TImode : DImode;
3680 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3681 byte = UNITS_PER_WORD;
3682 else
3683 byte = 0;
3685 if (FP_REG_RTX_P (op))
3687 /* Paired FPRs are always ordered little-endian. */
3688 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3689 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3692 if (MEM_P (op))
3693 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3695 return simplify_gen_subreg (word_mode, op, mode, byte);
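/* For example, assuming a 32-bit big-endian configuration, the high
   word of a DImode value in the register pair $4/$5 is $4 (byte offset
   0) and the low word is $5, whereas a doubleword split across an FPR
   pair always keeps the low word in the first register of the pair.  */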
3698 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3700 bool
3701 mips_split_64bit_move_p (rtx dest, rtx src)
3703 if (TARGET_64BIT)
3704 return false;
3706 /* FPR-to-FPR moves can be done in a single instruction, if they're
3707 allowed at all. */
3708 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3709 return false;
3711 /* Check for floating-point loads and stores. */
3712 if (ISA_HAS_LDC1_SDC1)
3714 if (FP_REG_RTX_P (dest) && MEM_P (src))
3715 return false;
3716 if (FP_REG_RTX_P (src) && MEM_P (dest))
3717 return false;
3719 return true;
3722 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3723 this function handles 64-bit moves for which mips_split_64bit_move_p
3724 holds. For 64-bit targets, this function handles 128-bit moves. */
3726 void
3727 mips_split_doubleword_move (rtx dest, rtx src)
3729 rtx low_dest;
3731 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3733 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3734 emit_insn (gen_move_doubleword_fprdi (dest, src));
3735 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3736 emit_insn (gen_move_doubleword_fprdf (dest, src));
3737 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
3738 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
3739 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
3740 emit_insn (gen_move_doubleword_fprv2si (dest, src));
3741 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
3742 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
3743 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
3744 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
3745 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3746 emit_insn (gen_move_doubleword_fprtf (dest, src));
3747 else
3748 gcc_unreachable ();
3750 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
3752 low_dest = mips_subword (dest, false);
3753 mips_emit_move (low_dest, mips_subword (src, false));
3754 if (TARGET_64BIT)
3755 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
3756 else
3757 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
3759 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
3761 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
3762 if (TARGET_64BIT)
3763 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
3764 else
3765 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
3767 else
3769 /* The operation can be split into two normal moves. Decide in
3770 which order to do them. */
3771 low_dest = mips_subword (dest, false);
3772 if (REG_P (low_dest)
3773 && reg_overlap_mentioned_p (low_dest, src))
3775 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3776 mips_emit_move (low_dest, mips_subword (src, false));
3778 else
3780 mips_emit_move (low_dest, mips_subword (src, false));
3781 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
3786 /* Return the appropriate instructions to move SRC into DEST. Assume
3787 that SRC is operand 1 and DEST is operand 0. */
3789 const char *
3790 mips_output_move (rtx dest, rtx src)
3792 enum rtx_code dest_code, src_code;
3793 enum machine_mode mode;
3794 enum mips_symbol_type symbol_type;
3795 bool dbl_p;
3797 dest_code = GET_CODE (dest);
3798 src_code = GET_CODE (src);
3799 mode = GET_MODE (dest);
3800 dbl_p = (GET_MODE_SIZE (mode) == 8);
3802 if (dbl_p && mips_split_64bit_move_p (dest, src))
3803 return "#";
3805 if ((src_code == REG && GP_REG_P (REGNO (src)))
3806 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
3808 if (dest_code == REG)
3810 if (GP_REG_P (REGNO (dest)))
3811 return "move\t%0,%z1";
3813 /* Moves to HI are handled by special .md insns. */
3814 if (REGNO (dest) == LO_REGNUM)
3815 return "mtlo\t%z1";
3817 if (DSP_ACC_REG_P (REGNO (dest)))
3819 static char retval[] = "mt__\t%z1,%q0";
3821 retval[2] = reg_names[REGNO (dest)][4];
3822 retval[3] = reg_names[REGNO (dest)][5];
3823 return retval;
3826 if (FP_REG_P (REGNO (dest)))
3827 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
3829 if (ALL_COP_REG_P (REGNO (dest)))
3831 static char retval[] = "dmtc_\t%z1,%0";
3833 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3834 return dbl_p ? retval : retval + 1;
3837 if (dest_code == MEM)
3838 switch (GET_MODE_SIZE (mode))
3840 case 1: return "sb\t%z1,%0";
3841 case 2: return "sh\t%z1,%0";
3842 case 4: return "sw\t%z1,%0";
3843 case 8: return "sd\t%z1,%0";
3846 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3848 if (src_code == REG)
3850 /* Moves from HI are handled by special .md insns. */
3851 if (REGNO (src) == LO_REGNUM)
3853 /* When generating VR4120 or VR4130 code, we use MACC and
3854 DMACC instead of MFLO. This avoids both the normal
3855 MIPS III HI/LO hazards and the errata related to
3856 -mfix-vr4130. */
3857 if (ISA_HAS_MACCHI)
3858 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
3859 return "mflo\t%0";
3862 if (DSP_ACC_REG_P (REGNO (src)))
3864 static char retval[] = "mf__\t%0,%q1";
3866 retval[2] = reg_names[REGNO (src)][4];
3867 retval[3] = reg_names[REGNO (src)][5];
3868 return retval;
3871 if (FP_REG_P (REGNO (src)))
3872 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
3874 if (ALL_COP_REG_P (REGNO (src)))
3876 static char retval[] = "dmfc_\t%0,%1";
3878 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3879 return dbl_p ? retval : retval + 1;
3882 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3883 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3886 if (src_code == MEM)
3887 switch (GET_MODE_SIZE (mode))
3889 case 1: return "lbu\t%0,%1";
3890 case 2: return "lhu\t%0,%1";
3891 case 4: return "lw\t%0,%1";
3892 case 8: return "ld\t%0,%1";
3895 if (src_code == CONST_INT)
3897 /* Don't use the X format for the operand itself, because that
3898 will give out-of-range numbers for 64-bit hosts and 32-bit
3899 targets. */
3900 if (!TARGET_MIPS16)
3901 return "li\t%0,%1\t\t\t# %X1";
3903 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
3904 return "li\t%0,%1";
3906 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
3907 return "#";
3910 if (src_code == HIGH)
3911 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3913 if (CONST_GP_P (src))
3914 return "move\t%0,%1";
3916 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3917 && mips_lo_relocs[symbol_type] != 0)
3919 /* A signed 16-bit constant formed by applying a relocation
3920 operator to a symbolic address. */
3921 gcc_assert (!mips_split_p[symbol_type]);
3922 return "li\t%0,%R1";
3925 if (symbolic_operand (src, VOIDmode))
3927 gcc_assert (TARGET_MIPS16
3928 ? TARGET_MIPS16_TEXT_LOADS
3929 : !TARGET_EXPLICIT_RELOCS);
3930 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
3933 if (src_code == REG && FP_REG_P (REGNO (src)))
3935 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3937 if (GET_MODE (dest) == V2SFmode)
3938 return "mov.ps\t%0,%1";
3939 else
3940 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
3943 if (dest_code == MEM)
3944 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
3946 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3948 if (src_code == MEM)
3949 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
3951 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3953 static char retval[] = "l_c_\t%0,%1";
3955 retval[1] = (dbl_p ? 'd' : 'w');
3956 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3957 return retval;
3959 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3961 static char retval[] = "s_c_\t%1,%0";
3963 retval[1] = (dbl_p ? 'd' : 'w');
3964 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3965 return retval;
3967 gcc_unreachable ();
3970 /* Return true if CMP1 is a suitable second operand for integer ordering
3971 test CODE. See also the *sCC patterns in mips.md. */
3973 static bool
3974 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
3976 switch (code)
3978 case GT:
3979 case GTU:
3980 return reg_or_0_operand (cmp1, VOIDmode);
3982 case GE:
3983 case GEU:
3984 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3986 case LT:
3987 case LTU:
3988 return arith_operand (cmp1, VOIDmode);
3990 case LE:
3991 return sle_operand (cmp1, VOIDmode);
3993 case LEU:
3994 return sleu_operand (cmp1, VOIDmode);
3996 default:
3997 gcc_unreachable ();
4001 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4002 integer ordering test *CODE, or if an equivalent combination can
4003 be formed by adjusting *CODE and *CMP1. When returning true, update
4004 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4005 them alone. */
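/* Illustrative example: an SImode (le x 0x12345) has no direct SLT form,
   but it can be rewritten as (lt x 0x12346) with the new constant forced
   into a register.  The rewrite is refused when adding one would wrap:
   (le x 0x7fffffff) and (leu x 0xffffffff) are left alone.  */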
4007 static bool
4008 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4009 enum machine_mode mode)
4011 HOST_WIDE_INT plus_one;
4013 if (mips_int_order_operand_ok_p (*code, *cmp1))
4014 return true;
4016 if (GET_CODE (*cmp1) == CONST_INT)
4017 switch (*code)
4019 case LE:
4020 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4021 if (INTVAL (*cmp1) < plus_one)
4023 *code = LT;
4024 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4025 return true;
4027 break;
4029 case LEU:
4030 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4031 if (plus_one != 0)
4033 *code = LTU;
4034 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4035 return true;
4037 break;
4039 default:
4040 break;
4042 return false;
4045 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4046 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4047 is nonnull, it's OK to set TARGET to the inverse of the result and
4048 flip *INVERT_PTR instead. */
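/* Illustrative example: to compute (ge x y) with Y in a register, there
   is no direct SLT form, but the inverse (lt x y) has one.  We therefore
   emit "slt target,x,y" and either flip *INVERT_PTR or, if the caller
   cannot accept an inverted result, XOR the SLT result with 1.  */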
4050 static void
4051 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4052 rtx target, rtx cmp0, rtx cmp1)
4054 enum machine_mode mode;
4056 /* First see if there is a MIPS instruction that can do this operation.
4057 If not, try doing the same for the inverse operation. If that also
4058 fails, force CMP1 into a register and try again. */
4059 mode = GET_MODE (cmp0);
4060 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4061 mips_emit_binary (code, target, cmp0, cmp1);
4062 else
4064 enum rtx_code inv_code = reverse_condition (code);
4065 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4067 cmp1 = force_reg (mode, cmp1);
4068 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4070 else if (invert_ptr == 0)
4072 rtx inv_target;
4074 inv_target = mips_force_binary (GET_MODE (target),
4075 inv_code, cmp0, cmp1);
4076 mips_emit_binary (XOR, target, inv_target, const1_rtx);
4078 else
4080 *invert_ptr = !*invert_ptr;
4081 mips_emit_binary (inv_code, target, cmp0, cmp1);
4086 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4087 The register will have the same mode as CMP0. */
4089 static rtx
4090 mips_zero_if_equal (rtx cmp0, rtx cmp1)
4092 if (cmp1 == const0_rtx)
4093 return cmp0;
4095 if (uns_arith_operand (cmp1, VOIDmode))
4096 return expand_binop (GET_MODE (cmp0), xor_optab,
4097 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4099 return expand_binop (GET_MODE (cmp0), sub_optab,
4100 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4103 /* Convert *CODE into a code that can be used in a floating-point
4104 scc instruction (C.cond.fmt). Return true if the values of
4105 the condition code registers will be inverted, with 0 indicating
4106 that the condition holds. */
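/* Illustrative example: (ne a b) has no direct C.cond.fmt encoding, so
   *CODE becomes EQ and we return true; the caller then emits c.eq.fmt
   and branches (or moves) on the condition code being false.  */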
4108 static bool
4109 mips_reversed_fp_cond (enum rtx_code *code)
4111 switch (*code)
4113 case NE:
4114 case LTGT:
4115 case ORDERED:
4116 *code = reverse_condition_maybe_unordered (*code);
4117 return true;
4119 default:
4120 return false;
4124 /* Convert a comparison into something that can be used in a branch or
4125 conditional move. cmp_operands[0] and cmp_operands[1] are the values
4126 being compared and *CODE is the code used to compare them.
4128 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4129 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4130 otherwise any standard branch condition can be used. The standard branch
4131 conditions are:
4133 - EQ or NE between two registers.
4134 - any comparison between a register and zero. */
4136 static void
4137 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
4139 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
4141 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
4143 *op0 = cmp_operands[0];
4144 *op1 = cmp_operands[1];
4146 else if (*code == EQ || *code == NE)
4148 if (need_eq_ne_p)
4150 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4151 *op1 = const0_rtx;
4153 else
4155 *op0 = cmp_operands[0];
4156 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
4159 else
4161 /* The comparison needs a separate scc instruction. Store the
4162 result of the scc in *OP0 and compare it against zero. */
4163 bool invert = false;
4164 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
4165 mips_emit_int_order_test (*code, &invert, *op0,
4166 cmp_operands[0], cmp_operands[1]);
4167 *code = (invert ? EQ : NE);
4168 *op1 = const0_rtx;
4171 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
4173 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
4174 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
4175 *code = NE;
4176 *op1 = const0_rtx;
4178 else
4180 enum rtx_code cmp_code;
4182 /* Floating-point tests use a separate C.cond.fmt comparison to
4183 set a condition code register. The branch or conditional move
4184 will then compare that register against zero.
4186 Set CMP_CODE to the code of the comparison instruction and
4187 *CODE to the code that the branch or move should use. */
4188 cmp_code = *code;
4189 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4190 *op0 = (ISA_HAS_8CC
4191 ? gen_reg_rtx (CCmode)
4192 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4193 *op1 = const0_rtx;
4194 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4198 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4199 Store the result in TARGET and return true if successful.
4201 On 64-bit targets, TARGET may be narrower than cmp_operands[0]. */
4203 bool
4204 mips_expand_scc (enum rtx_code code, rtx target)
4206 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4207 return false;
4209 if (code == EQ || code == NE)
4211 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4212 mips_emit_binary (code, target, zie, const0_rtx);
4214 else
4215 mips_emit_int_order_test (code, 0, target,
4216 cmp_operands[0], cmp_operands[1]);
4217 return true;
4220 /* Compare cmp_operands[0] with cmp_operands[1] using comparison code
4221 CODE and jump to OPERANDS[0] if the condition holds. */
4223 void
4224 mips_expand_conditional_branch (rtx *operands, enum rtx_code code)
4226 rtx op0, op1, condition;
4228 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4229 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4230 emit_jump_insn (gen_condjump (condition, operands[0]));
4233 /* Implement:
4235 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4236 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4238 void
4239 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4240 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4242 rtx cmp_result;
4243 bool reversed_p;
4245 reversed_p = mips_reversed_fp_cond (&cond);
4246 cmp_result = gen_reg_rtx (CCV2mode);
4247 emit_insn (gen_scc_ps (cmp_result,
4248 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4249 if (reversed_p)
4250 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4251 cmp_result));
4252 else
4253 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4254 cmp_result));
4257 /* Compare cmp_operands[0] with cmp_operands[1] using the code of
4258 OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] if the condition
4259 holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4261 void
4262 mips_expand_conditional_move (rtx *operands)
4264 enum rtx_code code;
4265 rtx cond, op0, op1;
4267 code = GET_CODE (operands[1]);
4268 mips_emit_compare (&code, &op0, &op1, true);
4269   cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
4270 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4271 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4272 operands[2], operands[3])));
4275 /* Compare cmp_operands[0] with cmp_operands[1] using rtl code CODE,
4276 then trap if the condition holds. */
4278 void
4279 mips_expand_conditional_trap (enum rtx_code code)
4281 rtx op0, op1;
4282 enum machine_mode mode;
4284 /* MIPS conditional trap instructions don't have GT or LE flavors,
4285 so we must swap the operands and convert to LT and GE respectively. */
4286 switch (code)
4288 case GT:
4289 case LE:
4290 case GTU:
4291 case LEU:
4292 code = swap_condition (code);
4293 op0 = cmp_operands[1];
4294 op1 = cmp_operands[0];
4295 break;
4297 default:
4298 op0 = cmp_operands[0];
4299 op1 = cmp_operands[1];
4300 break;
4303 mode = GET_MODE (cmp_operands[0]);
4304 op0 = force_reg (mode, op0);
4305 if (!arith_operand (op1, mode))
4306 op1 = force_reg (mode, op1);
4308 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4309 gen_rtx_fmt_ee (code, mode, op0, op1),
4310 const0_rtx));
4313 /* Initialize *CUM for a call to a function of type FNTYPE. */
4315 void
4316 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4318 memset (cum, 0, sizeof (*cum));
4319 cum->prototype = (fntype && prototype_p (fntype));
4320 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4323 /* Fill INFO with information about a single argument. CUM is the
4324 cumulative state for earlier arguments. MODE is the mode of this
4325 argument and TYPE is its type (if known). NAMED is true if this
4326 is a named (fixed) argument rather than a variable one. */
4328 static void
4329 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4330 enum machine_mode mode, tree type, int named)
4332 bool doubleword_aligned_p;
4333 unsigned int num_bytes, num_words, max_regs;
4335 /* Work out the size of the argument. */
4336 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4337 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4339 /* Decide whether it should go in a floating-point register, assuming
4340 one is free. Later code checks for availability.
4342 The checks against UNITS_PER_FPVALUE handle the soft-float and
4343 single-float cases. */
4344 switch (mips_abi)
4346 case ABI_EABI:
4347 /* The EABI conventions have traditionally been defined in terms
4348 of TYPE_MODE, regardless of the actual type. */
4349 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4350 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4351 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4352 break;
4354 case ABI_32:
4355 case ABI_O64:
4356 /* Only leading floating-point scalars are passed in
4357 floating-point registers. We also handle vector floats the same
4358 way, which is OK because they are not covered by the standard ABI. */
4359 info->fpr_p = (!cum->gp_reg_found
4360 && cum->arg_number < 2
4361 && (type == 0
4362 || SCALAR_FLOAT_TYPE_P (type)
4363 || VECTOR_FLOAT_TYPE_P (type))
4364 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4365 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4366 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4367 break;
4369 case ABI_N32:
4370 case ABI_64:
4371 /* Scalar, complex and vector floating-point types are passed in
4372 floating-point registers, as long as this is a named rather
4373 than a variable argument. */
4374 info->fpr_p = (named
4375 && (type == 0 || FLOAT_TYPE_P (type))
4376 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4377 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4378 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4379 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4381 /* ??? According to the ABI documentation, the real and imaginary
4382 parts of complex floats should be passed in individual registers.
4383 The real and imaginary parts of stack arguments are supposed
4384 to be contiguous and there should be an extra word of padding
4385 at the end.
4387 This has two problems. First, it makes it impossible to use a
4388 single "void *" va_list type, since register and stack arguments
4389 are passed differently. (At the time of writing, MIPSpro cannot
4390 handle complex float varargs correctly.) Second, it's unclear
4391 what should happen when there is only one register free.
4393 For now, we assume that named complex floats should go into FPRs
4394 if there are two FPRs free, otherwise they should be passed in the
4395 same way as a struct containing two floats. */
4396 if (info->fpr_p
4397 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4398 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4400 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4401 info->fpr_p = false;
4402 else
4403 num_words = 2;
4405 break;
4407 default:
4408 gcc_unreachable ();
4411 /* See whether the argument has doubleword alignment. */
4412 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4414 /* Set REG_OFFSET to the register count we're interested in.
4415 The EABI allocates the floating-point registers separately,
4416 but the other ABIs allocate them like integer registers. */
4417 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4418 ? cum->num_fprs
4419 : cum->num_gprs);
4421 /* Advance to an even register if the argument is doubleword-aligned. */
4422 if (doubleword_aligned_p)
4423 info->reg_offset += info->reg_offset & 1;
4425 /* Work out the offset of a stack argument. */
4426 info->stack_offset = cum->stack_words;
4427 if (doubleword_aligned_p)
4428 info->stack_offset += info->stack_offset & 1;
4430 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4432 /* Partition the argument between registers and stack. */
4433 info->reg_words = MIN (num_words, max_regs);
4434 info->stack_words = num_words - info->reg_words;
4437 /* INFO describes a register argument that has the normal format for the
4438 argument's mode. Return the register it uses, assuming that FPRs are
4439 available if HARD_FLOAT_P. */
4441 static unsigned int
4442 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4444 if (!info->fpr_p || !hard_float_p)
4445 return GP_ARG_FIRST + info->reg_offset;
4446 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4447 /* In o32, the second argument is always passed in $f14
4448 for TARGET_DOUBLE_FLOAT, regardless of whether the
4449 first argument was a word or doubleword. */
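     /* Illustrative example: for "void f (float a, double b)" under
	o32 hard-float, A is passed in $f12 and B in $f14, even though
	A occupied only a single argument word.  */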
4450 return FP_ARG_FIRST + 2;
4451 else
4452 return FP_ARG_FIRST + info->reg_offset;
4455 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4457 static bool
4458 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4460 return !TARGET_OLDABI;
4463 /* Implement FUNCTION_ARG. */
4466 mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4467 tree type, int named)
4469 struct mips_arg_info info;
4471 /* We will be called with a mode of VOIDmode after the last argument
4472 has been seen. Whatever we return will be passed to the call expander.
4473 If we need a MIPS16 fp_code, return a REG with the code stored as
4474 the mode. */
4475 if (mode == VOIDmode)
4477 if (TARGET_MIPS16 && cum->fp_code != 0)
4478 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4479 else
4480 return NULL;
4483 mips_get_arg_info (&info, cum, mode, type, named);
4485 /* Return straight away if the whole argument is passed on the stack. */
4486 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4487 return NULL;
4489 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4490 contains a double in its entirety, then that 64-bit chunk is passed
4491 in a floating-point register. */
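  /* Illustrative example: for

	struct s { double d; int x; int y; };

     passed by value under n64, the first 64-bit chunk (D) goes in a
     floating-point register while the second chunk (X and Y together)
     goes in a general register; the PARALLEL built below records that
     split.  */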
4492 if (TARGET_NEWABI
4493 && TARGET_HARD_FLOAT
4494 && named
4495 && type != 0
4496 && TREE_CODE (type) == RECORD_TYPE
4497 && TYPE_SIZE_UNIT (type)
4498 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4500 tree field;
4502 /* First check to see if there is any such field. */
4503 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4504 if (TREE_CODE (field) == FIELD_DECL
4505 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4506 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4507 && host_integerp (bit_position (field), 0)
4508 && int_bit_position (field) % BITS_PER_WORD == 0)
4509 break;
4511 if (field != 0)
4513 /* Now handle the special case by returning a PARALLEL
4514 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4515 chunks are passed in registers. */
4516 unsigned int i;
4517 HOST_WIDE_INT bitpos;
4518 rtx ret;
4520 /* assign_parms checks the mode of ENTRY_PARM, so we must
4521 use the actual mode here. */
4522 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4524 bitpos = 0;
4525 field = TYPE_FIELDS (type);
4526 for (i = 0; i < info.reg_words; i++)
4528 rtx reg;
4530 for (; field; field = TREE_CHAIN (field))
4531 if (TREE_CODE (field) == FIELD_DECL
4532 && int_bit_position (field) >= bitpos)
4533 break;
4535 if (field
4536 && int_bit_position (field) == bitpos
4537 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4538 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4539 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4540 else
4541 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4543 XVECEXP (ret, 0, i)
4544 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4545 GEN_INT (bitpos / BITS_PER_UNIT));
4547 bitpos += BITS_PER_WORD;
4549 return ret;
4553 /* Handle the n32/n64 conventions for passing complex floating-point
4554 arguments in FPR pairs. The real part goes in the lower register
4555 and the imaginary part goes in the upper register. */
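  /* Illustrative example: a leading "double _Complex" argument under
     n64 is passed with its real part in $f12 and its imaginary part in
     $f13; when only the real part fits in the remaining registers, the
     imaginary part spills to the stack instead.  */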
4556 if (TARGET_NEWABI
4557 && info.fpr_p
4558 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4560 rtx real, imag;
4561 enum machine_mode inner;
4562 unsigned int regno;
4564 inner = GET_MODE_INNER (mode);
4565 regno = FP_ARG_FIRST + info.reg_offset;
4566 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4568 /* Real part in registers, imaginary part on stack. */
4569 gcc_assert (info.stack_words == info.reg_words);
4570 return gen_rtx_REG (inner, regno);
4572 else
4574 gcc_assert (info.stack_words == 0);
4575 real = gen_rtx_EXPR_LIST (VOIDmode,
4576 gen_rtx_REG (inner, regno),
4577 const0_rtx);
4578 imag = gen_rtx_EXPR_LIST (VOIDmode,
4579 gen_rtx_REG (inner,
4580 regno + info.reg_words / 2),
4581 GEN_INT (GET_MODE_SIZE (inner)));
4582 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4586 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4589 /* Implement FUNCTION_ARG_ADVANCE. */
4591 void
4592 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4593 tree type, int named)
4595 struct mips_arg_info info;
4597 mips_get_arg_info (&info, cum, mode, type, named);
4599 if (!info.fpr_p)
4600 cum->gp_reg_found = true;
4602 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4603 an explanation of what this code does. It assumes that we're using
4604 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4605 in FPRs. */
4606 if (cum->arg_number < 2 && info.fpr_p)
4607 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4609 /* Advance the register count. This has the effect of setting
4610 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4611 argument required us to skip the final GPR and pass the whole
4612 argument on the stack. */
4613 if (mips_abi != ABI_EABI || !info.fpr_p)
4614 cum->num_gprs = info.reg_offset + info.reg_words;
4615 else if (info.reg_words > 0)
4616 cum->num_fprs += MAX_FPRS_PER_FMT;
4618 /* Advance the stack word count. */
4619 if (info.stack_words > 0)
4620 cum->stack_words = info.stack_offset + info.stack_words;
4622 cum->arg_number++;
4625 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4627 static int
4628 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4629 enum machine_mode mode, tree type, bool named)
4631 struct mips_arg_info info;
4633 mips_get_arg_info (&info, cum, mode, type, named);
4634 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4637 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4638 PARM_BOUNDARY bits of alignment, but will be given anything up
4639 to STACK_BOUNDARY bits if the type requires it. */
4642 mips_function_arg_boundary (enum machine_mode mode, tree type)
4644 unsigned int alignment;
4646 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4647 if (alignment < PARM_BOUNDARY)
4648 alignment = PARM_BOUNDARY;
4649 if (alignment > STACK_BOUNDARY)
4650 alignment = STACK_BOUNDARY;
4651 return alignment;
4654 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4655 upward rather than downward. In other words, return true if the
4656 first byte of the stack slot has useful data, false if the last
4657 byte does. */
4659 bool
4660 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4662 /* On little-endian targets, the first byte of every stack argument
4663 is passed in the first byte of the stack slot. */
4664 if (!BYTES_BIG_ENDIAN)
4665 return true;
4667 /* Otherwise, integral types are padded downward: the last byte of a
4668 stack argument is passed in the last byte of the stack slot. */
4669 if (type != 0
4670 ? (INTEGRAL_TYPE_P (type)
4671 || POINTER_TYPE_P (type)
4672 || FIXED_POINT_TYPE_P (type))
4673 : (SCALAR_INT_MODE_P (mode)
4674 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4675 return false;
4677 /* Big-endian o64 pads floating-point arguments downward. */
4678 if (mips_abi == ABI_O64)
4679 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4680 return false;
4682 /* Other types are padded upward for o32, o64, n32 and n64. */
4683 if (mips_abi != ABI_EABI)
4684 return true;
4686 /* Arguments smaller than a stack slot are padded downward. */
4687 if (mode != BLKmode)
4688 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
4689 else
4690 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
4693 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4694 if the least significant byte of the register has useful data. Return
4695 the opposite if the most significant byte does. */
4697 bool
4698 mips_pad_reg_upward (enum machine_mode mode, tree type)
4700 /* No shifting is required for floating-point arguments. */
4701 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4702 return !BYTES_BIG_ENDIAN;
4704 /* Otherwise, apply the same padding to register arguments as we do
4705 to stack arguments. */
4706 return mips_pad_arg_upward (mode, type);
4709 /* Return true if an argument must be passed by reference. */
4711 static bool
4712 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4713 enum machine_mode mode, const_tree type,
4714 bool named ATTRIBUTE_UNUSED)
4716 if (mips_abi == ABI_EABI)
4718 int size;
4720 /* ??? How should SCmode be handled? */
4721 if (mode == DImode || mode == DFmode
4722 || mode == DQmode || mode == UDQmode
4723 || mode == DAmode || mode == UDAmode)
4724 return 0;
4726 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4727 return size == -1 || size > UNITS_PER_WORD;
4729 else
4731 /* If we have a variable-sized parameter, we have no choice. */
4732 return targetm.calls.must_pass_in_stack (mode, type);
4736 /* Implement TARGET_CALLEE_COPIES. */
4738 static bool
4739 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4740 enum machine_mode mode ATTRIBUTE_UNUSED,
4741 const_tree type ATTRIBUTE_UNUSED, bool named)
4743 return mips_abi == ABI_EABI && named;
4746 /* See whether VALTYPE is a record whose fields should be returned in
4747 floating-point registers. If so, return the number of fields and
4748 list them in FIELDS (which should have two elements). Return 0
4749 otherwise.
4751 For n32 & n64, a structure with one or two fields is returned in
4752 floating-point registers as long as every field has a floating-point
4753 type. */
4755 static int
4756 mips_fpr_return_fields (const_tree valtype, tree *fields)
4758 tree field;
4759 int i;
4761 if (!TARGET_NEWABI)
4762 return 0;
4764 if (TREE_CODE (valtype) != RECORD_TYPE)
4765 return 0;
4767 i = 0;
4768 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4770 if (TREE_CODE (field) != FIELD_DECL)
4771 continue;
4773 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
4774 return 0;
4776 if (i == 2)
4777 return 0;
4779 fields[i++] = field;
4781 return i;
4784 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4785 a value in the most significant part of $2/$3 if:
4787 - the target is big-endian;
4789 - the value has a structure or union type (we generalize this to
4790 cover aggregates from other languages too); and
4792 - the structure is not returned in floating-point registers. */
4794 static bool
4795 mips_return_in_msb (const_tree valtype)
4797 tree fields[2];
4799 return (TARGET_NEWABI
4800 && TARGET_BIG_ENDIAN
4801 && AGGREGATE_TYPE_P (valtype)
4802 && mips_fpr_return_fields (valtype, fields) == 0);
4805 /* Return true if the function return value MODE will get returned in a
4806 floating-point register. */
4808 static bool
4809 mips_return_mode_in_fpr_p (enum machine_mode mode)
4811 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4812 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4813 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4814 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4817 /* Return the representation of an FPR return register when the
4818 value being returned in FP_RETURN has mode VALUE_MODE and the
4819 return type itself has mode TYPE_MODE. On NewABI targets,
4820 the two modes may be different for structures like:
4822 struct __attribute__((packed)) foo { float f; }
4824 where we return the SFmode value of "f" in FP_RETURN, but where
4825 the structure itself has mode BLKmode. */
4827 static rtx
4828 mips_return_fpr_single (enum machine_mode type_mode,
4829 enum machine_mode value_mode)
4831 rtx x;
4833 x = gen_rtx_REG (value_mode, FP_RETURN);
4834 if (type_mode != value_mode)
4836 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
4837 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
4839 return x;
4842 /* Return a composite value in a pair of floating-point registers.
4843 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4844 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4845 complete value.
4847 For n32 & n64, $f0 always holds the first value and $f2 the second.
4848 Otherwise the values are packed together as closely as possible. */
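/* Illustrative example: a complex float value returned under n32/n64
   comes back as a PARALLEL, with the real part in $f0 at offset 0 and
   the imaginary part in $f2 at offset GET_MODE_SIZE (SFmode).  */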
4850 static rtx
4851 mips_return_fpr_pair (enum machine_mode mode,
4852 enum machine_mode mode1, HOST_WIDE_INT offset1,
4853 enum machine_mode mode2, HOST_WIDE_INT offset2)
4855 int inc;
4857 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4858 return gen_rtx_PARALLEL
4859 (mode,
4860 gen_rtvec (2,
4861 gen_rtx_EXPR_LIST (VOIDmode,
4862 gen_rtx_REG (mode1, FP_RETURN),
4863 GEN_INT (offset1)),
4864 gen_rtx_EXPR_LIST (VOIDmode,
4865 gen_rtx_REG (mode2, FP_RETURN + inc),
4866 GEN_INT (offset2))));
4870 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4871 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4872 VALTYPE is null and MODE is the mode of the return value. */
4875 mips_function_value (const_tree valtype, enum machine_mode mode)
4877 if (valtype)
4879 tree fields[2];
4880 int unsigned_p;
4882 mode = TYPE_MODE (valtype);
4883 unsigned_p = TYPE_UNSIGNED (valtype);
4885 /* Since TARGET_PROMOTE_FUNCTION_RETURN unconditionally returns true,
4886 we must promote the mode just as PROMOTE_MODE does. */
4887 mode = promote_mode (valtype, mode, &unsigned_p, 1);
4889 /* Handle structures whose fields are returned in $f0/$f2. */
4890 switch (mips_fpr_return_fields (valtype, fields))
4892 case 1:
4893 return mips_return_fpr_single (mode,
4894 TYPE_MODE (TREE_TYPE (fields[0])));
4896 case 2:
4897 return mips_return_fpr_pair (mode,
4898 TYPE_MODE (TREE_TYPE (fields[0])),
4899 int_byte_position (fields[0]),
4900 TYPE_MODE (TREE_TYPE (fields[1])),
4901 int_byte_position (fields[1]));
4904 /* If a value is passed in the most significant part of a register, see
4905 whether we have to round the mode up to a whole number of words. */
4906 if (mips_return_in_msb (valtype))
4908 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4909 if (size % UNITS_PER_WORD != 0)
4911 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4912 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4916 /* For EABI, the class of return register depends entirely on MODE.
4917 For example, "struct { some_type x; }" and "union { some_type x; }"
4918 are returned in the same way as a bare "some_type" would be.
4919 Other ABIs only use FPRs for scalar, complex or vector types. */
4920 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4921 return gen_rtx_REG (mode, GP_RETURN);
4924 if (!TARGET_MIPS16)
4926 /* Handle long doubles for n32 & n64. */
4927 if (mode == TFmode)
4928 return mips_return_fpr_pair (mode,
4929 DImode, 0,
4930 DImode, GET_MODE_SIZE (mode) / 2);
4932 if (mips_return_mode_in_fpr_p (mode))
4934 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4935 return mips_return_fpr_pair (mode,
4936 GET_MODE_INNER (mode), 0,
4937 GET_MODE_INNER (mode),
4938 GET_MODE_SIZE (mode) / 2);
4939 else
4940 return gen_rtx_REG (mode, FP_RETURN);
4944 return gen_rtx_REG (mode, GP_RETURN);
4947 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
4948 all BLKmode objects are returned in memory. Under the n32, n64
4949 and embedded ABIs, small structures are returned in a register.
4950 Objects with varying size must still be returned in memory, of
4951 course. */
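/* Illustrative example: under n64, a 16-byte structure (2 * UNITS_PER_WORD)
   is returned in registers, while a 17-byte structure, or one whose size
   is not known at compile time, is returned in memory.  */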
4953 static bool
4954 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4956 return (TARGET_OLDABI
4957 ? TYPE_MODE (type) == BLKmode
4958 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
4961 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
4963 static void
4964 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4965 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4966 int no_rtl)
4968 CUMULATIVE_ARGS local_cum;
4969 int gp_saved, fp_saved;
4971 /* The caller has advanced CUM up to, but not beyond, the last named
4972 argument. Advance a local copy of CUM past the last "real" named
4973 argument, to find out how many registers are left over. */
4974 local_cum = *cum;
4975 FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);
4977 /* Find out how many registers we need to save. */
4978 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4979 fp_saved = (EABI_FLOAT_VARARGS_P
4980 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4981 : 0);
4983 if (!no_rtl)
4985 if (gp_saved > 0)
4987 rtx ptr, mem;
4989 ptr = plus_constant (virtual_incoming_args_rtx,
4990 REG_PARM_STACK_SPACE (cfun->decl)
4991 - gp_saved * UNITS_PER_WORD);
4992 mem = gen_frame_mem (BLKmode, ptr);
4993 set_mem_alias_set (mem, get_varargs_alias_set ());
4995 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4996 mem, gp_saved);
4998 if (fp_saved > 0)
5000 /* We can't use move_block_from_reg, because it will use
5001 the wrong mode. */
5002 enum machine_mode mode;
5003 int off, i;
5005 /* Set OFF to the offset from virtual_incoming_args_rtx of
5006 the first float register. The FP save area lies below
5007 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
5008 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
5009 off -= fp_saved * UNITS_PER_FPREG;
5011 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
5013 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
5014 i += MAX_FPRS_PER_FMT)
5016 rtx ptr, mem;
5018 ptr = plus_constant (virtual_incoming_args_rtx, off);
5019 mem = gen_frame_mem (mode, ptr);
5020 set_mem_alias_set (mem, get_varargs_alias_set ());
5021 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
5022 off += UNITS_PER_HWFPVALUE;
5026 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
5027 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
5028 + fp_saved * UNITS_PER_FPREG);
5031 /* Implement TARGET_BUILTIN_VA_LIST. */
5033 static tree
5034 mips_build_builtin_va_list (void)
5036 if (EABI_FLOAT_VARARGS_P)
5038 /* We keep 3 pointers, and two offsets.
5040 Two pointers are to the overflow area, which starts at the CFA.
5041 One of these is constant, for addressing into the GPR save area
5042 below it. The other is advanced up the stack through the
5043 overflow region.
5045 The third pointer is to the bottom of the GPR save area.
5046 Since the FPR save area is just below it, we can address
5047 FPR slots off this pointer.
5049 We also keep two one-byte offsets, which are to be subtracted
5050 from the constant pointers to yield addresses in the GPR and
5051 FPR save areas. These are downcounted as float or non-float
5052 arguments are used, and when they get to zero, the argument
5053 must be obtained from the overflow region. */
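  /* Roughly, the record built below corresponds to the following C
     declaration (the tag name is invented for illustration):

	 struct __mips_eabi_va_list {
	   void *__overflow_argptr;
	   void *__gpr_top;
	   void *__fpr_top;
	   unsigned char __gpr_offset;
	   unsigned char __fpr_offset;
	   unsigned char __reserved[sizeof (void *) - 2];
	 };  */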
5054 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5055 tree array, index;
5057 record = lang_hooks.types.make_type (RECORD_TYPE);
5059 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
5060 ptr_type_node);
5061 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5062 ptr_type_node);
5063 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5064 ptr_type_node);
5065 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5066 unsigned_char_type_node);
5067 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5068 unsigned_char_type_node);
5069 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5070 warn on every user file. */
5071 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5072 array = build_array_type (unsigned_char_type_node,
5073 build_index_type (index));
5074 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5076 DECL_FIELD_CONTEXT (f_ovfl) = record;
5077 DECL_FIELD_CONTEXT (f_gtop) = record;
5078 DECL_FIELD_CONTEXT (f_ftop) = record;
5079 DECL_FIELD_CONTEXT (f_goff) = record;
5080 DECL_FIELD_CONTEXT (f_foff) = record;
5081 DECL_FIELD_CONTEXT (f_res) = record;
5083 TYPE_FIELDS (record) = f_ovfl;
5084 TREE_CHAIN (f_ovfl) = f_gtop;
5085 TREE_CHAIN (f_gtop) = f_ftop;
5086 TREE_CHAIN (f_ftop) = f_goff;
5087 TREE_CHAIN (f_goff) = f_foff;
5088 TREE_CHAIN (f_foff) = f_res;
5090 layout_type (record);
5091 return record;
5093 else if (TARGET_IRIX && TARGET_IRIX6)
5094 /* On IRIX 6, this type is 'char *'. */
5095 return build_pointer_type (char_type_node);
5096 else
5097 /* Otherwise, we use 'void *'. */
5098 return ptr_type_node;
5101 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5103 static void
5104 mips_va_start (tree valist, rtx nextarg)
5106 if (EABI_FLOAT_VARARGS_P)
5108 const CUMULATIVE_ARGS *cum;
5109 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5110 tree ovfl, gtop, ftop, goff, foff;
5111 tree t;
5112 int gpr_save_area_size;
5113 int fpr_save_area_size;
5114 int fpr_offset;
5116 cum = &crtl->args.info;
5117 gpr_save_area_size
5118 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5119 fpr_save_area_size
5120 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5122 f_ovfl = TYPE_FIELDS (va_list_type_node);
5123 f_gtop = TREE_CHAIN (f_ovfl);
5124 f_ftop = TREE_CHAIN (f_gtop);
5125 f_goff = TREE_CHAIN (f_ftop);
5126 f_foff = TREE_CHAIN (f_goff);
5128 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5129 NULL_TREE);
5130 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5131 NULL_TREE);
5132 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5133 NULL_TREE);
5134 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5135 NULL_TREE);
5136 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5137 NULL_TREE);
5139 /* Emit code to initialize OVFL, which points to the next varargs
5140 stack argument. CUM->STACK_WORDS gives the number of stack
5141 words used by named arguments. */
5142 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5143 if (cum->stack_words > 0)
5144 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5145 size_int (cum->stack_words * UNITS_PER_WORD));
5146 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5147 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5149 /* Emit code to initialize GTOP, the top of the GPR save area. */
5150 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5151 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
5152 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5154 /* Emit code to initialize FTOP, the top of the FPR save area.
5155 This address is gpr_save_area_bytes below GTOP, rounded
5156 down to the next fp-aligned boundary. */
5157 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5158 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5159 fpr_offset &= -UNITS_PER_FPVALUE;
5160 if (fpr_offset)
5161 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5162 size_int (-fpr_offset));
5163 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
5164 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5166 /* Emit code to initialize GOFF, the offset from GTOP of the
5167 next GPR argument. */
5168 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
5169 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
5170 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5172 /* Likewise emit code to initialize FOFF, the offset from FTOP
5173 of the next FPR argument. */
5174 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
5175 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
5176 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5178 else
5180 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5181 std_expand_builtin_va_start (valist, nextarg);
5185 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5187 static tree
5188 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5189 gimple_seq *post_p)
5191 tree addr;
5192 bool indirect_p;
5194 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5195 if (indirect_p)
5196 type = build_pointer_type (type);
5198 if (!EABI_FLOAT_VARARGS_P)
5199 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5200 else
5202 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5203 tree ovfl, top, off, align;
5204 HOST_WIDE_INT size, rsize, osize;
5205 tree t, u;
5207 f_ovfl = TYPE_FIELDS (va_list_type_node);
5208 f_gtop = TREE_CHAIN (f_ovfl);
5209 f_ftop = TREE_CHAIN (f_gtop);
5210 f_goff = TREE_CHAIN (f_ftop);
5211 f_foff = TREE_CHAIN (f_goff);
5213 /* Let:
5215 TOP be the top of the GPR or FPR save area;
5216 OFF be the offset from TOP of the next register;
5217 ADDR_RTX be the address of the argument;
5218 SIZE be the number of bytes in the argument type;
5219 RSIZE be the number of bytes used to store the argument
5220 when it's in the register save area; and
5221 OSIZE be the number of bytes used to store it when it's
5222 in the stack overflow area.
5224 The code we want is:
5226 1: off &= -rsize; // round down
5227 2: if (off != 0)
5228 3: {
5229 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5230 5: off -= rsize;
5231 6: }
5232 7: else
5233 8: {
5234 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5235 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5236 11: ovfl += osize;
5237 12: }
5239 [1] and [9] can sometimes be optimized away. */
5241 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5242 NULL_TREE);
5243 size = int_size_in_bytes (type);
5245 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5246 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5248 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5249 NULL_TREE);
5250 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5251 NULL_TREE);
5253 /* When va_start saves FPR arguments to the stack, each slot
5254 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5255 argument's precision. */
5256 rsize = UNITS_PER_HWFPVALUE;
5258 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5259 (= PARM_BOUNDARY bits). This can be different from RSIZE
5260 in two cases:
5262 (1) On 32-bit targets when TYPE is a structure such as:
5264 struct s { float f; };
5266 Such structures are passed in paired FPRs, so RSIZE
5267 will be 8 bytes. However, the structure only takes
5268 up 4 bytes of memory, so OSIZE will only be 4.
5270 (2) In combinations such as -mgp64 -msingle-float
5271 -fshort-double. Doubles passed in registers will then take
5272 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5273 stack take up UNITS_PER_WORD bytes. */
5274 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5276 else
5278 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5279 NULL_TREE);
5280 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5281 NULL_TREE);
5282 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5283 if (rsize > UNITS_PER_WORD)
5285 /* [1] Emit code for: off &= -rsize. */
5286 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5287 build_int_cst (NULL_TREE, -rsize));
5288 gimplify_assign (off, t, pre_p);
5290 osize = rsize;
5293 /* [2] Emit code to branch if off == 0. */
5294 t = build2 (NE_EXPR, boolean_type_node, off,
5295 build_int_cst (TREE_TYPE (off), 0));
5296 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5298 /* [5] Emit code for: off -= rsize. We do this as a form of
5299 post-decrement not available to C. */
5300 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5301 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5303 /* [4] Emit code for:
5304 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5305 t = fold_convert (sizetype, t);
5306 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5307 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5308 if (BYTES_BIG_ENDIAN && rsize > size)
5310 u = size_int (rsize - size);
5311 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5313 COND_EXPR_THEN (addr) = t;
5315 if (osize > UNITS_PER_WORD)
5317 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5318 u = size_int (osize - 1);
5319 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5320 t = fold_convert (sizetype, t);
5321 u = size_int (-osize);
5322 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5323 t = fold_convert (TREE_TYPE (ovfl), t);
5324 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5326 else
5327 align = NULL;
5329 /* [10, 11] Emit code for:
5330 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5331 ovfl += osize. */
5332 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5333 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5334 if (BYTES_BIG_ENDIAN && osize > size)
5336 u = size_int (osize - size);
5337 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5340 /* String [9] and [10, 11] together. */
5341 if (align)
5342 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5343 COND_EXPR_ELSE (addr) = t;
5345 addr = fold_convert (build_pointer_type (type), addr);
5346 addr = build_va_arg_indirect_ref (addr);
5349 if (indirect_p)
5350 addr = build_va_arg_indirect_ref (addr);
5352 return addr;
5355 /* Start a definition of function NAME. MIPS16_P indicates whether the
5356 function contains MIPS16 code. */
5358 static void
5359 mips_start_function_definition (const char *name, bool mips16_p)
5361 if (mips16_p)
5362 fprintf (asm_out_file, "\t.set\tmips16\n");
5363 else
5364 fprintf (asm_out_file, "\t.set\tnomips16\n");
5366 if (!flag_inhibit_size_directive)
5368 fputs ("\t.ent\t", asm_out_file);
5369 assemble_name (asm_out_file, name);
5370 fputs ("\n", asm_out_file);
5373 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
5375 /* Start the definition proper. */
5376 assemble_name (asm_out_file, name);
5377 fputs (":\n", asm_out_file);
5380 /* End a function definition started by mips_start_function_definition. */
5382 static void
5383 mips_end_function_definition (const char *name)
5385 if (!flag_inhibit_size_directive)
5387 fputs ("\t.end\t", asm_out_file);
5388 assemble_name (asm_out_file, name);
5389 fputs ("\n", asm_out_file);
5393 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5395 static bool
5396 mips_ok_for_lazy_binding_p (rtx x)
5398 return (TARGET_USE_GOT
5399 && GET_CODE (x) == SYMBOL_REF
5400 && !SYMBOL_REF_BIND_NOW_P (x)
5401 && !mips_symbol_binds_local_p (x));
5404 /* Load function address ADDR into register DEST. TYPE is as for
5405 mips_expand_call. Return true if we used an explicit lazy-binding
5406 sequence. */
5408 static bool
5409 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
5411 /* If we're generating PIC, and this call is to a global function,
5412 try to allow its address to be resolved lazily. This isn't
5413 possible for sibcalls when $gp is call-saved because the value
5414 of $gp on entry to the stub would be our caller's gp, not ours. */
5415 if (TARGET_EXPLICIT_RELOCS
5416 && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
5417 && mips_ok_for_lazy_binding_p (addr))
5419 addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
5420 emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
5421 return true;
5423 else
5425 mips_emit_move (dest, addr);
5426 return false;
5430 /* Each locally-defined hard-float MIPS16 function has a local symbol
5431 associated with it. This hash table maps the function symbol (FUNC)
5432 to the local symbol (LOCAL). */
5433 struct mips16_local_alias GTY(()) {
5434 rtx func;
5435 rtx local;
5437 static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
5439 /* Hash table callbacks for mips16_local_aliases. */
5441 static hashval_t
5442 mips16_local_aliases_hash (const void *entry)
5444 const struct mips16_local_alias *alias;
5446 alias = (const struct mips16_local_alias *) entry;
5447 return htab_hash_string (XSTR (alias->func, 0));
5450 static int
5451 mips16_local_aliases_eq (const void *entry1, const void *entry2)
5453 const struct mips16_local_alias *alias1, *alias2;
5455 alias1 = (const struct mips16_local_alias *) entry1;
5456 alias2 = (const struct mips16_local_alias *) entry2;
5457 return rtx_equal_p (alias1->func, alias2->func);
5460 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5461 Return a local alias for it, creating a new one if necessary. */
5463 static rtx
5464 mips16_local_alias (rtx func)
5466 struct mips16_local_alias *alias, tmp_alias;
5467 void **slot;
5469 /* Create the hash table if this is the first call. */
5470 if (mips16_local_aliases == NULL)
5471 mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
5472 mips16_local_aliases_eq, NULL);
5474 /* Look up the function symbol, creating a new entry if need be. */
5475 tmp_alias.func = func;
5476 slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
5477 gcc_assert (slot != NULL);
5479 alias = (struct mips16_local_alias *) *slot;
5480 if (alias == NULL)
5482 const char *func_name, *local_name;
5483 rtx local;
5485 /* Create a new SYMBOL_REF for the local symbol. The choice of
5486 __fn_local_* is based on the __fn_stub_* names that we've
5487 traditionally used for the non-MIPS16 stub. */
5488 func_name = targetm.strip_name_encoding (XSTR (func, 0));
5489 local_name = ACONCAT (("__fn_local_", func_name, NULL));
5490 local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
5491 SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
5493 /* Create a new structure to represent the mapping. */
5494 alias = GGC_NEW (struct mips16_local_alias);
5495 alias->func = func;
5496 alias->local = local;
5497 *slot = alias;
5499 return alias->local;
5502 /* A chained list of functions for which mips16_build_call_stub has already
5503 generated a stub. NAME is the name of the function and FP_RET_P is true
5504 if the function returns a value in floating-point registers. */
5505 struct mips16_stub {
5506 struct mips16_stub *next;
5507 char *name;
5508 bool fp_ret_p;
5510 static struct mips16_stub *mips16_stubs;
5512 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
5514 static rtx
5515 mips16_stub_function (const char *name)
5517 rtx x;
5519 x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5520 SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
5521 return x;
5524 /* Return the two-character string that identifies floating-point
5525 return mode MODE in the name of a MIPS16 function stub. */
5527 static const char *
5528 mips16_call_stub_mode_suffix (enum machine_mode mode)
5530 if (mode == SFmode)
5531 return "sf";
5532 else if (mode == DFmode)
5533 return "df";
5534 else if (mode == SCmode)
5535 return "sc";
5536 else if (mode == DCmode)
5537 return "dc";
5538 else if (mode == V2SFmode)
5539 return "df";
5540 else
5541 gcc_unreachable ();
5544 /* Write instructions to move a 32-bit value between general register
5545 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5546 from GPREG to FPREG and 'f' to move in the opposite direction. */
5548 static void
5549 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5551 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5552 reg_names[gpreg], reg_names[fpreg]);
5555 /* Likewise for 64-bit values. */
5557 static void
5558 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5560 if (TARGET_64BIT)
5561 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5562 reg_names[gpreg], reg_names[fpreg]);
5563 else if (TARGET_FLOAT64)
5565 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5566 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5567 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5568 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5570 else
5572 /* Move the least-significant word. */
5573 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5574 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5575 /* ...then the most significant word. */
5576 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5577 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
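/* Illustrative example: with direction 'f' on a little-endian 32-bit
   target without TARGET_FLOAT64, the transfer is done with two MFC1s:
   the least-significant word moves between FPREG and GPREG, and the
   most-significant word between FPREG + 1 and GPREG + 1.  */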
5581 /* Write out code to move floating-point arguments into or out of
5582 general registers. FP_CODE is the code describing which arguments
5583 are present (see the comment above the definition of CUMULATIVE_ARGS
5584 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5586 static void
5587 mips_output_args_xfer (int fp_code, char direction)
5589 unsigned int gparg, fparg, f;
5590 CUMULATIVE_ARGS cum;
5592 /* This code only works for o32 and o64. */
5593 gcc_assert (TARGET_OLDABI);
5595 mips_init_cumulative_args (&cum, NULL);
5597 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5599 enum machine_mode mode;
5600 struct mips_arg_info info;
5602 if ((f & 3) == 1)
5603 mode = SFmode;
5604 else if ((f & 3) == 2)
5605 mode = DFmode;
5606 else
5607 gcc_unreachable ();
5609 mips_get_arg_info (&info, &cum, mode, NULL, true);
5610 gparg = mips_arg_regno (&info, false);
5611 fparg = mips_arg_regno (&info, true);
5613 if (mode == SFmode)
5614 mips_output_32bit_xfer (direction, gparg, fparg);
5615 else
5616 mips_output_64bit_xfer (direction, gparg, fparg);
5618 mips_function_arg_advance (&cum, mode, NULL, true);
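/* FP_CODE packs two bits per floating-point argument, first argument
   in the least-significant bits: 1 for SFmode, 2 for DFmode (see the
   comment above CUMULATIVE_ARGS in mips.h).  As an illustration, a
   (float, double) argument list is encoded as 1 | (2 << 2) == 9.  */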
5622 /* Write a MIPS16 stub for the current function. This stub is used
5623 for functions which take arguments in the floating-point registers.
5624 It is normal-mode code that moves the floating-point arguments
5625 into the general registers and then jumps to the MIPS16 code. */
5627 static void
5628 mips16_build_function_stub (void)
5630 const char *fnname, *alias_name, *separator;
5631 char *secname, *stubname;
5632 tree stubdecl;
5633 unsigned int f;
5634 rtx symbol, alias;
5636 /* Create the name of the stub, and its unique section. */
5637 symbol = XEXP (DECL_RTL (current_function_decl), 0);
5638 alias = mips16_local_alias (symbol);
5640 fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
5641 alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
5642 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
5643 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
5645 /* Build a decl for the stub. */
5646 stubdecl = build_decl (FUNCTION_DECL, get_identifier (stubname),
5647 build_function_type (void_type_node, NULL_TREE));
5648 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5649 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5651 /* Output a comment. */
5652 fprintf (asm_out_file, "\t# Stub function for %s (",
5653 current_function_name ());
5654 separator = "";
5655 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
5657 fprintf (asm_out_file, "%s%s", separator,
5658 (f & 3) == 1 ? "float" : "double");
5659 separator = ", ";
5661 fprintf (asm_out_file, ")\n");
5663 /* Start the function definition. */
5664 assemble_start_function (stubdecl, stubname);
5665 mips_start_function_definition (stubname, false);
5667 /* If generating pic2 code, either set up the global pointer or
5668 switch to pic0. */
5669 if (TARGET_ABICALLS_PIC2)
5671 if (TARGET_ABSOLUTE_ABICALLS)
5672 fprintf (asm_out_file, "\t.option\tpic0\n");
5673 else
5675 output_asm_insn ("%(.cpload\t%^%)", NULL);
5676 /* Emit an R_MIPS_NONE relocation to tell the linker what the
5677 target function is. Use a local GOT access when loading the
5678 symbol, to cut down on the number of unnecessary GOT entries
5679 for stubs that aren't needed. */
5680 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
5681 symbol = alias;
5685 /* Load the address of the MIPS16 function into $25. Do this first so
5686 that targets with coprocessor interlocks can use an MFC1 to fill the
5687 delay slot. */
5688 output_asm_insn ("la\t%^,%0", &symbol);
5690 /* Move the arguments from floating-point registers to general registers. */
5691 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
5693 /* Jump to the MIPS16 function. */
5694 output_asm_insn ("jr\t%^", NULL);
5696 if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
5697 fprintf (asm_out_file, "\t.option\tpic2\n");
5699 mips_end_function_definition (stubname);
5701 /* If the linker needs to create a dynamic symbol for the target
5702 function, it will associate the symbol with the stub (which,
5703 unlike the target function, follows the proper calling conventions).
5704 It is therefore useful to have a local alias for the target function,
5705 so that it can still be identified as MIPS16 code. As an optimization,
5706 this symbol can also be used for indirect MIPS16 references from
5707 within this file. */
5708 ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
5710 switch_to_section (function_section (current_function_decl));
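/* Illustrative summary: for a hypothetical function `foo' taking
   (float, double), the code above emits a stub `__fn_stub_foo' in
   section `.mips16.fn.foo' that loads the address of `foo' (or of its
   local alias) into $25, moves the floating-point arguments into
   general registers and then jumps to the MIPS16 code with `jr'.  */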
5713 /* The current function is a MIPS16 function that returns a value in an FPR.
5714 Copy the return value from its soft-float to its hard-float location.
5715 libgcc2 has special non-MIPS16 helper functions for each case. */
5717 static void
5718 mips16_copy_fpr_return_value (void)
5720 rtx fn, insn, retval;
5721 tree return_type;
5722 enum machine_mode return_mode;
5723 const char *name;
5725 return_type = DECL_RESULT (current_function_decl);
5726 return_mode = DECL_MODE (return_type);
5728 name = ACONCAT (("__mips16_ret_",
5729 mips16_call_stub_mode_suffix (return_mode),
5730 NULL));
5731 fn = mips16_stub_function (name);
5733 /* The function takes arguments in $2 (and possibly $3), so calls
5734 to it cannot be lazily bound. */
5735 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
5737 /* Model the call as something that takes the GPR return value as
5738 argument and returns an "updated" value. */
5739 retval = gen_rtx_REG (return_mode, GP_RETURN);
5740 insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
5741 const0_rtx, NULL_RTX, false);
5742 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
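/* For instance (illustrative): if the current MIPS16 function returns
   a double, the helper called here is `__mips16_ret_df'; it takes the
   soft-float return value in $2 (and possibly $3) and copies it to the
   corresponding hard-float return location.  */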
5745 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
5746 RETVAL is the location of the return value, or null if this is
5747 a "call" rather than a "call_value". ARGS_SIZE is the size of the
5748 arguments and FP_CODE is the code built by mips_function_arg;
5749 see the comment above CUMULATIVE_ARGS for details.
5751 There are three alternatives:
5753 - If a stub was needed, emit the call and return the call insn itself.
5755 - If we can avoid using a stub by redirecting the call, set *FN_PTR
5756 to the new target and return null.
5758 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
5759 unmodified.
5761 A stub is needed for calls to functions that, in normal mode,
5762 receive arguments in FPRs or return values in FPRs. The stub
5763 copies the arguments from their soft-float positions to their
5764 hard-float positions, calls the real function, then copies the
5765 return value from its hard-float position to its soft-float
5766 position.
5768 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
5769 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
5770 automatically redirects the JAL to the stub, otherwise the JAL
5771 continues to call FN directly. */
5773 static rtx
5774 mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
5776 const char *fnname;
5777 bool fp_ret_p;
5778 struct mips16_stub *l;
5779 rtx insn, fn;
5781 /* We don't need to do anything if we aren't in MIPS16 mode, or if
5782 we were invoked with the -msoft-float option. */
5783 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5784 return NULL_RTX;
5786 /* Figure out whether the value might come back in a floating-point
5787 register. */
5788 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
5790 /* We don't need to do anything if there were no floating-point
5791 arguments and the value will not be returned in a floating-point
5792 register. */
5793 if (fp_code == 0 && !fp_ret_p)
5794 return NULL_RTX;
5796 /* We don't need to do anything if this is a call to a special
5797 MIPS16 support function. */
5798 fn = *fn_ptr;
5799 if (mips16_stub_function_p (fn))
5800 return NULL_RTX;
5802 /* This code will only work for the o32 and o64 ABIs. The other ABIs
5803 require more sophisticated support. */
5804 gcc_assert (TARGET_OLDABI);
5806 /* If we're calling via a function pointer, use one of the magic
5807 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
5808 Each stub expects the function address to arrive in register $2. */
5809 if (GET_CODE (fn) != SYMBOL_REF
5810 || !call_insn_operand (fn, VOIDmode))
5812 char buf[30];
5813 rtx stub_fn, insn, addr;
5814 bool lazy_p;
5816 /* If this is a locally-defined and locally-binding function,
5817 avoid the stub by calling the local alias directly. */
5818 if (mips16_local_function_p (fn))
5820 *fn_ptr = mips16_local_alias (fn);
5821 return NULL_RTX;
5824 /* Create a SYMBOL_REF for the libgcc.a function. */
5825 if (fp_ret_p)
5826 sprintf (buf, "__mips16_call_stub_%s_%d",
5827 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5828 fp_code);
5829 else
5830 sprintf (buf, "__mips16_call_stub_%d", fp_code);
5831 stub_fn = mips16_stub_function (buf);
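/* For example (illustrative), an indirect call whose FP_CODE is 2 (a
   single double argument) and which returns a double uses
   `__mips16_call_stub_df_2'; the same call with no floating-point
   return value uses `__mips16_call_stub_2'.  */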
5833 /* The function uses $2 as an argument, so calls to it
5834 cannot be lazily bound. */
5835 SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
5837 /* Load the target function into $2. */
5838 addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
5839 lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
5841 /* Emit the call. */
5842 insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
5843 args_size, NULL_RTX, lazy_p);
5845 /* Tell GCC that this call does indeed use the value of $2. */
5846 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
5848 /* If we are handling a floating-point return value, we need to
5849 save $18 in the function prologue. Putting a note on the
5850 call will mean that df_regs_ever_live_p ($18) will be true if the
5851 call is not eliminated, and we can check that in the prologue
5852 code. */
5853 if (fp_ret_p)
5854 CALL_INSN_FUNCTION_USAGE (insn) =
5855 gen_rtx_EXPR_LIST (VOIDmode,
5856 gen_rtx_CLOBBER (VOIDmode,
5857 gen_rtx_REG (word_mode, 18)),
5858 CALL_INSN_FUNCTION_USAGE (insn));
5860 return insn;
5863 /* We know the function we are going to call. If we have already
5864 built a stub, we don't need to do anything further. */
5865 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5866 for (l = mips16_stubs; l != NULL; l = l->next)
5867 if (strcmp (l->name, fnname) == 0)
5868 break;
5870 if (l == NULL)
5872 const char *separator;
5873 char *secname, *stubname;
5874 tree stubid, stubdecl;
5875 unsigned int f;
5877 /* If the function does not return in FPRs, the special stub
5878 section is named
5879 .mips16.call.FNNAME
5881 If the function does return in FPRs, the stub section is named
5882 .mips16.call.fp.FNNAME
5884 Build a decl for the stub. */
5885 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
5886 fnname, NULL));
5887 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
5888 fnname, NULL));
5889 stubid = get_identifier (stubname);
5890 stubdecl = build_decl (FUNCTION_DECL, stubid,
5891 build_function_type (void_type_node, NULL_TREE));
5892 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5893 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE,
5894 void_type_node);
5896 /* Output a comment. */
5897 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5898 (fp_ret_p
5899 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5900 : ""),
5901 fnname);
5902 separator = "";
5903 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5905 fprintf (asm_out_file, "%s%s", separator,
5906 (f & 3) == 1 ? "float" : "double");
5907 separator = ", ";
5909 fprintf (asm_out_file, ")\n");
5911 /* Start the function definition. */
5912 assemble_start_function (stubdecl, stubname);
5913 mips_start_function_definition (stubname, false);
5915 if (!fp_ret_p)
5917 /* Load the address of the MIPS16 function into $25. Do this
5918 first so that targets with coprocessor interlocks can use
5919 an MFC1 to fill the delay slot. */
5920 if (TARGET_EXPLICIT_RELOCS)
5922 output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
5923 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
5925 else
5926 output_asm_insn ("la\t%^,%0", &fn);
5929 /* Move the arguments from general registers to floating-point
5930 registers. */
5931 mips_output_args_xfer (fp_code, 't');
5933 if (!fp_ret_p)
5935 /* Jump to the previously-loaded address. */
5936 output_asm_insn ("jr\t%^", NULL);
5938 else
5940 /* Save the return address in $18 and call the non-MIPS16 function.
5941 The stub's caller knows that $18 might be clobbered, even though
5942 $18 is usually a call-saved register. */
5943 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5944 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5945 output_asm_insn (MIPS_CALL ("jal", &fn, 0), &fn);
5947 /* Move the result from floating-point registers to
5948 general registers. */
5949 switch (GET_MODE (retval))
5951 case SCmode:
5952 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5953 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5954 /* Fall through. */
5955 case SFmode:
5956 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5957 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5959 /* On 64-bit targets, complex floats are returned in
5960 a single GPR, such that "sd" on a suitably-aligned
5961 target would store the value correctly. */
5962 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5963 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5964 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5965 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5966 reg_names[GP_RETURN],
5967 reg_names[GP_RETURN],
5968 reg_names[GP_RETURN + 1]);
5970 break;
5972 case DCmode:
5973 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5974 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5975 /* Fall through. */
5976 case DFmode:
5977 case V2SFmode:
5978 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5979 break;
5981 default:
5982 gcc_unreachable ();
5984 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
5987 #ifdef ASM_DECLARE_FUNCTION_SIZE
5988 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5989 #endif
5991 mips_end_function_definition (stubname);
5993 /* Record this stub. */
5994 l = XNEW (struct mips16_stub);
5995 l->name = xstrdup (fnname);
5996 l->fp_ret_p = fp_ret_p;
5997 l->next = mips16_stubs;
5998 mips16_stubs = l;
6001 /* If we expect a floating-point return value, but we've built a
6002 stub which does not expect one, then we're in trouble. We can't
6003 use the existing stub, because it won't handle the floating-point
6004 value. We can't build a new stub, because the linker won't know
6005 which stub to use for the various calls in this object file.
6006 Fortunately, this case is illegal, since it means that a function
6007 was declared in two different ways in a single compilation. */
6008 if (fp_ret_p && !l->fp_ret_p)
6009 error ("cannot handle inconsistent calls to %qs", fnname);
6011 if (retval == NULL_RTX)
6012 insn = gen_call_internal_direct (fn, args_size);
6013 else
6014 insn = gen_call_value_internal_direct (retval, fn, args_size);
6015 insn = mips_emit_call_insn (insn, fn, fn, false);
6017 /* If we are calling a stub which handles a floating-point return
6018 value, we need to arrange to save $18 in the prologue. We do this
6019 by marking the function call as using the register. The prologue
6020 will later see that it is used, and emit code to save it. */
6021 if (fp_ret_p)
6022 CALL_INSN_FUNCTION_USAGE (insn) =
6023 gen_rtx_EXPR_LIST (VOIDmode,
6024 gen_rtx_CLOBBER (VOIDmode,
6025 gen_rtx_REG (word_mode, 18)),
6026 CALL_INSN_FUNCTION_USAGE (insn));
6028 return insn;
6031 /* Expand a call of type TYPE. RESULT is where the result will go (null
6032 for "call"s and "sibcall"s), ADDR is the address of the function,
6033 ARGS_SIZE is the size of the arguments and AUX is the value passed
6034 to us by mips_function_arg. LAZY_P is true if this call already
6035 involves a lazily-bound function address (such as when calling
6036 functions through a MIPS16 hard-float stub).
6038 Return the call itself. */
6040 rtx
6041 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6042 rtx args_size, rtx aux, bool lazy_p)
6044 rtx orig_addr, pattern, insn;
6045 int fp_code;
6047 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6048 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6049 if (insn)
6051 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6052 return insn;
6055 orig_addr = addr;
6056 if (!call_insn_operand (addr, VOIDmode))
6058 if (type == MIPS_CALL_EPILOGUE)
6059 addr = MIPS_EPILOGUE_TEMP (Pmode);
6060 else
6061 addr = gen_reg_rtx (Pmode);
6062 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6065 if (result == 0)
6067 rtx (*fn) (rtx, rtx);
6069 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6070 fn = gen_call_split;
6071 else if (type == MIPS_CALL_SIBCALL)
6072 fn = gen_sibcall_internal;
6073 else
6074 fn = gen_call_internal;
6076 pattern = fn (addr, args_size);
6078 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6080 /* Handle return values created by mips_return_fpr_pair. */
6081 rtx (*fn) (rtx, rtx, rtx, rtx);
6082 rtx reg1, reg2;
6084 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6085 fn = gen_call_value_multiple_split;
6086 else if (type == MIPS_CALL_SIBCALL)
6087 fn = gen_sibcall_value_multiple_internal;
6088 else
6089 fn = gen_call_value_multiple_internal;
6091 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6092 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6093 pattern = fn (reg1, addr, args_size, reg2);
6095 else
6097 rtx (*fn) (rtx, rtx, rtx);
6099 if (type == MIPS_CALL_EPILOGUE && TARGET_SPLIT_CALLS)
6100 fn = gen_call_value_split;
6101 else if (type == MIPS_CALL_SIBCALL)
6102 fn = gen_sibcall_value_internal;
6103 else
6104 fn = gen_call_value_internal;
6106 /* Handle return values created by mips_return_fpr_single. */
6107 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6108 result = XEXP (XVECEXP (result, 0, 0), 0);
6109 pattern = fn (result, addr, args_size);
6112 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6115 /* Split call instruction INSN into a $gp-clobbering call and
6116 (where necessary) an instruction to restore $gp from its save slot.
6117 CALL_PATTERN is the pattern of the new call. */
6119 void
6120 mips_split_call (rtx insn, rtx call_pattern)
6122 rtx new_insn;
6124 new_insn = emit_call_insn (call_pattern);
6125 CALL_INSN_FUNCTION_USAGE (new_insn)
6126 = copy_rtx (CALL_INSN_FUNCTION_USAGE (insn));
6127 if (!find_reg_note (insn, REG_NORETURN, 0))
6128 /* Pick a temporary register that is suitable for both MIPS16 and
6129 non-MIPS16 code. $4 and $5 are used for returning complex double
6130 values in soft-float code, so $6 is the first suitable candidate. */
6131 mips_restore_gp (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6134 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
6136 static bool
6137 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6139 if (!TARGET_SIBCALLS)
6140 return false;
6142 /* We can't do a sibcall if the called function is a MIPS16 function
6143 because there is no direct "jx" instruction equivalent to "jalx" to
6144 switch the ISA mode. We only care about cases where the sibling
6145 and normal calls would both be direct. */
6146 if (mips_use_mips16_mode_p (decl)
6147 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6148 return false;
6150 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6151 functions could be MIPS16 ones unless an attribute explicitly tells
6152 us otherwise. */
6153 if (TARGET_INTERLINK_MIPS16
6154 && decl
6155 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6156 && !mips_nomips16_decl_p (decl)
6157 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6158 return false;
6160 /* Otherwise OK. */
6161 return true;
6164 /* Emit code to move general operand SRC into condition-code
6165 register DEST given that SCRATCH is a scratch TFmode FPR.
6166 The sequence is:
6168 FP1 = SRC
6169 FP2 = 0.0f
6170 DEST = FP2 < FP1
6172 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
6174 void
6175 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
6177 rtx fp1, fp2;
6179 /* Change the source to SFmode. */
6180 if (MEM_P (src))
6181 src = adjust_address (src, SFmode, 0);
6182 else if (REG_P (src) || GET_CODE (src) == SUBREG)
6183 src = gen_rtx_REG (SFmode, true_regnum (src));
6185 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
6186 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
6188 mips_emit_move (copy_rtx (fp1), src);
6189 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
6190 emit_insn (gen_slt_sf (dest, fp2, fp1));
6193 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6194 Assume that the areas do not overlap. */
6196 static void
6197 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
6199 HOST_WIDE_INT offset, delta;
6200 unsigned HOST_WIDE_INT bits;
6201 int i;
6202 enum machine_mode mode;
6203 rtx *regs;
6205 /* Work out how many bits to move at a time. If both operands have
6206 half-word alignment, it is usually better to move in half words.
6207 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
6208 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
6209 Otherwise move word-sized chunks. */
6210 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
6211 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
6212 bits = BITS_PER_WORD / 2;
6213 else
6214 bits = BITS_PER_WORD;
6216 mode = mode_for_size (bits, MODE_INT, 0);
6217 delta = bits / BITS_PER_UNIT;
6219 /* Allocate a buffer for the temporary registers. */
6220 regs = XALLOCAVEC (rtx, length / delta);
6222 /* Load as many BITS-sized chunks as possible. Use a normal load if
6223 the source has enough alignment, otherwise use left/right pairs. */
6224 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6226 regs[i] = gen_reg_rtx (mode);
6227 if (MEM_ALIGN (src) >= bits)
6228 mips_emit_move (regs[i], adjust_address (src, mode, offset));
6229 else
6231 rtx part = adjust_address (src, BLKmode, offset);
6232 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
6233 gcc_unreachable ();
6237 /* Copy the chunks to the destination. */
6238 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6239 if (MEM_ALIGN (dest) >= bits)
6240 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
6241 else
6243 rtx part = adjust_address (dest, BLKmode, offset);
6244 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
6245 gcc_unreachable ();
6248 /* Mop up any left-over bytes. */
6249 if (offset < length)
6251 src = adjust_address (src, BLKmode, offset);
6252 dest = adjust_address (dest, BLKmode, offset);
6253 move_by_pieces (dest, src, length - offset,
6254 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
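/* Worked example (illustrative): copying 9 bytes between word-aligned
   operands on a 32-bit target moves two SImode chunks with normal
   loads and stores and hands the final byte to move_by_pieces.  */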
6258 /* Helper function for doing a loop-based block operation on memory
6259 reference MEM. Each iteration of the loop will operate on LENGTH
6260 bytes of MEM.
6262 Create a new base register for use within the loop and point it to
6263 the start of MEM. Create a new memory reference that uses this
6264 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
6266 static void
6267 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
6268 rtx *loop_reg, rtx *loop_mem)
6270 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
6272 /* Although the new mem does not refer to a known location,
6273 it does keep up to LENGTH bytes of alignment. */
6274 *loop_mem = change_address (mem, BLKmode, *loop_reg);
6275 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
6278 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6279 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
6280 the memory regions do not overlap. */
6282 static void
6283 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
6284 HOST_WIDE_INT bytes_per_iter)
6286 rtx label, src_reg, dest_reg, final_src;
6287 HOST_WIDE_INT leftover;
6289 leftover = length % bytes_per_iter;
6290 length -= leftover;
6292 /* Create registers and memory references for use within the loop. */
6293 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
6294 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
6296 /* Calculate the value that SRC_REG should have after the last iteration
6297 of the loop. */
6298 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
6299 0, 0, OPTAB_WIDEN);
6301 /* Emit the start of the loop. */
6302 label = gen_label_rtx ();
6303 emit_label (label);
6305 /* Emit the loop body. */
6306 mips_block_move_straight (dest, src, bytes_per_iter);
6308 /* Move on to the next block. */
6309 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
6310 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
6312 /* Emit the loop condition. */
6313 if (Pmode == DImode)
6314 emit_insn (gen_cmpdi (src_reg, final_src));
6315 else
6316 emit_insn (gen_cmpsi (src_reg, final_src));
6317 emit_jump_insn (gen_bne (label));
6319 /* Mop up any left-over bytes. */
6320 if (leftover)
6321 mips_block_move_straight (dest, src, leftover);
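/* Worked example with illustrative values: LENGTH == 70 and
   BYTES_PER_ITER == 16 give LEFTOVER == 6, so the loop copies 64 bytes
   in 16-byte blocks and the final mips_block_move_straight call copies
   the remaining 6 bytes.  */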
6324 /* Expand a movmemsi instruction, which copies LENGTH bytes from
6325 memory reference SRC to memory reference DEST. */
6327 bool
6328 mips_expand_block_move (rtx dest, rtx src, rtx length)
6330 if (GET_CODE (length) == CONST_INT)
6332 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6334 mips_block_move_straight (dest, src, INTVAL (length));
6335 return true;
6337 else if (optimize)
6339 mips_block_move_loop (dest, src, INTVAL (length),
6340 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
6341 return true;
6344 return false;
6347 /* Expand a loop of synci insns for the address range [BEGIN, END). */
6349 void
6350 mips_expand_synci_loop (rtx begin, rtx end)
6352 rtx inc, label, cmp, cmp_result;
6354 /* Load INC with the cache line size (rdhwr INC,$1). */
6355 inc = gen_reg_rtx (SImode);
6356 emit_insn (gen_rdhwr (inc, const1_rtx));
6358 /* Loop back to here. */
6359 label = gen_label_rtx ();
6360 emit_label (label);
6362 emit_insn (gen_synci (begin));
6364 cmp = mips_force_binary (Pmode, GTU, begin, end);
6366 mips_emit_binary (PLUS, begin, begin, inc);
6368 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
6369 emit_jump_insn (gen_condjump (cmp_result, label));
6372 /* Expand a QI or HI mode atomic memory operation.
6374 GENERATOR contains a pointer to the gen_* function that generates
6375 the SI mode underlying atomic operation using masks that we
6376 calculate.
6378 RESULT is the return register for the operation. Its value is NULL
6379 if unused.
6381 MEM is the location of the atomic access.
6383 OLDVAL is the first operand for the operation.
6385 NEWVAL is the optional second operand for the operation. Its value
6386 is NULL if unused. */
6388 void
6389 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6390 rtx result, rtx mem, rtx oldval, rtx newval)
6392 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6393 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6394 rtx res = NULL;
6395 enum machine_mode mode;
6397 mode = GET_MODE (mem);
6399 /* Compute the address of the containing SImode value. */
6400 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6401 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6402 force_reg (Pmode, GEN_INT (-4)));
6404 /* Create a memory reference for it. */
6405 memsi = gen_rtx_MEM (SImode, memsi_addr);
6406 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6407 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6409 /* Work out the byte offset of the QImode or HImode value,
6410 counting from the least significant byte. */
6411 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6412 if (TARGET_BIG_ENDIAN)
6413 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6415 /* Multiply by eight to convert the shift value from bytes to bits. */
6416 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6418 /* Make the final shift an SImode value, so that it can be used in
6419 SImode operations. */
6420 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6422 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6423 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6424 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6425 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6427 /* Compute the equivalent exclusive mask. */
6428 inverted_mask = gen_reg_rtx (SImode);
6429 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6430 gen_rtx_NOT (SImode, mask)));
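/* Illustrative example: for a HImode access whose address is 2 mod 4
   on a little-endian target, SHIFT is 16 bits, MASK is
   0xffff << 16 == 0xffff0000 and INVERTED_MASK is 0x0000ffff.  */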
6432 /* Shift the old value into place. */
6433 if (oldval != const0_rtx)
6435 oldval = convert_modes (SImode, mode, oldval, true);
6436 oldval = force_reg (SImode, oldval);
6437 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6440 /* Do the same for the new value. */
6441 if (newval && newval != const0_rtx)
6443 newval = convert_modes (SImode, mode, newval, true);
6444 newval = force_reg (SImode, newval);
6445 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6448 /* Do the SImode atomic access. */
6449 if (result)
6450 res = gen_reg_rtx (SImode);
6451 if (newval)
6452 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6453 else if (result)
6454 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6455 else
6456 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6458 emit_insn (si_op);
6460 if (result)
6462 /* Shift and convert the result. */
6463 mips_emit_binary (AND, res, res, mask);
6464 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6465 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
6469 /* Return true if it is possible to use left/right accesses for a
6470 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6471 returning true, update *OP, *LEFT and *RIGHT as follows:
6473 *OP is a BLKmode reference to the whole field.
6475 *LEFT is a QImode reference to the first byte if big endian or
6476 the last byte if little endian. This address can be used in the
6477 left-side instructions (LWL, SWL, LDL, SDL).
6479 *RIGHT is a QImode reference to the opposite end of the field and
6480 can be used in the partnering right-side instructions (LWR, SWR, LDR, SDR). */
6482 static bool
6483 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6484 rtx *left, rtx *right)
6486 rtx first, last;
6488 /* Check that the operand really is a MEM. Not all the extv and
6489 extzv predicates are checked. */
6490 if (!MEM_P (*op))
6491 return false;
6493 /* Check that the size is valid. */
6494 if (width != 32 && (!TARGET_64BIT || width != 64))
6495 return false;
6497 /* We can only access byte-aligned values. Since we are always passed
6498 a reference to the first byte of the field, it is not necessary to
6499 do anything with BITPOS after this check. */
6500 if (bitpos % BITS_PER_UNIT != 0)
6501 return false;
6503 /* Reject aligned bitfields: we want to use a normal load or store
6504 instead of a left/right pair. */
6505 if (MEM_ALIGN (*op) >= width)
6506 return false;
6508 /* Adjust *OP to refer to the whole field. This also has the effect
6509 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6510 *op = adjust_address (*op, BLKmode, 0);
6511 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6513 /* Get references to both ends of the field. We deliberately don't
6514 use the original QImode *OP for FIRST since the new BLKmode one
6515 might have a simpler address. */
6516 first = adjust_address (*op, QImode, 0);
6517 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6519 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6520 correspond to the MSB and RIGHT to the LSB. */
6521 if (TARGET_BIG_ENDIAN)
6522 *left = first, *right = last;
6523 else
6524 *left = last, *right = first;
6526 return true;
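/* Illustrative example: for an unaligned 32-bit field on a big-endian
   target, *LEFT refers to byte 0 of the field (for LWL/SWL) and *RIGHT
   to byte 3 (for LWR/SWR); on a little-endian target the two ends are
   swapped.  */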
6529 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6530 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6531 the operation is the equivalent of:
6533 (set DEST (*_extract SRC WIDTH BITPOS))
6535 Return true on success. */
6537 bool
6538 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
6539 HOST_WIDE_INT bitpos)
6541 rtx left, right, temp;
6543 /* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
6544 be a paradoxical word_mode subreg. This is the only case in which
6545 we allow the destination to be larger than the source. */
6546 if (GET_CODE (dest) == SUBREG
6547 && GET_MODE (dest) == DImode
6548 && GET_MODE (SUBREG_REG (dest)) == SImode)
6549 dest = SUBREG_REG (dest);
6551 /* After the above adjustment, the destination must be the same
6552 width as the source. */
6553 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6554 return false;
6556 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6557 return false;
6559 temp = gen_reg_rtx (GET_MODE (dest));
6560 if (GET_MODE (dest) == DImode)
6562 emit_insn (gen_mov_ldl (temp, src, left));
6563 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6565 else
6567 emit_insn (gen_mov_lwl (temp, src, left));
6568 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6570 return true;
6573 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
6574 BITPOS and SRC are the operands passed to the expander; the operation
6575 is the equivalent of:
6577 (set (zero_extract DEST WIDTH BITPOS) SRC)
6579 Return true on success. */
6581 bool
6582 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
6583 HOST_WIDE_INT bitpos)
6585 rtx left, right;
6586 enum machine_mode mode;
6588 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6589 return false;
6591 mode = mode_for_size (width, MODE_INT, 0);
6592 src = gen_lowpart (mode, src);
6593 if (mode == DImode)
6595 emit_insn (gen_mov_sdl (dest, src, left));
6596 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6598 else
6600 emit_insn (gen_mov_swl (dest, src, left));
6601 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6603 return true;
6606 /* Return true if X is a MEM with the same size as MODE. */
6608 bool
6609 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6611 rtx size;
6613 if (!MEM_P (x))
6614 return false;
6616 size = MEM_SIZE (x);
6617 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6620 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
6621 source of an "ext" instruction or the destination of an "ins"
6622 instruction. OP must be a register operand and the following
6623 conditions must hold:
6625 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
6626 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6627 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
6629 Also reject lengths equal to a word as they are better handled
6630 by the move patterns. */
6632 bool
6633 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
6635 if (!ISA_HAS_EXT_INS
6636 || !register_operand (op, VOIDmode)
6637 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6638 return false;
6640 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
6641 return false;
6643 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
6644 return false;
6646 return true;
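/* For example (illustrative), with a 32-bit OP: WIDTH 8 at BITPOS 24
   is accepted, whereas WIDTH 32 is rejected because word-sized
   extracts and insertions are better handled by the move patterns.  */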
6649 /* Return true if -msplit-addresses is selected and should be honored.
6651 -msplit-addresses is a half-way house between explicit relocations
6652 and the traditional assembler macros. It can split absolute 32-bit
6653 symbolic constants into a high/lo_sum pair but uses macros for other
6654 sorts of access.
6656 Like explicit relocation support for REL targets, it relies
6657 on GNU extensions in the assembler and the linker.
6659 Although this code should work for -O0, it has traditionally
6660 been treated as an optimization. */
6662 static bool
6663 mips_split_addresses_p (void)
6665 return (TARGET_SPLIT_ADDRESSES
6666 && optimize
6667 && !TARGET_MIPS16
6668 && !flag_pic
6669 && !ABI_HAS_64BIT_SYMBOLS);
6672 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
6674 static void
6675 mips_init_relocs (void)
6677 memset (mips_split_p, '\0', sizeof (mips_split_p));
6678 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
6679 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6680 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6682 if (ABI_HAS_64BIT_SYMBOLS)
6684 if (TARGET_EXPLICIT_RELOCS)
6686 mips_split_p[SYMBOL_64_HIGH] = true;
6687 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6688 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6690 mips_split_p[SYMBOL_64_MID] = true;
6691 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6692 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6694 mips_split_p[SYMBOL_64_LOW] = true;
6695 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6696 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6698 mips_split_p[SYMBOL_ABSOLUTE] = true;
6699 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6702 else
6704 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
6706 mips_split_p[SYMBOL_ABSOLUTE] = true;
6707 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6708 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6710 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6714 if (TARGET_MIPS16)
6716 /* The high part is provided by a pseudo copy of $gp. */
6717 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6718 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6720 else if (TARGET_EXPLICIT_RELOCS)
6721 /* Small data constants are kept whole until after reload,
6722 then lowered by mips_rewrite_small_data. */
6723 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6725 if (TARGET_EXPLICIT_RELOCS)
6727 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6728 if (TARGET_NEWABI)
6730 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6731 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6733 else
6735 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6736 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6738 if (TARGET_MIPS16)
6739 /* Expose the use of $28 as soon as possible. */
6740 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
6742 if (TARGET_XGOT)
6744 /* The HIGH and LO_SUM are matched by special .md patterns. */
6745 mips_split_p[SYMBOL_GOT_DISP] = true;
6747 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6748 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6749 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6751 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6752 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6753 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6755 else
6757 if (TARGET_NEWABI)
6758 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6759 else
6760 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6761 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6762 if (TARGET_MIPS16)
6763 /* Expose the use of $28 as soon as possible. */
6764 mips_split_p[SYMBOL_GOT_DISP] = true;
6768 if (TARGET_NEWABI)
6770 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6771 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6772 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6775 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6776 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6778 mips_split_p[SYMBOL_DTPREL] = true;
6779 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6780 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6782 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6784 mips_split_p[SYMBOL_TPREL] = true;
6785 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6786 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6788 mips_lo_relocs[SYMBOL_HALF] = "%half(";
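/* As an illustration: with TARGET_EXPLICIT_RELOCS and 32-bit symbols,
   SYMBOL_ABSOLUTE addresses are split, so an absolute address is
   materialized by a `lui' printed with "%hi(" followed by an `addiu'
   or LO_SUM access printed with "%lo(".  */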
6791 /* If OP is an UNSPEC address, return the address to which it refers,
6792 otherwise return OP itself. */
6794 static rtx
6795 mips_strip_unspec_address (rtx op)
6797 rtx base, offset;
6799 split_const (op, &base, &offset);
6800 if (UNSPEC_ADDRESS_P (base))
6801 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6802 return op;
6805 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6806 in context CONTEXT. RELOCS is the array of relocations to use. */
6808 static void
6809 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6810 const char **relocs)
6812 enum mips_symbol_type symbol_type;
6813 const char *p;
6815 symbol_type = mips_classify_symbolic_expression (op, context);
6816 gcc_assert (relocs[symbol_type]);
6818 fputs (relocs[symbol_type], file);
6819 output_addr_const (file, mips_strip_unspec_address (op));
6820 for (p = relocs[symbol_type]; *p != 0; p++)
6821 if (*p == '(')
6822 fputc (')', file);
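/* For example (illustrative): with the relocation string "%lo(" and an
   operand SYM + 4, this prints "%lo(sym+4)"; a string such as
   "%hi(%neg(%gp_rel(" is closed with three ')' characters, one for
   each '('.  */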
6825 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
6826 The punctuation characters are:
6828 '(' Start a nested ".set noreorder" block.
6829 ')' End a nested ".set noreorder" block.
6830 '[' Start a nested ".set noat" block.
6831 ']' End a nested ".set noat" block.
6832 '<' Start a nested ".set nomacro" block.
6833 '>' End a nested ".set nomacro" block.
6834 '*' Behave like %(%< if generating a delayed-branch sequence.
6835 '#' Print a nop if in a ".set noreorder" block.
6836 '/' Like '#', but do nothing within a delayed-branch sequence.
6837 '?' Print "l" if mips_branch_likely is true
6838 '.' Print the name of the register with a hard-wired zero (zero or $0).
6839 '@' Print the name of the assembler temporary register (at or $1).
6840 '^' Print the name of the pic call-through register (t9 or $25).
6841 '+' Print the name of the gp register (usually gp or $28).
6842 '$' Print the name of the stack pointer register (sp or $29).
6843 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6844 '-' Print ".set pop" under the same conditions as for '|'.
6846 See also mips_init_print_operand_punct. */
6848 static void
6849 mips_print_operand_punctuation (FILE *file, int ch)
6851 switch (ch)
6853 case '(':
6854 if (set_noreorder++ == 0)
6855 fputs (".set\tnoreorder\n\t", file);
6856 break;
6858 case ')':
6859 gcc_assert (set_noreorder > 0);
6860 if (--set_noreorder == 0)
6861 fputs ("\n\t.set\treorder", file);
6862 break;
6864 case '[':
6865 if (set_noat++ == 0)
6866 fputs (".set\tnoat\n\t", file);
6867 break;
6869 case ']':
6870 gcc_assert (set_noat > 0);
6871 if (--set_noat == 0)
6872 fputs ("\n\t.set\tat", file);
6873 break;
6875 case '<':
6876 if (set_nomacro++ == 0)
6877 fputs (".set\tnomacro\n\t", file);
6878 break;
6880 case '>':
6881 gcc_assert (set_nomacro > 0);
6882 if (--set_nomacro == 0)
6883 fputs ("\n\t.set\tmacro", file);
6884 break;
6886 case '*':
6887 if (final_sequence != 0)
6889 mips_print_operand_punctuation (file, '(');
6890 mips_print_operand_punctuation (file, '<');
6892 break;
6894 case '#':
6895 if (set_noreorder != 0)
6896 fputs ("\n\tnop", file);
6897 break;
6899 case '/':
6900 /* Print an extra newline so that the delayed insn is separated
6901 from the following ones. This looks neater and is consistent
6902 with non-nop delayed sequences. */
6903 if (set_noreorder != 0 && final_sequence == 0)
6904 fputs ("\n\tnop\n", file);
6905 break;
6907 case '?':
6908 if (mips_branch_likely)
6909 putc ('l', file);
6910 break;
6912 case '.':
6913 fputs (reg_names[GP_REG_FIRST + 0], file);
6914 break;
6916 case '@':
6917 fputs (reg_names[GP_REG_FIRST + 1], file);
6918 break;
6920 case '^':
6921 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6922 break;
6924 case '+':
6925 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6926 break;
6928 case '$':
6929 fputs (reg_names[STACK_POINTER_REGNUM], file);
6930 break;
6932 case '|':
6933 if (!ISA_HAS_LL_SC)
6934 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6935 break;
6937 case '-':
6938 if (!ISA_HAS_LL_SC)
6939 fputs ("\n\t.set\tpop", file);
6940 break;
6942 default:
6943 gcc_unreachable ();
6944 break;
6948 /* Initialize mips_print_operand_punct. */
6950 static void
6951 mips_init_print_operand_punct (void)
6953 const char *p;
6955 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6956 mips_print_operand_punct[(unsigned char) *p] = true;
6959 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6960 associated with condition CODE. Print the condition part of the
6961 opcode to FILE. */
6963 static void
6964 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6966 switch (code)
6968 case EQ:
6969 case NE:
6970 case GT:
6971 case GE:
6972 case LT:
6973 case LE:
6974 case GTU:
6975 case GEU:
6976 case LTU:
6977 case LEU:
6978 /* Conveniently, the MIPS names for these conditions are the same
6979 as their RTL equivalents. */
6980 fputs (GET_RTX_NAME (code), file);
6981 break;
6983 default:
6984 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6985 break;
6989 /* Likewise floating-point branches. */
6991 static void
6992 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6994 switch (code)
6996 case EQ:
6997 fputs ("c1f", file);
6998 break;
7000 case NE:
7001 fputs ("c1t", file);
7002 break;
7004 default:
7005 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7006 break;
7010 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
7012 'X' Print CONST_INT OP in hexadecimal format.
7013 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
7014 'd' Print CONST_INT OP in decimal.
7015 'h' Print the high-part relocation associated with OP, after stripping
7016 any outermost HIGH.
7017 'R' Print the low-part relocation associated with OP.
7018 'C' Print the integer branch condition for comparison OP.
7019 'N' Print the inverse of the integer branch condition for comparison OP.
7020 'F' Print the FPU branch condition for comparison OP.
7021 'W' Print the inverse of the FPU branch condition for comparison OP.
7022 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7023 'z' for (eq:?I ...), 'n' for (ne:?I ...).
7024 't' Like 'T', but with the EQ/NE cases reversed
7025 'Y' Print mips_fp_conditions[INTVAL (OP)]
7026 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7027 'q' Print a DSP accumulator register.
7028 'D' Print the second part of a double-word register or memory operand.
7029 'L' Print the low-order register in a double-word register operand.
7030 'M' Print the high-order register in a double-word register operand.
7031 'z' Print $0 if OP is zero, otherwise print OP normally. */
7033 void
7034 mips_print_operand (FILE *file, rtx op, int letter)
7036 enum rtx_code code;
7038 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
7040 mips_print_operand_punctuation (file, letter);
7041 return;
7044 gcc_assert (op);
7045 code = GET_CODE (op);
7047 switch (letter)
7049 case 'X':
7050 if (GET_CODE (op) == CONST_INT)
7051 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
7052 else
7053 output_operand_lossage ("invalid use of '%%%c'", letter);
7054 break;
7056 case 'x':
7057 if (GET_CODE (op) == CONST_INT)
7058 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
7059 else
7060 output_operand_lossage ("invalid use of '%%%c'", letter);
7061 break;
7063 case 'd':
7064 if (GET_CODE (op) == CONST_INT)
7065 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
7066 else
7067 output_operand_lossage ("invalid use of '%%%c'", letter);
7068 break;
7070 case 'h':
7071 if (code == HIGH)
7072 op = XEXP (op, 0);
7073 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
7074 break;
7076 case 'R':
7077 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
7078 break;
7080 case 'C':
7081 mips_print_int_branch_condition (file, code, letter);
7082 break;
7084 case 'N':
7085 mips_print_int_branch_condition (file, reverse_condition (code), letter);
7086 break;
7088 case 'F':
7089 mips_print_float_branch_condition (file, code, letter);
7090 break;
7092 case 'W':
7093 mips_print_float_branch_condition (file, reverse_condition (code),
7094 letter);
7095 break;
7097 case 'T':
7098 case 't':
7100 int truth = (code == NE) == (letter == 'T');
7101 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
7103 break;
7105 case 'Y':
7106 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
7107 fputs (mips_fp_conditions[UINTVAL (op)], file);
7108 else
7109 output_operand_lossage ("'%%%c' is not a valid operand prefix",
7110 letter);
7111 break;
7113 case 'Z':
7114 if (ISA_HAS_8CC)
7116 mips_print_operand (file, op, 0);
7117 fputc (',', file);
7119 break;
7121 case 'q':
7122 if (code == REG && MD_REG_P (REGNO (op)))
7123 fprintf (file, "$ac0");
7124 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
7125 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
7126 else
7127 output_operand_lossage ("invalid use of '%%%c'", letter);
7128 break;
7130 default:
7131 switch (code)
7133 case REG:
7135 unsigned int regno = REGNO (op);
7136 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
7137 || (letter == 'L' && TARGET_BIG_ENDIAN)
7138 || letter == 'D')
7139 regno++;
7140 fprintf (file, "%s", reg_names[regno]);
7142 break;
7144 case MEM:
7145 if (letter == 'D')
7146 output_address (plus_constant (XEXP (op, 0), 4));
7147 else
7148 output_address (XEXP (op, 0));
7149 break;
7151 default:
7152 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
7153 fputs (reg_names[GP_REG_FIRST], file);
7154 else if (CONST_GP_P (op))
7155 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
7156 else
7157 output_addr_const (file, mips_strip_unspec_address (op));
7158 break;
7163 /* Output address operand X to FILE. */
7165 void
7166 mips_print_operand_address (FILE *file, rtx x)
7168 struct mips_address_info addr;
7170 if (mips_classify_address (&addr, x, word_mode, true))
7171 switch (addr.type)
7173 case ADDRESS_REG:
7174 mips_print_operand (file, addr.offset, 0);
7175 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7176 return;
7178 case ADDRESS_LO_SUM:
7179 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
7180 mips_lo_relocs);
7181 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7182 return;
7184 case ADDRESS_CONST_INT:
7185 output_addr_const (file, x);
7186 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
7187 return;
7189 case ADDRESS_SYMBOLIC:
7190 output_addr_const (file, mips_strip_unspec_address (x));
7191 return;
7193 gcc_unreachable ();
7196 /* Implement TARGET_ENCODE_SECTION_INFO. */
7198 static void
7199 mips_encode_section_info (tree decl, rtx rtl, int first)
7201 default_encode_section_info (decl, rtl, first);
7203 if (TREE_CODE (decl) == FUNCTION_DECL)
7205 rtx symbol = XEXP (rtl, 0);
7206 tree type = TREE_TYPE (decl);
7208 /* Encode whether the symbol is short or long. */
7209 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
7210 || mips_far_type_p (type))
7211 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
7215 /* Implement TARGET_SELECT_RTX_SECTION. */
7217 static section *
7218 mips_select_rtx_section (enum machine_mode mode, rtx x,
7219 unsigned HOST_WIDE_INT align)
7221 /* ??? Consider using mergeable small data sections. */
7222 if (mips_rtx_constant_in_small_data_p (mode))
7223 return get_named_section (NULL, ".sdata", 0);
7225 return default_elf_select_rtx_section (mode, x, align);
7228 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7230 The complication here is that, with the combination TARGET_ABICALLS
7231 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7232 absolute addresses, and should therefore not be included in the
7233 read-only part of a DSO. Handle such cases by selecting a normal
7234 data section instead of a read-only one. The logic apes that in
7235 default_function_rodata_section. */
7237 static section *
7238 mips_function_rodata_section (tree decl)
7240 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
7241 return default_function_rodata_section (decl);
7243 if (decl && DECL_SECTION_NAME (decl))
7245 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7246 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7248 char *rname = ASTRDUP (name);
7249 rname[14] = 'd';
7250 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7252 else if (flag_function_sections
7253 && flag_data_sections
7254 && strncmp (name, ".text.", 6) == 0)
7256 char *rname = ASTRDUP (name);
7257 memcpy (rname + 1, "data", 4);
7258 return get_section (rname, SECTION_WRITE, decl);
7261 return data_section;
7264 /* Implement TARGET_IN_SMALL_DATA_P. */
7266 static bool
7267 mips_in_small_data_p (const_tree decl)
7269 unsigned HOST_WIDE_INT size;
7271 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7272 return false;
7274 /* We don't yet generate small-data references for -mabicalls
7275 or VxWorks RTP code. See the related -G handling in
7276 mips_override_options. */
7277 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
7278 return false;
7280 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7282 const char *name;
7284 /* Reject anything that isn't in a known small-data section. */
7285 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7286 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7287 return false;
7289 /* If a symbol is defined externally, the assembler will use the
7290 usual -G rules when deciding how to implement macros. */
7291 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
7292 return true;
7294 else if (TARGET_EMBEDDED_DATA)
7296 /* Don't put constants into the small data section: we want them
7297 to be in ROM rather than RAM. */
7298 if (TREE_CODE (decl) != VAR_DECL)
7299 return false;
7301 if (TREE_READONLY (decl)
7302 && !TREE_SIDE_EFFECTS (decl)
7303 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7304 return false;
7307 /* Enforce -mlocal-sdata. */
7308 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
7309 return false;
7311 /* Enforce -mextern-sdata. */
7312 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
7314 if (DECL_EXTERNAL (decl))
7315 return false;
7316 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
7317 return false;
7320 /* We have traditionally not treated zero-sized objects as small data,
7321 so this is now effectively part of the ABI. */
7322 size = int_size_in_bytes (TREE_TYPE (decl));
7323 return size > 0 && size <= mips_small_data_threshold;
7326 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7327 anchors for small data: the GP register acts as an anchor in that
7328 case. We also don't want to use them for PC-relative accesses,
7329 where the PC acts as an anchor. */
7331 static bool
7332 mips_use_anchors_for_symbol_p (const_rtx symbol)
7334 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
7336 case SYMBOL_PC_RELATIVE:
7337 case SYMBOL_GP_RELATIVE:
7338 return false;
7340 default:
7341 return default_use_anchors_for_symbol_p (symbol);
7345 /* The MIPS debug format wants all automatic variables and arguments
7346 to be in terms of the virtual frame pointer (stack pointer before
7347 any adjustment in the function), while the MIPS 3.0 linker wants
7348 the frame pointer to be the stack pointer after the initial
7349 adjustment. So, we do the adjustment here. The arg pointer (which
7350 is eliminated) points to the virtual frame pointer, while the frame
7351 pointer (which may be eliminated) points to the stack pointer after
7352 the initial adjustments. */
7354 HOST_WIDE_INT
7355 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
7357 rtx offset2 = const0_rtx;
7358 rtx reg = eliminate_constant_term (addr, &offset2);
7360 if (offset == 0)
7361 offset = INTVAL (offset2);
7363 if (reg == stack_pointer_rtx
7364 || reg == frame_pointer_rtx
7365 || reg == hard_frame_pointer_rtx)
7367 offset -= cfun->machine->frame.total_size;
7368 if (reg == hard_frame_pointer_rtx)
7369 offset += cfun->machine->frame.hard_frame_pointer_offset;
7372 /* sdbout_parms does not want this to crash for unrecognized cases. */
7373 #if 0
7374 else if (reg != arg_pointer_rtx)
7375 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7376 addr);
7377 #endif
7379 return offset;
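/* Worked example (illustrative numbers only): if the frame's total_size
   is 32 and hard_frame_pointer_offset is 16, an address of the form
   $sp + 8 is reported as 8 - 32 = -24 from the virtual frame pointer,
   while an address based on the hard frame pointer, $fp + 8, becomes
   8 - 32 + 16 = -8.  */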
7382 /* Implement ASM_OUTPUT_EXTERNAL. */
7384 void
7385 mips_output_external (FILE *file, tree decl, const char *name)
7387 default_elf_asm_output_external (file, decl, name);
7389 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7390 set in order to avoid putting out names that are never really
7391 used. */
7392 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7394 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7396 /* When using assembler macros, emit .extern directives for
7397 all small-data externs so that the assembler knows how
7398 big they are.
7400 In most cases it would be safe (though pointless) to emit
7401 .externs for other symbols too. One exception is when an
7402 object is within the -G limit but declared by the user to
7403 be in a section other than .sbss or .sdata. */
7404 fputs ("\t.extern\t", file);
7405 assemble_name (file, name);
7406 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7407 int_size_in_bytes (TREE_TYPE (decl)));
7409 else if (TARGET_IRIX
7410 && mips_abi == ABI_32
7411 && TREE_CODE (decl) == FUNCTION_DECL)
7413 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
7414 `.globl name .text' directive for every used but
7415 undefined function. If we don't, the linker may perform
7416 an optimization (skipping over the insns that set $gp)
7417 when it is unsafe. */
7418 fputs ("\t.globl ", file);
7419 assemble_name (file, name);
7420 fputs (" .text\n", file);
7425 /* Implement ASM_OUTPUT_SOURCE_FILENAME. */
7427 void
7428 mips_output_filename (FILE *stream, const char *name)
7430 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7431 directives. */
7432 if (write_symbols == DWARF2_DEBUG)
7433 return;
7434 else if (mips_output_filename_first_time)
7436 mips_output_filename_first_time = 0;
7437 num_source_filenames += 1;
7438 current_function_file = name;
7439 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7440 output_quoted_string (stream, name);
7441 putc ('\n', stream);
7443 /* If we are emitting stabs, let dbxout.c handle this (except for
7444 the mips_output_filename_first_time case). */
7445 else if (write_symbols == DBX_DEBUG)
7446 return;
7447 else if (name != current_function_file
7448 && strcmp (name, current_function_file) != 0)
7450 num_source_filenames += 1;
7451 current_function_file = name;
7452 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7453 output_quoted_string (stream, name);
7454 putc ('\n', stream);
7458 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7460 static void ATTRIBUTE_UNUSED
7461 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7463 switch (size)
7465 case 4:
7466 fputs ("\t.dtprelword\t", file);
7467 break;
7469 case 8:
7470 fputs ("\t.dtpreldword\t", file);
7471 break;
7473 default:
7474 gcc_unreachable ();
7476 output_addr_const (file, x);
7477 fputs ("+0x8000", file);
7480 /* Implement TARGET_DWARF_REGISTER_SPAN. */
7482 static rtx
7483 mips_dwarf_register_span (rtx reg)
7485 rtx high, low;
7486 enum machine_mode mode;
7488 /* By default, GCC maps increasing register numbers to increasing
7489 memory locations, but paired FPRs are always little-endian,
7490 regardless of the prevailing endianness. */
7491 mode = GET_MODE (reg);
7492 if (FP_REG_P (REGNO (reg))
7493 && TARGET_BIG_ENDIAN
7494 && MAX_FPRS_PER_FMT > 1
7495 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
7497 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
7498 high = mips_subword (reg, true);
7499 low = mips_subword (reg, false);
7500 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
7503 return NULL_RTX;
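/* Illustrative example (not from the original source): on a big-endian
   target with 32-bit FPRs, a DFmode value in $f12 is described as
   (parallel [$f13 $f12]), i.e. the register holding the high-order word
   first, reflecting the little-endian ordering of paired FPRs noted
   above.  All other registers fall through to the default span.  */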
7506 /* Implement ASM_OUTPUT_ASCII. */
7508 void
7509 mips_output_ascii (FILE *stream, const char *string, size_t len)
7511 size_t i;
7512 int cur_pos;
7514 cur_pos = 17;
7515 fprintf (stream, "\t.ascii\t\"");
7516 for (i = 0; i < len; i++)
7518 int c;
7520 c = (unsigned char) string[i];
7521 if (ISPRINT (c))
7523 if (c == '\\' || c == '\"')
7525 putc ('\\', stream);
7526 cur_pos++;
7528 putc (c, stream);
7529 cur_pos++;
7531 else
7533 fprintf (stream, "\\%03o", c);
7534 cur_pos += 4;
7537 if (cur_pos > 72 && i+1 < len)
7539 cur_pos = 17;
7540 fprintf (stream, "\"\n\t.ascii\t\"");
7543 fprintf (stream, "\"\n");
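/* Example of the output format (illustrative): a string containing the
   characters a, b, a double quote, c and a terminating zero byte is
   emitted as

       .ascii  "ab\"c\000"

   and a fresh .ascii directive is started whenever the output line
   grows past column 72.  */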
7546 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7547 macros, mark the symbol as written so that mips_asm_output_external
7548 won't emit an .extern for it. STREAM is the output file, NAME is the
7549 name of the symbol, INIT_STRING is the string that should be written
7550 before the symbol and FINAL_STRING is the string that should be
7551 written after it. FINAL_STRING is a printf format that consumes the
7552 remaining arguments. */
7554 void
7555 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7556 const char *final_string, ...)
7558 va_list ap;
7560 fputs (init_string, stream);
7561 assemble_name (stream, name);
7562 va_start (ap, final_string);
7563 vfprintf (stream, final_string, ap);
7564 va_end (ap);
7566 if (!TARGET_EXPLICIT_RELOCS)
7568 tree name_tree = get_identifier (name);
7569 TREE_ASM_WRITTEN (name_tree) = 1;
7573 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7574 NAME is the name of the object and ALIGN is the required alignment
7575 in bits. TAKES_ALIGNMENT_P is true if the directive takes a third
7576 alignment argument. */
7578 void
7579 mips_declare_common_object (FILE *stream, const char *name,
7580 const char *init_string,
7581 unsigned HOST_WIDE_INT size,
7582 unsigned int align, bool takes_alignment_p)
7584 if (!takes_alignment_p)
7586 size += (align / BITS_PER_UNIT) - 1;
7587 size -= size % (align / BITS_PER_UNIT);
7588 mips_declare_object (stream, name, init_string,
7589 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7591 else
7592 mips_declare_object (stream, name, init_string,
7593 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7594 size, align / BITS_PER_UNIT);
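/* Illustrative output (hypothetical symbol name): for a 100-byte object
   with 8-byte (64-bit) alignment, a directive that takes an alignment
   argument yields

       .comm   buf,100,8

   while one that does not gets the size rounded up to the alignment
   instead, giving ".comm  buf,104".  */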
7597 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7598 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7600 void
7601 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7602 unsigned HOST_WIDE_INT size,
7603 unsigned int align)
7605 /* If the target wants uninitialized const declarations in
7606 .rdata then don't put them in .comm. */
7607 if (TARGET_EMBEDDED_DATA
7608 && TARGET_UNINIT_CONST_IN_RODATA
7609 && TREE_CODE (decl) == VAR_DECL
7610 && TREE_READONLY (decl)
7611 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7613 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7614 targetm.asm_out.globalize_label (stream, name);
7616 switch_to_section (readonly_data_section);
7617 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7618 mips_declare_object (stream, name, "",
7619 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7620 size);
7622 else
7623 mips_declare_common_object (stream, name, "\n\t.comm\t",
7624 size, align, true);
7627 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7628 extern int size_directive_output;
7630 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7631 definitions except that it uses mips_declare_object to emit the label. */
7633 void
7634 mips_declare_object_name (FILE *stream, const char *name,
7635 tree decl ATTRIBUTE_UNUSED)
7637 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7638 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7639 #endif
7641 size_directive_output = 0;
7642 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7644 HOST_WIDE_INT size;
7646 size_directive_output = 1;
7647 size = int_size_in_bytes (TREE_TYPE (decl));
7648 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7651 mips_declare_object (stream, name, "", ":\n");
7654 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7656 void
7657 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7659 const char *name;
7661 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7662 if (!flag_inhibit_size_directive
7663 && DECL_SIZE (decl) != 0
7664 && !at_end
7665 && top_level
7666 && DECL_INITIAL (decl) == error_mark_node
7667 && !size_directive_output)
7669 HOST_WIDE_INT size;
7671 size_directive_output = 1;
7672 size = int_size_in_bytes (TREE_TYPE (decl));
7673 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7676 #endif
7678 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7679 with the current ABI. */
7681 static const char *
7682 mips_mdebug_abi_name (void)
7684 switch (mips_abi)
7686 case ABI_32:
7687 return "abi32";
7688 case ABI_O64:
7689 return "abiO64";
7690 case ABI_N32:
7691 return "abiN32";
7692 case ABI_64:
7693 return "abiN64";
7694 case ABI_EABI:
7695 return TARGET_64BIT ? "eabi64" : "eabi32";
7696 default:
7697 gcc_unreachable ();
7701 /* Implement TARGET_ASM_FILE_START. */
7703 static void
7704 mips_file_start (void)
7706 default_file_start ();
7708 /* Generate a special section to describe the ABI switches used to
7709 produce the resultant binary. This is unnecessary on IRIX and
7710 causes unwanted warnings from the native linker. */
7711 if (!TARGET_IRIX)
7713 /* Record the ABI itself. Modern versions of binutils encode
7714 this information in the ELF header flags, but GDB needs the
7715 information in order to correctly debug binaries produced by
7716 older binutils. See the function mips_gdbarch_init in
7717 gdb/mips-tdep.c. */
7718 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7719 mips_mdebug_abi_name ());
7721 /* There is no ELF header flag to distinguish long32 forms of the
7722 EABI from long64 forms. Emit a special section to help tools
7723 such as GDB. Do the same for o64, which is sometimes used with
7724 -mlong64. */
7725 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7726 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7727 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7729 #ifdef HAVE_AS_GNU_ATTRIBUTE
7730 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7731 (TARGET_HARD_FLOAT_ABI
7732 ? (TARGET_DOUBLE_FLOAT
7733 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
7734 #endif
7737 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
7738 if (TARGET_ABICALLS)
7740 fprintf (asm_out_file, "\t.abicalls\n");
7741 if (TARGET_ABICALLS_PIC0)
7742 fprintf (asm_out_file, "\t.option\tpic0\n");
7745 if (flag_verbose_asm)
7746 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7747 ASM_COMMENT_START,
7748 mips_small_data_threshold, mips_arch_info->name, mips_isa);
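/* Illustrative file-start output for a hypothetical o32, hard-float,
   double-float, non-abicalls compilation (the exact directives depend
   on the configuration and on HAVE_AS_GNU_ATTRIBUTE):

       .section .mdebug.abi32
       .previous
       .gnu_attribute 4, 1

   An -mabicalls compilation additionally gets ".abicalls" and, when
   TARGET_ABICALLS_PIC0, ".option pic0".  */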
7751 /* Make the last instruction frame-related and note that it performs
7752 the operation described by FRAME_PATTERN. */
7754 static void
7755 mips_set_frame_expr (rtx frame_pattern)
7757 rtx insn;
7759 insn = get_last_insn ();
7760 RTX_FRAME_RELATED_P (insn) = 1;
7761 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7762 frame_pattern,
7763 REG_NOTES (insn));
7766 /* Return a frame-related rtx that stores REG at MEM.
7767 REG must be a single register. */
7769 static rtx
7770 mips_frame_set (rtx mem, rtx reg)
7772 rtx set;
7774 /* If we're saving the return address register and the DWARF return
7775 address column differs from the hard register number, adjust the
7776 note reg to refer to the former. */
7777 if (REGNO (reg) == GP_REG_FIRST + 31
7778 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7779 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7781 set = gen_rtx_SET (VOIDmode, mem, reg);
7782 RTX_FRAME_RELATED_P (set) = 1;
7784 return set;
7787 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7788 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7789 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7790 static const unsigned char mips16e_s2_s8_regs[] = {
7791 30, 23, 22, 21, 20, 19, 18
7793 static const unsigned char mips16e_a0_a3_regs[] = {
7794 4, 5, 6, 7
7797 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7798 ordered from the uppermost in memory to the lowest in memory. */
7799 static const unsigned char mips16e_save_restore_regs[] = {
7800 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7803 /* Return the index of the lowest X in the range [0, SIZE) for which
7804 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7806 static unsigned int
7807 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7808 unsigned int size)
7810 unsigned int i;
7812 for (i = 0; i < size; i++)
7813 if (BITSET_P (mask, regs[i]))
7814 break;
7816 return i;
7819 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7820 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7821 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7822 is true for all indexes (X, SIZE). */
7824 static void
7825 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7826 unsigned int size, unsigned int *num_regs_ptr)
7828 unsigned int i;
7830 i = mips16e_find_first_register (*mask_ptr, regs, size);
7831 for (i++; i < size; i++)
7832 if (!BITSET_P (*mask_ptr, regs[i]))
7834 *num_regs_ptr += 1;
7835 *mask_ptr |= 1 << regs[i];
7839 /* Return a simplified form of X using the register values in REG_VALUES.
7840 REG_VALUES[R] is the last value assigned to hard register R, or null
7841 if R has not been modified.
7843 This function is rather limited, but is good enough for our purposes. */
7845 static rtx
7846 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7848 x = avoid_constant_pool_reference (x);
7850 if (UNARY_P (x))
7852 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7853 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7854 x0, GET_MODE (XEXP (x, 0)));
7857 if (ARITHMETIC_P (x))
7859 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7860 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7861 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7864 if (REG_P (x)
7865 && reg_values[REGNO (x)]
7866 && !rtx_unstable_p (reg_values[REGNO (x)]))
7867 return reg_values[REGNO (x)];
7869 return x;
7872 /* Return true if (set DEST SRC) stores an argument register into its
7873 caller-allocated save slot, storing the number of that argument
7874 register in *REGNO_PTR if so. REG_VALUES is as for
7875 mips16e_collect_propagate_value. */
7877 static bool
7878 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7879 unsigned int *regno_ptr)
7881 unsigned int argno, regno;
7882 HOST_WIDE_INT offset, required_offset;
7883 rtx addr, base;
7885 /* Check that this is a word-mode store. */
7886 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7887 return false;
7889 /* Check that the register being saved is an unmodified argument
7890 register. */
7891 regno = REGNO (src);
7892 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
7893 return false;
7894 argno = regno - GP_ARG_FIRST;
7896 /* Check whether the address is an appropriate stack-pointer or
7897 frame-pointer access. */
7898 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7899 mips_split_plus (addr, &base, &offset);
7900 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7901 if (base == hard_frame_pointer_rtx)
7902 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7903 else if (base != stack_pointer_rtx)
7904 return false;
7905 if (offset != required_offset)
7906 return false;
7908 *regno_ptr = regno;
7909 return true;
7912 /* A subroutine of mips_expand_prologue, called only when generating
7913 MIPS16e SAVE instructions. Search the start of the function for any
7914 instructions that save argument registers into their caller-allocated
7915 save slots. Delete such instructions and return a value N such that
7916 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7917 instructions redundant. */
7919 static unsigned int
7920 mips16e_collect_argument_saves (void)
7922 rtx reg_values[FIRST_PSEUDO_REGISTER];
7923 rtx insn, next, set, dest, src;
7924 unsigned int nargs, regno;
7926 push_topmost_sequence ();
7927 nargs = 0;
7928 memset (reg_values, 0, sizeof (reg_values));
7929 for (insn = get_insns (); insn; insn = next)
7931 next = NEXT_INSN (insn);
7932 if (NOTE_P (insn))
7933 continue;
7935 if (!INSN_P (insn))
7936 break;
7938 set = PATTERN (insn);
7939 if (GET_CODE (set) != SET)
7940 break;
7942 dest = SET_DEST (set);
7943 src = SET_SRC (set);
7944 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7946 if (!BITSET_P (cfun->machine->frame.mask, regno))
7948 delete_insn (insn);
7949 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7952 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7953 reg_values[REGNO (dest)]
7954 = mips16e_collect_propagate_value (src, reg_values);
7955 else
7956 break;
7958 pop_topmost_sequence ();
7960 return nargs;
7963 /* Return a move between register REGNO and memory location SP + OFFSET.
7964 Make the move a load if RESTORE_P, otherwise make it a frame-related
7965 store. */
7967 static rtx
7968 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7969 unsigned int regno)
7971 rtx reg, mem;
7973 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7974 reg = gen_rtx_REG (SImode, regno);
7975 return (restore_p
7976 ? gen_rtx_SET (VOIDmode, reg, mem)
7977 : mips_frame_set (mem, reg));
7980 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7981 The instruction must:
7983 - Allocate or deallocate SIZE bytes in total; SIZE is known
7984 to be nonzero.
7986 - Save or restore as many registers in *MASK_PTR as possible.
7987 The instruction saves the first registers at the top of the
7988 allocated area, with the other registers below it.
7990 - Save NARGS argument registers above the allocated area.
7992 (NARGS is always zero if RESTORE_P.)
7994 The SAVE and RESTORE instructions cannot save and restore all general
7995 registers, so there may be some registers left over for the caller to
7996 handle. Destructively modify *MASK_PTR so that it contains the registers
7997 that still need to be saved or restored. The caller can save these
7998 registers in the memory immediately below *OFFSET_PTR, which is a
7999 byte offset from the bottom of the allocated stack area. */
8001 static rtx
8002 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8003 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8004 HOST_WIDE_INT size)
8006 rtx pattern, set;
8007 HOST_WIDE_INT offset, top_offset;
8008 unsigned int i, regno;
8009 int n;
8011 gcc_assert (cfun->machine->frame.num_fp == 0);
8013 /* Calculate the number of elements in the PARALLEL. We need one element
8014 for the stack adjustment, one for each argument register save, and one
8015 for each additional register move. */
8016 n = 1 + nargs;
8017 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8018 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8019 n++;
8021 /* Create the final PARALLEL. */
8022 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8023 n = 0;
8025 /* Add the stack pointer adjustment. */
8026 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8027 plus_constant (stack_pointer_rtx,
8028 restore_p ? size : -size));
8029 RTX_FRAME_RELATED_P (set) = 1;
8030 XVECEXP (pattern, 0, n++) = set;
8032 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8033 top_offset = restore_p ? size : 0;
8035 /* Save the arguments. */
8036 for (i = 0; i < nargs; i++)
8038 offset = top_offset + i * UNITS_PER_WORD;
8039 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8040 XVECEXP (pattern, 0, n++) = set;
8043 /* Then fill in the other register moves. */
8044 offset = top_offset;
8045 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8047 regno = mips16e_save_restore_regs[i];
8048 if (BITSET_P (*mask_ptr, regno))
8050 offset -= UNITS_PER_WORD;
8051 set = mips16e_save_restore_reg (restore_p, offset, regno);
8052 XVECEXP (pattern, 0, n++) = set;
8053 *mask_ptr &= ~(1 << regno);
8057 /* Tell the caller what offset it should use for the remaining registers. */
8058 *offset_ptr = size + (offset - top_offset);
8060 gcc_assert (n == XVECLEN (pattern, 0));
8062 return pattern;
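/* Sketch of the RTL built above for a save of $16, $17 and $31 with
   SIZE == 32 and no argument saves (simplified and illustrative; the
   real pattern uses frame-related SETs and word-mode MEMs):

       (parallel [(set sp (plus sp -32))
                  (set (mem (plus sp -4))  (reg $31))
                  (set (mem (plus sp -8))  (reg $17))
                  (set (mem (plus sp -12)) (reg $16))])

   All offsets are relative to the incoming stack pointer; here
   *OFFSET_PTR would be set to 32 + (-12 - 0) = 20.  */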
8065 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8066 pointer. Return true if PATTERN matches the kind of instruction
8067 generated by mips16e_build_save_restore. If INFO is nonnull,
8068 initialize it when returning true. */
8070 bool
8071 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8072 struct mips16e_save_restore_info *info)
8074 unsigned int i, nargs, mask, extra;
8075 HOST_WIDE_INT top_offset, save_offset, offset;
8076 rtx set, reg, mem, base;
8077 int n;
8079 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8080 return false;
8082 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8083 top_offset = adjust > 0 ? adjust : 0;
8085 /* Interpret all other members of the PARALLEL. */
8086 save_offset = top_offset - UNITS_PER_WORD;
8087 mask = 0;
8088 nargs = 0;
8089 i = 0;
8090 for (n = 1; n < XVECLEN (pattern, 0); n++)
8092 /* Check that we have a SET. */
8093 set = XVECEXP (pattern, 0, n);
8094 if (GET_CODE (set) != SET)
8095 return false;
8097 /* Check that the SET is a load (if restoring) or a store
8098 (if saving). */
8099 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8100 if (!MEM_P (mem))
8101 return false;
8103 /* Check that the address is the sum of the stack pointer and a
8104 possibly-zero constant offset. */
8105 mips_split_plus (XEXP (mem, 0), &base, &offset);
8106 if (base != stack_pointer_rtx)
8107 return false;
8109 /* Check that SET's other operand is a register. */
8110 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8111 if (!REG_P (reg))
8112 return false;
8114 /* Check for argument saves. */
8115 if (offset == top_offset + nargs * UNITS_PER_WORD
8116 && REGNO (reg) == GP_ARG_FIRST + nargs)
8117 nargs++;
8118 else if (offset == save_offset)
8120 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8121 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8122 return false;
8124 mask |= 1 << REGNO (reg);
8125 save_offset -= UNITS_PER_WORD;
8127 else
8128 return false;
8131 /* Check that the restrictions on register ranges are met. */
8132 extra = 0;
8133 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8134 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8135 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8136 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8137 if (extra != 0)
8138 return false;
8140 /* Make sure that the topmost argument register is not saved twice.
8141 The checks above ensure that the same is then true for the other
8142 argument registers. */
8143 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8144 return false;
8146 /* Pass back information, if requested. */
8147 if (info)
8149 info->nargs = nargs;
8150 info->mask = mask;
8151 info->size = (adjust > 0 ? adjust : -adjust);
8154 return true;
8157 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8158 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8159 the null terminator. */
8161 static char *
8162 mips16e_add_register_range (char *s, unsigned int min_reg,
8163 unsigned int max_reg)
8165 if (min_reg != max_reg)
8166 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8167 else
8168 s += sprintf (s, ",%s", reg_names[min_reg]);
8169 return s;
8172 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8173 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8175 const char *
8176 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8178 static char buffer[300];
8180 struct mips16e_save_restore_info info;
8181 unsigned int i, end;
8182 char *s;
8184 /* Parse the pattern. */
8185 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8186 gcc_unreachable ();
8188 /* Add the mnemonic. */
8189 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8190 s += strlen (s);
8192 /* Save the arguments. */
8193 if (info.nargs > 1)
8194 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8195 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8196 else if (info.nargs == 1)
8197 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8199 /* Emit the amount of stack space to allocate or deallocate. */
8200 s += sprintf (s, "%d", (int) info.size);
8202 /* Save or restore $16. */
8203 if (BITSET_P (info.mask, 16))
8204 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8206 /* Save or restore $17. */
8207 if (BITSET_P (info.mask, 17))
8208 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8210 /* Save or restore registers in the range $s2...$s8, which
8211 mips16e_s2_s8_regs lists in decreasing order. Note that this
8212 is a software register range; the hardware registers are not
8213 numbered consecutively. */
8214 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8215 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8216 if (i < end)
8217 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8218 mips16e_s2_s8_regs[i]);
8220 /* Save or restore registers in the range $a0...$a3. */
8221 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8222 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8223 if (i < end)
8224 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8225 mips16e_a0_a3_regs[end - 1]);
8227 /* Save or restore $31. */
8228 if (BITSET_P (info.mask, 31))
8229 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8231 return buffer;
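/* For illustration: with one argument register to save, a 32-byte
   adjustment and a mask containing $16, $17 and $31, the code above
   produces

       save    $4,32,$16,$17,$31

   and the matching epilogue instruction would be
   "restore 32,$16,$17,$31".  */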
8234 /* Return true if the current function has an insn that implicitly
8235 refers to $gp. */
8237 static bool
8238 mips_function_has_gp_insn (void)
8240 /* Don't bother rechecking if we found one last time. */
8241 if (!cfun->machine->has_gp_insn_p)
8243 rtx insn;
8245 push_topmost_sequence ();
8246 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8247 if (USEFUL_INSN_P (insn)
8248 && (get_attr_got (insn) != GOT_UNSET
8249 || mips_small_data_pattern_p (PATTERN (insn))))
8251 cfun->machine->has_gp_insn_p = true;
8252 break;
8254 pop_topmost_sequence ();
8256 return cfun->machine->has_gp_insn_p;
8259 /* Return true if the current function returns its value in a floating-point
8260 register in MIPS16 mode. */
8262 static bool
8263 mips16_cfun_returns_in_fpr_p (void)
8265 tree return_type = DECL_RESULT (current_function_decl);
8266 return (TARGET_MIPS16
8267 && TARGET_HARD_FLOAT_ABI
8268 && !aggregate_value_p (return_type, current_function_decl)
8269 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
8272 /* Return the register that should be used as the global pointer
8273 within this function. Return 0 if the function doesn't need
8274 a global pointer. */
8276 static unsigned int
8277 mips_global_pointer (void)
8279 unsigned int regno;
8281 /* $gp is always available unless we're using a GOT. */
8282 if (!TARGET_USE_GOT)
8283 return GLOBAL_POINTER_REGNUM;
8285 /* We must always provide $gp when it is used implicitly. */
8286 if (!TARGET_EXPLICIT_RELOCS)
8287 return GLOBAL_POINTER_REGNUM;
8289 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
8290 a valid gp. */
8291 if (crtl->profile)
8292 return GLOBAL_POINTER_REGNUM;
8294 /* If the function has a nonlocal goto, $gp must hold the correct
8295 global pointer for the target function. */
8296 if (crtl->has_nonlocal_goto)
8297 return GLOBAL_POINTER_REGNUM;
8299 /* There's no need to initialize $gp if it isn't referenced now,
8300 and if we can be sure that no new references will be added during
8301 or after reload. */
8302 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
8303 && !mips_function_has_gp_insn ())
8305 /* The function doesn't use $gp at the moment. If we're generating
8306 -call_nonpic code, no new uses will be introduced during or after
8307 reload. */
8308 if (TARGET_ABICALLS_PIC0)
8309 return 0;
8311 /* We need to handle the following implicit gp references:
8313 - Reload can sometimes introduce constant pool references
8314 into a function that otherwise didn't need them. For example,
8315 suppose we have an instruction like:
8317 (set (reg:DF R1) (float:DF (reg:SI R2)))
8319 If R2 turns out to be constant such as 1, the instruction may
8320 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
8321 the option of using this constant if R2 doesn't get allocated
8322 to a register.
8324 In cases like these, reload will have added the constant to the
8325 pool but no instruction will yet refer to it.
8327 - MIPS16 functions that return in FPRs need to call an
8328 external libgcc routine. */
8329 if (!crtl->uses_const_pool
8330 && !mips16_cfun_returns_in_fpr_p ())
8331 return 0;
8334 /* We need a global pointer, but perhaps we can use a call-clobbered
8335 register instead of $gp. */
8336 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
8337 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8338 if (!df_regs_ever_live_p (regno)
8339 && call_really_used_regs[regno]
8340 && !fixed_regs[regno]
8341 && regno != PIC_FUNCTION_ADDR_REGNUM)
8342 return regno;
8344 return GLOBAL_POINTER_REGNUM;
8347 /* Return true if the current function must save register REGNO. */
8349 static bool
8350 mips_save_reg_p (unsigned int regno)
8352 /* We need to save $gp if TARGET_CALL_SAVED_GP and if we have not
8353 chosen a call-clobbered substitute. */
8354 if (TARGET_CALL_SAVED_GP
8355 && regno == GLOBAL_POINTER_REGNUM
8356 && cfun->machine->global_pointer == regno)
8357 return true;
8359 /* Check call-saved registers. */
8360 if ((crtl->saves_all_registers || df_regs_ever_live_p (regno))
8361 && !call_really_used_regs[regno])
8362 return true;
8364 /* Save both registers in an FPR pair if either one is used. This is
8365 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
8366 register to be used without the even register. */
8367 if (FP_REG_P (regno)
8368 && MAX_FPRS_PER_FMT == 2
8369 && df_regs_ever_live_p (regno + 1)
8370 && !call_really_used_regs[regno + 1])
8371 return true;
8373 /* We need to save the old frame pointer before setting up a new one. */
8374 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
8375 return true;
8377 /* Check for registers that must be saved for FUNCTION_PROFILER. */
8378 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
8379 return true;
8381 /* We need to save the incoming return address if it is ever clobbered
8382 within the function, if __builtin_eh_return is being used to set a
8383 different return address, or if a stub is being used to return a
8384 value in FPRs. */
8385 if (regno == GP_REG_FIRST + 31
8386 && (df_regs_ever_live_p (regno)
8387 || crtl->calls_eh_return
8388 || mips16_cfun_returns_in_fpr_p ()))
8389 return true;
8391 return false;
8394 /* Populate the current function's mips_frame_info structure.
8396 MIPS stack frames look like:
8398   +-------------------------------+
8400   |  incoming stack arguments     |
8402   +-------------------------------+
8404   |  caller-allocated save area   |
8405 A |  for register arguments       |
8407   +-------------------------------+ <-- incoming stack pointer
8409   |  callee-allocated save area   |
8410 B |  for arguments that are       |
8411   |  split between registers and  |
8412   |  the stack                    |
8414   +-------------------------------+ <-- arg_pointer_rtx
8416 C |  callee-allocated save area   |
8417   |  for register varargs         |
8419   +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
8420   |                               |       + UNITS_PER_HWFPVALUE
8421   |  FPR save area                |
8423   +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
8424   |                               |       + UNITS_PER_WORD
8425   |  GPR save area                |
8427   +-------------------------------+
8428   |                               | \
8429   |  local variables              |  | var_size
8430   |                               | /
8431   +-------------------------------+
8432   |                               | \
8433   |  $gp save area                |  | cprestore_size
8434   |                               | /
8435 P +-------------------------------+ <-- hard_frame_pointer_rtx for
8436   |                               |       MIPS16 code
8437   |  outgoing stack arguments     |
8439   +-------------------------------+
8441   |  caller-allocated save area   |
8442   |  for register arguments       |
8444   +-------------------------------+ <-- stack_pointer_rtx
8445                                          frame_pointer_rtx
8446                                          hard_frame_pointer_rtx for
8447                                            non-MIPS16 code.
8449 At least two of A, B and C will be empty.
8451 Dynamic stack allocations such as alloca insert data at point P.
8452 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
8453 hard_frame_pointer_rtx unchanged. */
8455 static void
8456 mips_compute_frame_info (void)
8458 struct mips_frame_info *frame;
8459 HOST_WIDE_INT offset, size;
8460 unsigned int regno, i;
8462 frame = &cfun->machine->frame;
8463 memset (frame, 0, sizeof (*frame));
8464 size = get_frame_size ();
8466 cfun->machine->global_pointer = mips_global_pointer ();
8468 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
8469 area and the $gp save slot. This area isn't needed in leaf functions,
8470 but if the target-independent frame size is nonzero, we're committed
8471 to allocating it anyway. */
8472 if (size == 0 && current_function_is_leaf)
8474 /* The MIPS 3.0 linker does not like functions that dynamically
8475 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
8476 looks like we are trying to create a second frame pointer to the
8477 function, so allocate some stack space to make it happy. */
8478 if (cfun->calls_alloca)
8479 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
8480 else
8481 frame->args_size = 0;
8482 frame->cprestore_size = 0;
8484 else
8486 frame->args_size = crtl->outgoing_args_size;
8487 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
8489 offset = frame->args_size + frame->cprestore_size;
8491 /* Move above the local variables. */
8492 frame->var_size = MIPS_STACK_ALIGN (size);
8493 offset += frame->var_size;
8495 /* Find out which GPRs we need to save. */
8496 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8497 if (mips_save_reg_p (regno))
8499 frame->num_gp++;
8500 frame->mask |= 1 << (regno - GP_REG_FIRST);
8503 /* If this function calls eh_return, we must also save and restore the
8504 EH data registers. */
8505 if (crtl->calls_eh_return)
8506 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
8508 frame->num_gp++;
8509 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
8512 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
8513 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
8514 save all later registers too. */
8515 if (GENERATE_MIPS16E_SAVE_RESTORE)
8517 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
8518 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
8519 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
8520 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
8523 /* Move above the GPR save area. */
8524 if (frame->num_gp > 0)
8526 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
8527 frame->gp_sp_offset = offset - UNITS_PER_WORD;
8530 /* Find out which FPRs we need to save. This loop must iterate over
8531 the same space as its companion in mips_for_each_saved_reg. */
8532 if (TARGET_HARD_FLOAT)
8533 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
8534 if (mips_save_reg_p (regno))
8536 frame->num_fp += MAX_FPRS_PER_FMT;
8537 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
8540 /* Move above the FPR save area. */
8541 if (frame->num_fp > 0)
8543 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
8544 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
8547 /* Move above the callee-allocated varargs save area. */
8548 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8549 frame->arg_pointer_offset = offset;
8551 /* Move above the callee-allocated area for pretend stack arguments. */
8552 offset += crtl->args.pretend_args_size;
8553 frame->total_size = offset;
8555 /* Work out the offsets of the save areas from the top of the frame. */
8556 if (frame->gp_sp_offset > 0)
8557 frame->gp_save_offset = frame->gp_sp_offset - offset;
8558 if (frame->fp_sp_offset > 0)
8559 frame->fp_save_offset = frame->fp_sp_offset - offset;
8561 /* MIPS16 code offsets the frame pointer by the size of the outgoing
8562 arguments. This tends to increase the chances of using unextended
8563 instructions for local variables and incoming arguments. */
8564 if (TARGET_MIPS16)
8565 frame->hard_frame_pointer_offset = frame->args_size;
8568 /* Return the style of GP load sequence that is being used for the
8569 current function. */
8571 enum mips_loadgp_style
8572 mips_current_loadgp_style (void)
8574 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8575 return LOADGP_NONE;
8577 if (TARGET_RTP_PIC)
8578 return LOADGP_RTP;
8580 if (TARGET_ABSOLUTE_ABICALLS)
8581 return LOADGP_ABSOLUTE;
8583 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8586 /* Implement FRAME_POINTER_REQUIRED. */
8588 bool
8589 mips_frame_pointer_required (void)
8591 /* If the function contains dynamic stack allocations, we need to
8592 use the frame pointer to access the static parts of the frame. */
8593 if (cfun->calls_alloca)
8594 return true;
8596 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8597 reload may be unable to compute the address of a local variable,
8598 since there is no way to add a large constant to the stack pointer
8599 without using a second temporary register. */
8600 if (TARGET_MIPS16)
8602 mips_compute_frame_info ();
8603 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8604 return true;
8607 return false;
8610 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
8611 or argument pointer. TO is either the stack pointer or hard frame
8612 pointer. */
8614 HOST_WIDE_INT
8615 mips_initial_elimination_offset (int from, int to)
8617 HOST_WIDE_INT offset;
8619 mips_compute_frame_info ();
8621 /* Set OFFSET to the offset from the soft frame pointer, which is also
8622 the offset from the end-of-prologue stack pointer. */
8623 switch (from)
8625 case FRAME_POINTER_REGNUM:
8626 offset = 0;
8627 break;
8629 case ARG_POINTER_REGNUM:
8630 offset = cfun->machine->frame.arg_pointer_offset;
8631 break;
8633 default:
8634 gcc_unreachable ();
8637 if (to == HARD_FRAME_POINTER_REGNUM)
8638 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8640 return offset;
8643 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
8645 static void
8646 mips_extra_live_on_entry (bitmap regs)
8648 if (TARGET_USE_GOT)
8650 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
8651 the global pointer. */
8652 if (!TARGET_ABSOLUTE_ABICALLS)
8653 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8655 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
8656 the global pointer. */
8657 if (TARGET_MIPS16)
8658 bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
8660 /* See the comment above load_call<mode> for details. */
8661 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
8665 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
8666 previous frame. */
8668 rtx
8669 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8671 if (count != 0)
8672 return const0_rtx;
8674 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8677 /* Emit code to change the current function's return address to
8678 ADDRESS. SCRATCH is available as a scratch register, if needed.
8679 ADDRESS and SCRATCH are both word-mode GPRs. */
8681 void
8682 mips_set_return_address (rtx address, rtx scratch)
8684 rtx slot_address;
8686 gcc_assert (BITSET_P (cfun->machine->frame.mask, 31));
8687 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8688 cfun->machine->frame.gp_sp_offset);
8689 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
8692 /* Return a MEM rtx for the cprestore slot, using TEMP as a temporary base
8693 register if need be. */
8695 static rtx
8696 mips_cprestore_slot (rtx temp)
8698 const struct mips_frame_info *frame;
8699 rtx base;
8700 HOST_WIDE_INT offset;
8702 frame = &cfun->machine->frame;
8703 if (frame_pointer_needed)
8705 base = hard_frame_pointer_rtx;
8706 offset = frame->args_size - frame->hard_frame_pointer_offset;
8708 else
8710 base = stack_pointer_rtx;
8711 offset = frame->args_size;
8713 return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
8716 /* Restore $gp from its save slot, using TEMP as a temporary base register
8717 if need be. This function is for o32 and o64 abicalls only. */
8719 void
8720 mips_restore_gp (rtx temp)
8722 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8724 if (cfun->machine->global_pointer == 0)
8725 return;
8727 if (TARGET_MIPS16)
8729 mips_emit_move (temp, mips_cprestore_slot (temp));
8730 mips_emit_move (pic_offset_table_rtx, temp);
8732 else
8733 mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp));
8734 if (!TARGET_EXPLICIT_RELOCS)
8735 emit_insn (gen_blockage ());
8738 /* A function to save or store a register. The first argument is the
8739 register and the second is the stack slot. */
8740 typedef void (*mips_save_restore_fn) (rtx, rtx);
8742 /* Use FN to save or restore register REGNO. MODE is the register's
8743 mode and OFFSET is the offset of its save slot from the current
8744 stack pointer. */
8746 static void
8747 mips_save_restore_reg (enum machine_mode mode, int regno,
8748 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8750 rtx mem;
8752 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8753 fn (gen_rtx_REG (mode, regno), mem);
8756 /* Call FN for each register that is saved by the current function.
8757 SP_OFFSET is the offset of the current stack pointer from the start
8758 of the frame. */
8760 static void
8761 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8763 enum machine_mode fpr_mode;
8764 HOST_WIDE_INT offset;
8765 int regno;
8767 /* Save registers starting from high to low. The debuggers prefer that
8768 at least the return register be stored at func+4; this also lets us
8769 avoid needing a nop in the epilogue if at least one register is
8770 reloaded in addition to the return address. */
8771 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8772 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8773 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8775 mips_save_restore_reg (word_mode, regno, offset, fn);
8776 offset -= UNITS_PER_WORD;
8779 /* This loop must iterate over the same space as its companion in
8780 mips_compute_frame_info. */
8781 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8782 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8783 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
8784 regno >= FP_REG_FIRST;
8785 regno -= MAX_FPRS_PER_FMT)
8786 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8788 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8789 offset -= GET_MODE_SIZE (fpr_mode);
8793 /* If we're generating n32 or n64 abicalls, and the current function
8794 does not use $28 as its global pointer, emit a cplocal directive.
8795 Use pic_offset_table_rtx as the argument to the directive. */
8797 static void
8798 mips_output_cplocal (void)
8800 if (!TARGET_EXPLICIT_RELOCS
8801 && cfun->machine->global_pointer > 0
8802 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8803 output_asm_insn (".cplocal %+", 0);
8806 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
8808 static void
8809 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8811 const char *fnname;
8813 #ifdef SDB_DEBUGGING_INFO
8814 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8815 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8816 #endif
8818 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
8819 floating-point arguments. */
8820 if (TARGET_MIPS16
8821 && TARGET_HARD_FLOAT_ABI
8822 && crtl->args.info.fp_code != 0)
8823 mips16_build_function_stub ();
8825 /* Get the function name the same way that toplev.c does before calling
8826 assemble_start_function. This is needed so that the name used here
8827 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8828 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8829 mips_start_function_definition (fnname, TARGET_MIPS16);
8831 /* Stop mips_file_end from treating this function as external. */
8832 if (TARGET_IRIX && mips_abi == ABI_32)
8833 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8835 /* Output MIPS-specific frame information. */
8836 if (!flag_inhibit_size_directive)
8838 const struct mips_frame_info *frame;
8840 frame = &cfun->machine->frame;
8842 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
8843 fprintf (file,
8844 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8845 "# vars= " HOST_WIDE_INT_PRINT_DEC
8846 ", regs= %d/%d"
8847 ", args= " HOST_WIDE_INT_PRINT_DEC
8848 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8849 reg_names[frame_pointer_needed
8850 ? HARD_FRAME_POINTER_REGNUM
8851 : STACK_POINTER_REGNUM],
8852 (frame_pointer_needed
8853 ? frame->total_size - frame->hard_frame_pointer_offset
8854 : frame->total_size),
8855 reg_names[GP_REG_FIRST + 31],
8856 frame->var_size,
8857 frame->num_gp, frame->num_fp,
8858 frame->args_size,
8859 frame->cprestore_size);
8861 /* .mask MASK, OFFSET. */
8862 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8863 frame->mask, frame->gp_save_offset);
8865 /* .fmask MASK, OFFSET. */
8866 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8867 frame->fmask, frame->fp_save_offset);
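/* Illustrative output of the block above for a small o32 function with
   a 32-byte frame that saves only $31 (all numbers are made up):

       .frame  $sp,32,$31          # vars= 0, regs= 1/0, args= 16, gp= 8
       .mask   0x80000000,-4
       .fmask  0x00000000,0
   */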
8870 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
8871 Also emit the ".set noreorder; .set nomacro" sequence for functions
8872 that need it. */
8873 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8875 if (TARGET_MIPS16)
8877 /* This is a fixed-form sequence. The position of the
8878 first two instructions is important because of the
8879 way _gp_disp is defined. */
8880 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
8881 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
8882 output_asm_insn ("sll\t$2,16", 0);
8883 output_asm_insn ("addu\t$2,$3", 0);
8885 /* .cpload must be in a .set noreorder but not a .set nomacro block. */
8886 else if (!cfun->machine->all_noreorder_p)
8887 output_asm_insn ("%(.cpload\t%^%)", 0);
8888 else
8889 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8891 else if (cfun->machine->all_noreorder_p)
8892 output_asm_insn ("%(%<", 0);
8894 /* Tell the assembler which register we're using as the global
8895 pointer. This is needed for thunks, since they can use either
8896 explicit relocs or assembler macros. */
8897 mips_output_cplocal ();
8900 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
8902 static void
8903 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8904 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8906 const char *fnname;
8908 /* Reinstate the normal $gp. */
8909 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8910 mips_output_cplocal ();
8912 if (cfun->machine->all_noreorder_p)
8914 /* Avoid using %>%) since it adds excess whitespace. */
8915 output_asm_insn (".set\tmacro", 0);
8916 output_asm_insn (".set\treorder", 0);
8917 set_noreorder = set_nomacro = 0;
8920 /* Get the function name the same way that toplev.c does before calling
8921 assemble_start_function. This is needed so that the name used here
8922 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8923 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8924 mips_end_function_definition (fnname);
8927 /* Save register REG to MEM. Make the instruction frame-related. */
8929 static void
8930 mips_save_reg (rtx reg, rtx mem)
8932 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8934 rtx x1, x2;
8936 if (mips_split_64bit_move_p (mem, reg))
8937 mips_split_doubleword_move (mem, reg);
8938 else
8939 mips_emit_move (mem, reg);
8941 x1 = mips_frame_set (mips_subword (mem, false),
8942 mips_subword (reg, false));
8943 x2 = mips_frame_set (mips_subword (mem, true),
8944 mips_subword (reg, true));
8945 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8947 else
8949 if (TARGET_MIPS16
8950 && REGNO (reg) != GP_REG_FIRST + 31
8951 && !M16_REG_P (REGNO (reg)))
8953 /* Save a non-MIPS16 register by moving it through a temporary.
8954 We don't need to do this for $31 since there's a special
8955 instruction for it. */
8956 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8957 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8959 else
8960 mips_emit_move (mem, reg);
8962 mips_set_frame_expr (mips_frame_set (mem, reg));
8966 /* The __gnu_local_gp symbol. */
8968 static GTY(()) rtx mips_gnu_local_gp;
8970 /* If we're generating n32 or n64 abicalls, emit instructions
8971 to set up the global pointer. */
8973 static void
8974 mips_emit_loadgp (void)
8976 rtx addr, offset, incoming_address, base, index, pic_reg;
8978 pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
8979 switch (mips_current_loadgp_style ())
8981 case LOADGP_ABSOLUTE:
8982 if (mips_gnu_local_gp == NULL)
8984 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8985 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8987 emit_insn (Pmode == SImode
8988 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
8989 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
8990 break;
8992 case LOADGP_OLDABI:
8993 /* Added by mips_output_function_prologue. */
8994 break;
8996 case LOADGP_NEWABI:
8997 addr = XEXP (DECL_RTL (current_function_decl), 0);
8998 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8999 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
9000 emit_insn (Pmode == SImode
9001 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
9002 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
9003 break;
9005 case LOADGP_RTP:
9006 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
9007 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
9008 emit_insn (Pmode == SImode
9009 ? gen_loadgp_rtp_si (pic_reg, base, index)
9010 : gen_loadgp_rtp_di (pic_reg, base, index));
9011 break;
9013 default:
9014 return;
9017 if (TARGET_MIPS16)
9018 emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));
9020 /* Emit a blockage if there are implicit uses of the GP register.
9021 This includes profiled functions, because FUNCTION_PROFILER uses
9022 a jal macro. */
9023 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
9024 emit_insn (gen_loadgp_blockage ());
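/* For reference (illustrative; the exact sequence comes from the
   loadgp_* patterns in mips.md): the LOADGP_NEWABI case typically
   expands to something along the lines of

       lui     $gp,%hi(%neg(%gp_rel(foo)))
       addu    $gp,$gp,$25
       addiu   $gp,$gp,%lo(%neg(%gp_rel(foo)))

   where $25 holds the incoming function address and "foo" stands for
   the current function.  */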
9027 /* Expand the "prologue" pattern. */
9029 void
9030 mips_expand_prologue (void)
9032 const struct mips_frame_info *frame;
9033 HOST_WIDE_INT size;
9034 unsigned int nargs;
9035 rtx insn;
9037 if (cfun->machine->global_pointer > 0)
9038 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
9040 frame = &cfun->machine->frame;
9041 size = frame->total_size;
9043 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
9044 bytes beforehand; this is enough to cover the register save area
9045 without going out of range. */
9046 if ((frame->mask | frame->fmask) != 0)
9048 HOST_WIDE_INT step1;
9050 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
9051 if (GENERATE_MIPS16E_SAVE_RESTORE)
9053 HOST_WIDE_INT offset;
9054 unsigned int mask, regno;
9056 /* Try to merge argument stores into the save instruction. */
9057 nargs = mips16e_collect_argument_saves ();
9059 /* Build the save instruction. */
9060 mask = frame->mask;
9061 insn = mips16e_build_save_restore (false, &mask, &offset,
9062 nargs, step1);
9063 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9064 size -= step1;
9066 /* Check if we need to save other registers. */
9067 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9068 if (BITSET_P (mask, regno - GP_REG_FIRST))
9070 offset -= UNITS_PER_WORD;
9071 mips_save_restore_reg (word_mode, regno,
9072 offset, mips_save_reg);
9075 else
9077 insn = gen_add3_insn (stack_pointer_rtx,
9078 stack_pointer_rtx,
9079 GEN_INT (-step1));
9080 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9081 size -= step1;
9082 mips_for_each_saved_reg (size, mips_save_reg);
9086 /* Allocate the rest of the frame. */
9087 if (size > 0)
9089 if (SMALL_OPERAND (-size))
9090 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
9091 stack_pointer_rtx,
9092 GEN_INT (-size)))) = 1;
9093 else
9095 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
9096 if (TARGET_MIPS16)
9098 /* There are no instructions to add or subtract registers
9099 from the stack pointer, so use the frame pointer as a
9100 temporary. We should always be using a frame pointer
9101 in this case anyway. */
9102 gcc_assert (frame_pointer_needed);
9103 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9104 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
9105 hard_frame_pointer_rtx,
9106 MIPS_PROLOGUE_TEMP (Pmode)));
9107 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
9109 else
9110 emit_insn (gen_sub3_insn (stack_pointer_rtx,
9111 stack_pointer_rtx,
9112 MIPS_PROLOGUE_TEMP (Pmode)));
9114 /* Describe the combined effect of the previous instructions. */
9115 mips_set_frame_expr
9116 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9117 plus_constant (stack_pointer_rtx, -size)));
9121 /* Set up the frame pointer, if we're using one. */
9122 if (frame_pointer_needed)
9124 HOST_WIDE_INT offset;
9126 offset = frame->hard_frame_pointer_offset;
9127 if (offset == 0)
9129 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9130 RTX_FRAME_RELATED_P (insn) = 1;
9132 else if (SMALL_OPERAND (offset))
9134 insn = gen_add3_insn (hard_frame_pointer_rtx,
9135 stack_pointer_rtx, GEN_INT (offset));
9136 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
9138 else
9140 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
9141 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
9142 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
9143 hard_frame_pointer_rtx,
9144 MIPS_PROLOGUE_TEMP (Pmode)));
9145 mips_set_frame_expr
9146 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
9147 plus_constant (stack_pointer_rtx, offset)));
9151 mips_emit_loadgp ();
9153 /* Initialize the $gp save slot. */
9154 if (frame->cprestore_size > 0
9155 && cfun->machine->global_pointer != 0)
9157 if (TARGET_MIPS16)
9158 mips_emit_move (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9159 MIPS16_PIC_TEMP);
9160 else if (TARGET_ABICALLS_PIC2)
9161 emit_insn (gen_cprestore (GEN_INT (frame->args_size)));
9162 else
9163 emit_move_insn (mips_cprestore_slot (MIPS_PROLOGUE_TEMP (Pmode)),
9164 pic_offset_table_rtx);
9167 /* If we are profiling, make sure no instructions are scheduled before
9168 the call to mcount. */
9169 if (crtl->profile)
9170 emit_insn (gen_blockage ());
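/* Illustrative prologue for a small non-PIC o32 function with a 32-byte
   frame that saves $31 and $fp and needs a frame pointer (the offsets
   are made up, but follow the high-to-low save order used above):

       addiu   $sp,$sp,-32
       sw      $31,28($sp)
       sw      $fp,24($sp)
       move    $fp,$sp

   Frames larger than MIPS_MAX_FIRST_STACK_STEP are allocated in two
   steps, with the register saves done after the first step.  */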
9173 /* Emit instructions to restore register REG from slot MEM. */
9175 static void
9176 mips_restore_reg (rtx reg, rtx mem)
9178 /* There's no MIPS16 instruction to load $31 directly. Load into
9179 $7 instead and adjust the return insn appropriately. */
9180 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
9181 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
9183 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
9185 /* Can't restore directly; move through a temporary. */
9186 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
9187 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
9189 else
9190 mips_emit_move (reg, mem);
9193 /* Emit any instructions needed before a return. */
9195 void
9196 mips_expand_before_return (void)
9198 /* When using a call-clobbered gp, we start out with unified call
9199 insns that include instructions to restore the gp. We then split
9200 these unified calls after reload. These split calls explicitly
9201 clobber gp, so there is no need to define
9202 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
9204 For consistency, we should also insert an explicit clobber of $28
9205 before return insns, so that the post-reload optimizers know that
9206 the register is not live on exit. */
9207 if (TARGET_CALL_CLOBBERED_GP)
9208 emit_clobber (pic_offset_table_rtx);
9211 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
9212 says which. */
9214 void
9215 mips_expand_epilogue (bool sibcall_p)
9217 const struct mips_frame_info *frame;
9218 HOST_WIDE_INT step1, step2;
9219 rtx base, target;
9221 if (!sibcall_p && mips_can_use_return_insn ())
9223 emit_jump_insn (gen_return ());
9224 return;
9227 /* In MIPS16 mode, if the return value should go into a floating-point
9228 register, we need to call a helper routine to copy it over. */
9229 if (mips16_cfun_returns_in_fpr_p ())
9230 mips16_copy_fpr_return_value ();
9232 /* Split the frame into two. STEP1 is the amount of stack we should
9233 deallocate before restoring the registers. STEP2 is the amount we
9234 should deallocate afterwards.
9236 Start off by assuming that no registers need to be restored. */
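/* Illustrative example (not from the original source): for normal-mode
   code with no frame pointer, saved registers, and a 0x9000-byte frame,
   and assuming MIPS_MAX_FIRST_STACK_STEP is 0x7ff0, STEP2 becomes
   MIN (0x9000, 0x7ff0) = 0x7ff0 and STEP1 the remaining 0x1010: $sp is
   first advanced by 0x1010, the registers are restored, and the final
   0x7ff0 is deallocated afterwards.  */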
9237 frame = &cfun->machine->frame;
9238 step1 = frame->total_size;
9239 step2 = 0;
9241 /* Work out which register holds the frame address. */
9242 if (!frame_pointer_needed)
9243 base = stack_pointer_rtx;
9244 else
9246 base = hard_frame_pointer_rtx;
9247 step1 -= frame->hard_frame_pointer_offset;
9250 /* If we need to restore registers, deallocate as much stack as
9251 possible in the second step without going out of range. */
9252 if ((frame->mask | frame->fmask) != 0)
9254 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
9255 step1 -= step2;
9258 /* Set TARGET to BASE + STEP1. */
9259 target = base;
9260 if (step1 > 0)
9262 rtx adjust;
9264 /* Get an rtx for STEP1 that we can add to BASE. */
9265 adjust = GEN_INT (step1);
9266 if (!SMALL_OPERAND (step1))
9268 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
9269 adjust = MIPS_EPILOGUE_TEMP (Pmode);
9272 /* Normal mode code can copy the result straight into $sp. */
9273 if (!TARGET_MIPS16)
9274 target = stack_pointer_rtx;
9276 emit_insn (gen_add3_insn (target, base, adjust));
9279 /* Copy TARGET into the stack pointer. */
9280 if (target != stack_pointer_rtx)
9281 mips_emit_move (stack_pointer_rtx, target);
9283 /* If we're using addressing macros, $gp is implicitly used by all
9284 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
9285 from the stack. */
9286 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
9287 emit_insn (gen_blockage ());
9289 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
9291 unsigned int regno, mask;
9292 HOST_WIDE_INT offset;
9293 rtx restore;
9295 /* Generate the restore instruction. */
9296 mask = frame->mask;
9297 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
9299 /* Restore any other registers manually. */
9300 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
9301 if (BITSET_P (mask, regno - GP_REG_FIRST))
9303 offset -= UNITS_PER_WORD;
9304 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
9307 /* Restore the remaining registers and deallocate the final bit
9308 of the frame. */
9309 emit_insn (restore);
9311 else
9313 /* Restore the registers. */
9314 mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
9316 /* Deallocate the final bit of the frame. */
9317 if (step2 > 0)
9318 emit_insn (gen_add3_insn (stack_pointer_rtx,
9319 stack_pointer_rtx,
9320 GEN_INT (step2)));
9323 /* Add in the __builtin_eh_return stack adjustment. We need to
9324 use a temporary in MIPS16 code. */
9325 if (crtl->calls_eh_return)
9327 if (TARGET_MIPS16)
9329 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
9330 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
9331 MIPS_EPILOGUE_TEMP (Pmode),
9332 EH_RETURN_STACKADJ_RTX));
9333 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
9335 else
9336 emit_insn (gen_add3_insn (stack_pointer_rtx,
9337 stack_pointer_rtx,
9338 EH_RETURN_STACKADJ_RTX));
9341 if (!sibcall_p)
9343 unsigned int regno;
9345 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
9346 path will restore the return address into $7 rather than $31. */
9347 if (TARGET_MIPS16
9348 && !GENERATE_MIPS16E_SAVE_RESTORE
9349 && BITSET_P (frame->mask, 31))
9350 regno = GP_REG_FIRST + 7;
9351 else
9352 regno = GP_REG_FIRST + 31;
9353 mips_expand_before_return ();
9354 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
9358 /* Return nonzero if this function is known to have a null epilogue.
9359 This allows the optimizer to omit jumps to jumps if no stack
9360 was created. */
9362 bool
9363 mips_can_use_return_insn (void)
9365 if (!reload_completed)
9366 return false;
9368 if (crtl->profile)
9369 return false;
9371 /* In MIPS16 mode, a function that returns a floating-point value
9372 needs to arrange to copy the return value into the floating-point
9373 registers. */
9374 if (mips16_cfun_returns_in_fpr_p ())
9375 return false;
9377 return cfun->machine->frame.total_size == 0;
9380 /* Return true if register REGNO can store a value of mode MODE.
9381 The result of this function is cached in mips_hard_regno_mode_ok. */
9383 static bool
9384 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
9386 unsigned int size;
9387 enum mode_class mclass;
9389 if (mode == CCV2mode)
9390 return (ISA_HAS_8CC
9391 && ST_REG_P (regno)
9392 && (regno - ST_REG_FIRST) % 2 == 0);
9394 if (mode == CCV4mode)
9395 return (ISA_HAS_8CC
9396 && ST_REG_P (regno)
9397 && (regno - ST_REG_FIRST) % 4 == 0);
9399 if (mode == CCmode)
9401 if (!ISA_HAS_8CC)
9402 return regno == FPSW_REGNUM;
9404 return (ST_REG_P (regno)
9405 || GP_REG_P (regno)
9406 || FP_REG_P (regno));
9409 size = GET_MODE_SIZE (mode);
9410 mclass = GET_MODE_CLASS (mode);
9412 if (GP_REG_P (regno))
9413 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
9415 if (FP_REG_P (regno)
9416 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
9417 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
9419 /* Allow TFmode for CCmode reloads. */
9420 if (mode == TFmode && ISA_HAS_8CC)
9421 return true;
9423 /* Allow 64-bit vector modes for Loongson-2E/2F. */
9424 if (TARGET_LOONGSON_VECTORS
9425 && (mode == V2SImode
9426 || mode == V4HImode
9427 || mode == V8QImode
9428 || mode == DImode))
9429 return true;
9431 if (mclass == MODE_FLOAT
9432 || mclass == MODE_COMPLEX_FLOAT
9433 || mclass == MODE_VECTOR_FLOAT)
9434 return size <= UNITS_PER_FPVALUE;
9436 /* Allow integer modes that fit into a single register. We need
9437 to put integers into FPRs when using instructions like CVT
9438 and TRUNC. There's no point allowing sizes smaller than a word,
9439 because the FPU has no appropriate load/store instructions. */
9440 if (mclass == MODE_INT)
9441 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
9444 if (ACC_REG_P (regno)
9445 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
9447 if (MD_REG_P (regno))
9449 /* After a multiplication or division, clobbering HI makes
9450 the value of LO unpredictable, and vice versa. This means
9451 that, for all interesting cases, HI and LO are effectively
9452 a single register.
9454 We model this by requiring that any value that uses HI
9455 also uses LO. */
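	 /* For example (an illustration of the check below): a single-word
	    value is accepted only in LO (LO_REGNUM), while a double-word
	    value must be placed at MD_REG_FIRST so that it covers both HI
	    and LO; asking for HI alone is rejected.  */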
9456 if (size <= UNITS_PER_WORD * 2)
9457 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
9459 else
9461 /* DSP accumulators do not have the same restrictions as
9462 HI and LO, so we can treat them as normal doubleword
9463 registers. */
9464 if (size <= UNITS_PER_WORD)
9465 return true;
9467 if (size <= UNITS_PER_WORD * 2
9468 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
9469 return true;
9473 if (ALL_COP_REG_P (regno))
9474 return mclass == MODE_INT && size <= UNITS_PER_WORD;
9476 if (regno == GOT_VERSION_REGNUM)
9477 return mode == SImode;
9479 return false;
9482 /* Implement HARD_REGNO_NREGS. */
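/* For example (illustrative): a DFmode value occupies two FPRs when
   UNITS_PER_FPREG is 4 (32-bit FPRs) and a single FPR when it is 8
   (64-bit FPRs); a CCmode value always takes one 4-byte ST register.  */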
9484 unsigned int
9485 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9487 if (ST_REG_P (regno))
9488 /* The size of FP status registers is always 4, because they only hold
9489 CCmode values, and CCmode is always considered to be 4 bytes wide. */
9490 return (GET_MODE_SIZE (mode) + 3) / 4;
9492 if (FP_REG_P (regno))
9493 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9495 /* All other registers are word-sized. */
9496 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9499 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
9500 in mips_hard_regno_nregs. */
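/* An illustrative evaluation (not part of the original comments): for
   ALL_REGS and DFmode, SIZE below ends up as MIN (4, UNITS_PER_FPREG,
   UNITS_PER_WORD) = 4 on any configuration with at least 4-byte FPRs and
   GPRs, so the result is (8 + 3) / 4 = 2 registers.  */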
9502 int
9503 mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
9505 int size;
9506 HARD_REG_SET left;
9508 size = 0x8000;
9509 COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
9510 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
9512 size = MIN (size, 4);
9513 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
9515 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
9517 size = MIN (size, UNITS_PER_FPREG);
9518 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
9520 if (!hard_reg_set_empty_p (left))
9521 size = MIN (size, UNITS_PER_WORD);
9522 return (GET_MODE_SIZE (mode) + size - 1) / size;
9525 /* Implement CANNOT_CHANGE_MODE_CLASS. */
9527 bool
9528 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
9529 enum machine_mode to ATTRIBUTE_UNUSED,
9530 enum reg_class rclass)
9532 /* There are several problems with changing the modes of values
9533 in floating-point registers:
9535 - When a multi-word value is stored in paired floating-point
9536 registers, the first register always holds the low word.
9537 We therefore can't allow FPRs to change between single-word
9538 and multi-word modes on big-endian targets.
9540 - GCC assumes that each word of a multiword register can be accessed
9541 individually using SUBREGs. This is not true for floating-point
9542 registers if they are bigger than a word.
9544 - Loading a 32-bit value into a 64-bit floating-point register
9545 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9546 We can't allow FPRs to change from SImode to a wider mode on
9547 64-bit targets.
9549 - If the FPU has already interpreted a value in one format, we must
9550 not ask it to treat the value as having a different format.
9552 We therefore disallow all mode changes involving FPRs. */
9553 return reg_classes_intersect_p (FP_REGS, rclass);
9556 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
9558 static bool
9559 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
9561 switch (mode)
9563 case SFmode:
9564 return TARGET_HARD_FLOAT;
9566 case DFmode:
9567 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
9569 case V2SFmode:
9570 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
9572 default:
9573 return false;
9577 /* Implement MODES_TIEABLE_P. */
9579 bool
9580 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9582 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
9583 prefer to put one of them in FPRs. */
9584 return (mode1 == mode2
9585 || (!mips_mode_ok_for_mov_fmt_p (mode1)
9586 && !mips_mode_ok_for_mov_fmt_p (mode2)));
9589 /* Implement PREFERRED_RELOAD_CLASS. */
9591 enum reg_class
9592 mips_preferred_reload_class (rtx x, enum reg_class rclass)
9594 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
9595 return LEA_REGS;
9597 if (reg_class_subset_p (FP_REGS, rclass)
9598 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
9599 return FP_REGS;
9601 if (reg_class_subset_p (GR_REGS, rclass))
9602 rclass = GR_REGS;
9604 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
9605 rclass = M16_REGS;
9607 return rclass;
9610 /* Implement REGISTER_MOVE_COST. */
9612 int
9613 mips_register_move_cost (enum machine_mode mode,
9614 enum reg_class to, enum reg_class from)
9616 if (TARGET_MIPS16)
9618 /* ??? We cannot move general registers into HI and LO because
9619 MIPS16 has no MTHI and MTLO instructions. Make the cost of
9620 moves in the opposite direction just as high, which stops the
9621 register allocators from using HI and LO for pseudos. */
9622 if (reg_class_subset_p (from, GENERAL_REGS)
9623 && reg_class_subset_p (to, GENERAL_REGS))
9625 if (reg_class_subset_p (from, M16_REGS)
9626 || reg_class_subset_p (to, M16_REGS))
9627 return 2;
9628 /* Two MOVEs. */
9629 return 4;
9632 else if (reg_class_subset_p (from, GENERAL_REGS))
9634 if (reg_class_subset_p (to, GENERAL_REGS))
9635 return 2;
9636 if (reg_class_subset_p (to, FP_REGS))
9637 return 4;
9638 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
9639 return 5;
9640 if (reg_class_subset_p (to, ACC_REGS))
9641 return 6;
9643 else if (reg_class_subset_p (to, GENERAL_REGS))
9645 if (reg_class_subset_p (from, FP_REGS))
9646 return 4;
9647 if (reg_class_subset_p (from, ST_REGS))
9648 /* LUI followed by MOVF. */
9649 return 4;
9650 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
9651 return 5;
9652 if (reg_class_subset_p (from, ACC_REGS))
9653 return 6;
9655 else if (reg_class_subset_p (from, FP_REGS))
9657 if (reg_class_subset_p (to, FP_REGS)
9658 && mips_mode_ok_for_mov_fmt_p (mode))
9659 return 4;
9660 if (reg_class_subset_p (to, ST_REGS))
9661 /* An expensive sequence. */
9662 return 8;
9665 return 12;
9668 /* Return the register class required for a secondary register when
9669 copying between one of the registers in RCLASS and value X, which
9670 has mode MODE. X is the source of the move if IN_P, otherwise it
9671 is the destination. Return NO_REGS if no secondary register is
9672 needed. */
9674 enum reg_class
9675 mips_secondary_reload_class (enum reg_class rclass,
9676 enum machine_mode mode, rtx x, bool in_p)
9678 int regno;
9680 /* If X is a constant that cannot be loaded into $25, it must be loaded
9681 into some other GPR. No other register class allows a direct move. */
9682 if (mips_dangerous_for_la25_p (x))
9683 return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
9685 regno = true_regnum (x);
9686 if (TARGET_MIPS16)
9688 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9689 if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
9690 return M16_REGS;
9692 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9693 if (in_p ? reg_classes_intersect_p (rclass, ACC_REGS) : ACC_REG_P (regno))
9694 return M16_REGS;
9696 return NO_REGS;
9699 /* Copying from accumulator registers to anywhere other than a general
9700 register requires a temporary general register. */
9701 if (reg_class_subset_p (rclass, ACC_REGS))
9702 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9703 if (ACC_REG_P (regno))
9704 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9706 /* We can only copy a value to a condition code register from a
9707 floating-point register, and even then we require a scratch
9708 floating-point register. We can only copy a value out of a
9709 condition-code register into a general register. */
9710 if (reg_class_subset_p (rclass, ST_REGS))
9712 if (in_p)
9713 return FP_REGS;
9714 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9716 if (ST_REG_P (regno))
9718 if (!in_p)
9719 return FP_REGS;
9720 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9723 if (reg_class_subset_p (rclass, FP_REGS))
9725 if (MEM_P (x)
9726 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9727 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9728 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9729 return NO_REGS;
9731 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9732 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9733 return NO_REGS;
9735 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9736 /* We can force the constant to memory and use lwc1
9737 and ldc1. As above, we will use pairs of lwc1s if
9738 ldc1 is not supported. */
9739 return NO_REGS;
9741 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9742 /* In this case we can use mov.fmt. */
9743 return NO_REGS;
9745 /* Otherwise, we need to reload through an integer register. */
9746 return GR_REGS;
9748 if (FP_REG_P (regno))
9749 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
9751 return NO_REGS;
9754 /* Implement TARGET_MODE_REP_EXTENDED. */
9756 static int
9757 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9759 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
9760 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9761 return SIGN_EXTEND;
9763 return UNKNOWN;
9766 /* Implement TARGET_VALID_POINTER_MODE. */
9768 static bool
9769 mips_valid_pointer_mode (enum machine_mode mode)
9771 return mode == SImode || (TARGET_64BIT && mode == DImode);
9774 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
9776 static bool
9777 mips_vector_mode_supported_p (enum machine_mode mode)
9779 switch (mode)
9781 case V2SFmode:
9782 return TARGET_PAIRED_SINGLE_FLOAT;
9784 case V2HImode:
9785 case V4QImode:
9786 case V2HQmode:
9787 case V2UHQmode:
9788 case V2HAmode:
9789 case V2UHAmode:
9790 case V4QQmode:
9791 case V4UQQmode:
9792 return TARGET_DSP;
9794 case V2SImode:
9795 case V4HImode:
9796 case V8QImode:
9797 return TARGET_LOONGSON_VECTORS;
9799 default:
9800 return false;
9804 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9806 static bool
9807 mips_scalar_mode_supported_p (enum machine_mode mode)
9809 if (ALL_FIXED_POINT_MODE_P (mode)
9810 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9811 return true;
9813 return default_scalar_mode_supported_p (mode);
9816 /* Implement TARGET_INIT_LIBFUNCS. */
9818 #include "config/gofast.h"
9820 static void
9821 mips_init_libfuncs (void)
9823 if (TARGET_FIX_VR4120)
9825 /* Register the special divsi3 and modsi3 functions needed to work
9826 around VR4120 division errata. */
9827 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9828 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9831 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9833 /* Register the MIPS16 -mhard-float stubs. */
9834 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9835 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9836 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9837 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9839 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9840 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9841 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9842 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9843 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9844 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9845 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9847 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9848 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9849 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9851 if (TARGET_DOUBLE_FLOAT)
9853 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9854 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9855 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9856 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9858 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9859 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9860 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9861 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9862 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9863 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9864 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9866 set_conv_libfunc (sext_optab, DFmode, SFmode,
9867 "__mips16_extendsfdf2");
9868 set_conv_libfunc (trunc_optab, SFmode, DFmode,
9869 "__mips16_truncdfsf2");
9870 set_conv_libfunc (sfix_optab, SImode, DFmode,
9871 "__mips16_fix_truncdfsi");
9872 set_conv_libfunc (sfloat_optab, DFmode, SImode,
9873 "__mips16_floatsidf");
9874 set_conv_libfunc (ufloat_optab, DFmode, SImode,
9875 "__mips16_floatunsidf");
9878 else
9879 /* Register the gofast functions if selected using --enable-gofast. */
9880 gofast_maybe_init_libfuncs ();
9882 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
9883 on an external non-MIPS16 routine to implement __sync_synchronize. */
9884 if (TARGET_MIPS16)
9885 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
9888 /* Return the length of INSN. LENGTH is the initial length computed by
9889 attributes in the machine-description file. */
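/* For instance (illustrative): an instruction whose .md length is 4 but
   which is followed by a HILO hazard is counted as 4 + 8 = 12 bytes,
   leaving room for the two nops that avoid the hazard.  */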
9891 int
9892 mips_adjust_insn_length (rtx insn, int length)
9894 /* An unconditional jump has an unfilled delay slot if it is not part
9895 of a sequence. A conditional jump normally has a delay slot, but
9896 does not on MIPS16. */
9897 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9898 length += 4;
9900 /* See how many nops might be needed to avoid hardware hazards. */
9901 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9902 switch (get_attr_hazard (insn))
9904 case HAZARD_NONE:
9905 break;
9907 case HAZARD_DELAY:
9908 length += 4;
9909 break;
9911 case HAZARD_HILO:
9912 length += 8;
9913 break;
9916 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
9917 the .md file length attributes are 4-based for both modes.
9918 Adjust the MIPS16 ones here. */
9919 if (TARGET_MIPS16)
9920 length /= 2;
9922 return length;
9925 /* Return an asm sequence to start a noat block and load the address
9926 of a label into $1. */
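/* For example (roughly, for the default ABI without a load delay slot,
   and assuming '%@' prints the assembler temporary $1, '%[' emits
   ".set noat" and '%+' prints the GOT base register):

	.set	noat
	lw	$1,%got(label)($gp)
	addiu	$1,$1,%lo(label)  */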
9928 const char *
9929 mips_output_load_label (void)
9931 if (TARGET_EXPLICIT_RELOCS)
9932 switch (mips_abi)
9934 case ABI_N32:
9935 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9937 case ABI_64:
9938 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9940 default:
9941 if (ISA_HAS_LOAD_DELAY)
9942 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9943 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9945 else
9947 if (Pmode == DImode)
9948 return "%[dla\t%@,%0";
9949 else
9950 return "%[la\t%@,%0";
9954 /* Return the assembly code for INSN, which has the operands given by
9955 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9956 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9957 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9958 version of BRANCH_IF_TRUE. */
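/* Sketch of the out-of-range case handled below (illustrative only;
   delay slots shown as explicit nops):

	b<inverse-cond>	...,1f
	nop
	j	<target>
	nop
   1:  */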
9960 const char *
9961 mips_output_conditional_branch (rtx insn, rtx *operands,
9962 const char *branch_if_true,
9963 const char *branch_if_false)
9965 unsigned int length;
9966 rtx taken, not_taken;
9968 length = get_attr_length (insn);
9969 if (length <= 8)
9971 /* Just a simple conditional branch. */
9972 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9973 return branch_if_true;
9976 /* Generate a reversed branch around a direct jump. This fallback does
9977 not use branch-likely instructions. */
9978 mips_branch_likely = false;
9979 not_taken = gen_label_rtx ();
9980 taken = operands[1];
9982 /* Generate the reversed branch to NOT_TAKEN. */
9983 operands[1] = not_taken;
9984 output_asm_insn (branch_if_false, operands);
9986 /* If INSN has a delay slot, we must provide delay slots for both the
9987 branch to NOT_TAKEN and the conditional jump. We must also ensure
9988 that INSN's delay slot is executed in the appropriate cases. */
9989 if (final_sequence)
9991 /* This first delay slot will always be executed, so use INSN's
9992 delay slot if it is not annulled. */
9993 if (!INSN_ANNULLED_BRANCH_P (insn))
9995 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9996 asm_out_file, optimize, 1, NULL);
9997 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9999 else
10000 output_asm_insn ("nop", 0);
10001 fprintf (asm_out_file, "\n");
10004 /* Output the unconditional branch to TAKEN. */
10005 if (length <= 16)
10006 output_asm_insn ("j\t%0%/", &taken);
10007 else
10009 output_asm_insn (mips_output_load_label (), &taken);
10010 output_asm_insn ("jr\t%@%]%/", 0);
10013 /* Now deal with its delay slot; see above. */
10014 if (final_sequence)
10016 /* This delay slot will only be executed if the branch is taken.
10017 Use INSN's delay slot if it is annulled. */
10018 if (INSN_ANNULLED_BRANCH_P (insn))
10020 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10021 asm_out_file, optimize, 1, NULL);
10022 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10024 else
10025 output_asm_insn ("nop", 0);
10026 fprintf (asm_out_file, "\n");
10029 /* Output NOT_TAKEN. */
10030 targetm.asm_out.internal_label (asm_out_file, "L",
10031 CODE_LABEL_NUMBER (not_taken));
10032 return "";
10035 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10036 if some ordering condition is true. The condition is given by
10037 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10038 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10039 its second is always zero. */
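/* For example (restating the cases below): GTU against zero becomes
   "bne %2,$0,<target>", while GEU against zero is always true and so
   becomes the unconditional "beq $0,$0,<target>".  */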
10041 const char *
10042 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10044 const char *branch[2];
10046 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10047 Make BRANCH[0] branch on the inverse condition. */
10048 switch (GET_CODE (operands[0]))
10050 /* These cases are equivalent to comparisons against zero. */
10051 case LEU:
10052 inverted_p = !inverted_p;
10053 /* Fall through. */
10054 case GTU:
10055 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10056 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10057 break;
10059 /* These cases are always true or always false. */
10060 case LTU:
10061 inverted_p = !inverted_p;
10062 /* Fall through. */
10063 case GEU:
10064 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10065 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10066 break;
10068 default:
10069 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10070 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10071 break;
10073 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10076 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
10077 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
10079 When working around R4000 and R4400 errata, we need to make sure that
10080 the division is not immediately followed by a shift[1][2]. We also
10081 need to stop the division from being put into a branch delay slot[3].
10082 The easiest way to avoid both problems is to add a nop after the
10083 division. When a divide-by-zero check is needed, this nop can be
10084 used to fill the branch delay slot.
10086 [1] If a double-word or a variable shift executes immediately
10087 after starting an integer division, the shift may give an
10088 incorrect result. See quotations of errata #16 and #28 from
10089 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10090 in mips.md for details.
10092 [2] A similar bug to [1] exists for all revisions of the
10093 R4000 and the R4400 when run in an MC configuration.
10094 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10096 "19. In this following sequence:
10098 ddiv (or ddivu or div or divu)
10099 dsll32 (or dsrl32, dsra32)
10101 if an MPT stall occurs, while the divide is slipping the cpu
10102 pipeline, then the following double shift would end up with an
10103 incorrect result.
10105 Workaround: The compiler needs to avoid generating any
10106 sequence with divide followed by extended double shift."
10108 This erratum is also present in "MIPS R4400MC Errata, Processor
10109 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10110 & 3.0" as errata #10 and #4, respectively.
10112 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10113 (also valid for MIPS R4000MC processors):
10115 "52. R4000SC: This bug does not apply for the R4000PC.
10117 There are two flavors of this bug:
10119 1) If the instruction just after divide takes an RF exception
10120 (tlb-refill, tlb-invalid) and gets an instruction cache
10121 miss (both primary and secondary) and the line which is
10122 currently in secondary cache at this index had the first
10123 data word, where the bits 5..2 are set, then R4000 would
10124 get a wrong result for the div.
10128 div r8, r9
10129 ------------------- # end-of page. -tlb-refill
10133 div r8, r9
10134 ------------------- # end-of page. -tlb-invalid
10137 2) If the divide is in the taken branch delay slot, where the
10138 target takes RF exception and gets an I-cache miss for the
10139 exception vector or where I-cache miss occurs for the
10140 target address, under the above mentioned scenarios, the
10141 div would get wrong results.
10144 j r2 # to next page mapped or unmapped
10145 div r8,r9 # this bug would be there as long
10146 # as there is an ICache miss and
10147 nop # the "data pattern" is present
10150 beq r0, r0, NextPage # to Next page
10151 div r8,r9
10154 This bug is present for div, divu, ddiv, and ddivu
10155 instructions.
10157 Workaround: For item 1), OS could make sure that the next page
10158 after the divide instruction is also mapped. For item 2), the
10159 compiler could make sure that the divide instruction is not in
10160 the branch delay slot."
10162 These processors have PRId values of 0x00004220 and 0x00004300 for
10163 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
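/* For example (illustrative): with the R4000 workaround and divide traps
   enabled, the code below emits the division, then a nop (keeping the
   divide out of delay slots and away from shifts), and finally
   "teq %2,%.,7" to trap if the divisor is zero.  */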
10165 const char *
10166 mips_output_division (const char *division, rtx *operands)
10168 const char *s;
10170 s = division;
10171 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10173 output_asm_insn (s, operands);
10174 s = "nop";
10176 if (TARGET_CHECK_ZERO_DIV)
10178 if (TARGET_MIPS16)
10180 output_asm_insn (s, operands);
10181 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10183 else if (GENERATE_DIVIDE_TRAPS)
10185 output_asm_insn (s, operands);
10186 s = "teq\t%2,%.,7";
10188 else
10190 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10191 output_asm_insn (s, operands);
10192 s = "break\t7%)\n1:";
10195 return s;
10198 /* Return true if IN_INSN is a multiply-add or multiply-subtract
10199 instruction and if OUT_INSN assigns to the accumulator operand. */
10201 bool
10202 mips_linked_madd_p (rtx out_insn, rtx in_insn)
10204 rtx x;
10206 x = single_set (in_insn);
10207 if (x == 0)
10208 return false;
10210 x = SET_SRC (x);
10212 if (GET_CODE (x) == PLUS
10213 && GET_CODE (XEXP (x, 0)) == MULT
10214 && reg_set_p (XEXP (x, 1), out_insn))
10215 return true;
10217 if (GET_CODE (x) == MINUS
10218 && GET_CODE (XEXP (x, 1)) == MULT
10219 && reg_set_p (XEXP (x, 0), out_insn))
10220 return true;
10222 return false;
10225 /* True if the dependency between OUT_INSN and IN_INSN is on the store
10226 data rather than the address. We need this because the cprestore
10227 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
10228 which causes the default routine to abort. We just return false
10229 for that case. */
10231 bool
10232 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10234 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10235 return false;
10237 return !store_data_bypass_p (out_insn, in_insn);
10241 /* Variables and flags used in scheduler hooks when tuning for
10242 Loongson 2E/2F. */
10243 static struct
10245 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
10246 strategy. */
10248 /* If true, then the next ALU1/2 instruction will go to ALU1. */
10249 bool alu1_turn_p;
10251 /* If true, then the next FALU1/2 instruction will go to FALU1. */
10252 bool falu1_turn_p;
10254 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
10255 int alu1_core_unit_code;
10256 int alu2_core_unit_code;
10257 int falu1_core_unit_code;
10258 int falu2_core_unit_code;
10260 /* True if the current cycle has a multi instruction.
10261 This flag is used in mips_ls2_dfa_post_advance_cycle. */
10262 bool cycle_has_multi_p;
10264 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
10265 These are used in mips_ls2_dfa_post_advance_cycle to initialize
10266 DFA state.
10267 E.g., when alu1_turn_enabled_insn is issued it makes the next ALU1/2
10268 instruction go to ALU1. */
10269 rtx alu1_turn_enabled_insn;
10270 rtx alu2_turn_enabled_insn;
10271 rtx falu1_turn_enabled_insn;
10272 rtx falu2_turn_enabled_insn;
10273 } mips_ls2;
10275 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10276 dependencies have no cost, except on the 20Kc where output-dependence
10277 is treated like input-dependence. */
10279 static int
10280 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10281 rtx dep ATTRIBUTE_UNUSED, int cost)
10283 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10284 && TUNE_20KC)
10285 return cost;
10286 if (REG_NOTE_KIND (link) != 0)
10287 return 0;
10288 return cost;
10291 /* Return the number of instructions that can be issued per cycle. */
10293 static int
10294 mips_issue_rate (void)
10296 switch (mips_tune)
10298 case PROCESSOR_74KC:
10299 case PROCESSOR_74KF2_1:
10300 case PROCESSOR_74KF1_1:
10301 case PROCESSOR_74KF3_2:
10302 /* The 74k is not strictly a quad-issue CPU, but can be seen as one
10303 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10304 but in reality only a maximum of 3 insns can be issued as
10305 floating-point loads and stores also require a slot in the
10306 AGEN pipe. */
10307 return 4;
10309 case PROCESSOR_20KC:
10310 case PROCESSOR_R4130:
10311 case PROCESSOR_R5400:
10312 case PROCESSOR_R5500:
10313 case PROCESSOR_R7000:
10314 case PROCESSOR_R9000:
10315 return 2;
10317 case PROCESSOR_SB1:
10318 case PROCESSOR_SB1A:
10319 /* This is actually 4, but we get better performance if we claim 3.
10320 This is partly because of unwanted speculative code motion with the
10321 larger number, and partly because in most common cases we can't
10322 reach the theoretical max of 4. */
10323 return 3;
10325 case PROCESSOR_LOONGSON_2E:
10326 case PROCESSOR_LOONGSON_2F:
10327 return 4;
10329 default:
10330 return 1;
10334 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
10336 static void
10337 mips_ls2_init_dfa_post_cycle_insn (void)
10339 start_sequence ();
10340 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
10341 mips_ls2.alu1_turn_enabled_insn = get_insns ();
10342 end_sequence ();
10344 start_sequence ();
10345 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
10346 mips_ls2.alu2_turn_enabled_insn = get_insns ();
10347 end_sequence ();
10349 start_sequence ();
10350 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
10351 mips_ls2.falu1_turn_enabled_insn = get_insns ();
10352 end_sequence ();
10354 start_sequence ();
10355 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
10356 mips_ls2.falu2_turn_enabled_insn = get_insns ();
10357 end_sequence ();
10359 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
10360 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
10361 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
10362 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
10365 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
10366 Init data used in mips_dfa_post_advance_cycle. */
10368 static void
10369 mips_init_dfa_post_cycle_insn (void)
10371 if (TUNE_LOONGSON_2EF)
10372 mips_ls2_init_dfa_post_cycle_insn ();
10375 /* Initialize STATE when scheduling for Loongson 2E/2F.
10376 Support round-robin dispatch scheme by enabling only one of
10377 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
10378 respectively. */
10380 static void
10381 mips_ls2_dfa_post_advance_cycle (state_t state)
10383 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
10385 /* Though there are no non-pipelined ALU1 insns,
10386 we can get an instruction of type 'multi' before reload. */
10387 gcc_assert (mips_ls2.cycle_has_multi_p);
10388 mips_ls2.alu1_turn_p = false;
10391 mips_ls2.cycle_has_multi_p = false;
10393 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
10394 /* We have a non-pipelined alu instruction in the core,
10395 adjust round-robin counter. */
10396 mips_ls2.alu1_turn_p = true;
10398 if (mips_ls2.alu1_turn_p)
10400 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
10401 gcc_unreachable ();
10403 else
10405 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
10406 gcc_unreachable ();
10409 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
10411 /* There are no non-pipelined FALU1 insns. */
10412 gcc_unreachable ();
10413 mips_ls2.falu1_turn_p = false;
10416 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
10417 /* We have a non-pipelined falu instruction in the core,
10418 adjust round-robin counter. */
10419 mips_ls2.falu1_turn_p = true;
10421 if (mips_ls2.falu1_turn_p)
10423 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
10424 gcc_unreachable ();
10426 else
10428 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
10429 gcc_unreachable ();
10433 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
10434 This hook is being called at the start of each cycle. */
10436 static void
10437 mips_dfa_post_advance_cycle (void)
10439 if (TUNE_LOONGSON_2EF)
10440 mips_ls2_dfa_post_advance_cycle (curr_state);
10443 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10444 be as wide as the scheduling freedom in the DFA. */
10446 static int
10447 mips_multipass_dfa_lookahead (void)
10449 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10450 if (TUNE_SB1)
10451 return 4;
10453 if (TUNE_LOONGSON_2EF)
10454 return 4;
10456 return 0;
10459 /* Remove the instruction at index LOWER from ready queue READY and
10460 reinsert it in front of the instruction at index HIGHER. LOWER must
10461 be <= HIGHER. */
10463 static void
10464 mips_promote_ready (rtx *ready, int lower, int higher)
10466 rtx new_head;
10467 int i;
10469 new_head = ready[lower];
10470 for (i = lower; i < higher; i++)
10471 ready[i] = ready[i + 1];
10472 ready[i] = new_head;
10475 /* If the priority of the instruction at POS2 in the ready queue READY
10476 is within LIMIT units of that of the instruction at POS1, swap the
10477 instructions if POS2 is not already less than POS1. */
10479 static void
10480 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
10482 if (pos1 < pos2
10483 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
10485 rtx temp;
10487 temp = ready[pos1];
10488 ready[pos1] = ready[pos2];
10489 ready[pos2] = temp;
10493 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10494 that may clobber hi or lo. */
10495 static rtx mips_macc_chains_last_hilo;
10497 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10498 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10500 static void
10501 mips_macc_chains_record (rtx insn)
10503 if (get_attr_may_clobber_hilo (insn))
10504 mips_macc_chains_last_hilo = insn;
10507 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10508 has NREADY elements, looking for a multiply-add or multiply-subtract
10509 instruction that is cumulative with mips_macc_chains_last_hilo.
10510 If there is one, promote it ahead of anything else that might
10511 clobber hi or lo. */
10513 static void
10514 mips_macc_chains_reorder (rtx *ready, int nready)
10516 int i, j;
10518 if (mips_macc_chains_last_hilo != 0)
10519 for (i = nready - 1; i >= 0; i--)
10520 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10522 for (j = nready - 1; j > i; j--)
10523 if (recog_memoized (ready[j]) >= 0
10524 && get_attr_may_clobber_hilo (ready[j]))
10526 mips_promote_ready (ready, i, j);
10527 break;
10529 break;
10533 /* The last instruction to be scheduled. */
10534 static rtx vr4130_last_insn;
10536 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10537 points to an rtx that is initially an instruction. Nullify the rtx
10538 if the instruction uses the value of register X. */
10540 static void
10541 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
10542 void *data)
10544 rtx *insn_ptr;
10546 insn_ptr = (rtx *) data;
10547 if (REG_P (x)
10548 && *insn_ptr != 0
10549 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10550 *insn_ptr = 0;
10553 /* Return true if there is true register dependence between vr4130_last_insn
10554 and INSN. */
10556 static bool
10557 vr4130_true_reg_dependence_p (rtx insn)
10559 note_stores (PATTERN (vr4130_last_insn),
10560 vr4130_true_reg_dependence_p_1, &insn);
10561 return insn == 0;
10564 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10565 the ready queue and that INSN2 is the instruction after it, return
10566 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10567 in which INSN1 and INSN2 can probably issue in parallel, but for
10568 which (INSN2, INSN1) should be less sensitive to instruction
10569 alignment than (INSN1, INSN2). See 4130.md for more details. */
10571 static bool
10572 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10574 sd_iterator_def sd_it;
10575 dep_t dep;
10577 /* Check for the following case:
10579 1) there is some other instruction X with an anti dependence on INSN1;
10580 2) X has a higher priority than INSN2; and
10581 3) X is an arithmetic instruction (and thus has no unit restrictions).
10583 If INSN1 is the last instruction blocking X, it would be better to
10584 choose (INSN1, X) over (INSN2, INSN1). */
10585 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10586 if (DEP_TYPE (dep) == REG_DEP_ANTI
10587 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10588 && recog_memoized (DEP_CON (dep)) >= 0
10589 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10590 return false;
10592 if (vr4130_last_insn != 0
10593 && recog_memoized (insn1) >= 0
10594 && recog_memoized (insn2) >= 0)
10596 /* See whether INSN1 and INSN2 use different execution units,
10597 or if they are both ALU-type instructions. If so, they can
10598 probably execute in parallel. */
10599 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10600 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10601 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10603 /* If only one of the instructions has a dependence on
10604 vr4130_last_insn, prefer to schedule the other one first. */
10605 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
10606 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
10607 if (dep1_p != dep2_p)
10608 return dep1_p;
10610 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10611 is not an ALU-type instruction and if INSN1 uses the same
10612 execution unit. (Note that if this condition holds, we already
10613 know that INSN2 uses a different execution unit.) */
10614 if (class1 != VR4130_CLASS_ALU
10615 && recog_memoized (vr4130_last_insn) >= 0
10616 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10617 return true;
10620 return false;
10623 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10624 queue with at least two instructions. Swap the first two if
10625 vr4130_swap_insns_p says that it could be worthwhile. */
10627 static void
10628 vr4130_reorder (rtx *ready, int nready)
10630 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10631 mips_promote_ready (ready, nready - 2, nready - 1);
10634 /* Record whether last 74k AGEN instruction was a load or store. */
10635 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
10637 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
10638 resets to TYPE_UNKNOWN state. */
10640 static void
10641 mips_74k_agen_init (rtx insn)
10643 if (!insn || !NONJUMP_INSN_P (insn))
10644 mips_last_74k_agen_insn = TYPE_UNKNOWN;
10645 else
10647 enum attr_type type = get_attr_type (insn);
10648 if (type == TYPE_LOAD || type == TYPE_STORE)
10649 mips_last_74k_agen_insn = type;
10653 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
10654 loads to be grouped together, and multiple stores to be grouped
10655 together. Swap things around in the ready queue to make this happen. */
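/* For example (restating the policy below): if the last AGEN instruction
   was a load and the ready queue has a store ahead of a load of similar
   priority (within 4 units), the load is swapped to the front so that
   consecutive loads issue together.  */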
10657 static void
10658 mips_74k_agen_reorder (rtx *ready, int nready)
10660 int i;
10661 int store_pos, load_pos;
10663 store_pos = -1;
10664 load_pos = -1;
10666 for (i = nready - 1; i >= 0; i--)
10668 rtx insn = ready[i];
10669 if (USEFUL_INSN_P (insn))
10670 switch (get_attr_type (insn))
10672 case TYPE_STORE:
10673 if (store_pos == -1)
10674 store_pos = i;
10675 break;
10677 case TYPE_LOAD:
10678 if (load_pos == -1)
10679 load_pos = i;
10680 break;
10682 default:
10683 break;
10687 if (load_pos == -1 || store_pos == -1)
10688 return;
10690 switch (mips_last_74k_agen_insn)
10692 case TYPE_UNKNOWN:
10693 /* Prefer to schedule loads since they have a higher latency. */
10694 case TYPE_LOAD:
10695 /* Swap loads to the front of the queue. */
10696 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
10697 break;
10698 case TYPE_STORE:
10699 /* Swap stores to the front of the queue. */
10700 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
10701 break;
10702 default:
10703 break;
10707 /* Implement TARGET_SCHED_INIT. */
10709 static void
10710 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10711 int max_ready ATTRIBUTE_UNUSED)
10713 mips_macc_chains_last_hilo = 0;
10714 vr4130_last_insn = 0;
10715 mips_74k_agen_init (NULL_RTX);
10717 /* When scheduling for Loongson2, branch instructions go to ALU1,
10718 therefore a basic block is most likely to start with the round-robin
10719 counter pointing to ALU2. */
10720 mips_ls2.alu1_turn_p = false;
10721 mips_ls2.falu1_turn_p = true;
10724 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
10726 static int
10727 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10728 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
10730 if (!reload_completed
10731 && TUNE_MACC_CHAINS
10732 && *nreadyp > 0)
10733 mips_macc_chains_reorder (ready, *nreadyp);
10735 if (reload_completed
10736 && TUNE_MIPS4130
10737 && !TARGET_VR4130_ALIGN
10738 && *nreadyp > 1)
10739 vr4130_reorder (ready, *nreadyp);
10741 if (TUNE_74K)
10742 mips_74k_agen_reorder (ready, *nreadyp);
10744 return mips_issue_rate ();
10747 /* Update round-robin counters for ALU1/2 and FALU1/2. */
10749 static void
10750 mips_ls2_variable_issue (rtx insn)
10752 if (mips_ls2.alu1_turn_p)
10754 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
10755 mips_ls2.alu1_turn_p = false;
10757 else
10759 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
10760 mips_ls2.alu1_turn_p = true;
10763 if (mips_ls2.falu1_turn_p)
10765 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
10766 mips_ls2.falu1_turn_p = false;
10768 else
10770 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
10771 mips_ls2.falu1_turn_p = true;
10774 if (recog_memoized (insn) >= 0)
10775 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
10778 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10780 static int
10781 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10782 rtx insn, int more)
10784 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
10785 if (USEFUL_INSN_P (insn))
10787 more--;
10788 if (!reload_completed && TUNE_MACC_CHAINS)
10789 mips_macc_chains_record (insn);
10790 vr4130_last_insn = insn;
10791 if (TUNE_74K)
10792 mips_74k_agen_init (insn);
10793 else if (TUNE_LOONGSON_2EF)
10794 mips_ls2_variable_issue (insn);
10797 /* Instructions of type 'multi' should all be split before
10798 the second scheduling pass. */
10799 gcc_assert (!reload_completed
10800 || recog_memoized (insn) < 0
10801 || get_attr_type (insn) != TYPE_MULTI);
10803 return more;
10806 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10807 return the first operand of the associated PREF or PREFX insn. */
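/* Illustrative mapping (derived from the code below): a write prefetch
   (WRITE == 1) yields cookie 5 for streamed references (LOCALITY <= 0),
   1 for ordinary references (LOCALITY 1 or 2), and 7 for retained
   references (LOCALITY 3).  */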
10809 rtx
10810 mips_prefetch_cookie (rtx write, rtx locality)
10812 /* store_streamed / load_streamed. */
10813 if (INTVAL (locality) <= 0)
10814 return GEN_INT (INTVAL (write) + 4);
10816 /* store / load. */
10817 if (INTVAL (locality) <= 2)
10818 return write;
10820 /* store_retained / load_retained. */
10821 return GEN_INT (INTVAL (write) + 6);
10824 /* Flags that indicate when a built-in function is available.
10826 BUILTIN_AVAIL_NON_MIPS16
10827 The function is available on the current target, but only
10828 in non-MIPS16 mode. */
10829 #define BUILTIN_AVAIL_NON_MIPS16 1
10831 /* Declare an availability predicate for built-in functions that
10832 require non-MIPS16 mode and also require COND to be true.
10833 NAME is the main part of the predicate's name. */
10834 #define AVAIL_NON_MIPS16(NAME, COND) \
10835 static unsigned int \
10836 mips_builtin_avail_##NAME (void) \
10838 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
10841 /* This structure describes a single built-in function. */
10842 struct mips_builtin_description {
10843 /* The code of the main .md file instruction. See mips_builtin_type
10844 for more information. */
10845 enum insn_code icode;
10847 /* The floating-point comparison code to use with ICODE, if any. */
10848 enum mips_fp_condition cond;
10850 /* The name of the built-in function. */
10851 const char *name;
10853 /* Specifies how the function should be expanded. */
10854 enum mips_builtin_type builtin_type;
10856 /* The function's prototype. */
10857 enum mips_function_type function_type;
10859 /* Whether the function is available. */
10860 unsigned int (*avail) (void);
10863 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
10864 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
10865 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
10866 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
10867 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
10868 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
10869 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
10870 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
10872 /* Construct a mips_builtin_description from the given arguments.
10874 INSN is the name of the associated instruction pattern, without the
10875 leading CODE_FOR_mips_.
10877 CODE is the floating-point condition code associated with the
10878 function. It can be 'f' if the field is not applicable.
10880 NAME is the name of the function itself, without the leading
10881 "__builtin_mips_".
10883 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
10885 AVAIL is the name of the availability predicate, without the leading
10886 mips_builtin_avail_. */
10887 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
10888 FUNCTION_TYPE, AVAIL) \
10889 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
10890 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
10891 mips_builtin_avail_ ## AVAIL }
10893 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
10894 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
10895 are as for MIPS_BUILTIN. */
10896 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10897 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
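/* For example (an illustrative expansion, not part of the original
   comments):

     DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single)

   becomes roughly:

     { CODE_FOR_mips_pll_ps, MIPS_FP_COND_f, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       mips_builtin_avail_paired_single }  */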
10899 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
10900 are subject to mips_builtin_avail_<AVAIL>. */
10901 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
10902 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
10903 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
10904 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
10905 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
10907 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10908 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
10909 while the any and all forms are subject to mips_builtin_avail_mips3d. */
10910 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
10911 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
10912 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
10913 mips3d), \
10914 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
10915 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
10916 mips3d), \
10917 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
10918 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
10919 AVAIL), \
10920 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
10921 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
10922 AVAIL)
10924 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10925 are subject to mips_builtin_avail_mips3d. */
10926 #define CMP_4S_BUILTINS(INSN, COND) \
10927 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
10928 MIPS_BUILTIN_CMP_ANY, \
10929 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
10930 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
10931 MIPS_BUILTIN_CMP_ALL, \
10932 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
10934 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10935 instruction requires mips_builtin_avail_<AVAIL>. */
10936 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
10937 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
10938 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10939 AVAIL), \
10940 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
10941 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10942 AVAIL)
10944 /* Define all the built-in functions related to C.cond.fmt condition COND. */
10945 #define CMP_BUILTINS(COND) \
10946 MOVTF_BUILTINS (c, COND, paired_single), \
10947 MOVTF_BUILTINS (cabs, COND, mips3d), \
10948 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
10949 CMP_PS_BUILTINS (c, COND, paired_single), \
10950 CMP_PS_BUILTINS (cabs, COND, mips3d), \
10951 CMP_4S_BUILTINS (c, COND), \
10952 CMP_4S_BUILTINS (cabs, COND)
10954 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
10955 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
10956 and AVAIL are as for MIPS_BUILTIN. */
10957 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
10958 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
10959 FUNCTION_TYPE, AVAIL)
10961 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10962 branch instruction. AVAIL is as for MIPS_BUILTIN. */
10963 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
10964 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
10965 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
10967 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
10968 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10969 builtin_description field. */
10970 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
10971 { CODE_FOR_loongson_ ## INSN, 0, "__builtin_loongson_" #FN_NAME, \
10972 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, mips_builtin_avail_loongson }
10974 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
10975 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
10976 builtin_description field. */
10977 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
10978 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
10980 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
10981 We use functions of this form when the same insn can be usefully applied
10982 to more than one datatype. */
10983 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
10984 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
10986 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10987 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10988 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10989 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10990 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10991 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10993 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
10994 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
10995 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
10996 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
10997 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
10998 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
10999 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
11000 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
11001 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
11002 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
11003 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
11004 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
11005 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
11006 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
11007 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
11008 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
11009 #define CODE_FOR_loongson_biadd CODE_FOR_reduc_uplus_v8qi
11010 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
11011 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
11012 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
11013 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
11014 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
11015 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
11016 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
11017 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
11018 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
11019 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
11020 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
11021 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
11022 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
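/* The builtin-defining macros above construct insn codes of the form
   CODE_FOR_mips_<insn> and CODE_FOR_loongson_<insn>.  Where mips.md
   implements the operation under a generic optab name instead (addv2hi3,
   vec_pack_ssat_v2si and so on), the #defines in this block simply forward
   the expected name to that generic pattern; for example,
   CODE_FOR_loongson_paddh is really CODE_FOR_addv4hi3.  */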
11024 static const struct mips_builtin_description mips_builtins[] = {
11025 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11026 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11027 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11028 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
11029 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
11030 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
11031 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
11032 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
11034 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
11035 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11036 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11037 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11038 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
11040 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
11041 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
11042 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11043 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11044 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11045 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11047 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
11048 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
11049 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
11050 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
11051 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
11052 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
11054 MIPS_FP_CONDITIONS (CMP_BUILTINS),
11056 /* Built-in functions for the SB-1 processor. */
11057 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
11059 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
11060 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11061 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11062 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11063 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11064 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11065 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11066 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11067 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11068 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11069 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11070 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
11071 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
11072 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
11073 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
11074 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
11075 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
11076 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11077 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11078 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
11079 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
11080 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
11081 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
11082 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11083 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11084 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11085 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11086 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
11087 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
11088 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
11089 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
11090 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11091 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11092 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11093 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
11094 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
11095 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11096 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
11097 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
11098 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11099 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
11100 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11101 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11102 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
11103 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
11104 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
11105 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
11106 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
11107 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11108 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11109 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
11110 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11111 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11112 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
11113 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11114 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11115 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
11116 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
11117 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11118 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
11119 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
11120 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
11121 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
11122 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11123 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
11124 BPOSGE_BUILTIN (32, dsp),
11126 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
11127 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
11128 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11129 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11130 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11131 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11132 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11133 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11134 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11135 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11136 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
11137 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11138 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11139 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11140 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11141 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11142 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
11143 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11144 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
11145 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
11146 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11147 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
11148 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
11149 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11150 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11151 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11152 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
11153 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11154 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11155 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11156 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11157 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11158 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
11159 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11160 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
11162 /* Built-in functions for the DSP ASE (32-bit only). */
11163 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11164 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11165 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11166 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
11167 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11168 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11169 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11170 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11171 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
11172 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11173 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11174 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11175 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
11176 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11177 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11178 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
11179 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
11180 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11181 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
11182 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
11183 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
11185 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
11186 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11187 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11188 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11189 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11190 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
11191 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
11192 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11193 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
11194 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
11195 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11196 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11197 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11198 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11199 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11200 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
11202 /* Built-in functions for ST Microelectronics Loongson-2E/2F cores. */
11203 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
11204 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
11205 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
11206 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11207 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11208 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11209 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11210 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11211 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11212 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
11213 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
11214 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11215 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11216 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11217 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11218 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
11219 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11220 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11221 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11222 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
11223 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
11224 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11225 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11226 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11227 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11228 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11229 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11230 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11231 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11232 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11233 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11234 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11235 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11236 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11237 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11238 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11239 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11240 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
11241 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
11242 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11243 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11244 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11245 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11246 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11247 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11248 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11249 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11250 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
11251 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11252 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11253 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11254 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11255 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
11256 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
11257 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11258 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11259 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11260 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
11261 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11262 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
11263 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
11264 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
11265 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
11266 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11267 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11268 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11269 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11270 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11271 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11272 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11273 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11274 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
11275 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
11276 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
11277 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
11278 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11279 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11280 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11281 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11282 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11283 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11284 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
11285 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
11286 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
11287 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
11288 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11289 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11290 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11291 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11292 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11293 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11294 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11295 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
11296 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
11297 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
11298 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
11299 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
11300 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
11301 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI)
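/* Illustrative only -- not part of this file: how two of the entries above
   surface in user code.  The vector typedefs are local to this sketch; the
   builtin signatures follow the MIPS_*_FTYPE_* codes in the table
   (MIPS_V2HI_FTYPE_V2HI_V2HI and MIPS_UV4HI_FTYPE_UV4HI_UV4HI).  The first
   function needs -mdsp; the second needs a Loongson-2E/2F target.  */
#if 0
typedef short v2hi __attribute__ ((vector_size (4)));            /* V2HImode */
typedef unsigned short uv4hi __attribute__ ((vector_size (8)));  /* V4HImode */

static v2hi
add_q15_pairs (v2hi a, v2hi b)
{
  /* DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp) above.  */
  return __builtin_mips_addq_s_ph (a, b);
}

static uv4hi
add_u16_quads (uv4hi a, uv4hi b)
{
  /* LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI);
     the macro appends "_u" to the instruction name.  */
  return __builtin_loongson_paddh_u (a, b);
}
#endif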
11304 /* MODE is a vector mode whose elements have type TYPE. Return the type
11305 of the vector itself. */
11307 static tree
11308 mips_builtin_vector_type (tree type, enum machine_mode mode)
11310 static tree types[2 * (int) MAX_MACHINE_MODE];
11311 int mode_index;
11313 mode_index = (int) mode;
11315 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
11316 mode_index += MAX_MACHINE_MODE;
11318 if (types[mode_index] == NULL_TREE)
11319 types[mode_index] = build_vector_type_for_mode (type, mode);
11320 return types[mode_index];
11323 /* Source-level argument types. */
11324 #define MIPS_ATYPE_VOID void_type_node
11325 #define MIPS_ATYPE_INT integer_type_node
11326 #define MIPS_ATYPE_POINTER ptr_type_node
11328 /* Standard mode-based argument types. */
11329 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
11330 #define MIPS_ATYPE_SI intSI_type_node
11331 #define MIPS_ATYPE_USI unsigned_intSI_type_node
11332 #define MIPS_ATYPE_DI intDI_type_node
11333 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
11334 #define MIPS_ATYPE_SF float_type_node
11335 #define MIPS_ATYPE_DF double_type_node
11337 /* Vector argument types. */
11338 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
11339 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
11340 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
11341 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
11342 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
11343 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
11344 #define MIPS_ATYPE_UV2SI \
11345 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
11346 #define MIPS_ATYPE_UV4HI \
11347 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
11348 #define MIPS_ATYPE_UV8QI \
11349 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
11351 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
11352 their associated MIPS_ATYPEs. */
11353 #define MIPS_FTYPE_ATYPES1(A, B) \
11354 MIPS_ATYPE_##A, MIPS_ATYPE_##B
11356 #define MIPS_FTYPE_ATYPES2(A, B, C) \
11357 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
11359 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
11360 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
11362 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
11363 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
11364 MIPS_ATYPE_##E
11366 /* Return the function type associated with function prototype TYPE. */
11368 static tree
11369 mips_build_function_type (enum mips_function_type type)
11371 static tree types[(int) MIPS_MAX_FTYPE_MAX];
11373 if (types[(int) type] == NULL_TREE)
11374 switch (type)
11376 #define DEF_MIPS_FTYPE(NUM, ARGS) \
11377 case MIPS_FTYPE_NAME##NUM ARGS: \
11378 types[(int) type] \
11379 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
11380 NULL_TREE); \
11381 break;
11382 #include "config/mips/mips-ftypes.def"
11383 #undef DEF_MIPS_FTYPE
11384 default:
11385 gcc_unreachable ();
11388 return types[(int) type];
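/* For example, for the entry DEF_MIPS_FTYPE (2, (V2HI, V2HI, V2HI)) in
   mips-ftypes.def, the case above expands (via MIPS_FTYPE_NAME2 and
   MIPS_FTYPE_ATYPES2) to roughly:

     case MIPS_V2HI_FTYPE_V2HI_V2HI:
       types[(int) type]
         = build_function_type_list (MIPS_ATYPE_V2HI, MIPS_ATYPE_V2HI,
                                     MIPS_ATYPE_V2HI, NULL_TREE);
       break;

   i.e. the first type code is the return type and the rest are argument
   types, terminated by NULL_TREE.  */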
11391 /* Implement TARGET_INIT_BUILTINS. */
11393 static void
11394 mips_init_builtins (void)
11396 const struct mips_builtin_description *d;
11397 unsigned int i;
11399 /* Iterate through all of the bdesc arrays, initializing all of the
11400 builtin functions. */
11401 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
11403 d = &mips_builtins[i];
11404 if (d->avail ())
11405 add_builtin_function (d->name,
11406 mips_build_function_type (d->function_type),
11407 i, BUILT_IN_MD, NULL, NULL);
11411 /* Take argument ARGNO from EXP's argument list and convert it into a
11412 form suitable for input operand OPNO of instruction ICODE. Return the
11413 value. */
11415 static rtx
11416 mips_prepare_builtin_arg (enum insn_code icode,
11417 unsigned int opno, tree exp, unsigned int argno)
11419 rtx value;
11420 enum machine_mode mode;
11422 value = expand_normal (CALL_EXPR_ARG (exp, argno));
11423 mode = insn_data[icode].operand[opno].mode;
11424 if (!insn_data[icode].operand[opno].predicate (value, mode))
11426 value = copy_to_mode_reg (mode, value);
11427 /* Check the predicate again. */
11428 if (!insn_data[icode].operand[opno].predicate (value, mode))
11430 error ("invalid argument to built-in function");
11431 return const0_rtx;
11435 return value;
11438 /* Return an rtx suitable for output operand OP of instruction ICODE.
11439 If TARGET is non-null, try to use it where possible. */
11441 static rtx
11442 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11444 enum machine_mode mode;
11446 mode = insn_data[icode].operand[op].mode;
11447 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11448 target = gen_reg_rtx (mode);
11450 return target;
11453 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
11454 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
11455 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
11456 suggests a good place to put the result. */
11458 static rtx
11459 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11460 bool has_target_p)
11462 rtx ops[MAX_RECOG_OPERANDS];
11463 int opno, argno;
11465 /* Map any target to operand 0. */
11466 opno = 0;
11467 if (has_target_p)
11469 ops[opno] = mips_prepare_builtin_target (icode, opno, target);
11470 opno++;
11473 /* Map the arguments to the other operands. The n_operands value
11474 for an expander includes match_dups and match_scratches as well as
11475 match_operands, so n_operands is only an upper bound on the number
11476 of arguments to the expander function. */
11477 gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
11478 for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
11479 ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);
11481 switch (opno)
11483 case 2:
11484 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11485 break;
11487 case 3:
11488 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11489 break;
11491 case 4:
11492 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11493 break;
11495 default:
11496 gcc_unreachable ();
11498 return target;
11501 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
11502 function; TYPE says which. EXP is the CALL_EXPR that calls the
11503 function, ICODE is the instruction that should be used to compare
11504 the first two arguments, and COND is the condition it should test.
11505 TARGET, if nonnull, suggests a good place to put the result. */
11507 static rtx
11508 mips_expand_builtin_movtf (enum mips_builtin_type type,
11509 enum insn_code icode, enum mips_fp_condition cond,
11510 rtx target, tree exp)
11512 rtx cmp_result, op0, op1;
11514 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11515 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11516 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11517 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11519 icode = CODE_FOR_mips_cond_move_tf_ps;
11520 target = mips_prepare_builtin_target (icode, 0, target);
11521 if (type == MIPS_BUILTIN_MOVT)
11523 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11524 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11526 else
11528 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11529 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11531 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11532 return target;
11535 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11536 into TARGET otherwise. Return TARGET. */
11538 static rtx
11539 mips_builtin_branch_and_move (rtx condition, rtx target,
11540 rtx value_if_true, rtx value_if_false)
11542 rtx true_label, done_label;
11544 true_label = gen_label_rtx ();
11545 done_label = gen_label_rtx ();
11547 /* First assume that CONDITION is false. */
11548 mips_emit_move (target, value_if_false);
11550 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11551 emit_jump_insn (gen_condjump (condition, true_label));
11552 emit_jump_insn (gen_jump (done_label));
11553 emit_barrier ();
11555 /* Fix TARGET if CONDITION is true. */
11556 emit_label (true_label);
11557 mips_emit_move (target, value_if_true);
11559 emit_label (done_label);
11560 return target;
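/* The emitted sequence therefore has the shape (sketch):

         move    TARGET, VALUE_IF_FALSE
         branch  TRUE_LABEL if CONDITION is true
         jump    DONE_LABEL
         (barrier)
     TRUE_LABEL:
         move    TARGET, VALUE_IF_TRUE
     DONE_LABEL:

   leaving the chosen value in TARGET on both paths.  */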
11563 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
11564 the CALL_EXPR that calls the function, ICODE is the code of the
11565 comparison instruction, and COND is the condition it should test.
11566 TARGET, if nonnull, suggests a good place to put the boolean result. */
11568 static rtx
11569 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11570 enum insn_code icode, enum mips_fp_condition cond,
11571 rtx target, tree exp)
11573 rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
11574 int argno;
11576 if (target == 0 || GET_MODE (target) != SImode)
11577 target = gen_reg_rtx (SImode);
11579 /* The instruction should have a target operand, an operand for each
11580 argument, and an operand for COND. */
11581 gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);
11583 /* Prepare the operands to the comparison. */
11584 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11585 for (argno = 0; argno < call_expr_nargs (exp); argno++)
11586 args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);
11588 switch (insn_data[icode].n_operands)
11590 case 4:
11591 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11592 GEN_INT (cond)));
11593 break;
11595 case 6:
11596 emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
11597 args[2], args[3], GEN_INT (cond)));
11598 break;
11600 default:
11601 gcc_unreachable ();
11604 /* If the comparison sets more than one register, we define the result
11605 to be 0 if all registers are false and -1 if all registers are true.
11606 The value of the complete result is indeterminate otherwise. */
11607 switch (builtin_type)
11609 case MIPS_BUILTIN_CMP_ALL:
11610 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11611 return mips_builtin_branch_and_move (condition, target,
11612 const0_rtx, const1_rtx);
11614 case MIPS_BUILTIN_CMP_UPPER:
11615 case MIPS_BUILTIN_CMP_LOWER:
11616 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11617 condition = gen_single_cc (cmp_result, offset);
11618 return mips_builtin_branch_and_move (condition, target,
11619 const1_rtx, const0_rtx);
11621 default:
11622 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11623 return mips_builtin_branch_and_move (condition, target,
11624 const1_rtx, const0_rtx);
11628 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
11629 if nonnull, suggests a good place to put the boolean result. */
11631 static rtx
11632 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11634 rtx condition, cmp_result;
11635 int cmp_value;
11637 if (target == 0 || GET_MODE (target) != SImode)
11638 target = gen_reg_rtx (SImode);
11640 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11642 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11643 cmp_value = 32;
11644 else
11645 gcc_assert (0);
11647 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11648 return mips_builtin_branch_and_move (condition, target,
11649 const1_rtx, const0_rtx);
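/* Illustrative only -- not part of this file: __builtin_mips_bposge32 as it
   appears in user code (requires -mdsp).  BPOSGE_BUILTIN (32, dsp) above
   gives it type MIPS_SI_FTYPE_VOID, and the expander turns the BPOSGE32
   branch on the DSPControl "pos" field into a 0/1 value.  */
#if 0
extern void refill_input_buffer (void);

static void
maybe_refill (void)
{
  if (__builtin_mips_bposge32 ())
    refill_input_buffer ();
}
#endif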
11652 /* Implement TARGET_EXPAND_BUILTIN. */
11654 static rtx
11655 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11656 enum machine_mode mode ATTRIBUTE_UNUSED,
11657 int ignore ATTRIBUTE_UNUSED)
11659 tree fndecl;
11660 unsigned int fcode, avail;
11661 const struct mips_builtin_description *d;
11663 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11664 fcode = DECL_FUNCTION_CODE (fndecl);
11665 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
11666 d = &mips_builtins[fcode];
11667 avail = d->avail ();
11668 gcc_assert (avail != 0);
11669 if (TARGET_MIPS16)
11671 error ("built-in function %qs not supported for MIPS16",
11672 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
11673 return const0_rtx;
11675 switch (d->builtin_type)
11677 case MIPS_BUILTIN_DIRECT:
11678 return mips_expand_builtin_direct (d->icode, target, exp, true);
11680 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11681 return mips_expand_builtin_direct (d->icode, target, exp, false);
11683 case MIPS_BUILTIN_MOVT:
11684 case MIPS_BUILTIN_MOVF:
11685 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
11686 d->cond, target, exp);
11688 case MIPS_BUILTIN_CMP_ANY:
11689 case MIPS_BUILTIN_CMP_ALL:
11690 case MIPS_BUILTIN_CMP_UPPER:
11691 case MIPS_BUILTIN_CMP_LOWER:
11692 case MIPS_BUILTIN_CMP_SINGLE:
11693 return mips_expand_builtin_compare (d->builtin_type, d->icode,
11694 d->cond, target, exp);
11696 case MIPS_BUILTIN_BPOSGE32:
11697 return mips_expand_builtin_bposge (d->builtin_type, target);
11699 gcc_unreachable ();
11702 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
11703 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
11704 struct mips16_constant {
11705 struct mips16_constant *next;
11706 rtx value;
11707 rtx label;
11708 enum machine_mode mode;
11711 /* Information about an incomplete MIPS16 constant pool. FIRST is the
11712 first constant, HIGHEST_ADDRESS is the highest address that the first
11713 byte of the pool can have, and INSN_ADDRESS is the current instruction
11714 address. */
11715 struct mips16_constant_pool {
11716 struct mips16_constant *first;
11717 int highest_address;
11718 int insn_address;
11721 /* Add constant VALUE to POOL and return its label. MODE is the
11722 value's mode (used for CONST_INTs, etc.). */
11724 static rtx
11725 mips16_add_constant (struct mips16_constant_pool *pool,
11726 rtx value, enum machine_mode mode)
11728 struct mips16_constant **p, *c;
11729 bool first_of_size_p;
11731 /* See whether the constant is already in the pool. If so, return the
11732 existing label, otherwise leave P pointing to the place where the
11733 constant should be added.
11735 Keep the pool sorted in increasing order of mode size so that we can
11736 reduce the number of alignments needed. */
11737 first_of_size_p = true;
11738 for (p = &pool->first; *p != 0; p = &(*p)->next)
11740 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
11741 return (*p)->label;
11742 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
11743 break;
11744 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
11745 first_of_size_p = false;
11748 /* In the worst case, the constant needed by the earliest instruction
11749 will end up at the end of the pool. The entire pool must then be
11750 accessible from that instruction.
11752 When adding the first constant, set the pool's highest address to
11753 the address of the first out-of-range byte. Adjust this address
11754 downwards each time a new constant is added. */
11755 if (pool->first == 0)
11756 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
11757 of the instruction with the lowest two bits clear. The base PC
11758 value for LDPC has the lowest three bits clear. Assume the worst
11759 case here; namely that the PC-relative instruction occupies the
11760 last 2 bytes in an aligned word. */
11761 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
11762 pool->highest_address -= GET_MODE_SIZE (mode);
11763 if (first_of_size_p)
11764 /* Take into account the worst possible padding due to alignment. */
11765 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
11767 /* Create a new entry. */
11768 c = XNEW (struct mips16_constant);
11769 c->value = value;
11770 c->mode = mode;
11771 c->label = gen_label_rtx ();
11772 c->next = *p;
11773 *p = c;
11775 return c->label;
11778 /* Output constant VALUE after instruction INSN and return the last
11779 instruction emitted. MODE is the mode of the constant. */
11781 static rtx
11782 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
11784 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
11786 rtx size = GEN_INT (GET_MODE_SIZE (mode));
11787 return emit_insn_after (gen_consttable_int (value, size), insn);
11790 if (SCALAR_FLOAT_MODE_P (mode))
11791 return emit_insn_after (gen_consttable_float (value), insn);
11793 if (VECTOR_MODE_P (mode))
11795 int i;
11797 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
11798 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
11799 CONST_VECTOR_ELT (value, i), insn);
11800 return insn;
11803 gcc_unreachable ();
11806 /* Dump out the constants in CONSTANTS after INSN. */
11808 static void
11809 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
11811 struct mips16_constant *c, *next;
11812 int align;
11814 align = 0;
11815 for (c = constants; c != NULL; c = next)
11817 /* If necessary, increase the alignment of PC. */
11818 if (align < GET_MODE_SIZE (c->mode))
11820 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
11821 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
11823 align = GET_MODE_SIZE (c->mode);
11825 insn = emit_label_after (c->label, insn);
11826 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
11828 next = c->next;
11829 free (c);
11832 emit_barrier_after (insn);
11835 /* Return the length of instruction INSN. */
11837 static int
11838 mips16_insn_length (rtx insn)
11840 if (JUMP_P (insn))
11842 rtx body = PATTERN (insn);
11843 if (GET_CODE (body) == ADDR_VEC)
11844 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
11845 if (GET_CODE (body) == ADDR_DIFF_VEC)
11846 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
11848 return get_attr_length (insn);
11851 /* If *X is a symbolic constant that refers to the constant pool, add
11852 the constant to POOL and rewrite *X to use the constant's label. */
11854 static void
11855 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
11857 rtx base, offset, label;
11859 split_const (*x, &base, &offset);
11860 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
11862 label = mips16_add_constant (pool, get_pool_constant (base),
11863 get_pool_mode (base));
11864 base = gen_rtx_LABEL_REF (Pmode, label);
11865 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
11869 /* This structure is used to communicate with mips16_rewrite_pool_refs.
11870 INSN is the instruction we're rewriting and POOL points to the current
11871 constant pool. */
11872 struct mips16_rewrite_pool_refs_info {
11873 rtx insn;
11874 struct mips16_constant_pool *pool;
11877 /* Rewrite *X so that constant pool references refer to the constant's
11878 label instead. DATA points to a mips16_rewrite_pool_refs_info
11879 structure. */
11881 static int
11882 mips16_rewrite_pool_refs (rtx *x, void *data)
11884 struct mips16_rewrite_pool_refs_info *info =
11885 (struct mips16_rewrite_pool_refs_info *) data;
11887 if (force_to_mem_operand (*x, Pmode))
11889 rtx mem = force_const_mem (GET_MODE (*x), *x);
11890 validate_change (info->insn, x, mem, false);
11893 if (MEM_P (*x))
11895 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
11896 return -1;
11899 if (TARGET_MIPS16_TEXT_LOADS)
11900 mips16_rewrite_pool_constant (info->pool, x);
11902 return GET_CODE (*x) == CONST ? -1 : 0;
11905 /* Build MIPS16 constant pools. */
11907 static void
11908 mips16_lay_out_constants (void)
11910 struct mips16_constant_pool pool;
11911 struct mips16_rewrite_pool_refs_info info;
11912 rtx insn, barrier;
11914 if (!TARGET_MIPS16_PCREL_LOADS)
11915 return;
11917 split_all_insns_noflow ();
11918 barrier = 0;
11919 memset (&pool, 0, sizeof (pool));
11920 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11922 /* Rewrite constant pool references in INSN. */
11923 if (INSN_P (insn))
11925 info.insn = insn;
11926 info.pool = &pool;
11927 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
11930 pool.insn_address += mips16_insn_length (insn);
11932 if (pool.first != NULL)
11934 /* If there are no natural barriers between the first user of
11935 the pool and the highest acceptable address, we'll need to
11936 create a new instruction to jump around the constant pool.
11937 In the worst case, this instruction will be 4 bytes long.
11939 If it's too late to do this transformation after INSN,
11940 do it immediately before INSN. */
11941 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
11943 rtx label, jump;
11945 label = gen_label_rtx ();
11947 jump = emit_jump_insn_before (gen_jump (label), insn);
11948 JUMP_LABEL (jump) = label;
11949 LABEL_NUSES (label) = 1;
11950 barrier = emit_barrier_after (jump);
11952 emit_label_after (label, barrier);
11953 pool.insn_address += 4;
11956 /* See whether the constant pool is now out of range of the first
11957 user. If so, output the constants after the previous barrier.
11958 Note that any instructions between BARRIER and INSN (inclusive)
11959 will use negative offsets to refer to the pool. */
11960 if (pool.insn_address > pool.highest_address)
11962 mips16_emit_constants (pool.first, barrier);
11963 pool.first = NULL;
11964 barrier = 0;
11966 else if (BARRIER_P (insn))
11967 barrier = insn;
11970 mips16_emit_constants (pool.first, get_last_insn ());
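/* The result, for a pool dumped in the middle of a function, looks roughly
   like this in the assembly output (sketch):

         lw      $2, 1f          # PC-relative load rewritten above
         ...
         b       2f              # jump inserted to skip over the pool
     1:  .word   <constant>      # consttable entries, smallest modes first
         ...
     2:  ...

   When a natural barrier (an unconditional jump or return) is already
   close enough, the pool is simply emitted after it and no extra jump
   is needed.  */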
11973 /* A temporary variable used by for_each_rtx callbacks, etc. */
11974 static rtx mips_sim_insn;
11976 /* A structure representing the state of the processor pipeline.
11977 Used by the mips_sim_* family of functions. */
11978 struct mips_sim {
11979 /* The maximum number of instructions that can be issued in a cycle.
11980 (Caches mips_issue_rate.) */
11981 unsigned int issue_rate;
11983 /* The current simulation time. */
11984 unsigned int time;
11986 /* How many more instructions can be issued in the current cycle. */
11987 unsigned int insns_left;
11989 /* LAST_SET[X].INSN is the last instruction to set register X.
11990 LAST_SET[X].TIME is the time at which that instruction was issued.
11991 INSN is null if no instruction has yet set register X. */
11992 struct {
11993 rtx insn;
11994 unsigned int time;
11995 } last_set[FIRST_PSEUDO_REGISTER];
11997 /* The pipeline's current DFA state. */
11998 state_t dfa_state;
12001 /* Reset STATE to the initial simulation state. */
12003 static void
12004 mips_sim_reset (struct mips_sim *state)
12006 state->time = 0;
12007 state->insns_left = state->issue_rate;
12008 memset (&state->last_set, 0, sizeof (state->last_set));
12009 state_reset (state->dfa_state);
12012 /* Initialize STATE before its first use. DFA_STATE points to an
12013 allocated but uninitialized DFA state. */
12015 static void
12016 mips_sim_init (struct mips_sim *state, state_t dfa_state)
12018 state->issue_rate = mips_issue_rate ();
12019 state->dfa_state = dfa_state;
12020 mips_sim_reset (state);
12023 /* Advance STATE by one clock cycle. */
12025 static void
12026 mips_sim_next_cycle (struct mips_sim *state)
12028 state->time++;
12029 state->insns_left = state->issue_rate;
12030 state_transition (state->dfa_state, 0);
12033 /* Advance simulation state STATE until instruction INSN can read
12034 register REG. */
12036 static void
12037 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
12039 unsigned int regno, end_regno;
12041 end_regno = END_REGNO (reg);
12042 for (regno = REGNO (reg); regno < end_regno; regno++)
12043 if (state->last_set[regno].insn != 0)
12045 unsigned int t;
12047 t = (state->last_set[regno].time
12048 + insn_latency (state->last_set[regno].insn, insn));
12049 while (state->time < t)
12050 mips_sim_next_cycle (state);
12054 /* A for_each_rtx callback. If *X is a register, advance simulation state
12055 DATA until mips_sim_insn can read the register's value. */
12057 static int
12058 mips_sim_wait_regs_2 (rtx *x, void *data)
12060 if (REG_P (*x))
12061 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
12062 return 0;
12065 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
12067 static void
12068 mips_sim_wait_regs_1 (rtx *x, void *data)
12070 for_each_rtx (x, mips_sim_wait_regs_2, data);
12073 /* Advance simulation state STATE until all of INSN's register
12074 dependencies are satisfied. */
12076 static void
12077 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
12079 mips_sim_insn = insn;
12080 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
12083 /* Advance simulation state STATE until the units required by
12084 instruction INSN are available. */
12086 static void
12087 mips_sim_wait_units (struct mips_sim *state, rtx insn)
12089 state_t tmp_state;
12091 tmp_state = alloca (state_size ());
12092 while (state->insns_left == 0
12093 || (memcpy (tmp_state, state->dfa_state, state_size ()),
12094 state_transition (tmp_state, insn) >= 0))
12095 mips_sim_next_cycle (state);
12098 /* Advance simulation state STATE until INSN is ready to issue. */
12100 static void
12101 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
12103 mips_sim_wait_regs (state, insn);
12104 mips_sim_wait_units (state, insn);
12107 /* mips_sim_insn has just set X. Update the LAST_SET array
12108 in simulation state DATA. */
12110 static void
12111 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
12113 struct mips_sim *state;
12115 state = (struct mips_sim *) data;
12116 if (REG_P (x))
12118 unsigned int regno, end_regno;
12120 end_regno = END_REGNO (x);
12121 for (regno = REGNO (x); regno < end_regno; regno++)
12123 state->last_set[regno].insn = mips_sim_insn;
12124 state->last_set[regno].time = state->time;
12129 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
12130 can issue immediately (i.e., that mips_sim_wait_insn has already
12131 been called). */
12133 static void
12134 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
12136 state_transition (state->dfa_state, insn);
12137 state->insns_left--;
12139 mips_sim_insn = insn;
12140 note_stores (PATTERN (insn), mips_sim_record_set, state);
12143 /* Simulate issuing a NOP in state STATE. */
12145 static void
12146 mips_sim_issue_nop (struct mips_sim *state)
12148 if (state->insns_left == 0)
12149 mips_sim_next_cycle (state);
12150 state->insns_left--;
12153 /* Update simulation state STATE so that it's ready to accept the instruction
12154 after INSN. INSN should be part of the main rtl chain, not a member of a
12155 SEQUENCE. */
12157 static void
12158 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
12160 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
12161 if (JUMP_P (insn))
12162 mips_sim_issue_nop (state);
12164 switch (GET_CODE (SEQ_BEGIN (insn)))
12166 case CODE_LABEL:
12167 case CALL_INSN:
12168 /* We can't predict the processor state after a call or label. */
12169 mips_sim_reset (state);
12170 break;
12172 case JUMP_INSN:
12173 /* The delay slots of branch likely instructions are only executed
12174 when the branch is taken. Therefore, if the caller has simulated
12175 the delay slot instruction, STATE does not really reflect the state
12176 of the pipeline for the instruction after the delay slot. Also,
12177 branch likely instructions tend to incur a penalty when not taken,
12178 so there will probably be an extra delay between the branch and
12179 the instruction after the delay slot. */
12180 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
12181 mips_sim_reset (state);
12182 break;
12184 default:
12185 break;
12189 /* The VR4130 pipeline issues aligned pairs of instructions together,
12190 but it stalls the second instruction if it depends on the first.
12191 In order to cut down the amount of logic required, this dependence
12192 check is not based on a full instruction decode. Instead, any non-SPECIAL
12193 instruction is assumed to modify the register specified by bits 20-16
12194 (which is usually the "rt" field).
12196 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
12197 input, so we can end up with a false dependence between the branch
12198 and its delay slot. If this situation occurs in instruction INSN,
12199 try to avoid it by swapping rs and rt. */
12201 static void
12202 vr4130_avoid_branch_rt_conflict (rtx insn)
12204 rtx first, second;
12206 first = SEQ_BEGIN (insn);
12207 second = SEQ_END (insn);
12208 if (JUMP_P (first)
12209 && NONJUMP_INSN_P (second)
12210 && GET_CODE (PATTERN (first)) == SET
12211 && GET_CODE (SET_DEST (PATTERN (first))) == PC
12212 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
12214 /* Check for the right kind of condition. */
12215 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
12216 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
12217 && REG_P (XEXP (cond, 0))
12218 && REG_P (XEXP (cond, 1))
12219 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
12220 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
12222 /* SECOND mentions the rt register but not the rs register. */
12223 rtx tmp = XEXP (cond, 0);
12224 XEXP (cond, 0) = XEXP (cond, 1);
12225 XEXP (cond, 1) = tmp;
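/* Example of the swap (sketch): given

       beq   $4, $5, label
        lw   $6, 0($5)          # delay slot reads $5, the rt operand

   the rt field ($5) looks like an output to the VR4130 issue logic, so the
   pair would stall.  Swapping the operands to "beq $5, $4, label" moves the
   register that the delay slot actually reads into the rs field and avoids
   the false dependence.  */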
12230 /* Implement -mvr4130-align. Go through each basic block and simulate the
12231 processor pipeline. If we find that a pair of instructions could execute
12232 in parallel, and the first of those instructions is not 8-byte aligned,
12233 insert a nop to make it aligned. */
12235 static void
12236 vr4130_align_insns (void)
12238 struct mips_sim state;
12239 rtx insn, subinsn, last, last2, next;
12240 bool aligned_p;
12242 dfa_start ();
12244 /* LAST is the last instruction before INSN to have a nonzero length.
12245 LAST2 is the last such instruction before LAST. */
12246 last = 0;
12247 last2 = 0;
12249 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
12250 aligned_p = true;
12252 mips_sim_init (&state, alloca (state_size ()));
12253 for (insn = get_insns (); insn != 0; insn = next)
12255 unsigned int length;
12257 next = NEXT_INSN (insn);
12259 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
12260 This isn't really related to the alignment pass, but we do it on
12261 the fly to avoid a separate instruction walk. */
12262 vr4130_avoid_branch_rt_conflict (insn);
12264 if (USEFUL_INSN_P (insn))
12265 FOR_EACH_SUBINSN (subinsn, insn)
12267 mips_sim_wait_insn (&state, subinsn);
12269 /* If we want this instruction to issue in parallel with the
12270 previous one, make sure that the previous instruction is
12271 aligned. There are several reasons why this isn't worthwhile
12272 when the second instruction is a call:
12274 - Calls are less likely to be performance critical.
12275 - There's a good chance that the delay slot can execute
12276 in parallel with the call.
12277 - The return address would then be unaligned.
12279 In general, if we're going to insert a nop between instructions
12280 X and Y, it's better to insert it immediately after X. That
12281 way, if the nop makes Y aligned, it will also align any labels
12282 between X and Y. */
12283 if (state.insns_left != state.issue_rate
12284 && !CALL_P (subinsn))
12286 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
12288 /* SUBINSN is the first instruction in INSN and INSN is
12289 aligned. We want to align the previous instruction
12290 instead, so insert a nop between LAST2 and LAST.
12292 Note that LAST could be either a single instruction
12293 or a branch with a delay slot. In the latter case,
12294 LAST, like INSN, is already aligned, but the delay
12295 slot must have some extra delay that stops it from
12296 issuing at the same time as the branch. We therefore
12297 insert a nop before the branch in order to align its
12298 delay slot. */
12299 emit_insn_after (gen_nop (), last2);
12300 aligned_p = false;
12302 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
12304 /* SUBINSN is the delay slot of INSN, but INSN is
12305 currently unaligned. Insert a nop between
12306 LAST and INSN to align it. */
12307 emit_insn_after (gen_nop (), last);
12308 aligned_p = true;
12311 mips_sim_issue_insn (&state, subinsn);
12313 mips_sim_finish_insn (&state, insn);
12315 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
12316 length = get_attr_length (insn);
12317 if (length > 0)
12319 /* If the instruction is an asm statement or multi-instruction
12320 mips.md pattern, the length is only an estimate. Insert an
12321 8-byte alignment after it so that the following instructions
12322 can be handled correctly. */
12323 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
12324 && (recog_memoized (insn) < 0 || length >= 8))
12326 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
12327 next = NEXT_INSN (next);
12328 mips_sim_next_cycle (&state);
12329 aligned_p = true;
12331 else if (length & 4)
12332 aligned_p = !aligned_p;
12333 last2 = last;
12334 last = insn;
12337 /* See whether INSN is an aligned label. */
12338 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
12339 aligned_p = true;
12341 dfa_finish ();
12344 /* This structure records that the current function has a LO_SUM
12345 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
12346 the largest offset applied to BASE by all such LO_SUMs. */
12347 struct mips_lo_sum_offset {
12348 rtx base;
12349 HOST_WIDE_INT offset;
12352 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
12354 static hashval_t
12355 mips_hash_base (rtx base)
12357 int do_not_record_p;
12359 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
12362 /* Hash-table callbacks for mips_lo_sum_offsets. */
12364 static hashval_t
12365 mips_lo_sum_offset_hash (const void *entry)
12367 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
12370 static int
12371 mips_lo_sum_offset_eq (const void *entry, const void *value)
12373 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
12374 (const_rtx) value);
12377 /* Look up symbolic constant X in HTAB, which is a hash table of
12378 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
12379 paired with a recorded LO_SUM, otherwise record X in the table. */
12381 static bool
12382 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
12384 rtx base, offset;
12385 void **slot;
12386 struct mips_lo_sum_offset *entry;
12388 /* Split X into a base and offset. */
12389 split_const (x, &base, &offset);
12390 if (UNSPEC_ADDRESS_P (base))
12391 base = UNSPEC_ADDRESS (base);
12393 /* Look up the base in the hash table. */
12394 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
12395 if (slot == NULL)
12396 return false;
12398 entry = (struct mips_lo_sum_offset *) *slot;
12399 if (option == INSERT)
12401 if (entry == NULL)
12403 entry = XNEW (struct mips_lo_sum_offset);
12404 entry->base = base;
12405 entry->offset = INTVAL (offset);
12406 *slot = entry;
12408 else
12410 if (INTVAL (offset) > entry->offset)
12411 entry->offset = INTVAL (offset);
12414 return INTVAL (offset) <= entry->offset;
12417 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
12418 Record every LO_SUM in *LOC. */
12420 static int
12421 mips_record_lo_sum (rtx *loc, void *data)
12423 if (GET_CODE (*loc) == LO_SUM)
12424 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
12425 return 0;
12428 /* Return true if INSN is a SET of an orphaned high-part relocation.
12429 HTAB is a hash table of mips_lo_sum_offsets that describes all the
12430 LO_SUMs in the current function. */
12432 static bool
12433 mips_orphaned_high_part_p (htab_t htab, rtx insn)
12435 enum mips_symbol_type type;
12436 rtx x, set;
12438 set = single_set (insn);
12439 if (set)
12441 /* Check for %his. */
12442 x = SET_SRC (set);
12443 if (GET_CODE (x) == HIGH
12444 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
12445 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
12447 /* Check for local %gots (and %got_pages, which is redundant but OK). */
12448 if (GET_CODE (x) == UNSPEC
12449 && XINT (x, 1) == UNSPEC_LOAD_GOT
12450 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
12451 SYMBOL_CONTEXT_LEA, &type)
12452 && type == SYMBOL_GOTOFF_PAGE)
12453 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
12455 return false;
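/* Example (sketch): the usual paired form is

       lui   $2, %hi(sym + off)
       lw    $3, %lo(sym + off)($2)

   If every LO_SUM user of SYM is later deleted, the LUI becomes an orphaned
   high part and can itself be removed (or turned into a nop in a delay slot)
   by mips_reorg_process_insns below.  The offset check in
   mips_lo_sum_offset_lookup only treats a HIGH as paired if some recorded
   LO_SUM for the same base uses an offset at least as large.  */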
12458 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
12459 INSN and a previous instruction, avoid it by inserting nops after
12460 instruction AFTER.
12462 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
12463 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
12464 before using the value of that register. *HILO_DELAY counts the
12465 number of instructions since the last hilo hazard (that is,
12466 the number of instructions since the last MFLO or MFHI).
12468 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
12469 for the next instruction.
12471 LO_REG is an rtx for the LO register, used in dependence checking. */
12473 static void
12474 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
12475 rtx *delayed_reg, rtx lo_reg)
12477 rtx pattern, set;
12478 int nops, ninsns;
12480 pattern = PATTERN (insn);
12482 /* Do not put the whole function in .set noreorder if it contains
12483 an asm statement. We don't know whether there will be hazards
12484 between the asm statement and the gcc-generated code. */
12485 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
12486 cfun->machine->all_noreorder_p = false;
12488 /* Ignore zero-length instructions (barriers and the like). */
12489 ninsns = get_attr_length (insn) / 4;
12490 if (ninsns == 0)
12491 return;
12493 /* Work out how many nops are needed. Note that we only care about
12494 registers that are explicitly mentioned in the instruction's pattern.
12495 It doesn't matter that calls use the argument registers or that they
12496 clobber hi and lo. */
12497 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
12498 nops = 2 - *hilo_delay;
12499 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
12500 nops = 1;
12501 else
12502 nops = 0;
12504 /* Insert the nops between this instruction and the previous one.
12505 Each new nop takes us further from the last hilo hazard. */
12506 *hilo_delay += nops;
12507 while (nops-- > 0)
12508 emit_insn_after (gen_hazard_nop (), after);
12510 /* Set up the state for the next instruction. */
12511 *hilo_delay += ninsns;
12512 *delayed_reg = 0;
12513 if (INSN_CODE (insn) >= 0)
12514 switch (get_attr_hazard (insn))
12516 case HAZARD_NONE:
12517 break;
12519 case HAZARD_HILO:
12520 *hilo_delay = 0;
12521 break;
12523 case HAZARD_DELAY:
12524 set = single_set (insn);
12525 gcc_assert (set);
12526 *delayed_reg = SET_DEST (set);
12527 break;
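/* Example (sketch) of the HILO case on targets without HI/LO interlocks:

       mflo  $2
       nop                     # hazard nops inserted by the loop above
       nop
       mult  $3, $4            # next writer of HI/LO

   The two nops keep the required two-instruction gap between the MFLO and
   the next instruction that sets LO.  */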

/* Go through the instruction stream and insert nops where necessary.
   Also delete any high-part relocations whose partnering low parts
   are now all dead.  See if the whole function can then be put into
   .set noreorder and .set nomacro.  */

static void
mips_reorg_process_insns (void)
{
  rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
  int hilo_delay;
  htab_t htab;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Recalculate instruction lengths without taking nops into account.  */
  cfun->machine->ignore_hazard_length_p = true;
  shorten_branches (get_insns ());

  cfun->machine->all_noreorder_p = true;

  /* We don't track MIPS16 PC-relative offsets closely enough to make
     a good job of ".set noreorder" code in MIPS16 mode.  */
  if (TARGET_MIPS16)
    cfun->machine->all_noreorder_p = false;

  /* Code that doesn't use explicit relocs can't be ".set nomacro".  */
  if (!TARGET_EXPLICIT_RELOCS)
    cfun->machine->all_noreorder_p = false;

  /* Profiled functions can't be all noreorder because the profiler
     support uses assembler macros.  */
  if (crtl->profile)
    cfun->machine->all_noreorder_p = false;

  /* Code compiled with -mfix-vr4120 can't be all noreorder because
     we rely on the assembler to work around some errata.  */
  if (TARGET_FIX_VR4120)
    cfun->machine->all_noreorder_p = false;

  /* The same is true for -mfix-vr4130 if we might generate MFLO or
     MFHI instructions.  Note that we avoid using MFLO and MFHI if
     the VR4130 MACC and DMACC instructions are available instead;
     see the *mfhilo_{si,di}_macc patterns.  */
  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
    cfun->machine->all_noreorder_p = false;

  htab = htab_create (37, mips_lo_sum_offset_hash,
                      mips_lo_sum_offset_eq, free);

  /* Make a first pass over the instructions, recording all the LO_SUMs.  */
  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
    FOR_EACH_SUBINSN (subinsn, insn)
      if (INSN_P (subinsn))
        for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);

  last_insn = 0;
  hilo_delay = 2;
  delayed_reg = 0;
  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);

  /* Make a second pass over the instructions.  Delete orphaned
     high-part relocations or turn them into NOPs.  Avoid hazards
     by inserting NOPs.  */
  for (insn = get_insns (); insn != 0; insn = next_insn)
    {
      next_insn = NEXT_INSN (insn);
      if (INSN_P (insn))
        {
          if (GET_CODE (PATTERN (insn)) == SEQUENCE)
            {
              /* If we find an orphaned high-part relocation in a delay
                 slot, it's easier to turn that instruction into a NOP than
                 to delete it.  The delay slot will be a NOP either way.  */
              FOR_EACH_SUBINSN (subinsn, insn)
                if (INSN_P (subinsn))
                  {
                    if (mips_orphaned_high_part_p (htab, subinsn))
                      {
                        PATTERN (subinsn) = gen_nop ();
                        INSN_CODE (subinsn) = CODE_FOR_nop;
                      }
                    mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
                                       &delayed_reg, lo_reg);
                  }
              last_insn = insn;
            }
          else
            {
              /* INSN is a single instruction.  Delete it if it's an
                 orphaned high-part relocation.  */
              if (mips_orphaned_high_part_p (htab, insn))
                delete_insn (insn);
              else
                {
                  mips_avoid_hazard (last_insn, insn, &hilo_delay,
                                     &delayed_reg, lo_reg);
                  last_insn = insn;
                }
            }
        }
    }

  htab_delete (htab);
}

/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */

static void
mips_reorg (void)
{
  mips16_lay_out_constants ();
  if (mips_base_delayed_branch)
    dbr_schedule (get_insns ());
  mips_reorg_process_insns ();
  if (!TARGET_MIPS16
      && TARGET_EXPLICIT_RELOCS
      && TUNE_MIPS4130
      && TARGET_VR4130_ALIGN)
    vr4130_align_insns ();
}

/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
   in order to avoid duplicating too much logic from elsewhere.  */

static void
mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_rtx, temp1, temp2, insn, fnaddr;
  bool use_sibcall_p;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Determine if we can use a sibcall to call FUNCTION directly.  */
  fnaddr = XEXP (DECL_RTL (function), 0);
  use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
                   && const_call_insn_operand (fnaddr, Pmode));

  /* Determine if we need to load FNADDR from the GOT.  */
  if (!use_sibcall_p
      && (mips_got_symbol_type_p
          (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
    {
      /* Pick a global pointer.  Use a call-clobbered register if
         TARGET_CALL_SAVED_GP.  */
      cfun->machine->global_pointer
        = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

      /* Set up the global pointer for n32 or n64 abicalls.  */
      mips_emit_loadgp ();
    }

  /* We need two temporary registers in some cases.  */
  temp1 = gen_rtx_REG (Pmode, 2);
  temp2 = gen_rtx_REG (Pmode, 3);

  /* Find out which register contains the "this" pointer.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Add DELTA to THIS_RTX.  */
  if (delta != 0)
    {
      rtx offset = GEN_INT (delta);
      if (!SMALL_OPERAND (delta))
        {
          mips_emit_move (temp1, offset);
          offset = temp1;
        }
      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
    }

  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
  if (vcall_offset != 0)
    {
      rtx addr;

      /* Set TEMP1 to *THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));

      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
      addr = mips_add_offset (temp2, temp1, vcall_offset);

      /* Load the offset and add it to THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  if (use_sibcall_p)
    {
      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* This is messy.  GAS treats "la $25,foo" as part of a call
         sequence and may allow a global "foo" to be lazily bound.
         The general move patterns therefore reject this combination.

         In this context, lazy binding would actually be OK
         for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
         TARGET_CALL_SAVED_GP; see mips_load_call_address.
         We must therefore load the address via a temporary
         register if mips_dangerous_for_la25_p.

         If we jump to the temporary register rather than $25,
         the assembler can use the move insn to fill the jump's
         delay slot.

         We can use the same technique for MIPS16 code, where $25
         is not a valid JR register.  */
      if (TARGET_USE_PIC_FN_ADDR_REG
          && !TARGET_MIPS16
          && !mips_dangerous_for_la25_p (fnaddr))
        temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);

      if (TARGET_USE_PIC_FN_ADDR_REG
          && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
        mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
      emit_jump_insn (gen_indirect_jump (temp1));
    }

  /* Run just enough of rest_of_compilation.  This sequence was
     "borrowed" from alpha.c.  */
  insn = get_insns ();
  insn_locators_alloc ();
  split_all_insns_noflow ();
  mips16_lay_out_constants ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  free_after_compilation (cfun);

  /* Clean up the vars set above.  Note that final_end_function resets
     the global pointer for us.  */
  reload_completed = 0;
}

/* The last argument passed to mips_set_mips16_mode, or negative if the
   function hasn't been called yet.

   There are two copies of this information.  One is saved and restored
   by the PCH process while the other is specific to this compiler
   invocation.  The information calculated by mips_set_mips16_mode
   is invalid unless the two variables are the same.  */
static int was_mips16_p = -1;
static GTY(()) int was_mips16_pch_p = -1;

/* Set up the target-dependent global state so that it matches the
   current function's ISA mode.  */

static void
mips_set_mips16_mode (int mips16_p)
{
  if (mips16_p == was_mips16_p
      && mips16_p == was_mips16_pch_p)
    return;

  /* Restore base settings of various flags.  */
  target_flags = mips_base_target_flags;
  flag_schedule_insns = mips_base_schedule_insns;
  flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
  flag_move_loop_invariants = mips_base_move_loop_invariants;
  align_loops = mips_base_align_loops;
  align_jumps = mips_base_align_jumps;
  align_functions = mips_base_align_functions;

  if (mips16_p)
    {
      /* Switch to MIPS16 mode.  */
      target_flags |= MASK_MIPS16;

      /* Don't run the scheduler before reload, since it tends to
         increase register pressure.  */
      flag_schedule_insns = 0;

      /* Don't do hot/cold partitioning.  mips16_lay_out_constants expects
         the whole function to be in a single section.  */
      flag_reorder_blocks_and_partition = 0;

      /* Don't move loop invariants, because it tends to increase
         register pressure.  It also introduces an extra move in cases
         where the constant is the first operand in a two-operand binary
         instruction, or when it forms a register argument to a function
         call.  */
      flag_move_loop_invariants = 0;

      target_flags |= MASK_EXPLICIT_RELOCS;

      /* Experiments suggest we get the best overall section-anchor
         results from using the range of an unextended LW or SW.  Code
         that makes heavy use of byte or short accesses can do better
         with ranges of 0...31 and 0...63 respectively, but most code is
         sensitive to the range of LW and SW instead.  */
      targetm.min_anchor_offset = 0;
      targetm.max_anchor_offset = 127;

      if (flag_pic && !TARGET_OLDABI)
        sorry ("MIPS16 PIC for ABIs other than o32 and o64");

      if (TARGET_XGOT)
        sorry ("MIPS16 -mxgot code");

      if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
        sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
    }
  else
    {
      /* Switch to normal (non-MIPS16) mode.  */
      target_flags &= ~MASK_MIPS16;

      /* Provide default values for align_* for 64-bit targets.  */
      if (TARGET_64BIT)
        {
          if (align_loops == 0)
            align_loops = 8;
          if (align_jumps == 0)
            align_jumps = 8;
          if (align_functions == 0)
            align_functions = 8;
        }

      targetm.min_anchor_offset = -32768;
      targetm.max_anchor_offset = 32767;
    }

  /* (Re)initialize MIPS target internals for new ISA.  */
  mips_init_relocs ();

  if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
    /* Reinitialize target-dependent state.  */
    target_reinit ();

  was_mips16_p = mips16_p;
  was_mips16_pch_p = mips16_p;
}

/* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current
   function should use the MIPS16 ISA and switch modes accordingly.  */

static void
mips_set_current_function (tree fndecl)
{
  mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
}

/* Allocate a chunk of memory for per-function machine-dependent data.  */

static struct machine_function *
mips_init_machine_status (void)
{
  return ((struct machine_function *)
          ggc_alloc_cleared (sizeof (struct machine_function)));
}

/* Return the processor associated with the given ISA level, or null
   if the ISA isn't valid.  */

static const struct mips_cpu_info *
mips_cpu_info_from_isa (int isa)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (mips_cpu_info_table[i].isa == isa)
      return mips_cpu_info_table + i;

  return NULL;
}

/* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
   with a final "000" replaced by "k".  Ignore case.

   Note: this function is shared between GCC and GAS.  */

static bool
mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
{
  while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
    given++, canonical++;

  return ((*given == 0 && *canonical == 0)
          || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
}
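
/* For example, GIVEN "r4k" strictly matches CANONICAL "r4000": the common
   prefix "r4" is consumed and the remaining "000" in the canonical name
   pairs with the trailing "k" in the given one.  Case is ignored.  */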

/* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
   CPU name.  We've traditionally allowed a lot of variation here.

   Note: this function is shared between GCC and GAS.  */

static bool
mips_matching_cpu_name_p (const char *canonical, const char *given)
{
  /* First see if the name matches exactly, or with a final "000"
     turned into "k".  */
  if (mips_strict_matching_cpu_name_p (canonical, given))
    return true;

  /* If not, try comparing based on numerical designation alone.
     See if GIVEN is an unadorned number, or 'r' followed by a number.  */
  if (TOLOWER (*given) == 'r')
    given++;
  if (!ISDIGIT (*given))
    return false;

  /* Skip over some well-known prefixes in the canonical name,
     hoping to find a number there too.  */
  if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
    canonical += 2;
  else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
    canonical += 2;
  else if (TOLOWER (canonical[0]) == 'r')
    canonical += 1;

  return mips_strict_matching_cpu_name_p (canonical, given);
}
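
/* For example, a canonical name of "vr4120" is matched by "vr4120", "r4120"
   or plain "4120": the strict comparison only accepts the first form, but
   the numerical comparison succeeds once the leading "r" of GIVEN and the
   "vr" prefix of CANONICAL have been skipped.  */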

/* Return the mips_cpu_info entry for the processor or ISA given
   by CPU_STRING.  Return null if the string isn't recognized.

   A similar function exists in GAS.  */

static const struct mips_cpu_info *
mips_parse_cpu (const char *cpu_string)
{
  unsigned int i;
  const char *s;

  /* In the past, we allowed upper-case CPU names, but it doesn't
     work well with the multilib machinery.  */
  for (s = cpu_string; *s != 0; s++)
    if (ISUPPER (*s))
      {
        warning (0, "CPU names must be lower case");
        break;
      }

  /* 'from-abi' selects the most compatible architecture for the given
     ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs.  For the
     EABIs, we have to decide whether we're using the 32-bit or 64-bit
     version.  */
  if (strcasecmp (cpu_string, "from-abi") == 0)
    return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
                                   : ABI_NEEDS_64BIT_REGS ? 3
                                   : (TARGET_64BIT ? 3 : 1));

  /* 'default' has traditionally been a no-op.  Probably not very useful.  */
  if (strcasecmp (cpu_string, "default") == 0)
    return NULL;

  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
      return mips_cpu_info_table + i;

  return NULL;
}
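
/* For example, "-march=from-abi" resolves to the MIPS I entry for a 32-bit
   ABI and to the MIPS III entry for a 64-bit one, while "r4000", "4000" and
   (with a warning) "R4000" all resolve to the table entry named "r4000".  */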

/* Set up globals to generate code for the ISA or processor
   described by INFO.  */

static void
mips_set_architecture (const struct mips_cpu_info *info)
{
  if (info != 0)
    {
      mips_arch_info = info;
      mips_arch = info->cpu;
      mips_isa = info->isa;
    }
}

/* Likewise for tuning.  */

static void
mips_set_tune (const struct mips_cpu_info *info)
{
  if (info != 0)
    {
      mips_tune_info = info;
      mips_tune = info->cpu;
    }
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mabi_:
      if (strcmp (arg, "32") == 0)
        mips_abi = ABI_32;
      else if (strcmp (arg, "o64") == 0)
        mips_abi = ABI_O64;
      else if (strcmp (arg, "n32") == 0)
        mips_abi = ABI_N32;
      else if (strcmp (arg, "64") == 0)
        mips_abi = ABI_64;
      else if (strcmp (arg, "eabi") == 0)
        mips_abi = ABI_EABI;
      else
        return false;
      return true;

    case OPT_march_:
    case OPT_mtune_:
      return mips_parse_cpu (arg) != 0;

    case OPT_mips:
      mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
      return mips_isa_option_info != 0;

    case OPT_mno_flush_func:
      mips_cache_flush_func = NULL;
      return true;

    case OPT_mcode_readable_:
      if (strcmp (arg, "yes") == 0)
        mips_code_readable = CODE_READABLE_YES;
      else if (strcmp (arg, "pcrel") == 0)
        mips_code_readable = CODE_READABLE_PCREL;
      else if (strcmp (arg, "no") == 0)
        mips_code_readable = CODE_READABLE_NO;
      else
        return false;
      return true;

    default:
      return true;
    }
}
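
/* For example, "-mips32r2" reaches the OPT_mips case above with ARG equal
   to "32r2"; the handler then looks up "mips32r2" in the CPU table via
   mips_parse_cpu and rejects the option if no such ISA entry exists.  */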

/* Implement OVERRIDE_OPTIONS.  */

void
mips_override_options (void)
{
  int i, start, regno, mode;

  /* Process flags as though we were generating non-MIPS16 code.  */
  mips_base_mips16 = TARGET_MIPS16;
  target_flags &= ~MASK_MIPS16;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Set the small data limit.  */
  mips_small_data_threshold = (g_switch_set
                               ? g_switch_value
                               : MIPS_DEFAULT_GVALUE);

  /* The following code determines the architecture and register size.
     Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
     The GAS and GCC code should be kept in sync as much as possible.  */

  if (mips_arch_string != 0)
    mips_set_architecture (mips_parse_cpu (mips_arch_string));

  if (mips_isa_option_info != 0)
    {
      if (mips_arch_info == 0)
        mips_set_architecture (mips_isa_option_info);
      else if (mips_arch_info->isa != mips_isa_option_info->isa)
        error ("%<-%s%> conflicts with the other architecture options, "
               "which specify a %s processor",
               mips_isa_option_info->name,
               mips_cpu_info_from_isa (mips_arch_info->isa)->name);
    }

  if (mips_arch_info == 0)
    {
#ifdef MIPS_CPU_STRING_DEFAULT
      mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
#else
      mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
#endif
    }

  if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
    error ("%<-march=%s%> is not compatible with the selected ABI",
           mips_arch_info->name);

  /* Optimize for mips_arch, unless -mtune selects a different processor.  */
  if (mips_tune_string != 0)
    mips_set_tune (mips_parse_cpu (mips_tune_string));

  if (mips_tune_info == 0)
    mips_set_tune (mips_arch_info);

  if ((target_flags_explicit & MASK_64BIT) != 0)
    {
      /* The user specified the size of the integer registers.  Make sure
         it agrees with the ABI and ISA.  */
      if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
        error ("%<-mgp64%> used with a 32-bit processor");
      else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
        error ("%<-mgp32%> used with a 64-bit ABI");
      else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
        error ("%<-mgp64%> used with a 32-bit ABI");
    }
  else
    {
      /* Infer the integer register size from the ABI and processor.
         Restrict ourselves to 32-bit registers if that's all the
         processor has, or if the ABI cannot handle 64-bit registers.  */
      if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
        target_flags &= ~MASK_64BIT;
      else
        target_flags |= MASK_64BIT;
    }

  if ((target_flags_explicit & MASK_FLOAT64) != 0)
    {
      if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
        error ("unsupported combination: %s", "-mfp64 -msingle-float");
      else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
        error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
      else if (!TARGET_64BIT && TARGET_FLOAT64)
        {
          if (!ISA_HAS_MXHC1)
            error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
                   " the target supports the mfhc1 and mthc1 instructions");
          else if (mips_abi != ABI_32)
            error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
                   " the o32 ABI");
        }
    }
  else
    {
      /* -msingle-float selects 32-bit float registers.  Otherwise the
         float registers should be the same size as the integer ones.  */
      if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
        target_flags |= MASK_FLOAT64;
      else
        target_flags &= ~MASK_FLOAT64;
    }

  /* End of code shared with GAS.  */

  /* If no -mlong* option was given, infer it from the other options.  */
  if ((target_flags_explicit & MASK_LONG64) == 0)
    {
      if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
        target_flags |= MASK_LONG64;
      else
        target_flags &= ~MASK_LONG64;
    }

  if (!TARGET_OLDABI)
    flag_pcc_struct_return = 0;

  /* Decide which rtx_costs structure to use.  */
  if (optimize_size)
    mips_cost = &mips_rtx_cost_optimize_size;
  else
    mips_cost = &mips_rtx_cost_data[mips_tune];

  /* If the user hasn't specified a branch cost, use the processor's
     default.  */
  if (mips_branch_cost == 0)
    mips_branch_cost = mips_cost->branch_cost;

  /* If neither -mbranch-likely nor -mno-branch-likely was given
     on the command line, set MASK_BRANCHLIKELY based on the target
     architecture and tuning flags.  Annulled delay slots are a
     size win, so we only consider the processor-specific tuning
     for !optimize_size.  */
  if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
    {
      if (ISA_HAS_BRANCHLIKELY
          && (optimize_size
              || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
        target_flags |= MASK_BRANCHLIKELY;
      else
        target_flags &= ~MASK_BRANCHLIKELY;
    }
  else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
    warning (0, "the %qs architecture does not support branch-likely"
             " instructions", mips_arch_info->name);

  /* The effect of -mabicalls isn't defined for the EABI.  */
  if (mips_abi == ABI_EABI && TARGET_ABICALLS)
    {
      error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
      target_flags &= ~MASK_ABICALLS;
    }

  if (TARGET_ABICALLS_PIC2)
    /* We need to set flag_pic for executables as well as DSOs
       because we may reference symbols that are not defined in
       the final executable.  (MIPS does not use things like
       copy relocs, for example.)

       There is a body of code that uses __PIC__ to distinguish
       between -mabicalls and -mno-abicalls code.  The non-__PIC__
       variant is usually appropriate for TARGET_ABICALLS_PIC0, as
       long as any indirect jumps use $25.  */
    flag_pic = 1;

  /* -mvr4130-align is a "speed over size" optimization: it usually produces
     faster code, but at the expense of more nops.  Enable it at -O3 and
     above.  */
  if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
    target_flags |= MASK_VR4130_ALIGN;

  /* Prefer a call to memcpy over inline code when optimizing for size,
     though see MOVE_RATIO in mips.h.  */
  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
    target_flags |= MASK_MEMCPY;

  /* If we have a nonzero small-data limit, check that the -mgpopt
     setting is consistent with the other target flags.  */
  if (mips_small_data_threshold > 0)
    {
      if (!TARGET_GPOPT)
        {
          if (!TARGET_EXPLICIT_RELOCS)
            error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");

          TARGET_LOCAL_SDATA = false;
          TARGET_EXTERN_SDATA = false;
        }
      else
        {
          if (TARGET_VXWORKS_RTP)
            warning (0, "cannot use small-data accesses for %qs", "-mrtp");

          if (TARGET_ABICALLS)
            warning (0, "cannot use small-data accesses for %qs",
                     "-mabicalls");
        }
    }

#ifdef MIPS_TFMODE_FORMAT
  REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
#endif

  /* Make sure that the user didn't turn off paired single support when
     MIPS-3D support is requested.  */
  if (TARGET_MIPS3D
      && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
      && !TARGET_PAIRED_SINGLE_FLOAT)
    error ("%<-mips3d%> requires %<-mpaired-single%>");

  /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT.  */
  if (TARGET_MIPS3D)
    target_flags |= MASK_PAIRED_SINGLE_FLOAT;

  /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
     and TARGET_HARD_FLOAT_ABI are both true.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
    error ("%qs must be used with %qs",
           TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
           TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");

  /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
     enabled.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
    warning (0, "the %qs architecture does not support paired-single"
             " instructions", mips_arch_info->name);

  /* If TARGET_DSPR2, enable MASK_DSP.  */
  if (TARGET_DSPR2)
    target_flags |= MASK_DSP;

  mips_init_print_operand_punct ();

  /* Set up array to map GCC register number to debug register number.
     Ignore the special purpose register numbers.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      mips_dbx_regno[i] = INVALID_REGNUM;
      if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
        mips_dwarf_regno[i] = i;
      else
        mips_dwarf_regno[i] = INVALID_REGNUM;
    }

  start = GP_DBX_FIRST - GP_REG_FIRST;
  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  start = FP_DBX_FIRST - FP_REG_FIRST;
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  /* Accumulator debug registers use big-endian ordering.  */
  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
  mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
  mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
  for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
    {
      mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
      mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
    }

  /* Set up mips_hard_regno_mode_ok.  */
  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
      mips_hard_regno_mode_ok[(int) mode][regno]
        = mips_hard_regno_mode_ok_p (regno, mode);

  /* Function to allocate machine-dependent function status.  */
  init_machine_status = &mips_init_machine_status;

  /* Default to working around R4000 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4000) == 0
      && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
    target_flags |= MASK_FIX_R4000;

  /* Default to working around R4400 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4400) == 0
      && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
    target_flags |= MASK_FIX_R4400;

  /* Save base state of options.  */
  mips_base_target_flags = target_flags;
  mips_base_delayed_branch = flag_delayed_branch;
  mips_base_schedule_insns = flag_schedule_insns;
  mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
  mips_base_move_loop_invariants = flag_move_loop_invariants;
  mips_base_align_loops = align_loops;
  mips_base_align_jumps = align_jumps;
  mips_base_align_functions = align_functions;

  /* Now select the ISA mode.

     Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
     MIPS16 mode afterwards if need be.  */
  mips_set_mips16_mode (false);

  /* We call dbr_schedule from within mips_reorg.  */
  flag_delayed_branch = 0;
}

/* Swap the register information for registers I and I + 1, which
   currently have the wrong endianness.  Note that the registers'
   fixedness and call-clobberedness might have been set on the
   command line.  */

static void
mips_swap_registers (unsigned int i)
{
  int tmpi;
  const char *tmps;

#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)

  SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
  SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
  SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
  SWAP_STRING (reg_names[i], reg_names[i + 1]);

#undef SWAP_STRING
#undef SWAP_INT
}

/* Implement CONDITIONAL_REGISTER_USAGE.  */

void
mips_conditional_register_usage (void)
{
  if (!ISA_HAS_DSP)
    {
      int regno;

      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
        fixed_regs[regno] = call_used_regs[regno] = 1;
    }
  if (!TARGET_HARD_FLOAT)
    {
      int regno;

      for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
        fixed_regs[regno] = call_used_regs[regno] = 1;
      for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
        fixed_regs[regno] = call_used_regs[regno] = 1;
    }
  else if (! ISA_HAS_8CC)
    {
      int regno;

      /* We only have a single condition-code register.  We implement
         this by fixing all the condition-code registers and generating
         RTL that refers directly to ST_REG_FIRST.  */
      for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
        fixed_regs[regno] = call_used_regs[regno] = 1;
    }
  /* In MIPS16 mode, we permit the $t temporary registers to be used
     for reload.  We prohibit the unused $s registers, since they
     are call-saved, and saving them via a MIPS16 register would
     probably waste more time than just reloading the value.  */
  if (TARGET_MIPS16)
    {
      fixed_regs[18] = call_used_regs[18] = 1;
      fixed_regs[19] = call_used_regs[19] = 1;
      fixed_regs[20] = call_used_regs[20] = 1;
      fixed_regs[21] = call_used_regs[21] = 1;
      fixed_regs[22] = call_used_regs[22] = 1;
      fixed_regs[23] = call_used_regs[23] = 1;
      fixed_regs[26] = call_used_regs[26] = 1;
      fixed_regs[27] = call_used_regs[27] = 1;
      fixed_regs[30] = call_used_regs[30] = 1;
    }
  /* $f20-$f23 are call-clobbered for n64.  */
  if (mips_abi == ABI_64)
    {
      int regno;
      for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
        call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
     for n32.  */
  if (mips_abi == ABI_N32)
    {
      int regno;
      for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno += 2)
        call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Make sure that double-register accumulator values are correctly
     ordered for the current endianness.  */
  if (TARGET_LITTLE_ENDIAN)
    {
      unsigned int regno;

      mips_swap_registers (MD_REG_FIRST);
      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
        mips_swap_registers (regno);
    }
}

/* Initialize vector TARGET to VALS.  */

void
mips_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode;
  enum machine_mode inner;
  unsigned int i, n_elts;
  rtx mem;

  mode = GET_MODE (target);
  inner = GET_MODE_INNER (mode);
  n_elts = GET_MODE_NUNITS (mode);

  gcc_assert (VECTOR_MODE_P (mode));

  /* Go through memory: store each element into a stack temporary and
     then load the whole vector back into TARGET in one move.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
                    XVECEXP (vals, 0, i));

  emit_move_insn (target, mem);
}

/* When generating MIPS16 code, we want to allocate $24 (T_REG) before
   other registers for instructions for which it is possible.  This
   encourages the compiler to use CMP in cases where an XOR would
   require some register shuffling.  */

void
mips_order_regs_for_local_alloc (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    reg_alloc_order[i] = i;

  if (TARGET_MIPS16)
    {
      /* It really doesn't matter where we put register 0, since it is
         a fixed register anyhow.  */
      reg_alloc_order[0] = 24;
      reg_alloc_order[24] = 0;
    }
}

/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT mips_sched_init
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 mips_sched_reorder
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS		\
  (TARGET_DEFAULT				\
   | TARGET_CPU_DEFAULT				\
   | TARGET_ENDIAN_DEFAULT			\
   | TARGET_FP_EXCEPTIONS_DEFAULT		\
   | MASK_CHECK_ZERO_DIV			\
   | MASK_FUSED_MADD)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION mips_handle_option

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
/* All our function attributes are related to how out-of-line copies should
   be compiled or called.  They don't in themselves prevent inlining.  */
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-mips.h"