PR c/81544 - attribute noreturn and warn_unused_result on the same function accepted
[official-gcc.git] / gcc / config / arc / arc.c
blobaaefc300a9f2296646c0db12d8d6928c780203fc
1 /* Subroutines used for code generation on the Synopsys DesignWare ARC cpu.
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
4 Sources derived from work done by Sankhya Technologies (www.sankhya.com) on
5 behalf of Synopsys Inc.
7 Position Independent Code support added,Code cleaned up,
8 Comments and Support For ARC700 instructions added by
9 Saurabh Verma (saurabh.verma@codito.com)
10 Ramana Radhakrishnan(ramana.radhakrishnan@codito.com)
12 Fixing ABI inconsistencies, optimizations for ARC600 / ARC700 pipelines,
13 profiling support added by Joern Rennecke <joern.rennecke@embecosm.com>
15 This file is part of GCC.
17 GCC is free software; you can redistribute it and/or modify
18 it under the terms of the GNU General Public License as published by
19 the Free Software Foundation; either version 3, or (at your option)
20 any later version.
22 GCC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING3. If not see
29 <http://www.gnu.org/licenses/>. */
31 #include "config.h"
32 #include "system.h"
33 #include "coretypes.h"
34 #include "memmodel.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "tm_p.h"
42 #include "stringpool.h"
43 #include "attribs.h"
44 #include "optabs.h"
45 #include "regs.h"
46 #include "emit-rtl.h"
47 #include "recog.h"
48 #include "diagnostic.h"
49 #include "fold-const.h"
50 #include "varasm.h"
51 #include "stor-layout.h"
52 #include "calls.h"
53 #include "output.h"
54 #include "insn-attr.h"
55 #include "flags.h"
56 #include "explow.h"
57 #include "expr.h"
58 #include "langhooks.h"
59 #include "tm-constrs.h"
60 #include "reload.h" /* For operands_match_p */
61 #include "cfgrtl.h"
62 #include "tree-pass.h"
63 #include "context.h"
64 #include "builtins.h"
65 #include "rtl-iter.h"
66 #include "alias.h"
67 #include "opts.h"
68 #include "hw-doloop.h"
70 /* Which cpu we're compiling for (ARC600, ARC601, ARC700). */
71 static char arc_cpu_name[10] = "";
72 static const char *arc_cpu_string = arc_cpu_name;
74 /* Maximum size of a loop. */
75 #define ARC_MAX_LOOP_LENGTH 4095
77 /* ??? Loads can handle any constant, stores can only handle small ones. */
78 /* OTOH, LIMMs cost extra, so their usefulness is limited. */
79 #define RTX_OK_FOR_OFFSET_P(MODE, X) \
80 (GET_CODE (X) == CONST_INT \
81 && SMALL_INT_RANGE (INTVAL (X), (GET_MODE_SIZE (MODE) - 1) & -4, \
82 (INTVAL (X) & (GET_MODE_SIZE (MODE) - 1) & 3 \
83 ? 0 \
84 : -(-GET_MODE_SIZE (MODE) | -4) >> 1)))
86 #define LEGITIMATE_SMALL_DATA_OFFSET_P(X) \
87 (GET_CODE (X) == CONST \
88 && GET_CODE (XEXP ((X), 0)) == PLUS \
89 && GET_CODE (XEXP (XEXP ((X), 0), 0)) == SYMBOL_REF \
90 && SYMBOL_REF_SMALL_P (XEXP (XEXP ((X), 0), 0)) \
91 && GET_CODE (XEXP(XEXP ((X), 0), 1)) == CONST_INT \
92 && INTVAL (XEXP (XEXP ((X), 0), 1)) <= g_switch_value)
94 #define LEGITIMATE_SMALL_DATA_ADDRESS_P(X) \
95 (GET_CODE (X) == PLUS \
96 && REG_P (XEXP ((X), 0)) \
97 && REGNO (XEXP ((X), 0)) == SDATA_BASE_REGNUM \
98 && ((GET_CODE (XEXP ((X), 1)) == SYMBOL_REF \
99 && SYMBOL_REF_SMALL_P (XEXP ((X), 1))) \
100 || LEGITIMATE_SMALL_DATA_OFFSET_P (XEXP ((X), 1))))
102 /* Array of valid operand punctuation characters. */
103 char arc_punct_chars[256];
105 /* State used by arc_ccfsm_advance to implement conditional execution. */
106 struct GTY (()) arc_ccfsm
108 int state;
109 int cc;
110 rtx cond;
111 rtx_insn *target_insn;
112 int target_label;
115 /* Status of the IRQ_CTRL_AUX register. */
116 typedef struct irq_ctrl_saved_t
118 /* Last register number used by IRQ_CTRL_SAVED aux_reg. */
119 short irq_save_last_reg;
120 /* True if BLINK is automatically saved. */
121 bool irq_save_blink;
122 /* True if LPCOUNT is automatically saved. */
123 bool irq_save_lpcount;
124 } irq_ctrl_saved_t;
125 static irq_ctrl_saved_t irq_ctrl_saved;
127 #define ARC_AUTOBLINK_IRQ_P(FNTYPE) \
128 ((ARC_INTERRUPT_P (FNTYPE) \
129 && irq_ctrl_saved.irq_save_blink) \
130 || (ARC_FAST_INTERRUPT_P (FNTYPE) \
131 && rgf_banked_register_count > 8))
133 #define ARC_AUTOFP_IRQ_P(FNTYPE) \
134 ((ARC_INTERRUPT_P (FNTYPE) \
135 && (irq_ctrl_saved.irq_save_last_reg > 26)) \
136 || (ARC_FAST_INTERRUPT_P (FNTYPE) \
137 && rgf_banked_register_count > 8))
139 #define ARC_AUTO_IRQ_P(FNTYPE) \
140 (ARC_INTERRUPT_P (FNTYPE) && !ARC_FAST_INTERRUPT_P (FNTYPE) \
141 && (irq_ctrl_saved.irq_save_blink \
142 || (irq_ctrl_saved.irq_save_last_reg >= 0)))
144 /* Number of registers in second bank for FIRQ support. */
145 static int rgf_banked_register_count;
147 #define arc_ccfsm_current cfun->machine->ccfsm_current
149 #define ARC_CCFSM_BRANCH_DELETED_P(STATE) \
150 ((STATE)->state == 1 || (STATE)->state == 2)
152 /* Indicate we're conditionalizing insns now. */
153 #define ARC_CCFSM_RECORD_BRANCH_DELETED(STATE) \
154 ((STATE)->state += 2)
156 #define ARC_CCFSM_COND_EXEC_P(STATE) \
157 ((STATE)->state == 3 || (STATE)->state == 4 || (STATE)->state == 5 \
158 || current_insn_predicate)
160 /* Check if INSN has a 16 bit opcode considering struct arc_ccfsm *STATE. */
161 #define CCFSM_ISCOMPACT(INSN,STATE) \
162 (ARC_CCFSM_COND_EXEC_P (STATE) \
163 ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
164 || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
165 : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
167 /* Likewise, but also consider that INSN might be in a delay slot of JUMP. */
168 #define CCFSM_DBR_ISCOMPACT(INSN,JUMP,STATE) \
169 ((ARC_CCFSM_COND_EXEC_P (STATE) \
170 || (JUMP_P (JUMP) \
171 && INSN_ANNULLED_BRANCH_P (JUMP) \
172 && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (INSN)))) \
173 ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
174 || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
175 : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
177 /* The maximum number of insns skipped which will be conditionalised if
178 possible. */
179 /* When optimizing for speed:
180 Let p be the probability that the potentially skipped insns need to
181 be executed, pn the cost of a correctly predicted non-taken branch,
182 mt the cost of a mis/non-predicted taken branch,
183 mn mispredicted non-taken, pt correctly predicted taken ;
184 costs expressed in numbers of instructions like the ones considered
185 skipping.
186 Unfortunately we don't have a measure of predictability - this
187 is linked to probability only in that in the no-eviction-scenario
188 there is a lower bound 1 - 2 * min (p, 1-p), and a somewhat larger
189 value that can be assumed *if* the distribution is perfectly random.
190 A predictability of 1 is perfectly plausible not matter what p is,
191 because the decision could be dependent on an invocation parameter
192 of the program.
193 For large p, we want MAX_INSNS_SKIPPED == pn/(1-p) + mt - pn
194 For small p, we want MAX_INSNS_SKIPPED == pt
196 When optimizing for size:
197 We want to skip insn unless we could use 16 opcodes for the
198 non-conditionalized insn to balance the branch length or more.
199 Performance can be tie-breaker. */
200 /* If the potentially-skipped insns are likely to be executed, we'll
201 generally save one non-taken branch
203 this to be no less than the 1/p */
204 #define MAX_INSNS_SKIPPED 3
206 /* A nop is needed between a 4 byte insn that sets the condition codes and
207 a branch that uses them (the same isn't true for an 8 byte insn that sets
208 the condition codes). Set by arc_ccfsm_advance. Used by
209 arc_print_operand. */
211 static int get_arc_condition_code (rtx);
213 static tree arc_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
214 static tree arc_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
216 /* Initialized arc_attribute_table to NULL since arc doesnot have any
217 machine specific supported attributes. */
218 const struct attribute_spec arc_attribute_table[] =
220 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
221 affects_type_identity } */
222 { "interrupt", 1, 1, true, false, false, arc_handle_interrupt_attribute,
223 true, NULL },
224 /* Function calls made to this symbol must be done indirectly, because
225 it may lie outside of the 21/25 bit addressing range of a normal function
226 call. */
227 { "long_call", 0, 0, false, true, true, NULL, false, NULL },
228 /* Whereas these functions are always known to reside within the 25 bit
229 addressing range of unconditionalized bl. */
230 { "medium_call", 0, 0, false, true, true, NULL, false, NULL },
231 /* And these functions are always known to reside within the 21 bit
232 addressing range of blcc. */
233 { "short_call", 0, 0, false, true, true, NULL, false, NULL },
234 /* Function which are not having the prologue and epilogue generated
235 by the compiler. */
236 { "naked", 0, 0, true, false, false, arc_handle_fndecl_attribute, false,
237 NULL },
238 { NULL, 0, 0, false, false, false, NULL, false, NULL }
240 static int arc_comp_type_attributes (const_tree, const_tree);
241 static void arc_file_start (void);
242 static void arc_internal_label (FILE *, const char *, unsigned long);
243 static void arc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
244 tree);
245 static int arc_address_cost (rtx, machine_mode, addr_space_t, bool);
246 static void arc_encode_section_info (tree decl, rtx rtl, int first);
248 static void arc_init_builtins (void);
249 static rtx arc_expand_builtin (tree, rtx, rtx, machine_mode, int);
251 static int branch_dest (rtx);
253 static void arc_output_pic_addr_const (FILE *, rtx, int);
254 static bool arc_function_ok_for_sibcall (tree, tree);
255 static rtx arc_function_value (const_tree, const_tree, bool);
256 const char * output_shift (rtx *);
257 static void arc_reorg (void);
258 static bool arc_in_small_data_p (const_tree);
260 static void arc_init_reg_tables (void);
261 static bool arc_return_in_memory (const_tree, const_tree);
262 static bool arc_vector_mode_supported_p (machine_mode);
264 static bool arc_can_use_doloop_p (const widest_int &, const widest_int &,
265 unsigned int, bool);
266 static const char *arc_invalid_within_doloop (const rtx_insn *);
268 static void output_short_suffix (FILE *file);
270 static bool arc_frame_pointer_required (void);
272 static bool arc_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
273 unsigned int,
274 enum by_pieces_operation op,
275 bool);
277 /* Globally visible information about currently selected cpu. */
278 const arc_cpu_t *arc_selected_cpu;
280 static bool
281 legitimate_scaled_address_p (machine_mode mode, rtx op, bool strict)
283 if (GET_CODE (op) != PLUS)
284 return false;
286 if (GET_CODE (XEXP (op, 0)) != MULT)
287 return false;
289 /* Check multiplication operands. */
290 if (!RTX_OK_FOR_INDEX_P (XEXP (XEXP (op, 0), 0), strict))
291 return false;
293 if (!CONST_INT_P (XEXP (XEXP (op, 0), 1)))
294 return false;
296 switch (GET_MODE_SIZE (mode))
298 case 2:
299 if (INTVAL (XEXP (XEXP (op, 0), 1)) != 2)
300 return false;
301 break;
302 case 8:
303 if (!TARGET_LL64)
304 return false;
305 /* Fall through. */
306 case 4:
307 if (INTVAL (XEXP (XEXP (op, 0), 1)) != 4)
308 return false;
309 default:
310 return false;
313 /* Check the base. */
314 if (RTX_OK_FOR_BASE_P (XEXP (op, 1), (strict)))
315 return true;
317 if (flag_pic)
319 if (CONST_INT_P (XEXP (op, 1)))
320 return true;
321 return false;
323 if (CONSTANT_P (XEXP (op, 1)))
325 /* Scalled addresses for sdata is done other places. */
326 if (GET_CODE (XEXP (op, 1)) == SYMBOL_REF
327 && SYMBOL_REF_SMALL_P (XEXP (op, 1)))
328 return false;
329 return true;
332 return false;
335 /* Check for constructions like REG + OFFS, where OFFS can be a
336 register, an immediate or an long immediate. */
338 static bool
339 legitimate_offset_address_p (machine_mode mode, rtx x, bool index, bool strict)
341 if (GET_CODE (x) != PLUS)
342 return false;
344 if (!RTX_OK_FOR_BASE_P (XEXP (x, 0), (strict)))
345 return false;
347 /* Check for: [Rx + small offset] or [Rx + Ry]. */
348 if (((index && RTX_OK_FOR_INDEX_P (XEXP (x, 1), (strict))
349 && GET_MODE_SIZE ((mode)) <= 4)
350 || RTX_OK_FOR_OFFSET_P (mode, XEXP (x, 1))))
351 return true;
353 /* Check for [Rx + symbol]. */
354 if (!flag_pic
355 && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
356 /* Avoid this type of address for double or larger modes. */
357 && (GET_MODE_SIZE (mode) <= 4)
358 /* Avoid small data which ends in something like GP +
359 symb@sda. */
360 && (!SYMBOL_REF_SMALL_P (XEXP (x, 1))))
361 return true;
363 return false;
366 /* Implements target hook vector_mode_supported_p. */
368 static bool
369 arc_vector_mode_supported_p (machine_mode mode)
371 switch (mode)
373 case E_V2HImode:
374 return TARGET_PLUS_DMPY;
375 case E_V4HImode:
376 case E_V2SImode:
377 return TARGET_PLUS_QMACW;
378 case E_V4SImode:
379 case E_V8HImode:
380 return TARGET_SIMD_SET;
382 default:
383 return false;
387 /* Implements target hook TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
389 static machine_mode
390 arc_preferred_simd_mode (scalar_mode mode)
392 switch (mode)
394 case E_HImode:
395 return TARGET_PLUS_QMACW ? V4HImode : V2HImode;
396 case E_SImode:
397 return V2SImode;
399 default:
400 return word_mode;
404 /* Implements target hook
405 TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */
407 static unsigned int
408 arc_autovectorize_vector_sizes (void)
410 return TARGET_PLUS_QMACW ? (8 | 4) : 0;
413 /* TARGET_PRESERVE_RELOAD_P is still awaiting patch re-evaluation / review. */
414 static bool arc_preserve_reload_p (rtx in) ATTRIBUTE_UNUSED;
415 static rtx arc_delegitimize_address (rtx);
416 static bool arc_can_follow_jump (const rtx_insn *follower,
417 const rtx_insn *followee);
419 static rtx frame_insn (rtx);
420 static void arc_function_arg_advance (cumulative_args_t, machine_mode,
421 const_tree, bool);
422 static rtx arc_legitimize_address_0 (rtx, rtx, machine_mode mode);
424 static void arc_finalize_pic (void);
426 /* initialize the GCC target structure. */
427 #undef TARGET_COMP_TYPE_ATTRIBUTES
428 #define TARGET_COMP_TYPE_ATTRIBUTES arc_comp_type_attributes
429 #undef TARGET_ASM_FILE_START
430 #define TARGET_ASM_FILE_START arc_file_start
431 #undef TARGET_ATTRIBUTE_TABLE
432 #define TARGET_ATTRIBUTE_TABLE arc_attribute_table
433 #undef TARGET_ASM_INTERNAL_LABEL
434 #define TARGET_ASM_INTERNAL_LABEL arc_internal_label
435 #undef TARGET_RTX_COSTS
436 #define TARGET_RTX_COSTS arc_rtx_costs
437 #undef TARGET_ADDRESS_COST
438 #define TARGET_ADDRESS_COST arc_address_cost
440 #undef TARGET_ENCODE_SECTION_INFO
441 #define TARGET_ENCODE_SECTION_INFO arc_encode_section_info
443 #undef TARGET_CANNOT_FORCE_CONST_MEM
444 #define TARGET_CANNOT_FORCE_CONST_MEM arc_cannot_force_const_mem
446 #undef TARGET_INIT_BUILTINS
447 #define TARGET_INIT_BUILTINS arc_init_builtins
449 #undef TARGET_EXPAND_BUILTIN
450 #define TARGET_EXPAND_BUILTIN arc_expand_builtin
452 #undef TARGET_BUILTIN_DECL
453 #define TARGET_BUILTIN_DECL arc_builtin_decl
455 #undef TARGET_ASM_OUTPUT_MI_THUNK
456 #define TARGET_ASM_OUTPUT_MI_THUNK arc_output_mi_thunk
458 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
459 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
461 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
462 #define TARGET_FUNCTION_OK_FOR_SIBCALL arc_function_ok_for_sibcall
464 #undef TARGET_MACHINE_DEPENDENT_REORG
465 #define TARGET_MACHINE_DEPENDENT_REORG arc_reorg
467 #undef TARGET_IN_SMALL_DATA_P
468 #define TARGET_IN_SMALL_DATA_P arc_in_small_data_p
470 #undef TARGET_PROMOTE_FUNCTION_MODE
471 #define TARGET_PROMOTE_FUNCTION_MODE \
472 default_promote_function_mode_always_promote
474 #undef TARGET_PROMOTE_PROTOTYPES
475 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
477 #undef TARGET_RETURN_IN_MEMORY
478 #define TARGET_RETURN_IN_MEMORY arc_return_in_memory
479 #undef TARGET_PASS_BY_REFERENCE
480 #define TARGET_PASS_BY_REFERENCE arc_pass_by_reference
482 #undef TARGET_SETUP_INCOMING_VARARGS
483 #define TARGET_SETUP_INCOMING_VARARGS arc_setup_incoming_varargs
485 #undef TARGET_ARG_PARTIAL_BYTES
486 #define TARGET_ARG_PARTIAL_BYTES arc_arg_partial_bytes
488 #undef TARGET_MUST_PASS_IN_STACK
489 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
491 #undef TARGET_FUNCTION_VALUE
492 #define TARGET_FUNCTION_VALUE arc_function_value
494 #undef TARGET_SCHED_ADJUST_PRIORITY
495 #define TARGET_SCHED_ADJUST_PRIORITY arc_sched_adjust_priority
497 #undef TARGET_VECTOR_MODE_SUPPORTED_P
498 #define TARGET_VECTOR_MODE_SUPPORTED_P arc_vector_mode_supported_p
500 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
501 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE arc_preferred_simd_mode
503 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
504 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES arc_autovectorize_vector_sizes
506 #undef TARGET_CAN_USE_DOLOOP_P
507 #define TARGET_CAN_USE_DOLOOP_P arc_can_use_doloop_p
509 #undef TARGET_INVALID_WITHIN_DOLOOP
510 #define TARGET_INVALID_WITHIN_DOLOOP arc_invalid_within_doloop
512 #undef TARGET_PRESERVE_RELOAD_P
513 #define TARGET_PRESERVE_RELOAD_P arc_preserve_reload_p
515 #undef TARGET_CAN_FOLLOW_JUMP
516 #define TARGET_CAN_FOLLOW_JUMP arc_can_follow_jump
518 #undef TARGET_DELEGITIMIZE_ADDRESS
519 #define TARGET_DELEGITIMIZE_ADDRESS arc_delegitimize_address
521 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
522 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
523 arc_use_by_pieces_infrastructure_p
525 /* Usually, we will be able to scale anchor offsets.
526 When this fails, we want LEGITIMIZE_ADDRESS to kick in. */
527 #undef TARGET_MIN_ANCHOR_OFFSET
528 #define TARGET_MIN_ANCHOR_OFFSET (-1024)
529 #undef TARGET_MAX_ANCHOR_OFFSET
530 #define TARGET_MAX_ANCHOR_OFFSET (1020)
532 #undef TARGET_SECONDARY_RELOAD
533 #define TARGET_SECONDARY_RELOAD arc_secondary_reload
535 #define TARGET_OPTION_OVERRIDE arc_override_options
537 #define TARGET_CONDITIONAL_REGISTER_USAGE arc_conditional_register_usage
539 #define TARGET_TRAMPOLINE_INIT arc_initialize_trampoline
541 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS arc_trampoline_adjust_address
543 #define TARGET_CAN_ELIMINATE arc_can_eliminate
545 #define TARGET_FRAME_POINTER_REQUIRED arc_frame_pointer_required
547 #define TARGET_FUNCTION_ARG arc_function_arg
549 #define TARGET_FUNCTION_ARG_ADVANCE arc_function_arg_advance
551 #define TARGET_LEGITIMATE_CONSTANT_P arc_legitimate_constant_p
553 #define TARGET_LEGITIMATE_ADDRESS_P arc_legitimate_address_p
555 #define TARGET_MODE_DEPENDENT_ADDRESS_P arc_mode_dependent_address_p
557 #define TARGET_LEGITIMIZE_ADDRESS arc_legitimize_address
559 #define TARGET_ADJUST_INSN_LENGTH arc_adjust_insn_length
561 #define TARGET_INSN_LENGTH_PARAMETERS arc_insn_length_parameters
563 #undef TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P
564 #define TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P \
565 arc_no_speculation_in_delay_slots_p
567 #undef TARGET_LRA_P
568 #define TARGET_LRA_P arc_lra_p
569 #define TARGET_REGISTER_PRIORITY arc_register_priority
570 /* Stores with scaled offsets have different displacement ranges. */
571 #define TARGET_DIFFERENT_ADDR_DISPLACEMENT_P hook_bool_void_true
572 #define TARGET_SPILL_CLASS arc_spill_class
574 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
575 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arc_allocate_stack_slots_for_args
577 #undef TARGET_WARN_FUNC_RETURN
578 #define TARGET_WARN_FUNC_RETURN arc_warn_func_return
580 #include "target-def.h"
582 #undef TARGET_ASM_ALIGNED_HI_OP
583 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
584 #undef TARGET_ASM_ALIGNED_SI_OP
585 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
587 #ifdef HAVE_AS_TLS
588 #undef TARGET_HAVE_TLS
589 #define TARGET_HAVE_TLS HAVE_AS_TLS
590 #endif
592 #undef TARGET_DWARF_REGISTER_SPAN
593 #define TARGET_DWARF_REGISTER_SPAN arc_dwarf_register_span
595 #undef TARGET_HARD_REGNO_NREGS
596 #define TARGET_HARD_REGNO_NREGS arc_hard_regno_nregs
597 #undef TARGET_HARD_REGNO_MODE_OK
598 #define TARGET_HARD_REGNO_MODE_OK arc_hard_regno_mode_ok
600 #undef TARGET_MODES_TIEABLE_P
601 #define TARGET_MODES_TIEABLE_P arc_modes_tieable_p
602 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
603 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE arc_builtin_setjmp_frame_value
605 /* Try to keep the (mov:DF _, reg) as early as possible so
606 that the d<add/sub/mul>h-lr insns appear together and can
607 use the peephole2 pattern. */
609 static int
610 arc_sched_adjust_priority (rtx_insn *insn, int priority)
612 rtx set = single_set (insn);
613 if (set
614 && GET_MODE (SET_SRC(set)) == DFmode
615 && GET_CODE (SET_SRC(set)) == REG)
617 /* Incrementing priority by 20 (empirically derived). */
618 return priority + 20;
621 return priority;
624 /* For ARC base register + offset addressing, the validity of the
625 address is mode-dependent for most of the offset range, as the
626 offset can be scaled by the access size.
627 We don't expose these as mode-dependent addresses in the
628 mode_dependent_address_p target hook, because that would disable
629 lots of optimizations, and most uses of these addresses are for 32
630 or 64 bit accesses anyways, which are fine.
631 However, that leaves some addresses for 8 / 16 bit values not
632 properly reloaded by the generic code, which is why we have to
633 schedule secondary reloads for these. */
635 static reg_class_t
636 arc_secondary_reload (bool in_p,
637 rtx x,
638 reg_class_t cl,
639 machine_mode mode,
640 secondary_reload_info *sri)
642 enum rtx_code code = GET_CODE (x);
644 if (cl == DOUBLE_REGS)
645 return GENERAL_REGS;
647 /* The loop counter register can be stored, but not loaded directly. */
648 if ((cl == LPCOUNT_REG || cl == WRITABLE_CORE_REGS)
649 && in_p && MEM_P (x))
650 return GENERAL_REGS;
652 /* If we have a subreg (reg), where reg is a pseudo (that will end in
653 a memory location), then we may need a scratch register to handle
654 the fp/sp+largeoffset address. */
655 if (code == SUBREG)
657 rtx addr = NULL_RTX;
658 x = SUBREG_REG (x);
660 if (REG_P (x))
662 int regno = REGNO (x);
663 if (regno >= FIRST_PSEUDO_REGISTER)
664 regno = reg_renumber[regno];
666 if (regno != -1)
667 return NO_REGS;
669 /* It is a pseudo that ends in a stack location. */
670 if (reg_equiv_mem (REGNO (x)))
672 /* Get the equivalent address and check the range of the
673 offset. */
674 rtx mem = reg_equiv_mem (REGNO (x));
675 addr = find_replacement (&XEXP (mem, 0));
678 else
680 gcc_assert (MEM_P (x));
681 addr = XEXP (x, 0);
682 addr = simplify_rtx (addr);
684 if (addr && GET_CODE (addr) == PLUS
685 && CONST_INT_P (XEXP (addr, 1))
686 && (!RTX_OK_FOR_OFFSET_P (mode, XEXP (addr, 1))))
688 switch (mode)
690 case E_QImode:
691 sri->icode =
692 in_p ? CODE_FOR_reload_qi_load : CODE_FOR_reload_qi_store;
693 break;
694 case E_HImode:
695 sri->icode =
696 in_p ? CODE_FOR_reload_hi_load : CODE_FOR_reload_hi_store;
697 break;
698 default:
699 break;
703 return NO_REGS;
706 /* Convert reloads using offsets that are too large to use indirect
707 addressing. */
709 void
710 arc_secondary_reload_conv (rtx reg, rtx mem, rtx scratch, bool store_p)
712 rtx addr;
714 gcc_assert (GET_CODE (mem) == MEM);
715 addr = XEXP (mem, 0);
717 /* Large offset: use a move. FIXME: ld ops accepts limms as
718 offsets. Hence, the following move insn is not required. */
719 emit_move_insn (scratch, addr);
720 mem = replace_equiv_address_nv (mem, scratch);
722 /* Now create the move. */
723 if (store_p)
724 emit_insn (gen_rtx_SET (mem, reg));
725 else
726 emit_insn (gen_rtx_SET (reg, mem));
728 return;
731 static unsigned arc_ifcvt (void);
733 namespace {
735 const pass_data pass_data_arc_ifcvt =
737 RTL_PASS,
738 "arc_ifcvt", /* name */
739 OPTGROUP_NONE, /* optinfo_flags */
740 TV_IFCVT2, /* tv_id */
741 0, /* properties_required */
742 0, /* properties_provided */
743 0, /* properties_destroyed */
744 0, /* todo_flags_start */
745 TODO_df_finish /* todo_flags_finish */
748 class pass_arc_ifcvt : public rtl_opt_pass
750 public:
751 pass_arc_ifcvt(gcc::context *ctxt)
752 : rtl_opt_pass(pass_data_arc_ifcvt, ctxt)
755 /* opt_pass methods: */
756 opt_pass * clone () { return new pass_arc_ifcvt (m_ctxt); }
757 virtual unsigned int execute (function *) { return arc_ifcvt (); }
760 } // anon namespace
762 rtl_opt_pass *
763 make_pass_arc_ifcvt (gcc::context *ctxt)
765 return new pass_arc_ifcvt (ctxt);
768 static unsigned arc_predicate_delay_insns (void);
770 namespace {
772 const pass_data pass_data_arc_predicate_delay_insns =
774 RTL_PASS,
775 "arc_predicate_delay_insns", /* name */
776 OPTGROUP_NONE, /* optinfo_flags */
777 TV_IFCVT2, /* tv_id */
778 0, /* properties_required */
779 0, /* properties_provided */
780 0, /* properties_destroyed */
781 0, /* todo_flags_start */
782 TODO_df_finish /* todo_flags_finish */
785 class pass_arc_predicate_delay_insns : public rtl_opt_pass
787 public:
788 pass_arc_predicate_delay_insns(gcc::context *ctxt)
789 : rtl_opt_pass(pass_data_arc_predicate_delay_insns, ctxt)
792 /* opt_pass methods: */
793 virtual unsigned int execute (function *)
795 return arc_predicate_delay_insns ();
799 } // anon namespace
801 rtl_opt_pass *
802 make_pass_arc_predicate_delay_insns (gcc::context *ctxt)
804 return new pass_arc_predicate_delay_insns (ctxt);
807 /* Called by OVERRIDE_OPTIONS to initialize various things. */
809 static void
810 arc_init (void)
812 if (TARGET_V2)
814 /* I have the multiplier, then use it*/
815 if (TARGET_MPYW || TARGET_MULTI)
816 arc_multcost = COSTS_N_INSNS (1);
818 /* Note: arc_multcost is only used in rtx_cost if speed is true. */
819 if (arc_multcost < 0)
820 switch (arc_tune)
822 case TUNE_ARC700_4_2_STD:
823 /* latency 7;
824 max throughput (1 multiply + 4 other insns) / 5 cycles. */
825 arc_multcost = COSTS_N_INSNS (4);
826 if (TARGET_NOMPY_SET)
827 arc_multcost = COSTS_N_INSNS (30);
828 break;
829 case TUNE_ARC700_4_2_XMAC:
830 /* latency 5;
831 max throughput (1 multiply + 2 other insns) / 3 cycles. */
832 arc_multcost = COSTS_N_INSNS (3);
833 if (TARGET_NOMPY_SET)
834 arc_multcost = COSTS_N_INSNS (30);
835 break;
836 case TUNE_ARC600:
837 if (TARGET_MUL64_SET)
839 arc_multcost = COSTS_N_INSNS (4);
840 break;
842 /* Fall through. */
843 default:
844 arc_multcost = COSTS_N_INSNS (30);
845 break;
848 /* MPY instructions valid only for ARC700 or ARCv2. */
849 if (TARGET_NOMPY_SET && TARGET_ARC600_FAMILY)
850 error ("-mno-mpy supported only for ARC700 or ARCv2");
852 if (!TARGET_DPFP && TARGET_DPFP_DISABLE_LRSR)
853 error ("-mno-dpfp-lrsr supported only with -mdpfp");
855 /* FPX-1. No fast and compact together. */
856 if ((TARGET_DPFP_FAST_SET && TARGET_DPFP_COMPACT_SET)
857 || (TARGET_SPFP_FAST_SET && TARGET_SPFP_COMPACT_SET))
858 error ("FPX fast and compact options cannot be specified together");
860 /* FPX-2. No fast-spfp for arc600 or arc601. */
861 if (TARGET_SPFP_FAST_SET && TARGET_ARC600_FAMILY)
862 error ("-mspfp_fast not available on ARC600 or ARC601");
864 /* FPX-4. No FPX extensions mixed with FPU extensions. */
865 if ((TARGET_DPFP_FAST_SET || TARGET_DPFP_COMPACT_SET || TARGET_SPFP)
866 && TARGET_HARD_FLOAT)
867 error ("No FPX/FPU mixing allowed");
869 /* Warn for unimplemented PIC in pre-ARC700 cores, and disable flag_pic. */
870 if (flag_pic && TARGET_ARC600_FAMILY)
872 warning (DK_WARNING,
873 "PIC is not supported for %s. Generating non-PIC code only..",
874 arc_cpu_string);
875 flag_pic = 0;
878 arc_init_reg_tables ();
880 /* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
881 memset (arc_punct_chars, 0, sizeof (arc_punct_chars));
882 arc_punct_chars['#'] = 1;
883 arc_punct_chars['*'] = 1;
884 arc_punct_chars['?'] = 1;
885 arc_punct_chars['!'] = 1;
886 arc_punct_chars['^'] = 1;
887 arc_punct_chars['&'] = 1;
888 arc_punct_chars['+'] = 1;
889 arc_punct_chars['_'] = 1;
891 if (optimize > 1 && !TARGET_NO_COND_EXEC)
893 /* There are two target-independent ifcvt passes, and arc_reorg may do
894 one or more arc_ifcvt calls. */
895 opt_pass *pass_arc_ifcvt_4 = make_pass_arc_ifcvt (g);
896 struct register_pass_info arc_ifcvt4_info
897 = { pass_arc_ifcvt_4, "dbr", 1, PASS_POS_INSERT_AFTER };
898 struct register_pass_info arc_ifcvt5_info
899 = { pass_arc_ifcvt_4->clone (), "shorten", 1, PASS_POS_INSERT_BEFORE };
901 register_pass (&arc_ifcvt4_info);
902 register_pass (&arc_ifcvt5_info);
905 if (flag_delayed_branch)
907 opt_pass *pass_arc_predicate_delay_insns
908 = make_pass_arc_predicate_delay_insns (g);
909 struct register_pass_info arc_predicate_delay_info
910 = { pass_arc_predicate_delay_insns, "dbr", 1, PASS_POS_INSERT_AFTER };
912 register_pass (&arc_predicate_delay_info);
916 /* Parse -mirq-ctrl-saved=RegisterRange, blink, lp_copunt. The
917 register range is specified as two registers separated by a dash.
918 It always starts with r0, and its upper limit is fp register.
919 blink and lp_count registers are optional. */
921 static void
922 irq_range (const char *cstr)
924 int i, first, last, blink, lpcount, xreg;
925 char *str, *dash, *comma;
927 i = strlen (cstr);
928 str = (char *) alloca (i + 1);
929 memcpy (str, cstr, i + 1);
930 blink = -1;
931 lpcount = -1;
933 dash = strchr (str, '-');
934 if (!dash)
936 warning (0, "value of -mirq-ctrl-saved must have form R0-REGx");
937 return;
939 *dash = '\0';
941 comma = strchr (dash + 1, ',');
942 if (comma)
943 *comma = '\0';
945 first = decode_reg_name (str);
946 if (first != 0)
948 warning (0, "first register must be R0");
949 return;
952 /* At this moment we do not have the register names initialized
953 accordingly. */
954 if (!strcmp (dash + 1, "ilink"))
955 last = 29;
956 else
957 last = decode_reg_name (dash + 1);
959 if (last < 0)
961 warning (0, "unknown register name: %s", dash + 1);
962 return;
965 if (!(last & 0x01))
967 warning (0, "last register name %s must be an odd register", dash + 1);
968 return;
971 *dash = '-';
973 if (first > last)
975 warning (0, "%s-%s is an empty range", str, dash + 1);
976 return;
979 while (comma)
981 *comma = ',';
982 str = comma + 1;
984 comma = strchr (str, ',');
985 if (comma)
986 *comma = '\0';
988 xreg = decode_reg_name (str);
989 switch (xreg)
991 case 31:
992 blink = 31;
993 break;
995 case 60:
996 lpcount = 60;
997 break;
999 default:
1000 warning (0, "unknown register name: %s", str);
1001 return;
1005 irq_ctrl_saved.irq_save_last_reg = last;
1006 irq_ctrl_saved.irq_save_blink = (blink == 31) || (last == 31);
1007 irq_ctrl_saved.irq_save_lpcount = (lpcount == 60);
1010 /* Parse -mrgf-banked-regs=NUM option string. Valid values for NUM are 4,
1011 8, 16, or 32. */
1013 static void
1014 parse_mrgf_banked_regs_option (const char *arg)
1016 long int val;
1017 char *end_ptr;
1019 errno = 0;
1020 val = strtol (arg, &end_ptr, 10);
1021 if (errno != 0 || *arg == '\0' || *end_ptr != '\0'
1022 || (val != 0 && val != 4 && val != 8 && val != 16 && val != 32))
1024 error ("invalid number in -mrgf-banked-regs=%s "
1025 "valid values are 0, 4, 8, 16, or 32", arg);
1026 return;
1028 rgf_banked_register_count = (int) val;
1031 /* Check ARC options, generate derived target attributes. */
1033 static void
1034 arc_override_options (void)
1036 unsigned int i;
1037 cl_deferred_option *opt;
1038 vec<cl_deferred_option> *vopt
1039 = (vec<cl_deferred_option> *) arc_deferred_options;
1041 if (arc_cpu == PROCESSOR_NONE)
1042 arc_cpu = TARGET_CPU_DEFAULT;
1044 /* Set the default cpu options. */
1045 arc_selected_cpu = &arc_cpu_types[(int) arc_cpu];
1047 /* Set the architectures. */
1048 switch (arc_selected_cpu->arch_info->arch_id)
1050 case BASE_ARCH_em:
1051 arc_cpu_string = "EM";
1052 break;
1053 case BASE_ARCH_hs:
1054 arc_cpu_string = "HS";
1055 break;
1056 case BASE_ARCH_700:
1057 if (arc_selected_cpu->processor == PROCESSOR_nps400)
1058 arc_cpu_string = "NPS400";
1059 else
1060 arc_cpu_string = "ARC700";
1061 break;
1062 case BASE_ARCH_6xx:
1063 arc_cpu_string = "ARC600";
1064 break;
1065 default:
1066 gcc_unreachable ();
1069 irq_ctrl_saved.irq_save_last_reg = -1;
1070 irq_ctrl_saved.irq_save_blink = false;
1071 irq_ctrl_saved.irq_save_lpcount = false;
1073 rgf_banked_register_count = 0;
1075 /* Handle the deferred options. */
1076 if (vopt)
1077 FOR_EACH_VEC_ELT (*vopt, i, opt)
1079 switch (opt->opt_index)
1081 case OPT_mirq_ctrl_saved_:
1082 if (TARGET_V2)
1083 irq_range (opt->arg);
1084 else
1085 warning (0, "option -mirq-ctrl-saved valid only for ARC v2 processors");
1086 break;
1088 case OPT_mrgf_banked_regs_:
1089 if (TARGET_V2)
1090 parse_mrgf_banked_regs_option (opt->arg);
1091 else
1092 warning (0, "option -mrgf-banked-regs valid only for ARC v2 processors");
1093 break;
1095 default:
1096 gcc_unreachable();
1100 /* Set cpu flags accordingly to architecture/selected cpu. The cpu
1101 specific flags are set in arc-common.c. The architecture forces
1102 the default hardware configurations in, regardless what command
1103 line options are saying. The CPU optional hw options can be
1104 turned on or off. */
1105 #define ARC_OPT(NAME, CODE, MASK, DOC) \
1106 do { \
1107 if ((arc_selected_cpu->flags & CODE) \
1108 && ((target_flags_explicit & MASK) == 0)) \
1109 target_flags |= MASK; \
1110 if (arc_selected_cpu->arch_info->dflags & CODE) \
1111 target_flags |= MASK; \
1112 } while (0);
1113 #define ARC_OPTX(NAME, CODE, VAR, VAL, DOC) \
1114 do { \
1115 if ((arc_selected_cpu->flags & CODE) \
1116 && (VAR == DEFAULT_##VAR)) \
1117 VAR = VAL; \
1118 if (arc_selected_cpu->arch_info->dflags & CODE) \
1119 VAR = VAL; \
1120 } while (0);
1122 #include "arc-options.def"
1124 #undef ARC_OPTX
1125 #undef ARC_OPT
1127 /* Check options against architecture options. Throw an error if
1128 option is not allowed. */
1129 #define ARC_OPTX(NAME, CODE, VAR, VAL, DOC) \
1130 do { \
1131 if ((VAR == VAL) \
1132 && (!(arc_selected_cpu->arch_info->flags & CODE))) \
1134 error ("%s is not available for %s architecture", \
1135 DOC, arc_selected_cpu->arch_info->name); \
1137 } while (0);
1138 #define ARC_OPT(NAME, CODE, MASK, DOC) \
1139 do { \
1140 if ((target_flags & MASK) \
1141 && (!(arc_selected_cpu->arch_info->flags & CODE))) \
1142 error ("%s is not available for %s architecture", \
1143 DOC, arc_selected_cpu->arch_info->name); \
1144 } while (0);
1146 #include "arc-options.def"
1148 #undef ARC_OPTX
1149 #undef ARC_OPT
1151 /* Set Tune option. */
1152 if (arc_tune == TUNE_NONE)
1153 arc_tune = (enum attr_tune) arc_selected_cpu->tune;
1155 if (arc_size_opt_level == 3)
1156 optimize_size = 1;
1158 /* Compact casesi is not a valid option for ARCv2 family. */
1159 if (TARGET_V2)
1161 if (TARGET_COMPACT_CASESI)
1163 warning (0, "compact-casesi is not applicable to ARCv2");
1164 TARGET_COMPACT_CASESI = 0;
1167 else if (optimize_size == 1
1168 && !global_options_set.x_TARGET_COMPACT_CASESI)
1169 TARGET_COMPACT_CASESI = 1;
1171 if (flag_pic)
1172 target_flags |= MASK_NO_SDATA_SET;
1174 if (flag_no_common == 255)
1175 flag_no_common = !TARGET_NO_SDATA_SET;
1177 /* TARGET_COMPACT_CASESI needs the "q" register class. */
1178 if (TARGET_MIXED_CODE)
1179 TARGET_Q_CLASS = 1;
1180 if (!TARGET_Q_CLASS)
1181 TARGET_COMPACT_CASESI = 0;
1182 if (TARGET_COMPACT_CASESI)
1183 TARGET_CASE_VECTOR_PC_RELATIVE = 1;
1185 /* Check for small data option */
1186 if (!global_options_set.x_g_switch_value && !TARGET_NO_SDATA_SET)
1187 g_switch_value = TARGET_LL64 ? 8 : 4;
1189 /* These need to be done at start up. It's convenient to do them here. */
1190 arc_init ();
1193 /* The condition codes of the ARC, and the inverse function. */
1194 /* For short branches, the "c" / "nc" names are not defined in the ARC
1195 Programmers manual, so we have to use "lo" / "hs"" instead. */
1196 static const char *arc_condition_codes[] =
1198 "al", 0, "eq", "ne", "p", "n", "lo", "hs", "v", "nv",
1199 "gt", "le", "ge", "lt", "hi", "ls", "pnz", 0
1202 enum arc_cc_code_index
1204 ARC_CC_AL, ARC_CC_EQ = ARC_CC_AL+2, ARC_CC_NE, ARC_CC_P, ARC_CC_N,
1205 ARC_CC_C, ARC_CC_NC, ARC_CC_V, ARC_CC_NV,
1206 ARC_CC_GT, ARC_CC_LE, ARC_CC_GE, ARC_CC_LT, ARC_CC_HI, ARC_CC_LS, ARC_CC_PNZ,
1207 ARC_CC_LO = ARC_CC_C, ARC_CC_HS = ARC_CC_NC
1210 #define ARC_INVERSE_CONDITION_CODE(X) ((X) ^ 1)
1212 /* Returns the index of the ARC condition code string in
1213 `arc_condition_codes'. COMPARISON should be an rtx like
1214 `(eq (...) (...))'. */
1216 static int
1217 get_arc_condition_code (rtx comparison)
1219 switch (GET_MODE (XEXP (comparison, 0)))
1221 case E_CCmode:
1222 case E_SImode: /* For BRcc. */
1223 switch (GET_CODE (comparison))
1225 case EQ : return ARC_CC_EQ;
1226 case NE : return ARC_CC_NE;
1227 case GT : return ARC_CC_GT;
1228 case LE : return ARC_CC_LE;
1229 case GE : return ARC_CC_GE;
1230 case LT : return ARC_CC_LT;
1231 case GTU : return ARC_CC_HI;
1232 case LEU : return ARC_CC_LS;
1233 case LTU : return ARC_CC_LO;
1234 case GEU : return ARC_CC_HS;
1235 default : gcc_unreachable ();
1237 case E_CC_ZNmode:
1238 switch (GET_CODE (comparison))
1240 case EQ : return ARC_CC_EQ;
1241 case NE : return ARC_CC_NE;
1242 case GE: return ARC_CC_P;
1243 case LT: return ARC_CC_N;
1244 case GT : return ARC_CC_PNZ;
1245 default : gcc_unreachable ();
1247 case E_CC_Zmode:
1248 switch (GET_CODE (comparison))
1250 case EQ : return ARC_CC_EQ;
1251 case NE : return ARC_CC_NE;
1252 default : gcc_unreachable ();
1254 case E_CC_Cmode:
1255 switch (GET_CODE (comparison))
1257 case LTU : return ARC_CC_C;
1258 case GEU : return ARC_CC_NC;
1259 default : gcc_unreachable ();
1261 case E_CC_FP_GTmode:
1262 if (TARGET_ARGONAUT_SET && TARGET_SPFP)
1263 switch (GET_CODE (comparison))
1265 case GT : return ARC_CC_N;
1266 case UNLE: return ARC_CC_P;
1267 default : gcc_unreachable ();
1269 else
1270 switch (GET_CODE (comparison))
1272 case GT : return ARC_CC_HI;
1273 case UNLE : return ARC_CC_LS;
1274 default : gcc_unreachable ();
1276 case E_CC_FP_GEmode:
1277 /* Same for FPX and non-FPX. */
1278 switch (GET_CODE (comparison))
1280 case GE : return ARC_CC_HS;
1281 case UNLT : return ARC_CC_LO;
1282 default : gcc_unreachable ();
1284 case E_CC_FP_UNEQmode:
1285 switch (GET_CODE (comparison))
1287 case UNEQ : return ARC_CC_EQ;
1288 case LTGT : return ARC_CC_NE;
1289 default : gcc_unreachable ();
1291 case E_CC_FP_ORDmode:
1292 switch (GET_CODE (comparison))
1294 case UNORDERED : return ARC_CC_C;
1295 case ORDERED : return ARC_CC_NC;
1296 default : gcc_unreachable ();
1298 case E_CC_FPXmode:
1299 switch (GET_CODE (comparison))
1301 case EQ : return ARC_CC_EQ;
1302 case NE : return ARC_CC_NE;
1303 case UNORDERED : return ARC_CC_C;
1304 case ORDERED : return ARC_CC_NC;
1305 case LTGT : return ARC_CC_HI;
1306 case UNEQ : return ARC_CC_LS;
1307 default : gcc_unreachable ();
1309 case E_CC_FPUmode:
1310 switch (GET_CODE (comparison))
1312 case EQ : return ARC_CC_EQ;
1313 case NE : return ARC_CC_NE;
1314 case GT : return ARC_CC_GT;
1315 case GE : return ARC_CC_GE;
1316 case LT : return ARC_CC_C;
1317 case LE : return ARC_CC_LS;
1318 case UNORDERED : return ARC_CC_V;
1319 case ORDERED : return ARC_CC_NV;
1320 case UNGT : return ARC_CC_HI;
1321 case UNGE : return ARC_CC_HS;
1322 case UNLT : return ARC_CC_LT;
1323 case UNLE : return ARC_CC_LE;
1324 /* UNEQ and LTGT do not have representation. */
1325 case LTGT : /* Fall through. */
1326 case UNEQ : /* Fall through. */
1327 default : gcc_unreachable ();
1329 case E_CC_FPU_UNEQmode:
1330 switch (GET_CODE (comparison))
1332 case LTGT : return ARC_CC_NE;
1333 case UNEQ : return ARC_CC_EQ;
1334 default : gcc_unreachable ();
1336 default : gcc_unreachable ();
1338 /*NOTREACHED*/
1339 return (42);
1342 /* Return true if COMPARISON has a short form that can accomodate OFFSET. */
1344 bool
1345 arc_short_comparison_p (rtx comparison, int offset)
1347 gcc_assert (ARC_CC_NC == ARC_CC_HS);
1348 gcc_assert (ARC_CC_C == ARC_CC_LO);
1349 switch (get_arc_condition_code (comparison))
1351 case ARC_CC_EQ: case ARC_CC_NE:
1352 return offset >= -512 && offset <= 506;
1353 case ARC_CC_GT: case ARC_CC_LE: case ARC_CC_GE: case ARC_CC_LT:
1354 case ARC_CC_HI: case ARC_CC_LS: case ARC_CC_LO: case ARC_CC_HS:
1355 return offset >= -64 && offset <= 58;
1356 default:
1357 return false;
1361 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1362 return the mode to be used for the comparison. */
1364 machine_mode
1365 arc_select_cc_mode (enum rtx_code op, rtx x, rtx y)
1367 machine_mode mode = GET_MODE (x);
1368 rtx x1;
1370 /* For an operation that sets the condition codes as a side-effect, the
1371 C and V flags is not set as for cmp, so we can only use comparisons where
1372 this doesn't matter. (For LT and GE we can use "mi" and "pl"
1373 instead.) */
1374 /* ??? We could use "pnz" for greater than zero, however, we could then
1375 get into trouble because the comparison could not be reversed. */
1376 if (GET_MODE_CLASS (mode) == MODE_INT
1377 && y == const0_rtx
1378 && (op == EQ || op == NE
1379 || ((op == LT || op == GE) && GET_MODE_SIZE (GET_MODE (x)) <= 4)))
1380 return CC_ZNmode;
1382 /* add.f for if (a+b) */
1383 if (mode == SImode
1384 && GET_CODE (y) == NEG
1385 && (op == EQ || op == NE))
1386 return CC_ZNmode;
1388 /* Check if this is a test suitable for bxor.f . */
1389 if (mode == SImode && (op == EQ || op == NE) && CONST_INT_P (y)
1390 && ((INTVAL (y) - 1) & INTVAL (y)) == 0
1391 && INTVAL (y))
1392 return CC_Zmode;
1394 /* Check if this is a test suitable for add / bmsk.f . */
1395 if (mode == SImode && (op == EQ || op == NE) && CONST_INT_P (y)
1396 && GET_CODE (x) == AND && CONST_INT_P ((x1 = XEXP (x, 1)))
1397 && ((INTVAL (x1) + 1) & INTVAL (x1)) == 0
1398 && (~INTVAL (x1) | INTVAL (y)) < 0
1399 && (~INTVAL (x1) | INTVAL (y)) > -0x800)
1400 return CC_Zmode;
1402 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
1403 && GET_CODE (x) == PLUS
1404 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
1405 return CC_Cmode;
1407 if (TARGET_ARGONAUT_SET
1408 && ((mode == SFmode && TARGET_SPFP) || (mode == DFmode && TARGET_DPFP)))
1409 switch (op)
1411 case EQ: case NE: case UNEQ: case LTGT: case ORDERED: case UNORDERED:
1412 return CC_FPXmode;
1413 case LT: case UNGE: case GT: case UNLE:
1414 return CC_FP_GTmode;
1415 case LE: case UNGT: case GE: case UNLT:
1416 return CC_FP_GEmode;
1417 default: gcc_unreachable ();
1419 else if (TARGET_HARD_FLOAT
1420 && ((mode == SFmode && TARGET_FP_SP_BASE)
1421 || (mode == DFmode && TARGET_FP_DP_BASE)))
1422 switch (op)
1424 case EQ:
1425 case NE:
1426 case UNORDERED:
1427 case ORDERED:
1428 case UNLT:
1429 case UNLE:
1430 case UNGT:
1431 case UNGE:
1432 case LT:
1433 case LE:
1434 case GT:
1435 case GE:
1436 return CC_FPUmode;
1438 case LTGT:
1439 case UNEQ:
1440 return CC_FPU_UNEQmode;
1442 default:
1443 gcc_unreachable ();
1445 else if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_OPTFPE)
1447 switch (op)
1449 case EQ: case NE: return CC_Zmode;
1450 case LT: case UNGE:
1451 case GT: case UNLE: return CC_FP_GTmode;
1452 case LE: case UNGT:
1453 case GE: case UNLT: return CC_FP_GEmode;
1454 case UNEQ: case LTGT: return CC_FP_UNEQmode;
1455 case ORDERED: case UNORDERED: return CC_FP_ORDmode;
1456 default: gcc_unreachable ();
1459 return CCmode;
1462 /* Vectors to keep interesting information about registers where it can easily
1463 be got. We use to use the actual mode value as the bit number, but there
1464 is (or may be) more than 32 modes now. Instead we use two tables: one
1465 indexed by hard register number, and one indexed by mode. */
1467 /* The purpose of arc_mode_class is to shrink the range of modes so that
1468 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
1469 mapped into one arc_mode_class mode. */
1471 enum arc_mode_class {
1472 C_MODE,
1473 S_MODE, D_MODE, T_MODE, O_MODE,
1474 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
1475 V_MODE
1478 /* Modes for condition codes. */
1479 #define C_MODES (1 << (int) C_MODE)
1481 /* Modes for single-word and smaller quantities. */
1482 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
1484 /* Modes for double-word and smaller quantities. */
1485 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
1487 /* Mode for 8-byte DF values only. */
1488 #define DF_MODES (1 << DF_MODE)
1490 /* Modes for quad-word and smaller quantities. */
1491 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
1493 /* Modes for 128-bit vectors. */
1494 #define V_MODES (1 << (int) V_MODE)
1496 /* Value is 1 if register/mode pair is acceptable on arc. */
1498 static unsigned int arc_hard_regno_modes[] = {
1499 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
1500 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
1501 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, D_MODES,
1502 D_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1504 /* ??? Leave these as S_MODES for now. */
1505 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1506 DF_MODES, 0, DF_MODES, 0, S_MODES, S_MODES, S_MODES, S_MODES,
1507 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1508 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, C_MODES, S_MODES,
1510 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1511 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1512 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1513 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1515 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1516 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1517 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1518 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1520 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1521 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES
1524 static unsigned int arc_mode_class [NUM_MACHINE_MODES];
1526 enum reg_class arc_regno_reg_class[FIRST_PSEUDO_REGISTER];
1528 enum reg_class
1529 arc_preferred_reload_class (rtx, enum reg_class cl)
1531 if ((cl) == CHEAP_CORE_REGS || (cl) == WRITABLE_CORE_REGS)
1532 return GENERAL_REGS;
1533 return cl;
1536 /* Initialize the arc_mode_class array. */
1538 static void
1539 arc_init_reg_tables (void)
1541 int i;
1543 for (i = 0; i < NUM_MACHINE_MODES; i++)
1545 machine_mode m = (machine_mode) i;
1547 switch (GET_MODE_CLASS (m))
1549 case MODE_INT:
1550 case MODE_PARTIAL_INT:
1551 case MODE_COMPLEX_INT:
1552 if (GET_MODE_SIZE (m) <= 4)
1553 arc_mode_class[i] = 1 << (int) S_MODE;
1554 else if (GET_MODE_SIZE (m) == 8)
1555 arc_mode_class[i] = 1 << (int) D_MODE;
1556 else if (GET_MODE_SIZE (m) == 16)
1557 arc_mode_class[i] = 1 << (int) T_MODE;
1558 else if (GET_MODE_SIZE (m) == 32)
1559 arc_mode_class[i] = 1 << (int) O_MODE;
1560 else
1561 arc_mode_class[i] = 0;
1562 break;
1563 case MODE_FLOAT:
1564 case MODE_COMPLEX_FLOAT:
1565 if (GET_MODE_SIZE (m) <= 4)
1566 arc_mode_class[i] = 1 << (int) SF_MODE;
1567 else if (GET_MODE_SIZE (m) == 8)
1568 arc_mode_class[i] = 1 << (int) DF_MODE;
1569 else if (GET_MODE_SIZE (m) == 16)
1570 arc_mode_class[i] = 1 << (int) TF_MODE;
1571 else if (GET_MODE_SIZE (m) == 32)
1572 arc_mode_class[i] = 1 << (int) OF_MODE;
1573 else
1574 arc_mode_class[i] = 0;
1575 break;
1576 case MODE_VECTOR_INT:
1577 if (GET_MODE_SIZE (m) == 4)
1578 arc_mode_class[i] = (1 << (int) S_MODE);
1579 else if (GET_MODE_SIZE (m) == 8)
1580 arc_mode_class[i] = (1 << (int) D_MODE);
1581 else
1582 arc_mode_class[i] = (1 << (int) V_MODE);
1583 break;
1584 case MODE_CC:
1585 default:
1586 /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
1587 we must explicitly check for them here. */
1588 if (i == (int) CCmode || i == (int) CC_ZNmode || i == (int) CC_Zmode
1589 || i == (int) CC_Cmode
1590 || i == CC_FP_GTmode || i == CC_FP_GEmode || i == CC_FP_ORDmode
1591 || i == CC_FPUmode || i == CC_FPU_UNEQmode)
1592 arc_mode_class[i] = 1 << (int) C_MODE;
1593 else
1594 arc_mode_class[i] = 0;
1595 break;
1600 /* Core registers 56..59 are used for multiply extension options.
1601 The dsp option uses r56 and r57, these are then named acc1 and acc2.
1602 acc1 is the highpart, and acc2 the lowpart, so which register gets which
1603 number depends on endianness.
1604 The mul64 multiplier options use r57 for mlo, r58 for mmid and r59 for mhi.
1605 Because mlo / mhi form a 64 bit value, we use different gcc internal
1606 register numbers to make them form a register pair as the gcc internals
1607 know it. mmid gets number 57, if still available, and mlo / mhi get
1608 number 58 and 59, depending on endianness. We use DBX_REGISTER_NUMBER
1609 to map this back. */
1610 char rname56[5] = "r56";
1611 char rname57[5] = "r57";
1612 char rname58[5] = "r58";
1613 char rname59[5] = "r59";
1614 char rname29[7] = "ilink1";
1615 char rname30[7] = "ilink2";
1617 static void
1618 arc_conditional_register_usage (void)
1620 int regno;
1621 int i;
1622 int fix_start = 60, fix_end = 55;
1624 if (TARGET_V2)
1626 /* For ARCv2 the core register set is changed. */
1627 strcpy (rname29, "ilink");
1628 strcpy (rname30, "r30");
1629 call_used_regs[30] = 1;
1630 fixed_regs[30] = 0;
1632 arc_regno_reg_class[30] = WRITABLE_CORE_REGS;
1633 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], 30);
1634 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], 30);
1635 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], 30);
1636 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], 30);
1639 if (TARGET_MUL64_SET)
1641 fix_start = 57;
1642 fix_end = 59;
1644 /* We don't provide a name for mmed. In rtl / assembly resource lists,
1645 you are supposed to refer to it as mlo & mhi, e.g
1646 (zero_extract:SI (reg:DI 58) (const_int 32) (16)) .
1647 In an actual asm instruction, you are of course use mmed.
1648 The point of avoiding having a separate register for mmed is that
1649 this way, we don't have to carry clobbers of that reg around in every
1650 isntruction that modifies mlo and/or mhi. */
1651 strcpy (rname57, "");
1652 strcpy (rname58, TARGET_BIG_ENDIAN ? "mhi" : "mlo");
1653 strcpy (rname59, TARGET_BIG_ENDIAN ? "mlo" : "mhi");
1656 /* The nature of arc_tp_regno is actually something more like a global
1657 register, however globalize_reg requires a declaration.
1658 We use EPILOGUE_USES to compensate so that sets from
1659 __builtin_set_frame_pointer are not deleted. */
1660 if (arc_tp_regno != -1)
1661 fixed_regs[arc_tp_regno] = call_used_regs[arc_tp_regno] = 1;
1663 if (TARGET_MULMAC_32BY16_SET)
1665 fix_start = 56;
1666 fix_end = fix_end > 57 ? fix_end : 57;
1667 strcpy (rname56, TARGET_BIG_ENDIAN ? "acc1" : "acc2");
1668 strcpy (rname57, TARGET_BIG_ENDIAN ? "acc2" : "acc1");
1670 for (regno = fix_start; regno <= fix_end; regno++)
1672 if (!fixed_regs[regno])
1673 warning (0, "multiply option implies r%d is fixed", regno);
1674 fixed_regs [regno] = call_used_regs[regno] = 1;
1676 if (TARGET_Q_CLASS)
1678 if (optimize_size)
1680 reg_alloc_order[0] = 0;
1681 reg_alloc_order[1] = 1;
1682 reg_alloc_order[2] = 2;
1683 reg_alloc_order[3] = 3;
1684 reg_alloc_order[4] = 12;
1685 reg_alloc_order[5] = 13;
1686 reg_alloc_order[6] = 14;
1687 reg_alloc_order[7] = 15;
1688 reg_alloc_order[8] = 4;
1689 reg_alloc_order[9] = 5;
1690 reg_alloc_order[10] = 6;
1691 reg_alloc_order[11] = 7;
1692 reg_alloc_order[12] = 8;
1693 reg_alloc_order[13] = 9;
1694 reg_alloc_order[14] = 10;
1695 reg_alloc_order[15] = 11;
1697 else
1699 reg_alloc_order[2] = 12;
1700 reg_alloc_order[3] = 13;
1701 reg_alloc_order[4] = 14;
1702 reg_alloc_order[5] = 15;
1703 reg_alloc_order[6] = 1;
1704 reg_alloc_order[7] = 0;
1705 reg_alloc_order[8] = 4;
1706 reg_alloc_order[9] = 5;
1707 reg_alloc_order[10] = 6;
1708 reg_alloc_order[11] = 7;
1709 reg_alloc_order[12] = 8;
1710 reg_alloc_order[13] = 9;
1711 reg_alloc_order[14] = 10;
1712 reg_alloc_order[15] = 11;
1715 if (TARGET_SIMD_SET)
1717 int i;
1718 for (i = ARC_FIRST_SIMD_VR_REG; i <= ARC_LAST_SIMD_VR_REG; i++)
1719 reg_alloc_order [i] = i;
1720 for (i = ARC_FIRST_SIMD_DMA_CONFIG_REG;
1721 i <= ARC_LAST_SIMD_DMA_CONFIG_REG; i++)
1722 reg_alloc_order [i] = i;
1725 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1726 if (!call_used_regs[regno])
1727 CLEAR_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
1728 for (regno = 32; regno < 60; regno++)
1729 if (!fixed_regs[regno])
1730 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], regno);
1731 if (!TARGET_ARC600_FAMILY)
1733 for (regno = 32; regno <= 60; regno++)
1734 CLEAR_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], regno);
1736 /* If they have used -ffixed-lp_count, make sure it takes
1737 effect. */
1738 if (fixed_regs[LP_COUNT])
1740 CLEAR_HARD_REG_BIT (reg_class_contents[LPCOUNT_REG], LP_COUNT);
1741 CLEAR_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], LP_COUNT);
1742 CLEAR_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], LP_COUNT);
1744 /* Instead of taking out SF_MODE like below, forbid it outright. */
1745 arc_hard_regno_modes[60] = 0;
1747 else
1748 arc_hard_regno_modes[60] = 1 << (int) S_MODE;
1751 /* ARCHS has 64-bit data-path which makes use of the even-odd paired
1752 registers. */
1753 if (TARGET_HS)
1755 for (regno = 1; regno < 32; regno +=2)
1757 arc_hard_regno_modes[regno] = S_MODES;
1761 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1763 if (i < 29)
1765 if ((TARGET_Q_CLASS || TARGET_RRQ_CLASS)
1766 && ((i <= 3) || ((i >= 12) && (i <= 15))))
1767 arc_regno_reg_class[i] = ARCOMPACT16_REGS;
1768 else
1769 arc_regno_reg_class[i] = GENERAL_REGS;
1771 else if (i < 60)
1772 arc_regno_reg_class[i]
1773 = (fixed_regs[i]
1774 ? (TEST_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], i)
1775 ? CHEAP_CORE_REGS : ALL_CORE_REGS)
1776 : (((!TARGET_ARC600_FAMILY)
1777 && TEST_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], i))
1778 ? CHEAP_CORE_REGS : WRITABLE_CORE_REGS));
1779 else
1780 arc_regno_reg_class[i] = NO_REGS;
1783 /* ARCOMPACT16_REGS is empty, if TARGET_Q_CLASS / TARGET_RRQ_CLASS
1784 has not been activated. */
1785 if (!TARGET_Q_CLASS && !TARGET_RRQ_CLASS)
1786 CLEAR_HARD_REG_SET(reg_class_contents [ARCOMPACT16_REGS]);
1787 if (!TARGET_Q_CLASS)
1788 CLEAR_HARD_REG_SET(reg_class_contents [AC16_BASE_REGS]);
1790 gcc_assert (FIRST_PSEUDO_REGISTER >= 144);
1792 /* Handle Special Registers. */
1793 arc_regno_reg_class[29] = LINK_REGS; /* ilink1 register. */
1794 if (!TARGET_V2)
1795 arc_regno_reg_class[30] = LINK_REGS; /* ilink2 register. */
1796 arc_regno_reg_class[31] = LINK_REGS; /* blink register. */
1797 arc_regno_reg_class[60] = LPCOUNT_REG;
1798 arc_regno_reg_class[61] = NO_REGS; /* CC_REG: must be NO_REGS. */
1799 arc_regno_reg_class[62] = GENERAL_REGS;
1801 if (TARGET_DPFP)
1803 for (i = 40; i < 44; ++i)
1805 arc_regno_reg_class[i] = DOUBLE_REGS;
1807 /* Unless they want us to do 'mov d1, 0x00000000' make sure
1808 no attempt is made to use such a register as a destination
1809 operand in *movdf_insn. */
1810 if (!TARGET_ARGONAUT_SET)
1812 /* Make sure no 'c', 'w', 'W', or 'Rac' constraint is
1813 interpreted to mean they can use D1 or D2 in their insn. */
1814 CLEAR_HARD_REG_BIT(reg_class_contents[CHEAP_CORE_REGS ], i);
1815 CLEAR_HARD_REG_BIT(reg_class_contents[ALL_CORE_REGS ], i);
1816 CLEAR_HARD_REG_BIT(reg_class_contents[WRITABLE_CORE_REGS ], i);
1817 CLEAR_HARD_REG_BIT(reg_class_contents[MPY_WRITABLE_CORE_REGS], i);
1821 else
1823 /* Disable all DOUBLE_REGISTER settings,
1824 if not generating DPFP code. */
1825 arc_regno_reg_class[40] = ALL_REGS;
1826 arc_regno_reg_class[41] = ALL_REGS;
1827 arc_regno_reg_class[42] = ALL_REGS;
1828 arc_regno_reg_class[43] = ALL_REGS;
1830 fixed_regs[40] = 1;
1831 fixed_regs[41] = 1;
1832 fixed_regs[42] = 1;
1833 fixed_regs[43] = 1;
1835 arc_hard_regno_modes[40] = 0;
1836 arc_hard_regno_modes[42] = 0;
1838 CLEAR_HARD_REG_SET(reg_class_contents [DOUBLE_REGS]);
1841 if (TARGET_SIMD_SET)
1843 gcc_assert (ARC_FIRST_SIMD_VR_REG == 64);
1844 gcc_assert (ARC_LAST_SIMD_VR_REG == 127);
1846 for (i = ARC_FIRST_SIMD_VR_REG; i <= ARC_LAST_SIMD_VR_REG; i++)
1847 arc_regno_reg_class [i] = SIMD_VR_REGS;
1849 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_REG == 128);
1850 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_IN_REG == 128);
1851 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_OUT_REG == 136);
1852 gcc_assert (ARC_LAST_SIMD_DMA_CONFIG_REG == 143);
1854 for (i = ARC_FIRST_SIMD_DMA_CONFIG_REG;
1855 i <= ARC_LAST_SIMD_DMA_CONFIG_REG; i++)
1856 arc_regno_reg_class [i] = SIMD_DMA_CONFIG_REGS;
1859 /* pc : r63 */
1860 arc_regno_reg_class[PROGRAM_COUNTER_REGNO] = GENERAL_REGS;
1862 /*ARCV2 Accumulator. */
1863 if ((TARGET_V2
1864 && (TARGET_FP_DP_FUSED || TARGET_FP_SP_FUSED))
1865 || TARGET_PLUS_DMPY)
1867 arc_regno_reg_class[ACCL_REGNO] = WRITABLE_CORE_REGS;
1868 arc_regno_reg_class[ACCH_REGNO] = WRITABLE_CORE_REGS;
1869 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], ACCL_REGNO);
1870 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], ACCH_REGNO);
1871 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], ACCL_REGNO);
1872 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], ACCH_REGNO);
1873 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], ACCL_REGNO);
1874 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], ACCH_REGNO);
1875 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], ACCL_REGNO);
1876 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], ACCH_REGNO);
1878 /* Allow the compiler to freely use them. */
1879 fixed_regs[ACCL_REGNO] = 0;
1880 fixed_regs[ACCH_REGNO] = 0;
1882 arc_hard_regno_modes[ACC_REG_FIRST] = D_MODES;
1886 /* Implement TARGET_HARD_REGNO_NREGS. */
1888 static unsigned int
1889 arc_hard_regno_nregs (unsigned int regno, machine_mode mode)
1891 if (GET_MODE_SIZE (mode) == 16
1892 && regno >= ARC_FIRST_SIMD_VR_REG
1893 && regno <= ARC_LAST_SIMD_VR_REG)
1894 return 1;
1896 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
1899 /* Implement TARGET_HARD_REGNO_MODE_OK. */
1901 static bool
1902 arc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
1904 return (arc_hard_regno_modes[regno] & arc_mode_class[mode]) != 0;
1907 /* Implement TARGET_MODES_TIEABLE_P. Tie QI/HI/SI modes together. */
1909 static bool
1910 arc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
1912 return (GET_MODE_CLASS (mode1) == MODE_INT
1913 && GET_MODE_CLASS (mode2) == MODE_INT
1914 && GET_MODE_SIZE (mode1) <= UNITS_PER_WORD
1915 && GET_MODE_SIZE (mode2) <= UNITS_PER_WORD);
1918 /* Handle an "interrupt" attribute; arguments as in
1919 struct attribute_spec.handler. */
1921 static tree
1922 arc_handle_interrupt_attribute (tree *, tree name, tree args, int,
1923 bool *no_add_attrs)
1925 gcc_assert (args);
1927 tree value = TREE_VALUE (args);
1929 if (TREE_CODE (value) != STRING_CST)
1931 warning (OPT_Wattributes,
1932 "argument of %qE attribute is not a string constant",
1933 name);
1934 *no_add_attrs = true;
1936 else if (!TARGET_V2
1937 && strcmp (TREE_STRING_POINTER (value), "ilink1")
1938 && strcmp (TREE_STRING_POINTER (value), "ilink2"))
1940 warning (OPT_Wattributes,
1941 "argument of %qE attribute is not \"ilink1\" or \"ilink2\"",
1942 name);
1943 *no_add_attrs = true;
1945 else if (TARGET_V2
1946 && strcmp (TREE_STRING_POINTER (value), "ilink")
1947 && strcmp (TREE_STRING_POINTER (value), "firq"))
1949 warning (OPT_Wattributes,
1950 "argument of %qE attribute is not \"ilink\" or \"firq\"",
1951 name);
1952 *no_add_attrs = true;
1955 return NULL_TREE;
1958 static tree
1959 arc_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
1960 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1962 if (TREE_CODE (*node) != FUNCTION_DECL)
1964 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1965 name);
1966 *no_add_attrs = true;
1969 return NULL_TREE;
1972 /* Implement `TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS' */
1974 static bool
1975 arc_allocate_stack_slots_for_args (void)
1977 /* Naked functions should not allocate stack slots for arguments. */
1978 unsigned int fn_type = arc_compute_function_type (cfun);
1980 return !ARC_NAKED_P(fn_type);
1983 /* Implement `TARGET_WARN_FUNC_RETURN'. */
1985 static bool
1986 arc_warn_func_return (tree decl)
1988 struct function *func = DECL_STRUCT_FUNCTION (decl);
1989 unsigned int fn_type = arc_compute_function_type (func);
1991 return !ARC_NAKED_P (fn_type);
1994 /* Return zero if TYPE1 and TYPE are incompatible, one if they are compatible,
1995 and two if they are nearly compatible (which causes a warning to be
1996 generated). */
1998 static int
1999 arc_comp_type_attributes (const_tree type1,
2000 const_tree type2)
2002 int l1, l2, m1, m2, s1, s2;
2004 /* Check for mismatch of non-default calling convention. */
2005 if (TREE_CODE (type1) != FUNCTION_TYPE)
2006 return 1;
2008 /* Check for mismatched call attributes. */
2009 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2010 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2011 m1 = lookup_attribute ("medium_call", TYPE_ATTRIBUTES (type1)) != NULL;
2012 m2 = lookup_attribute ("medium_call", TYPE_ATTRIBUTES (type2)) != NULL;
2013 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2014 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2016 /* Only bother to check if an attribute is defined. */
2017 if (l1 | l2 | m1 | m2 | s1 | s2)
2019 /* If one type has an attribute, the other must have the same attribute. */
2020 if ((l1 != l2) || (m1 != m2) || (s1 != s2))
2021 return 0;
2023 /* Disallow mixed attributes. */
2024 if (l1 + m1 + s1 > 1)
2025 return 0;
2029 return 1;
2032 /* Set the default attributes for TYPE. */
2034 void
2035 arc_set_default_type_attributes (tree type ATTRIBUTE_UNUSED)
2037 gcc_unreachable();
2040 /* Misc. utilities. */
2042 /* X and Y are two things to compare using CODE. Emit the compare insn and
2043 return the rtx for the cc reg in the proper mode. */
2046 gen_compare_reg (rtx comparison, machine_mode omode)
2048 enum rtx_code code = GET_CODE (comparison);
2049 rtx x = XEXP (comparison, 0);
2050 rtx y = XEXP (comparison, 1);
2051 rtx tmp, cc_reg;
2052 machine_mode mode, cmode;
2055 cmode = GET_MODE (x);
2056 if (cmode == VOIDmode)
2057 cmode = GET_MODE (y);
2058 gcc_assert (cmode == SImode || cmode == SFmode || cmode == DFmode);
2059 if (cmode == SImode)
2061 if (!register_operand (x, SImode))
2063 if (register_operand (y, SImode))
2065 tmp = x;
2066 x = y;
2067 y = tmp;
2068 code = swap_condition (code);
2070 else
2071 x = copy_to_mode_reg (SImode, x);
2073 if (GET_CODE (y) == SYMBOL_REF && flag_pic)
2074 y = copy_to_mode_reg (SImode, y);
2076 else
2078 x = force_reg (cmode, x);
2079 y = force_reg (cmode, y);
2081 mode = SELECT_CC_MODE (code, x, y);
2083 cc_reg = gen_rtx_REG (mode, CC_REG);
2085 /* ??? FIXME (x-y)==0, as done by both cmpsfpx_raw and
2086 cmpdfpx_raw, is not a correct comparison for floats:
2087 http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
2089 if (TARGET_ARGONAUT_SET
2090 && ((cmode == SFmode && TARGET_SPFP) || (cmode == DFmode && TARGET_DPFP)))
2092 switch (code)
2094 case NE: case EQ: case LT: case UNGE: case LE: case UNGT:
2095 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2096 break;
2097 case GT: case UNLE: case GE: case UNLT:
2098 code = swap_condition (code);
2099 tmp = x;
2100 x = y;
2101 y = tmp;
2102 break;
2103 default:
2104 gcc_unreachable ();
2106 if (cmode == SFmode)
2108 emit_insn (gen_cmpsfpx_raw (x, y));
2110 else /* DFmode */
2112 /* Accepts Dx regs directly by insns. */
2113 emit_insn (gen_cmpdfpx_raw (x, y));
2116 if (mode != CC_FPXmode)
2117 emit_insn (gen_rtx_SET (cc_reg,
2118 gen_rtx_COMPARE (mode,
2119 gen_rtx_REG (CC_FPXmode, 61),
2120 const0_rtx)));
2122 else if (TARGET_FPX_QUARK && (cmode == SFmode))
2124 switch (code)
2126 case NE: case EQ: case GT: case UNLE: case GE: case UNLT:
2127 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2128 break;
2129 case LT: case UNGE: case LE: case UNGT:
2130 code = swap_condition (code);
2131 tmp = x;
2132 x = y;
2133 y = tmp;
2134 break;
2135 default:
2136 gcc_unreachable ();
2139 emit_insn (gen_cmp_quark (cc_reg,
2140 gen_rtx_COMPARE (mode, x, y)));
2142 else if (TARGET_HARD_FLOAT
2143 && ((cmode == SFmode && TARGET_FP_SP_BASE)
2144 || (cmode == DFmode && TARGET_FP_DP_BASE)))
2145 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
2146 else if (GET_MODE_CLASS (cmode) == MODE_FLOAT && TARGET_OPTFPE)
2148 rtx op0 = gen_rtx_REG (cmode, 0);
2149 rtx op1 = gen_rtx_REG (cmode, GET_MODE_SIZE (cmode) / UNITS_PER_WORD);
2150 bool swap = false;
2152 switch (code)
2154 case NE: case EQ: case GT: case UNLE: case GE: case UNLT:
2155 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2156 break;
2157 case LT: case UNGE: case LE: case UNGT:
2158 code = swap_condition (code);
2159 swap = true;
2160 break;
2161 default:
2162 gcc_unreachable ();
2164 if (currently_expanding_to_rtl)
2166 if (swap)
2168 tmp = x;
2169 x = y;
2170 y = tmp;
2172 emit_move_insn (op0, x);
2173 emit_move_insn (op1, y);
2175 else
2177 gcc_assert (rtx_equal_p (op0, x));
2178 gcc_assert (rtx_equal_p (op1, y));
2179 if (swap)
2181 op0 = y;
2182 op1 = x;
2185 emit_insn (gen_cmp_float (cc_reg, gen_rtx_COMPARE (mode, op0, op1)));
2187 else
2188 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
2189 return gen_rtx_fmt_ee (code, omode, cc_reg, const0_rtx);
2192 /* Return true if VALUE, a const_double, will fit in a limm (4 byte number).
2193 We assume the value can be either signed or unsigned. */
2195 bool
2196 arc_double_limm_p (rtx value)
2198 HOST_WIDE_INT low, high;
2200 gcc_assert (GET_CODE (value) == CONST_DOUBLE);
2202 if (TARGET_DPFP)
2203 return true;
2205 low = CONST_DOUBLE_LOW (value);
2206 high = CONST_DOUBLE_HIGH (value);
2208 if (low & 0x80000000)
2210 return (((unsigned HOST_WIDE_INT) low <= 0xffffffff && high == 0)
2211 || (((low & - (unsigned HOST_WIDE_INT) 0x80000000)
2212 == - (unsigned HOST_WIDE_INT) 0x80000000)
2213 && high == -1));
2215 else
2217 return (unsigned HOST_WIDE_INT) low <= 0x7fffffff && high == 0;
2221 /* Do any needed setup for a variadic function. For the ARC, we must
2222 create a register parameter block, and then copy any anonymous arguments
2223 in registers to memory.
2225 CUM has not been updated for the last named argument which has type TYPE
2226 and mode MODE, and we rely on this fact. */
2228 static void
2229 arc_setup_incoming_varargs (cumulative_args_t args_so_far,
2230 machine_mode mode, tree type,
2231 int *pretend_size, int no_rtl)
2233 int first_anon_arg;
2234 CUMULATIVE_ARGS next_cum;
2236 /* We must treat `__builtin_va_alist' as an anonymous arg. */
2238 next_cum = *get_cumulative_args (args_so_far);
2239 arc_function_arg_advance (pack_cumulative_args (&next_cum),
2240 mode, type, true);
2241 first_anon_arg = next_cum;
2243 if (FUNCTION_ARG_REGNO_P (first_anon_arg))
2245 /* First anonymous (unnamed) argument is in a reg. */
2247 /* Note that first_reg_offset < MAX_ARC_PARM_REGS. */
2248 int first_reg_offset = first_anon_arg;
2250 if (!no_rtl)
2252 rtx regblock
2253 = gen_rtx_MEM (BLKmode, plus_constant (Pmode, arg_pointer_rtx,
2254 FIRST_PARM_OFFSET (0)));
2255 move_block_from_reg (first_reg_offset, regblock,
2256 MAX_ARC_PARM_REGS - first_reg_offset);
2259 *pretend_size
2260 = ((MAX_ARC_PARM_REGS - first_reg_offset ) * UNITS_PER_WORD);
2264 /* Cost functions. */
2266 /* Provide the costs of an addressing mode that contains ADDR.
2267 If ADDR is not a valid address, its cost is irrelevant. */
2270 arc_address_cost (rtx addr, machine_mode, addr_space_t, bool speed)
2272 switch (GET_CODE (addr))
2274 case REG :
2275 return speed || satisfies_constraint_Rcq (addr) ? 0 : 1;
2276 case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
2277 case PRE_MODIFY: case POST_MODIFY:
2278 return !speed;
2280 case LABEL_REF :
2281 case SYMBOL_REF :
2282 case CONST :
2283 if (TARGET_NPS_CMEM && cmem_address (addr, SImode))
2284 return 0;
2285 /* Most likely needs a LIMM. */
2286 return COSTS_N_INSNS (1);
2288 case PLUS :
2290 register rtx plus0 = XEXP (addr, 0);
2291 register rtx plus1 = XEXP (addr, 1);
2293 if (GET_CODE (plus0) != REG
2294 && (GET_CODE (plus0) != MULT
2295 || !CONST_INT_P (XEXP (plus0, 1))
2296 || (INTVAL (XEXP (plus0, 1)) != 2
2297 && INTVAL (XEXP (plus0, 1)) != 4)))
2298 break;
2300 switch (GET_CODE (plus1))
2302 case CONST_INT :
2303 return (!RTX_OK_FOR_OFFSET_P (SImode, plus1)
2304 ? COSTS_N_INSNS (1)
2305 : speed
2307 : (satisfies_constraint_Rcq (plus0)
2308 && satisfies_constraint_O (plus1))
2310 : 1);
2311 case REG:
2312 return (speed < 1 ? 0
2313 : (satisfies_constraint_Rcq (plus0)
2314 && satisfies_constraint_Rcq (plus1))
2315 ? 0 : 1);
2316 case CONST :
2317 case SYMBOL_REF :
2318 case LABEL_REF :
2319 return COSTS_N_INSNS (1);
2320 default:
2321 break;
2323 break;
2325 default:
2326 break;
2329 return 4;
2332 /* Emit instruction X with the frame related bit set. */
2334 static rtx
2335 frame_insn (rtx x)
2337 x = emit_insn (x);
2338 RTX_FRAME_RELATED_P (x) = 1;
2339 return x;
2342 /* Emit a frame insn to move SRC to DST. */
2344 static rtx
2345 frame_move (rtx dst, rtx src)
2347 rtx tmp = gen_rtx_SET (dst, src);
2348 RTX_FRAME_RELATED_P (tmp) = 1;
2349 return frame_insn (tmp);
2352 /* Like frame_move, but add a REG_INC note for REG if ADDR contains an
2353 auto increment address, or is zero. */
2355 static rtx
2356 frame_move_inc (rtx dst, rtx src, rtx reg, rtx addr)
2358 rtx insn = frame_move (dst, src);
2360 if (!addr
2361 || GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == POST_INC
2362 || GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY)
2363 add_reg_note (insn, REG_INC, reg);
2364 return insn;
2367 /* Emit a frame insn which adjusts a frame address register REG by OFFSET. */
2369 static rtx
2370 frame_add (rtx reg, HOST_WIDE_INT offset)
2372 gcc_assert ((offset & 0x3) == 0);
2373 if (!offset)
2374 return NULL_RTX;
2375 return frame_move (reg, plus_constant (Pmode, reg, offset));
2378 /* Emit a frame insn which adjusts stack pointer by OFFSET. */
2380 static rtx
2381 frame_stack_add (HOST_WIDE_INT offset)
2383 return frame_add (stack_pointer_rtx, offset);
2386 /* Traditionally, we push saved registers first in the prologue,
2387 then we allocate the rest of the frame - and reverse in the epilogue.
2388 This has still its merits for ease of debugging, or saving code size
2389 or even execution time if the stack frame is so large that some accesses
2390 can't be encoded anymore with offsets in the instruction code when using
2391 a different scheme.
2392 Also, it would be a good starting point if we got instructions to help
2393 with register save/restore.
2395 However, often stack frames are small, and the pushing / popping has
2396 some costs:
2397 - the stack modification prevents a lot of scheduling.
2398 - frame allocation / deallocation needs extra instructions.
2399 - unless we know that we compile ARC700 user code, we need to put
2400 a memory barrier after frame allocation / before deallocation to
2401 prevent interrupts clobbering our data in the frame.
2402 In particular, we don't have any such guarantees for library functions,
2403 which tend to, on the other hand, to have small frames.
2405 Thus, for small frames, we'd like to use a different scheme:
2406 - The frame is allocated in full with the first prologue instruction,
2407 and deallocated in full with the last epilogue instruction.
2408 Thus, the instructions in-betwen can be freely scheduled.
2409 - If the function has no outgoing arguments on the stack, we can allocate
2410 one register save slot at the top of the stack. This register can then
2411 be saved simultanously with frame allocation, and restored with
2412 frame deallocation.
2413 This register can be picked depending on scheduling considerations,
2414 although same though should go into having some set of registers
2415 to be potentially lingering after a call, and others to be available
2416 immediately - i.e. in the absence of interprocedual optimization, we
2417 can use an ABI-like convention for register allocation to reduce
2418 stalls after function return. */
2419 /* Function prologue/epilogue handlers. */
2421 /* ARCompact stack frames look like:
2423 Before call After call
2424 high +-----------------------+ +-----------------------+
2425 mem | reg parm save area | | reg parm save area |
2426 | only created for | | only created for |
2427 | variable arg fns | | variable arg fns |
2428 AP +-----------------------+ +-----------------------+
2429 | return addr register | | return addr register |
2430 | (if required) | | (if required) |
2431 +-----------------------+ +-----------------------+
2432 | | | |
2433 | reg save area | | reg save area |
2434 | | | |
2435 +-----------------------+ +-----------------------+
2436 | frame pointer | | frame pointer |
2437 | (if required) | | (if required) |
2438 FP +-----------------------+ +-----------------------+
2439 | | | |
2440 | local/temp variables | | local/temp variables |
2441 | | | |
2442 +-----------------------+ +-----------------------+
2443 | | | |
2444 | arguments on stack | | arguments on stack |
2445 | | | |
2446 SP +-----------------------+ +-----------------------+
2447 | reg parm save area |
2448 | only created for |
2449 | variable arg fns |
2450 AP +-----------------------+
2451 | return addr register |
2452 | (if required) |
2453 +-----------------------+
2455 | reg save area |
2457 +-----------------------+
2458 | frame pointer |
2459 | (if required) |
2460 FP +-----------------------+
2462 | local/temp variables |
2464 +-----------------------+
2466 | arguments on stack |
2467 low | |
2468 mem SP +-----------------------+
2470 Notes:
2471 1) The "reg parm save area" does not exist for non variable argument fns.
2472 The "reg parm save area" can be eliminated completely if we created our
2473 own va-arc.h, but that has tradeoffs as well (so it's not done). */
2475 /* Structure to be filled in by arc_compute_frame_size with register
2476 save masks, and offsets for the current function. */
2477 struct GTY (()) arc_frame_info
2479 unsigned int total_size; /* # bytes that the entire frame takes up. */
2480 unsigned int extra_size; /* # bytes of extra stuff. */
2481 unsigned int pretend_size; /* # bytes we push and pretend caller did. */
2482 unsigned int args_size; /* # bytes that outgoing arguments take up. */
2483 unsigned int reg_size; /* # bytes needed to store regs. */
2484 unsigned int var_size; /* # bytes that variables take up. */
2485 unsigned int reg_offset; /* Offset from new sp to store regs. */
2486 unsigned int gmask; /* Mask of saved gp registers. */
2487 int initialized; /* Nonzero if frame size already calculated. */
2488 short millicode_start_reg;
2489 short millicode_end_reg;
2490 bool save_return_addr;
2493 /* Defining data structures for per-function information. */
2495 typedef struct GTY (()) machine_function
2497 unsigned int fn_type;
2498 struct arc_frame_info frame_info;
2499 /* To keep track of unalignment caused by short insns. */
2500 int unalign;
2501 int force_short_suffix; /* Used when disgorging return delay slot insns. */
2502 const char *size_reason;
2503 struct arc_ccfsm ccfsm_current;
2504 /* Map from uid to ccfsm state during branch shortening. */
2505 rtx ccfsm_current_insn;
2506 char arc_reorg_started;
2507 char prescan_initialized;
2508 } machine_function;
2510 /* Type of function DECL.
2512 The result is cached. To reset the cache at the end of a function,
2513 call with DECL = NULL_TREE. */
2515 unsigned int
2516 arc_compute_function_type (struct function *fun)
2518 tree attr, decl = fun->decl;
2519 unsigned int fn_type = fun->machine->fn_type;
2521 if (fn_type != ARC_FUNCTION_UNKNOWN)
2522 return fn_type;
2524 /* Check if it is a naked function. */
2525 if (lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) != NULL_TREE)
2526 fn_type |= ARC_FUNCTION_NAKED;
2527 else
2528 fn_type |= ARC_FUNCTION_NORMAL;
2530 /* Now see if this is an interrupt handler. */
2531 attr = lookup_attribute ("interrupt", DECL_ATTRIBUTES (decl));
2532 if (attr != NULL_TREE)
2534 tree value, args = TREE_VALUE (attr);
2536 gcc_assert (list_length (args) == 1);
2537 value = TREE_VALUE (args);
2538 gcc_assert (TREE_CODE (value) == STRING_CST);
2540 if (!strcmp (TREE_STRING_POINTER (value), "ilink1")
2541 || !strcmp (TREE_STRING_POINTER (value), "ilink"))
2542 fn_type |= ARC_FUNCTION_ILINK1;
2543 else if (!strcmp (TREE_STRING_POINTER (value), "ilink2"))
2544 fn_type |= ARC_FUNCTION_ILINK2;
2545 else if (!strcmp (TREE_STRING_POINTER (value), "firq"))
2546 fn_type |= ARC_FUNCTION_FIRQ;
2547 else
2548 gcc_unreachable ();
2551 return fun->machine->fn_type = fn_type;
2554 #define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
2555 #define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
2557 /* Tell prologue and epilogue if register REGNO should be saved / restored.
2558 The return address and frame pointer are treated separately.
2559 Don't consider them here.
2560 Addition for pic: The gp register needs to be saved if the current
2561 function changes it to access gotoff variables.
2562 FIXME: This will not be needed if we used some arbitrary register
2563 instead of r26. */
2565 static bool
2566 arc_must_save_register (int regno, struct function *func)
2568 unsigned int fn_type = arc_compute_function_type (func);
2569 bool irq_auto_save_p = ((irq_ctrl_saved.irq_save_last_reg >= regno)
2570 && ARC_AUTO_IRQ_P (fn_type));
2571 bool firq_auto_save_p = ARC_FAST_INTERRUPT_P (fn_type);
2573 switch (rgf_banked_register_count)
2575 case 4:
2576 firq_auto_save_p &= (regno < 4);
2577 break;
2578 case 8:
2579 firq_auto_save_p &= ((regno < 4) || ((regno > 11) && (regno < 16)));
2580 break;
2581 case 16:
2582 firq_auto_save_p &= ((regno < 4) || ((regno > 9) && (regno < 16))
2583 || ((regno > 25) && (regno < 29))
2584 || ((regno > 29) && (regno < 32)));
2585 break;
2586 case 32:
2587 firq_auto_save_p &= (regno != 29) && (regno < 32);
2588 break;
2589 default:
2590 firq_auto_save_p = false;
2591 break;
2594 if ((regno) != RETURN_ADDR_REGNUM
2595 && (regno) != FRAME_POINTER_REGNUM
2596 && df_regs_ever_live_p (regno)
2597 && (!call_used_regs[regno]
2598 || ARC_INTERRUPT_P (fn_type))
2599 /* Do not emit code for auto saved regs. */
2600 && !irq_auto_save_p
2601 && !firq_auto_save_p)
2602 return true;
2604 if (flag_pic && crtl->uses_pic_offset_table
2605 && regno == PIC_OFFSET_TABLE_REGNUM)
2606 return true;
2608 return false;
2611 /* Return true if the return address must be saved in the current function,
2612 otherwise return false. */
2614 static bool
2615 arc_must_save_return_addr (struct function *func)
2617 if (func->machine->frame_info.save_return_addr)
2618 return true;
2620 return false;
2623 /* Helper function to wrap FRAME_POINTER_NEEDED. We do this as
2624 FRAME_POINTER_NEEDED will not be true until the IRA (Integrated
2625 Register Allocator) pass, while we want to get the frame size
2626 correct earlier than the IRA pass.
2628 When a function uses eh_return we must ensure that the fp register
2629 is saved and then restored so that the unwinder can restore the
2630 correct value for the frame we are going to jump to.
2632 To do this we force all frames that call eh_return to require a
2633 frame pointer (see arc_frame_pointer_required), this
2634 will ensure that the previous frame pointer is stored on entry to
2635 the function, and will then be reloaded at function exit.
2637 As the frame pointer is handled as a special case in our prologue
2638 and epilogue code it must not be saved and restored using the
2639 MUST_SAVE_REGISTER mechanism otherwise we run into issues where GCC
2640 believes that the function is not using a frame pointer and that
2641 the value in the fp register is the frame pointer, while the
2642 prologue and epilogue are busy saving and restoring the fp
2643 register.
2645 During compilation of a function the frame size is evaluated
2646 multiple times, it is not until the reload pass is complete the the
2647 frame size is considered fixed (it is at this point that space for
2648 all spills has been allocated). However the frame_pointer_needed
2649 variable is not set true until the register allocation pass, as a
2650 result in the early stages the frame size does not include space
2651 for the frame pointer to be spilled.
2653 The problem that this causes is that the rtl generated for
2654 EH_RETURN_HANDLER_RTX uses the details of the frame size to compute
2655 the offset from the frame pointer at which the return address
2656 lives. However, in early passes GCC has not yet realised we need a
2657 frame pointer, and so has not included space for the frame pointer
2658 in the frame size, and so gets the offset of the return address
2659 wrong. This should not be an issue as in later passes GCC has
2660 realised that the frame pointer needs to be spilled, and has
2661 increased the frame size. However, the rtl for the
2662 EH_RETURN_HANDLER_RTX is not regenerated to use the newer, larger
2663 offset, and the wrong smaller offset is used. */
2665 static bool
2666 arc_frame_pointer_needed (void)
2668 return (frame_pointer_needed || crtl->calls_eh_return);
2671 /* Return non-zero if there are registers to be saved or loaded using
2672 millicode thunks. We can only use consecutive sequences starting
2673 with r13, and not going beyond r25.
2674 GMASK is a bitmask of registers to save. This function sets
2675 FRAME->millicod_start_reg .. FRAME->millicode_end_reg to the range
2676 of registers to be saved / restored with a millicode call. */
2678 static int
2679 arc_compute_millicode_save_restore_regs (unsigned int gmask,
2680 struct arc_frame_info *frame)
2682 int regno;
2684 int start_reg = 13, end_reg = 25;
2686 for (regno = start_reg; regno <= end_reg && (gmask & (1L << regno));)
2687 regno++;
2688 end_reg = regno - 1;
2689 /* There is no point in using millicode thunks if we don't save/restore
2690 at least three registers. For non-leaf functions we also have the
2691 blink restore. */
2692 if (regno - start_reg >= 3 - (crtl->is_leaf == 0))
2694 frame->millicode_start_reg = 13;
2695 frame->millicode_end_reg = regno - 1;
2696 return 1;
2698 return 0;
2701 /* Return the bytes needed to compute the frame pointer from the
2702 current stack pointer. */
2704 static unsigned int
2705 arc_compute_frame_size (void)
2707 int regno;
2708 unsigned int total_size, var_size, args_size, pretend_size, extra_size;
2709 unsigned int reg_size, reg_offset;
2710 unsigned int gmask;
2711 enum arc_function_type fn_type;
2712 int interrupt_p;
2713 struct arc_frame_info *frame_info;
2714 int size;
2716 /* The answer might already be known. */
2717 if (cfun->machine->frame_info.initialized)
2718 return cfun->machine->frame_info.total_size;
2720 frame_info = &cfun->machine->frame_info;
2721 size = ARC_STACK_ALIGN (get_frame_size ());
2723 /* 1) Size of locals and temporaries. */
2724 var_size = size;
2726 /* 2) Size of outgoing arguments. */
2727 args_size = crtl->outgoing_args_size;
2729 /* 3) Calculate space needed for saved registers.
2730 ??? We ignore the extension registers for now. */
2732 /* See if this is an interrupt handler. Call used registers must be saved
2733 for them too. */
2735 reg_size = 0;
2736 gmask = 0;
2738 for (regno = 0; regno <= 31; regno++)
2740 if (arc_must_save_register (regno, cfun))
2742 reg_size += UNITS_PER_WORD;
2743 gmask |= 1L << regno;
2747 /* In a frame that calls __builtin_eh_return two data registers are
2748 used to pass values back to the exception handler.
2750 Ensure that these registers are spilled to the stack so that the
2751 exception throw code can find them, and update the saved values.
2752 The handling code will then consume these reloaded values to
2753 handle the exception. */
2754 if (crtl->calls_eh_return)
2755 for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
2757 reg_size += UNITS_PER_WORD;
2758 gmask |= 1 << regno;
2761 /* 4) Space for back trace data structure.
2762 <return addr reg size> (if required) + <fp size> (if required). */
2763 frame_info->save_return_addr
2764 = (!crtl->is_leaf || df_regs_ever_live_p (RETURN_ADDR_REGNUM)
2765 || crtl->calls_eh_return);
2766 /* Saving blink reg in case of leaf function for millicode thunk calls. */
2767 if (optimize_size
2768 && !TARGET_NO_MILLICODE_THUNK_SET
2769 && !crtl->calls_eh_return)
2771 if (arc_compute_millicode_save_restore_regs (gmask, frame_info))
2772 frame_info->save_return_addr = true;
2775 extra_size = 0;
2776 if (arc_must_save_return_addr (cfun))
2777 extra_size = 4;
2778 if (arc_frame_pointer_needed ())
2779 extra_size += 4;
2781 /* 5) Space for variable arguments passed in registers */
2782 pretend_size = crtl->args.pretend_args_size;
2784 /* Ensure everything before the locals is aligned appropriately. */
2786 unsigned int extra_plus_reg_size;
2787 unsigned int extra_plus_reg_size_aligned;
2789 extra_plus_reg_size = extra_size + reg_size;
2790 extra_plus_reg_size_aligned = ARC_STACK_ALIGN(extra_plus_reg_size);
2791 reg_size = extra_plus_reg_size_aligned - extra_size;
2794 /* Compute total frame size. */
2795 total_size = var_size + args_size + extra_size + pretend_size + reg_size;
2797 /* It used to be the case that the alignment was forced at this
2798 point. However, that is dangerous, calculations based on
2799 total_size would be wrong. Given that this has never cropped up
2800 as an issue I've changed this to an assert for now. */
2801 gcc_assert (total_size == ARC_STACK_ALIGN (total_size));
2803 /* Compute offset of register save area from stack pointer:
2804 Frame: pretend_size <blink> reg_size <fp> var_size args_size <--sp
2806 reg_offset = (total_size - (pretend_size + reg_size + extra_size)
2807 + (arc_frame_pointer_needed () ? 4 : 0));
2809 /* Save computed information. */
2810 frame_info->total_size = total_size;
2811 frame_info->extra_size = extra_size;
2812 frame_info->pretend_size = pretend_size;
2813 frame_info->var_size = var_size;
2814 frame_info->args_size = args_size;
2815 frame_info->reg_size = reg_size;
2816 frame_info->reg_offset = reg_offset;
2817 frame_info->gmask = gmask;
2818 frame_info->initialized = reload_completed;
2820 /* Ok, we're done. */
2821 return total_size;
2824 /* Common code to save/restore registers. */
2825 /* BASE_REG is the base register to use for addressing and to adjust.
2826 GMASK is a bitmask of general purpose registers to save/restore.
2827 epilogue_p 0: prologue 1:epilogue 2:epilogue, sibling thunk
2828 If *FIRST_OFFSET is non-zero, add it first to BASE_REG - preferably
2829 using a pre-modify for the first memory access. *FIRST_OFFSET is then
2830 zeroed. */
2832 static void
2833 arc_save_restore (rtx base_reg,
2834 unsigned int gmask, int epilogue_p, int *first_offset)
2836 unsigned int offset = 0;
2837 int regno;
2838 struct arc_frame_info *frame = &cfun->machine->frame_info;
2839 rtx sibthunk_insn = NULL_RTX;
2841 if (gmask)
2843 /* Millicode thunks implementation:
2844 Generates calls to millicodes for registers starting from r13 to r25
2845 Present Limitations:
2846 - Only one range supported. The remaining regs will have the ordinary
2847 st and ld instructions for store and loads. Hence a gmask asking
2848 to store r13-14, r16-r25 will only generate calls to store and
2849 load r13 to r14 while store and load insns will be generated for
2850 r16 to r25 in the prologue and epilogue respectively.
2852 - Presently library only supports register ranges starting from r13.
2854 if (epilogue_p == 2 || frame->millicode_end_reg > 14)
2856 int start_call = frame->millicode_start_reg;
2857 int end_call = frame->millicode_end_reg;
2858 int n_regs = end_call - start_call + 1;
2859 int i = 0, r, off = 0;
2860 rtx insn;
2861 rtx ret_addr = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
2863 if (*first_offset)
2865 /* "reg_size" won't be more than 127 . */
2866 gcc_assert (epilogue_p || abs (*first_offset) <= 127);
2867 frame_add (base_reg, *first_offset);
2868 *first_offset = 0;
2870 insn = gen_rtx_PARALLEL
2871 (VOIDmode, rtvec_alloc ((epilogue_p == 2) + n_regs + 1));
2872 if (epilogue_p == 2)
2873 i += 2;
2874 else
2875 XVECEXP (insn, 0, n_regs) = gen_rtx_CLOBBER (VOIDmode, ret_addr);
2876 for (r = start_call; r <= end_call; r++, off += UNITS_PER_WORD, i++)
2878 rtx reg = gen_rtx_REG (SImode, r);
2879 rtx mem
2880 = gen_frame_mem (SImode, plus_constant (Pmode, base_reg, off));
2882 if (epilogue_p)
2883 XVECEXP (insn, 0, i) = gen_rtx_SET (reg, mem);
2884 else
2885 XVECEXP (insn, 0, i) = gen_rtx_SET (mem, reg);
2886 gmask = gmask & ~(1L << r);
2888 if (epilogue_p == 2)
2889 sibthunk_insn = insn;
2890 else
2892 insn = frame_insn (insn);
2893 for (r = start_call, off = 0;
2894 r <= end_call;
2895 r++, off += UNITS_PER_WORD)
2897 rtx reg = gen_rtx_REG (SImode, r);
2898 if (epilogue_p)
2899 add_reg_note (insn, REG_CFA_RESTORE, reg);
2900 else
2902 rtx mem = gen_rtx_MEM (SImode, plus_constant (Pmode,
2903 base_reg,
2904 off));
2906 add_reg_note (insn, REG_CFA_OFFSET,
2907 gen_rtx_SET (mem, reg));
2911 offset += off;
2914 for (regno = 0; regno <= 31; regno++)
2916 machine_mode mode = SImode;
2917 bool found = false;
2919 if (TARGET_LL64
2920 && (regno % 2 == 0)
2921 && ((gmask & (1L << regno)) != 0)
2922 && ((gmask & (1L << (regno+1))) != 0))
2924 found = true;
2925 mode = DImode;
2927 else if ((gmask & (1L << regno)) != 0)
2929 found = true;
2930 mode = SImode;
2933 if (found)
2935 rtx reg = gen_rtx_REG (mode, regno);
2936 rtx addr, mem;
2937 int cfa_adjust = *first_offset;
2939 if (*first_offset)
2941 gcc_assert (!offset);
2942 addr = plus_constant (Pmode, base_reg, *first_offset);
2943 addr = gen_rtx_PRE_MODIFY (Pmode, base_reg, addr);
2944 *first_offset = 0;
2946 else
2948 gcc_assert (SMALL_INT (offset));
2949 addr = plus_constant (Pmode, base_reg, offset);
2951 mem = gen_frame_mem (mode, addr);
2952 if (epilogue_p)
2954 rtx insn =
2955 frame_move_inc (reg, mem, base_reg, addr);
2956 add_reg_note (insn, REG_CFA_RESTORE, reg);
2957 if (cfa_adjust)
2959 enum reg_note note = REG_CFA_ADJUST_CFA;
2960 add_reg_note (insn, note,
2961 gen_rtx_SET (stack_pointer_rtx,
2962 plus_constant (Pmode,
2963 stack_pointer_rtx,
2964 cfa_adjust)));
2967 else
2968 frame_move_inc (mem, reg, base_reg, addr);
2969 offset += UNITS_PER_WORD;
2970 if (mode == DImode)
2972 offset += UNITS_PER_WORD;
2973 ++regno;
2975 } /* if */
2976 } /* for */
2977 }/* if */
2978 if (sibthunk_insn)
2980 int start_call = frame->millicode_start_reg;
2981 int end_call = frame->millicode_end_reg;
2982 int r;
2984 rtx r12 = gen_rtx_REG (Pmode, 12);
2986 frame_insn (gen_rtx_SET (r12, GEN_INT (offset)));
2987 XVECEXP (sibthunk_insn, 0, 0) = ret_rtx;
2988 XVECEXP (sibthunk_insn, 0, 1)
2989 = gen_rtx_SET (stack_pointer_rtx,
2990 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r12));
2991 sibthunk_insn = emit_jump_insn (sibthunk_insn);
2992 RTX_FRAME_RELATED_P (sibthunk_insn) = 1;
2994 /* Would be nice if we could do this earlier, when the PARALLEL
2995 is populated, but these need to be attached after the
2996 emit. */
2997 for (r = start_call; r <= end_call; r++)
2999 rtx reg = gen_rtx_REG (SImode, r);
3000 add_reg_note (sibthunk_insn, REG_CFA_RESTORE, reg);
3003 } /* arc_save_restore */
3005 /* Build dwarf information when the context is saved via AUX_IRQ_CTRL
3006 mechanism. */
3008 static void
3009 arc_dwarf_emit_irq_save_regs (void)
3011 rtx tmp, par, insn, reg;
3012 int i, offset, j;
3014 par = gen_rtx_SEQUENCE (VOIDmode,
3015 rtvec_alloc (irq_ctrl_saved.irq_save_last_reg + 1
3016 + irq_ctrl_saved.irq_save_blink
3017 + irq_ctrl_saved.irq_save_lpcount
3018 + 1));
3020 /* Build the stack adjustment note for unwind info. */
3021 j = 0;
3022 offset = UNITS_PER_WORD * (irq_ctrl_saved.irq_save_last_reg + 1
3023 + irq_ctrl_saved.irq_save_blink
3024 + irq_ctrl_saved.irq_save_lpcount);
3025 tmp = plus_constant (Pmode, stack_pointer_rtx, -1 * offset);
3026 tmp = gen_rtx_SET (stack_pointer_rtx, tmp);
3027 RTX_FRAME_RELATED_P (tmp) = 1;
3028 XVECEXP (par, 0, j++) = tmp;
3030 offset -= UNITS_PER_WORD;
3032 /* 1st goes LP_COUNT. */
3033 if (irq_ctrl_saved.irq_save_lpcount)
3035 reg = gen_rtx_REG (SImode, 60);
3036 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3037 tmp = gen_frame_mem (SImode, tmp);
3038 tmp = gen_rtx_SET (tmp, reg);
3039 RTX_FRAME_RELATED_P (tmp) = 1;
3040 XVECEXP (par, 0, j++) = tmp;
3041 offset -= UNITS_PER_WORD;
3044 /* 2nd goes BLINK. */
3045 if (irq_ctrl_saved.irq_save_blink)
3047 reg = gen_rtx_REG (SImode, 31);
3048 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3049 tmp = gen_frame_mem (SImode, tmp);
3050 tmp = gen_rtx_SET (tmp, reg);
3051 RTX_FRAME_RELATED_P (tmp) = 1;
3052 XVECEXP (par, 0, j++) = tmp;
3053 offset -= UNITS_PER_WORD;
3056 /* Build the parallel of the remaining registers recorded as saved
3057 for unwind. */
3058 for (i = irq_ctrl_saved.irq_save_last_reg; i >= 0; i--)
3060 reg = gen_rtx_REG (SImode, i);
3061 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3062 tmp = gen_frame_mem (SImode, tmp);
3063 tmp = gen_rtx_SET (tmp, reg);
3064 RTX_FRAME_RELATED_P (tmp) = 1;
3065 XVECEXP (par, 0, j++) = tmp;
3066 offset -= UNITS_PER_WORD;
3069 /* Dummy insn used to anchor the dwarf info. */
3070 insn = emit_insn (gen_stack_irq_dwarf());
3071 add_reg_note (insn, REG_FRAME_RELATED_EXPR, par);
3072 RTX_FRAME_RELATED_P (insn) = 1;
3075 /* Set up the stack and frame pointer (if desired) for the function. */
3077 void
3078 arc_expand_prologue (void)
3080 int size;
3081 unsigned int gmask = cfun->machine->frame_info.gmask;
3082 /* unsigned int frame_pointer_offset;*/
3083 unsigned int frame_size_to_allocate;
3084 /* (FIXME: The first store will use a PRE_MODIFY; this will usually be r13.
3085 Change the stack layout so that we rather store a high register with the
3086 PRE_MODIFY, thus enabling more short insn generation.) */
3087 int first_offset = 0;
3088 unsigned int fn_type = arc_compute_function_type (cfun);
3090 /* Naked functions don't have prologue. */
3091 if (ARC_NAKED_P (fn_type))
3092 return;
3094 /* Compute total frame size. */
3095 size = arc_compute_frame_size ();
3097 if (flag_stack_usage_info)
3098 current_function_static_stack_size = size;
3100 /* Keep track of frame size to be allocated. */
3101 frame_size_to_allocate = size;
3103 /* These cases shouldn't happen. Catch them now. */
3104 gcc_assert (!(size == 0 && gmask));
3106 /* Allocate space for register arguments if this is a variadic function. */
3107 if (cfun->machine->frame_info.pretend_size != 0)
3109 /* Ensure pretend_size is maximum of 8 * word_size. */
3110 gcc_assert (cfun->machine->frame_info.pretend_size <= 32);
3112 frame_stack_add (-(HOST_WIDE_INT)cfun->machine->frame_info.pretend_size);
3113 frame_size_to_allocate -= cfun->machine->frame_info.pretend_size;
3116 /* IRQ using automatic save mechanism will save the register before
3117 anything we do. */
3118 if (ARC_AUTO_IRQ_P (fn_type)
3119 && !ARC_FAST_INTERRUPT_P (fn_type))
3121 arc_dwarf_emit_irq_save_regs ();
3124 /* The home-grown ABI says link register is saved first. */
3125 if (arc_must_save_return_addr (cfun)
3126 && !ARC_AUTOBLINK_IRQ_P (fn_type))
3128 rtx ra = gen_rtx_REG (SImode, RETURN_ADDR_REGNUM);
3129 rtx mem = gen_frame_mem (Pmode,
3130 gen_rtx_PRE_DEC (Pmode,
3131 stack_pointer_rtx));
3133 frame_move_inc (mem, ra, stack_pointer_rtx, 0);
3134 frame_size_to_allocate -= UNITS_PER_WORD;
3137 /* Save any needed call-saved regs (and call-used if this is an
3138 interrupt handler) for ARCompact ISA. */
3139 if (cfun->machine->frame_info.reg_size)
3141 first_offset = -cfun->machine->frame_info.reg_size;
3142 /* N.B. FRAME_POINTER_MASK and RETURN_ADDR_MASK are cleared in gmask. */
3143 arc_save_restore (stack_pointer_rtx, gmask, 0, &first_offset);
3144 frame_size_to_allocate -= cfun->machine->frame_info.reg_size;
3147 /* In the case of millicode thunk, we need to restore the clobbered
3148 blink register. */
3149 if (cfun->machine->frame_info.millicode_end_reg > 0
3150 && arc_must_save_return_addr (cfun))
3152 HOST_WIDE_INT tmp = cfun->machine->frame_info.reg_size;
3153 emit_insn (gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
3154 gen_rtx_MEM (Pmode,
3155 plus_constant (Pmode,
3156 stack_pointer_rtx,
3157 tmp))));
3160 /* Save frame pointer if needed. First save the FP on stack, if not
3161 autosaved. */
3162 if (arc_frame_pointer_needed ()
3163 && !ARC_AUTOFP_IRQ_P (fn_type))
3165 rtx addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3166 GEN_INT (-UNITS_PER_WORD + first_offset));
3167 rtx mem = gen_frame_mem (Pmode, gen_rtx_PRE_MODIFY (Pmode,
3168 stack_pointer_rtx,
3169 addr));
3170 frame_move_inc (mem, frame_pointer_rtx, stack_pointer_rtx, 0);
3171 frame_size_to_allocate -= UNITS_PER_WORD;
3172 first_offset = 0;
3175 /* Emit mov fp,sp. */
3176 if (arc_frame_pointer_needed ())
3178 frame_move (frame_pointer_rtx, stack_pointer_rtx);
3181 /* ??? We don't handle the case where the saved regs are more than 252
3182 bytes away from sp. This can be handled by decrementing sp once, saving
3183 the regs, and then decrementing it again. The epilogue doesn't have this
3184 problem as the `ld' insn takes reg+limm values (though it would be more
3185 efficient to avoid reg+limm). */
3187 frame_size_to_allocate -= first_offset;
3188 /* Allocate the stack frame. */
3189 if (frame_size_to_allocate > 0)
3191 frame_stack_add ((HOST_WIDE_INT) 0 - frame_size_to_allocate);
3192 /* If the frame pointer is needed, emit a special barrier that
3193 will prevent the scheduler from moving stores to the frame
3194 before the stack adjustment. */
3195 if (arc_frame_pointer_needed ())
3196 emit_insn (gen_stack_tie (stack_pointer_rtx,
3197 hard_frame_pointer_rtx));
3200 /* Setup the gp register, if needed. */
3201 if (crtl->uses_pic_offset_table)
3202 arc_finalize_pic ();
3205 /* Do any necessary cleanup after a function to restore stack, frame,
3206 and regs. */
3208 void
3209 arc_expand_epilogue (int sibcall_p)
3211 int size;
3212 unsigned int fn_type = arc_compute_function_type (cfun);
3214 size = arc_compute_frame_size ();
3216 unsigned int pretend_size = cfun->machine->frame_info.pretend_size;
3217 unsigned int frame_size;
3218 unsigned int size_to_deallocate;
3219 int restored;
3220 int can_trust_sp_p = !cfun->calls_alloca;
3221 int first_offset = 0;
3222 int millicode_p = cfun->machine->frame_info.millicode_end_reg > 0;
3223 rtx insn;
3225 /* Naked functions don't have epilogue. */
3226 if (ARC_NAKED_P (fn_type))
3227 return;
3229 size_to_deallocate = size;
3231 frame_size = size - (pretend_size +
3232 cfun->machine->frame_info.reg_size +
3233 cfun->machine->frame_info.extra_size);
3235 /* ??? There are lots of optimizations that can be done here.
3236 EG: Use fp to restore regs if it's closer.
3237 Maybe in time we'll do them all. For now, always restore regs from
3238 sp, but don't restore sp if we don't have to. */
3240 if (!can_trust_sp_p)
3241 gcc_assert (arc_frame_pointer_needed ());
3243 /* Restore stack pointer to the beginning of saved register area for
3244 ARCompact ISA. */
3245 if (frame_size)
3247 if (arc_frame_pointer_needed ())
3248 frame_move (stack_pointer_rtx, frame_pointer_rtx);
3249 else
3250 first_offset = frame_size;
3251 size_to_deallocate -= frame_size;
3253 else if (!can_trust_sp_p)
3254 frame_stack_add (-frame_size);
3257 /* Restore any saved registers. */
3258 if (arc_frame_pointer_needed ()
3259 && !ARC_AUTOFP_IRQ_P (fn_type))
3261 rtx addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
3263 insn = frame_move_inc (frame_pointer_rtx, gen_frame_mem (Pmode, addr),
3264 stack_pointer_rtx, 0);
3265 add_reg_note (insn, REG_CFA_RESTORE, frame_pointer_rtx);
3266 add_reg_note (insn, REG_CFA_DEF_CFA,
3267 plus_constant (SImode, stack_pointer_rtx,
3268 4));
3269 size_to_deallocate -= UNITS_PER_WORD;
3272 /* Load blink after the calls to thunk calls in case of optimize size. */
3273 if (millicode_p)
3275 int sibthunk_p = (!sibcall_p
3276 && fn_type == ARC_FUNCTION_NORMAL
3277 && !cfun->machine->frame_info.pretend_size);
3279 gcc_assert (!(cfun->machine->frame_info.gmask
3280 & (FRAME_POINTER_MASK | RETURN_ADDR_MASK)));
3281 arc_save_restore (stack_pointer_rtx,
3282 cfun->machine->frame_info.gmask,
3283 1 + sibthunk_p, &first_offset);
3284 if (sibthunk_p)
3285 return;
3287 /* If we are to restore registers, and first_offset would require
3288 a limm to be encoded in a PRE_MODIFY, yet we can add it with a
3289 fast add to the stack pointer, do this now. */
3290 if ((!SMALL_INT (first_offset)
3291 && cfun->machine->frame_info.gmask
3292 && ((TARGET_ARC700 && !optimize_size)
3293 ? first_offset <= 0x800
3294 : satisfies_constraint_C2a (GEN_INT (first_offset))))
3295 /* Also do this if we have both gprs and return
3296 address to restore, and they both would need a LIMM. */
3297 || (arc_must_save_return_addr (cfun)
3298 && !SMALL_INT ((cfun->machine->frame_info.reg_size + first_offset) >> 2)
3299 && cfun->machine->frame_info.gmask))
3301 frame_stack_add (first_offset);
3302 first_offset = 0;
3304 if (arc_must_save_return_addr (cfun)
3305 && !ARC_AUTOBLINK_IRQ_P (fn_type))
3307 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
3308 int ra_offs = cfun->machine->frame_info.reg_size + first_offset;
3309 rtx addr = plus_constant (Pmode, stack_pointer_rtx, ra_offs);
3310 HOST_WIDE_INT cfa_adjust = 0;
3312 /* If the load of blink would need a LIMM, but we can add
3313 the offset quickly to sp, do the latter. */
3314 if (!SMALL_INT (ra_offs >> 2)
3315 && !cfun->machine->frame_info.gmask
3316 && ((TARGET_ARC700 && !optimize_size)
3317 ? ra_offs <= 0x800
3318 : satisfies_constraint_C2a (GEN_INT (ra_offs))))
3320 size_to_deallocate -= ra_offs - first_offset;
3321 first_offset = 0;
3322 frame_stack_add (ra_offs);
3323 ra_offs = 0;
3324 addr = stack_pointer_rtx;
3326 /* See if we can combine the load of the return address with the
3327 final stack adjustment.
3328 We need a separate load if there are still registers to
3329 restore. We also want a separate load if the combined insn
3330 would need a limm, but a separate load doesn't. */
3331 if (ra_offs
3332 && !cfun->machine->frame_info.gmask
3333 && (SMALL_INT (ra_offs) || !SMALL_INT (ra_offs >> 2)))
3335 addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, addr);
3336 cfa_adjust = ra_offs;
3337 first_offset = 0;
3338 size_to_deallocate -= cfun->machine->frame_info.reg_size;
3340 else if (!ra_offs && size_to_deallocate == UNITS_PER_WORD)
3342 addr = gen_rtx_POST_INC (Pmode, addr);
3343 cfa_adjust = GET_MODE_SIZE (Pmode);
3344 size_to_deallocate = 0;
3347 insn = frame_move_inc (ra, gen_frame_mem (Pmode, addr),
3348 stack_pointer_rtx, addr);
3349 if (cfa_adjust)
3351 enum reg_note note = REG_CFA_ADJUST_CFA;
3353 add_reg_note (insn, note,
3354 gen_rtx_SET (stack_pointer_rtx,
3355 plus_constant (SImode, stack_pointer_rtx,
3356 cfa_adjust)));
3358 add_reg_note (insn, REG_CFA_RESTORE, ra);
3361 if (!millicode_p)
3363 if (cfun->machine->frame_info.reg_size)
3364 arc_save_restore (stack_pointer_rtx,
3365 /* The zeroing of these two bits is unnecessary, but leave this in for clarity. */
3366 cfun->machine->frame_info.gmask
3367 & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK), 1, &first_offset);
3370 /* The rest of this function does the following:
3371 ARCompact : handle epilogue_delay, restore sp (phase-2), return
3374 /* Keep track of how much of the stack pointer we've restored.
3375 It makes the following a lot more readable. */
3376 size_to_deallocate += first_offset;
3377 restored = size - size_to_deallocate;
3379 if (size > restored)
3380 frame_stack_add (size - restored);
3382 /* For frames that use __builtin_eh_return, the register defined by
3383 EH_RETURN_STACKADJ_RTX is set to 0 for all standard return paths.
3384 On eh_return paths however, the register is set to the value that
3385 should be added to the stack pointer in order to restore the
3386 correct stack pointer for the exception handling frame.
3388 For ARC we are going to use r2 for EH_RETURN_STACKADJ_RTX, add
3389 this onto the stack for eh_return frames. */
3390 if (crtl->calls_eh_return)
3391 emit_insn (gen_add2_insn (stack_pointer_rtx,
3392 EH_RETURN_STACKADJ_RTX));
3394 /* Emit the return instruction. */
3395 if (sibcall_p == FALSE)
3396 emit_jump_insn (gen_simple_return ());
3399 /* Return rtx for the location of the return address on the stack,
3400 suitable for use in __builtin_eh_return. The new return address
3401 will be written to this location in order to redirect the return to
3402 the exception handler. */
3405 arc_eh_return_address_location (void)
3407 rtx mem;
3408 int offset;
3409 struct arc_frame_info *afi;
3411 arc_compute_frame_size ();
3412 afi = &cfun->machine->frame_info;
3414 gcc_assert (crtl->calls_eh_return);
3415 gcc_assert (afi->save_return_addr);
3416 gcc_assert (afi->extra_size >= 4);
3418 /* The '-4' removes the size of the return address, which is
3419 included in the 'extra_size' field. */
3420 offset = afi->reg_size + afi->extra_size - 4;
3421 mem = gen_frame_mem (Pmode,
3422 plus_constant (Pmode, frame_pointer_rtx, offset));
3424 /* The following should not be needed, and is, really a hack. The
3425 issue being worked around here is that the DSE (Dead Store
3426 Elimination) pass will remove this write to the stack as it sees
3427 a single store and no corresponding read. The read however
3428 occurs in the epilogue code, which is not added into the function
3429 rtl until a later pass. So, at the time of DSE, the decision to
3430 remove this store seems perfectly sensible. Marking the memory
3431 address as volatile obviously has the effect of preventing DSE
3432 from removing the store. */
3433 MEM_VOLATILE_P (mem) = 1;
3434 return mem;
3437 /* PIC */
3439 /* Helper to generate unspec constant. */
3441 static rtx
3442 arc_unspec_offset (rtx loc, int unspec)
3444 return gen_rtx_CONST (Pmode, gen_rtx_UNSPEC (Pmode, gen_rtvec (1, loc),
3445 unspec));
3448 /* Emit special PIC prologues and epilogues. */
3449 /* If the function has any GOTOFF relocations, then the GOTBASE
3450 register has to be setup in the prologue
3451 The instruction needed at the function start for setting up the
3452 GOTBASE register is
3453 add rdest, pc,
3454 ----------------------------------------------------------
3455 The rtl to be emitted for this should be:
3456 set (reg basereg)
3457 (plus (reg pc)
3458 (const (unspec (symref _DYNAMIC) 3)))
3459 ---------------------------------------------------------- */
3461 static void
3462 arc_finalize_pic (void)
3464 rtx pat;
3465 rtx baseptr_rtx = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
3467 if (crtl->uses_pic_offset_table == 0)
3468 return;
3470 gcc_assert (flag_pic != 0);
3472 pat = gen_rtx_SYMBOL_REF (Pmode, "_DYNAMIC");
3473 pat = arc_unspec_offset (pat, ARC_UNSPEC_GOT);
3474 pat = gen_rtx_SET (baseptr_rtx, pat);
3476 emit_insn (pat);
3479 /* !TARGET_BARREL_SHIFTER support. */
3480 /* Emit a shift insn to set OP0 to OP1 shifted by OP2; CODE specifies what
3481 kind of shift. */
3483 void
3484 emit_shift (enum rtx_code code, rtx op0, rtx op1, rtx op2)
3486 rtx shift = gen_rtx_fmt_ee (code, SImode, op1, op2);
3487 rtx pat
3488 = ((shift4_operator (shift, SImode) ? gen_shift_si3 : gen_shift_si3_loop)
3489 (op0, op1, op2, shift));
3490 emit_insn (pat);
3493 /* Output the assembler code for doing a shift.
3494 We go to a bit of trouble to generate efficient code as the ARC601 only has
3495 single bit shifts. This is taken from the h8300 port. We only have one
3496 mode of shifting and can't access individual bytes like the h8300 can, so
3497 this is greatly simplified (at the expense of not generating hyper-
3498 efficient code).
3500 This function is not used if the variable shift insns are present. */
3502 /* FIXME: This probably can be done using a define_split in arc.md.
3503 Alternately, generate rtx rather than output instructions. */
3505 const char *
3506 output_shift (rtx *operands)
3508 /* static int loopend_lab;*/
3509 rtx shift = operands[3];
3510 machine_mode mode = GET_MODE (shift);
3511 enum rtx_code code = GET_CODE (shift);
3512 const char *shift_one;
3514 gcc_assert (mode == SImode);
3516 switch (code)
3518 case ASHIFT: shift_one = "add %0,%1,%1"; break;
3519 case ASHIFTRT: shift_one = "asr %0,%1"; break;
3520 case LSHIFTRT: shift_one = "lsr %0,%1"; break;
3521 default: gcc_unreachable ();
3524 if (GET_CODE (operands[2]) != CONST_INT)
3526 output_asm_insn ("and.f lp_count,%2, 0x1f", operands);
3527 goto shiftloop;
3529 else
3531 int n;
3533 n = INTVAL (operands[2]);
3535 /* Only consider the lower 5 bits of the shift count. */
3536 n = n & 0x1f;
3538 /* First see if we can do them inline. */
3539 /* ??? We could get better scheduling & shorter code (using short insns)
3540 by using splitters. Alas, that'd be even more verbose. */
3541 if (code == ASHIFT && n <= 9 && n > 2
3542 && dest_reg_operand (operands[4], SImode))
3544 output_asm_insn ("mov %4,0\n\tadd3 %0,%4,%1", operands);
3545 for (n -=3 ; n >= 3; n -= 3)
3546 output_asm_insn ("add3 %0,%4,%0", operands);
3547 if (n == 2)
3548 output_asm_insn ("add2 %0,%4,%0", operands);
3549 else if (n)
3550 output_asm_insn ("add %0,%0,%0", operands);
3552 else if (n <= 4)
3554 while (--n >= 0)
3556 output_asm_insn (shift_one, operands);
3557 operands[1] = operands[0];
3560 /* See if we can use a rotate/and. */
3561 else if (n == BITS_PER_WORD - 1)
3563 switch (code)
3565 case ASHIFT :
3566 output_asm_insn ("and %0,%1,1\n\tror %0,%0", operands);
3567 break;
3568 case ASHIFTRT :
3569 /* The ARC doesn't have a rol insn. Use something else. */
3570 output_asm_insn ("add.f 0,%1,%1\n\tsbc %0,%0,%0", operands);
3571 break;
3572 case LSHIFTRT :
3573 /* The ARC doesn't have a rol insn. Use something else. */
3574 output_asm_insn ("add.f 0,%1,%1\n\trlc %0,0", operands);
3575 break;
3576 default:
3577 break;
3580 else if (n == BITS_PER_WORD - 2 && dest_reg_operand (operands[4], SImode))
3582 switch (code)
3584 case ASHIFT :
3585 output_asm_insn ("and %0,%1,3\n\tror %0,%0\n\tror %0,%0", operands);
3586 break;
3587 case ASHIFTRT :
3588 #if 1 /* Need some scheduling comparisons. */
3589 output_asm_insn ("add.f %4,%1,%1\n\tsbc %0,%0,%0\n\t"
3590 "add.f 0,%4,%4\n\trlc %0,%0", operands);
3591 #else
3592 output_asm_insn ("add.f %4,%1,%1\n\tbxor %0,%4,31\n\t"
3593 "sbc.f %0,%0,%4\n\trlc %0,%0", operands);
3594 #endif
3595 break;
3596 case LSHIFTRT :
3597 #if 1
3598 output_asm_insn ("add.f %4,%1,%1\n\trlc %0,0\n\t"
3599 "add.f 0,%4,%4\n\trlc %0,%0", operands);
3600 #else
3601 output_asm_insn ("add.f %0,%1,%1\n\trlc.f %0,0\n\t"
3602 "and %0,%0,1\n\trlc %0,%0", operands);
3603 #endif
3604 break;
3605 default:
3606 break;
3609 else if (n == BITS_PER_WORD - 3 && code == ASHIFT)
3610 output_asm_insn ("and %0,%1,7\n\tror %0,%0\n\tror %0,%0\n\tror %0,%0",
3611 operands);
3612 /* Must loop. */
3613 else
3615 operands[2] = GEN_INT (n);
3616 output_asm_insn ("mov.f lp_count, %2", operands);
3618 shiftloop:
3620 output_asm_insn ("lpnz\t2f", operands);
3621 output_asm_insn (shift_one, operands);
3622 output_asm_insn ("nop", operands);
3623 fprintf (asm_out_file, "2:\t%s end single insn loop\n",
3624 ASM_COMMENT_START);
3629 return "";
3632 /* Nested function support. */
3634 /* Directly store VALUE into memory object BLOCK at OFFSET. */
3636 static void
3637 emit_store_direct (rtx block, int offset, int value)
3639 emit_insn (gen_store_direct (adjust_address (block, SImode, offset),
3640 force_reg (SImode,
3641 gen_int_mode (value, SImode))));
3644 /* Emit RTL insns to initialize the variable parts of a trampoline.
3645 FNADDR is an RTX for the address of the function's pure code.
3646 CXT is an RTX for the static chain value for the function. */
3647 /* With potentially multiple shared objects loaded, and multiple stacks
3648 present for multiple thereds where trampolines might reside, a simple
3649 range check will likely not suffice for the profiler to tell if a callee
3650 is a trampoline. We a speedier check by making the trampoline start at
3651 an address that is not 4-byte aligned.
3652 A trampoline looks like this:
3654 nop_s 0x78e0
3655 entry:
3656 ld_s r12,[pcl,12] 0xd403
3657 ld r11,[pcl,12] 0x170c 700b
3658 j_s [r12] 0x7c00
3659 nop_s 0x78e0
3661 The fastest trampoline to execute for trampolines within +-8KB of CTX
3662 would be:
3663 add2 r11,pcl,s12
3664 j [limm] 0x20200f80 limm
3665 and that would also be faster to write to the stack by computing the offset
3666 from CTX to TRAMP at compile time. However, it would really be better to
3667 get rid of the high cost of cache invalidation when generating trampolines,
3668 which requires that the code part of trampolines stays constant, and
3669 additionally either
3670 - making sure that no executable code but trampolines is on the stack,
3671 no icache entries linger for the area of the stack from when before the
3672 stack was allocated, and allocating trampolines in trampoline-only
3673 cache lines
3675 - allocate trampolines fram a special pool of pre-allocated trampolines. */
3677 static void
3678 arc_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
3680 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3682 emit_store_direct (tramp, 0, TARGET_BIG_ENDIAN ? 0x78e0d403 : 0xd40378e0);
3683 emit_store_direct (tramp, 4, TARGET_BIG_ENDIAN ? 0x170c700b : 0x700b170c);
3684 emit_store_direct (tramp, 8, TARGET_BIG_ENDIAN ? 0x7c0078e0 : 0x78e07c00);
3685 emit_move_insn (adjust_address (tramp, SImode, 12), fnaddr);
3686 emit_move_insn (adjust_address (tramp, SImode, 16), cxt);
3687 emit_insn (gen_flush_icache (adjust_address (tramp, SImode, 0)));
3690 /* Allow the profiler to easily distinguish trampolines from normal
3691 functions. */
3693 static rtx
3694 arc_trampoline_adjust_address (rtx addr)
3696 return plus_constant (Pmode, addr, 2);
3699 /* This is set briefly to 1 when we output a ".as" address modifer, and then
3700 reset when we output the scaled address. */
3701 static int output_scaled = 0;
3703 /* Print operand X (an rtx) in assembler syntax to file FILE.
3704 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
3705 For `%' followed by punctuation, CODE is the punctuation and X is null. */
3706 /* In final.c:output_asm_insn:
3707 'l' : label
3708 'a' : address
3709 'c' : constant address if CONSTANT_ADDRESS_P
3710 'n' : negative
3711 Here:
3712 'Z': log2(x+1)-1
3713 'z': log2
3714 'M': log2(~x)
3715 'p': bit Position of lsb
3716 's': size of bit field
3717 '#': condbranch delay slot suffix
3718 '*': jump delay slot suffix
3719 '?' : nonjump-insn suffix for conditional execution or short instruction
3720 '!' : jump / call suffix for conditional execution or short instruction
3721 '`': fold constant inside unary o-perator, re-recognize, and emit.
3724 'R': Second word
3726 'B': Branch comparison operand - suppress sda reference
3727 'H': Most significant word
3728 'L': Least significant word
3729 'A': ASCII decimal representation of floating point value
3730 'U': Load/store update or scaling indicator
3731 'V': cache bypass indicator for volatile
3735 'O': Operator
3736 'o': original symbol - no @ prepending. */
3738 void
3739 arc_print_operand (FILE *file, rtx x, int code)
3741 switch (code)
3743 case 'Z':
3744 if (GET_CODE (x) == CONST_INT)
3745 fprintf (file, "%d",exact_log2(INTVAL (x) + 1) - 1 );
3746 else
3747 output_operand_lossage ("invalid operand to %%Z code");
3749 return;
3751 case 'z':
3752 if (GET_CODE (x) == CONST_INT)
3753 fprintf (file, "%d",exact_log2(INTVAL (x)) );
3754 else
3755 output_operand_lossage ("invalid operand to %%z code");
3757 return;
3759 case 'c':
3760 if (GET_CODE (x) == CONST_INT)
3761 fprintf (file, "%d", INTVAL (x) );
3762 else
3763 output_operand_lossage ("invalid operands to %%c code");
3765 return;
3767 case 'M':
3768 if (GET_CODE (x) == CONST_INT)
3769 fprintf (file, "%d",exact_log2(~INTVAL (x)) );
3770 else
3771 output_operand_lossage ("invalid operand to %%M code");
3773 return;
3775 case 'p':
3776 if (GET_CODE (x) == CONST_INT)
3777 fprintf (file, "%d", exact_log2 (INTVAL (x) & -INTVAL (x)));
3778 else
3779 output_operand_lossage ("invalid operand to %%p code");
3780 return;
3782 case 's':
3783 if (GET_CODE (x) == CONST_INT)
3785 HOST_WIDE_INT i = INTVAL (x);
3786 HOST_WIDE_INT s = exact_log2 (i & -i);
3787 fprintf (file, "%d", exact_log2 (((0xffffffffUL & i) >> s) + 1));
3789 else
3790 output_operand_lossage ("invalid operand to %%s code");
3791 return;
3793 case '#' :
3794 /* Conditional branches depending on condition codes.
3795 Note that this is only for branches that were known to depend on
3796 condition codes before delay slot scheduling;
3797 out-of-range brcc / bbit expansions should use '*'.
3798 This distinction is important because of the different
3799 allowable delay slot insns and the output of the delay suffix
3800 for TARGET_AT_DBR_COND_EXEC. */
3801 case '*' :
3802 /* Unconditional branches / branches not depending on condition codes.
3803 This could also be a CALL_INSN.
3804 Output the appropriate delay slot suffix. */
3805 if (final_sequence && final_sequence->len () != 1)
3807 rtx_insn *jump = final_sequence->insn (0);
3808 rtx_insn *delay = final_sequence->insn (1);
3810 /* For TARGET_PAD_RETURN we might have grabbed the delay insn. */
3811 if (delay->deleted ())
3812 return;
3813 if (JUMP_P (jump) && INSN_ANNULLED_BRANCH_P (jump))
3814 fputs (INSN_FROM_TARGET_P (delay) ? ".d"
3815 : TARGET_AT_DBR_CONDEXEC && code == '#' ? ".d"
3816 : get_attr_type (jump) == TYPE_RETURN && code == '#' ? ""
3817 : ".nd",
3818 file);
3819 else
3820 fputs (".d", file);
3822 return;
3823 case '?' : /* with leading "." */
3824 case '!' : /* without leading "." */
3825 /* This insn can be conditionally executed. See if the ccfsm machinery
3826 says it should be conditionalized.
3827 If it shouldn't, we'll check the compact attribute if this insn
3828 has a short variant, which may be used depending on code size and
3829 alignment considerations. */
3830 if (current_insn_predicate)
3831 arc_ccfsm_current.cc
3832 = get_arc_condition_code (current_insn_predicate);
3833 if (ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current))
3835 /* Is this insn in a delay slot sequence? */
3836 if (!final_sequence || XVECLEN (final_sequence, 0) < 2
3837 || current_insn_predicate
3838 || CALL_P (final_sequence->insn (0))
3839 || simplejump_p (final_sequence->insn (0)))
3841 /* This insn isn't in a delay slot sequence, or conditionalized
3842 independently of its position in a delay slot. */
3843 fprintf (file, "%s%s",
3844 code == '?' ? "." : "",
3845 arc_condition_codes[arc_ccfsm_current.cc]);
3846 /* If this is a jump, there are still short variants. However,
3847 only beq_s / bne_s have the same offset range as b_s,
3848 and the only short conditional returns are jeq_s and jne_s. */
3849 if (code == '!'
3850 && (arc_ccfsm_current.cc == ARC_CC_EQ
3851 || arc_ccfsm_current.cc == ARC_CC_NE
3852 || 0 /* FIXME: check if branch in 7 bit range. */))
3853 output_short_suffix (file);
3855 else if (code == '!') /* Jump with delay slot. */
3856 fputs (arc_condition_codes[arc_ccfsm_current.cc], file);
3857 else /* An Instruction in a delay slot of a jump or call. */
3859 rtx jump = XVECEXP (final_sequence, 0, 0);
3860 rtx insn = XVECEXP (final_sequence, 0, 1);
3862 /* If the insn is annulled and is from the target path, we need
3863 to inverse the condition test. */
3864 if (JUMP_P (jump) && INSN_ANNULLED_BRANCH_P (jump))
3866 if (INSN_FROM_TARGET_P (insn))
3867 fprintf (file, "%s%s",
3868 code == '?' ? "." : "",
3869 arc_condition_codes[ARC_INVERSE_CONDITION_CODE (arc_ccfsm_current.cc)]);
3870 else
3871 fprintf (file, "%s%s",
3872 code == '?' ? "." : "",
3873 arc_condition_codes[arc_ccfsm_current.cc]);
3874 if (arc_ccfsm_current.state == 5)
3875 arc_ccfsm_current.state = 0;
3877 else
3878 /* This insn is executed for either path, so don't
3879 conditionalize it at all. */
3880 output_short_suffix (file);
3884 else
3885 output_short_suffix (file);
3886 return;
3887 case'`':
3888 /* FIXME: fold constant inside unary operator, re-recognize, and emit. */
3889 gcc_unreachable ();
3890 case 'd' :
3891 fputs (arc_condition_codes[get_arc_condition_code (x)], file);
3892 return;
3893 case 'D' :
3894 fputs (arc_condition_codes[ARC_INVERSE_CONDITION_CODE
3895 (get_arc_condition_code (x))],
3896 file);
3897 return;
3898 case 'R' :
3899 /* Write second word of DImode or DFmode reference,
3900 register or memory. */
3901 if (GET_CODE (x) == REG)
3902 fputs (reg_names[REGNO (x)+1], file);
3903 else if (GET_CODE (x) == MEM)
3905 fputc ('[', file);
3907 /* Handle possible auto-increment. For PRE_INC / PRE_DEC /
3908 PRE_MODIFY, we will have handled the first word already;
3909 For POST_INC / POST_DEC / POST_MODIFY, the access to the
3910 first word will be done later. In either case, the access
3911 to the first word will do the modify, and we only have
3912 to add an offset of four here. */
3913 if (GET_CODE (XEXP (x, 0)) == PRE_INC
3914 || GET_CODE (XEXP (x, 0)) == PRE_DEC
3915 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY
3916 || GET_CODE (XEXP (x, 0)) == POST_INC
3917 || GET_CODE (XEXP (x, 0)) == POST_DEC
3918 || GET_CODE (XEXP (x, 0)) == POST_MODIFY)
3919 output_address (VOIDmode,
3920 plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 4));
3921 else if (output_scaled)
3923 rtx addr = XEXP (x, 0);
3924 int size = GET_MODE_SIZE (GET_MODE (x));
3926 output_address (VOIDmode,
3927 plus_constant (Pmode, XEXP (addr, 0),
3928 ((INTVAL (XEXP (addr, 1)) + 4)
3929 >> (size == 2 ? 1 : 2))));
3930 output_scaled = 0;
3932 else
3933 output_address (VOIDmode,
3934 plus_constant (Pmode, XEXP (x, 0), 4));
3935 fputc (']', file);
3937 else
3938 output_operand_lossage ("invalid operand to %%R code");
3939 return;
3940 case 'S' :
3941 /* FIXME: remove %S option. */
3942 break;
3943 case 'B' /* Branch or other LIMM ref - must not use sda references. */ :
3944 if (CONSTANT_P (x))
3946 output_addr_const (file, x);
3947 return;
3949 break;
3950 case 'H' :
3951 case 'L' :
3952 if (GET_CODE (x) == REG)
3954 /* L = least significant word, H = most significant word. */
3955 if ((WORDS_BIG_ENDIAN != 0) ^ (code == 'L'))
3956 fputs (reg_names[REGNO (x)], file);
3957 else
3958 fputs (reg_names[REGNO (x)+1], file);
3960 else if (GET_CODE (x) == CONST_INT
3961 || GET_CODE (x) == CONST_DOUBLE)
3963 rtx first, second, word;
3965 split_double (x, &first, &second);
3967 if((WORDS_BIG_ENDIAN) == 0)
3968 word = (code == 'L' ? first : second);
3969 else
3970 word = (code == 'L' ? second : first);
3972 fprintf (file, "0x%08" PRIx32, ((uint32_t) INTVAL (word)));
3974 else
3975 output_operand_lossage ("invalid operand to %%H/%%L code");
3976 return;
3977 case 'A' :
3979 char str[30];
3981 gcc_assert (GET_CODE (x) == CONST_DOUBLE
3982 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT);
3984 real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x), sizeof (str), 0, 1);
3985 fprintf (file, "%s", str);
3986 return;
3988 case 'U' :
3989 /* Output a load/store with update indicator if appropriate. */
3990 if (GET_CODE (x) == MEM)
3992 rtx addr = XEXP (x, 0);
3993 switch (GET_CODE (addr))
3995 case PRE_INC: case PRE_DEC: case PRE_MODIFY:
3996 fputs (".a", file); break;
3997 case POST_INC: case POST_DEC: case POST_MODIFY:
3998 fputs (".ab", file); break;
3999 case PLUS:
4000 /* Are we using a scaled index? */
4001 if (GET_CODE (XEXP (addr, 0)) == MULT)
4002 fputs (".as", file);
4003 /* Can we use a scaled offset? */
4004 else if (CONST_INT_P (XEXP (addr, 1))
4005 && GET_MODE_SIZE (GET_MODE (x)) > 1
4006 && (!(INTVAL (XEXP (addr, 1))
4007 & (GET_MODE_SIZE (GET_MODE (x)) - 1) & 3))
4008 /* Does it make a difference? */
4009 && !SMALL_INT_RANGE(INTVAL (XEXP (addr, 1)),
4010 GET_MODE_SIZE (GET_MODE (x)) - 2, 0))
4012 fputs (".as", file);
4013 output_scaled = 1;
4015 else if (LEGITIMATE_SMALL_DATA_ADDRESS_P (addr)
4016 && GET_MODE_SIZE (GET_MODE (x)) > 1)
4018 tree decl = NULL_TREE;
4019 int align = 0;
4020 if (GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
4021 decl = SYMBOL_REF_DECL (XEXP (addr, 1));
4022 else if (GET_CODE (XEXP (XEXP (XEXP (addr, 1), 0), 0))
4023 == SYMBOL_REF)
4024 decl = SYMBOL_REF_DECL (XEXP (XEXP (XEXP (addr, 1), 0), 0));
4025 if (decl)
4026 align = DECL_ALIGN (decl);
4027 align = align / BITS_PER_UNIT;
4028 if ((GET_MODE_SIZE (GET_MODE (x)) == 2)
4029 && align && ((align & 1) == 0))
4030 fputs (".as", file);
4031 if ((GET_MODE_SIZE (GET_MODE (x)) >= 4)
4032 && align && ((align & 3) == 0))
4033 fputs (".as", file);
4035 break;
4036 case REG:
4037 break;
4038 default:
4039 gcc_assert (CONSTANT_P (addr)); break;
4042 else
4043 output_operand_lossage ("invalid operand to %%U code");
4044 return;
4045 case 'V' :
4046 /* Output cache bypass indicator for a load/store insn. Volatile memory
4047 refs are defined to use the cache bypass mechanism. */
4048 if (GET_CODE (x) == MEM)
4050 if (MEM_VOLATILE_P (x) && !TARGET_VOLATILE_CACHE_SET )
4051 fputs (".di", file);
4053 else
4054 output_operand_lossage ("invalid operand to %%V code");
4055 return;
4056 /* plt code. */
4057 case 'P':
4058 case 0 :
4059 /* Do nothing special. */
4060 break;
4061 case 'F':
4062 fputs (reg_names[REGNO (x)]+1, file);
4063 return;
4064 case '^':
4065 /* This punctuation character is needed because label references are
4066 printed in the output template using %l. This is a front end
4067 character, and when we want to emit a '@' before it, we have to use
4068 this '^'. */
4070 fputc('@',file);
4071 return;
4072 case 'O':
4073 /* Output an operator. */
4074 switch (GET_CODE (x))
4076 case PLUS: fputs ("add", file); return;
4077 case SS_PLUS: fputs ("adds", file); return;
4078 case AND: fputs ("and", file); return;
4079 case IOR: fputs ("or", file); return;
4080 case XOR: fputs ("xor", file); return;
4081 case MINUS: fputs ("sub", file); return;
4082 case SS_MINUS: fputs ("subs", file); return;
4083 case ASHIFT: fputs ("asl", file); return;
4084 case ASHIFTRT: fputs ("asr", file); return;
4085 case LSHIFTRT: fputs ("lsr", file); return;
4086 case ROTATERT: fputs ("ror", file); return;
4087 case MULT: fputs ("mpy", file); return;
4088 case ABS: fputs ("abs", file); return; /* Unconditional. */
4089 case NEG: fputs ("neg", file); return;
4090 case SS_NEG: fputs ("negs", file); return;
4091 case NOT: fputs ("not", file); return; /* Unconditional. */
4092 case ZERO_EXTEND:
4093 fputs ("ext", file); /* bmsk allows predication. */
4094 goto size_suffix;
4095 case SIGN_EXTEND: /* Unconditional. */
4096 fputs ("sex", file);
4097 size_suffix:
4098 switch (GET_MODE (XEXP (x, 0)))
4100 case E_QImode: fputs ("b", file); return;
4101 case E_HImode: fputs ("w", file); return;
4102 default: break;
4104 break;
4105 case SS_TRUNCATE:
4106 if (GET_MODE (x) != HImode)
4107 break;
4108 fputs ("sat16", file);
4109 default: break;
4111 output_operand_lossage ("invalid operand to %%O code"); return;
4112 case 'o':
4113 if (GET_CODE (x) == SYMBOL_REF)
4115 assemble_name (file, XSTR (x, 0));
4116 return;
4118 break;
4119 case '&':
4120 if (TARGET_ANNOTATE_ALIGN && cfun->machine->size_reason)
4121 fprintf (file, "; unalign: %d", cfun->machine->unalign);
4122 return;
4123 case '+':
4124 if (TARGET_V2)
4125 fputs ("m", file);
4126 else
4127 fputs ("h", file);
4128 return;
4129 case '_':
4130 if (TARGET_V2)
4131 fputs ("h", file);
4132 else
4133 fputs ("w", file);
4134 return;
4135 default :
4136 /* Unknown flag. */
4137 output_operand_lossage ("invalid operand output code");
4140 switch (GET_CODE (x))
4142 case REG :
4143 fputs (reg_names[REGNO (x)], file);
4144 break;
4145 case MEM :
4147 rtx addr = XEXP (x, 0);
4148 int size = GET_MODE_SIZE (GET_MODE (x));
4150 fputc ('[', file);
4152 switch (GET_CODE (addr))
4154 case PRE_INC: case POST_INC:
4155 output_address (VOIDmode,
4156 plus_constant (Pmode, XEXP (addr, 0), size)); break;
4157 case PRE_DEC: case POST_DEC:
4158 output_address (VOIDmode,
4159 plus_constant (Pmode, XEXP (addr, 0), -size));
4160 break;
4161 case PRE_MODIFY: case POST_MODIFY:
4162 output_address (VOIDmode, XEXP (addr, 1)); break;
4163 case PLUS:
4164 if (output_scaled)
4166 output_address (VOIDmode,
4167 plus_constant (Pmode, XEXP (addr, 0),
4168 (INTVAL (XEXP (addr, 1))
4169 >> (size == 2 ? 1 : 2))));
4170 output_scaled = 0;
4172 else
4173 output_address (VOIDmode, addr);
4174 break;
4175 default:
4176 if (flag_pic && CONSTANT_ADDRESS_P (addr))
4177 arc_output_pic_addr_const (file, addr, code);
4178 else
4179 output_address (VOIDmode, addr);
4180 break;
4182 fputc (']', file);
4183 break;
4185 case CONST_DOUBLE :
4186 /* We handle SFmode constants here as output_addr_const doesn't. */
4187 if (GET_MODE (x) == SFmode)
4189 long l;
4191 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
4192 fprintf (file, "0x%08lx", l);
4193 break;
4195 /* FALLTHRU */
4196 /* Let output_addr_const deal with it. */
4197 default :
4198 if (flag_pic
4199 || (GET_CODE (x) == CONST
4200 && GET_CODE (XEXP (x, 0)) == UNSPEC
4201 && (XINT (XEXP (x, 0), 1) == UNSPEC_TLS_OFF
4202 || XINT (XEXP (x, 0), 1) == UNSPEC_TLS_GD))
4203 || (GET_CODE (x) == CONST
4204 && GET_CODE (XEXP (x, 0)) == PLUS
4205 && GET_CODE (XEXP (XEXP (x, 0), 0)) == UNSPEC
4206 && (XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_TLS_OFF
4207 || XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_TLS_GD)))
4208 arc_output_pic_addr_const (file, x, code);
4209 else
4211 /* FIXME: Dirty way to handle @var@sda+const. Shd be handled
4212 with asm_output_symbol_ref */
4213 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4215 x = XEXP (x, 0);
4216 output_addr_const (file, XEXP (x, 0));
4217 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF && SYMBOL_REF_SMALL_P (XEXP (x, 0)))
4218 fprintf (file, "@sda");
4220 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4221 || INTVAL (XEXP (x, 1)) >= 0)
4222 fprintf (file, "+");
4223 output_addr_const (file, XEXP (x, 1));
4225 else
4226 output_addr_const (file, x);
4228 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
4229 fprintf (file, "@sda");
4230 break;
4234 /* Print a memory address as an operand to reference that memory location. */
4236 void
4237 arc_print_operand_address (FILE *file , rtx addr)
4239 register rtx base, index = 0;
4241 switch (GET_CODE (addr))
4243 case REG :
4244 fputs (reg_names[REGNO (addr)], file);
4245 break;
4246 case SYMBOL_REF :
4247 output_addr_const (file, addr);
4248 if (SYMBOL_REF_SMALL_P (addr))
4249 fprintf (file, "@sda");
4250 break;
4251 case PLUS :
4252 if (GET_CODE (XEXP (addr, 0)) == MULT)
4253 index = XEXP (XEXP (addr, 0), 0), base = XEXP (addr, 1);
4254 else if (CONST_INT_P (XEXP (addr, 0)))
4255 index = XEXP (addr, 0), base = XEXP (addr, 1);
4256 else
4257 base = XEXP (addr, 0), index = XEXP (addr, 1);
4259 gcc_assert (OBJECT_P (base));
4260 arc_print_operand_address (file, base);
4261 if (CONSTANT_P (base) && CONST_INT_P (index))
4262 fputc ('+', file);
4263 else
4264 fputc (',', file);
4265 gcc_assert (OBJECT_P (index));
4266 arc_print_operand_address (file, index);
4267 break;
4268 case CONST:
4270 rtx c = XEXP (addr, 0);
4272 if ((GET_CODE (c) == UNSPEC
4273 && (XINT (c, 1) == UNSPEC_TLS_OFF
4274 || XINT (c, 1) == UNSPEC_TLS_IE))
4275 || (GET_CODE (c) == PLUS
4276 && GET_CODE (XEXP (c, 0)) == UNSPEC
4277 && (XINT (XEXP (c, 0), 1) == UNSPEC_TLS_OFF
4278 || XINT (XEXP (c, 0), 1) == ARC_UNSPEC_GOTOFFPC)))
4280 arc_output_pic_addr_const (file, c, 0);
4281 break;
4283 gcc_assert (GET_CODE (c) == PLUS);
4284 gcc_assert (GET_CODE (XEXP (c, 0)) == SYMBOL_REF);
4285 gcc_assert (GET_CODE (XEXP (c, 1)) == CONST_INT);
4287 output_address (VOIDmode, XEXP (addr, 0));
4289 break;
4291 case PRE_INC :
4292 case PRE_DEC :
4293 /* We shouldn't get here as we've lost the mode of the memory object
4294 (which says how much to inc/dec by. */
4295 gcc_unreachable ();
4296 break;
4297 default :
4298 if (flag_pic)
4299 arc_output_pic_addr_const (file, addr, 0);
4300 else
4301 output_addr_const (file, addr);
4302 break;
4306 /* Conditional execution support.
4308 This is based on the ARM port but for now is much simpler.
4310 A finite state machine takes care of noticing whether or not instructions
4311 can be conditionally executed, and thus decrease execution time and code
4312 size by deleting branch instructions. The fsm is controlled by
4313 arc_ccfsm_advance (called by arc_final_prescan_insn), and controls the
4314 actions of PRINT_OPERAND. The patterns in the .md file for the branch
4315 insns also have a hand in this. */
4316 /* The way we leave dealing with non-anulled or annull-false delay slot
4317 insns to the consumer is awkward. */
4319 /* The state of the fsm controlling condition codes are:
4320 0: normal, do nothing special
4321 1: don't output this insn
4322 2: don't output this insn
4323 3: make insns conditional
4324 4: make insns conditional
4325 5: make insn conditional (only for outputting anulled delay slot insns)
4327 special value for cfun->machine->uid_ccfsm_state:
4328 6: return with but one insn before it since function start / call
4330 State transitions (state->state by whom, under what condition):
4331 0 -> 1 arc_ccfsm_advance, if insn is a conditional branch skipping over
4332 some instructions.
4333 0 -> 2 arc_ccfsm_advance, if insn is a conditional branch followed
4334 by zero or more non-jump insns and an unconditional branch with
4335 the same target label as the condbranch.
4336 1 -> 3 branch patterns, after having not output the conditional branch
4337 2 -> 4 branch patterns, after having not output the conditional branch
4338 0 -> 5 branch patterns, for anulled delay slot insn.
4339 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL, if the `target' label is reached
4340 (the target label has CODE_LABEL_NUMBER equal to
4341 arc_ccfsm_target_label).
4342 4 -> 0 arc_ccfsm_advance, if `target' unconditional branch is reached
4343 3 -> 1 arc_ccfsm_advance, finding an 'else' jump skipping over some insns.
4344 5 -> 0 when outputting the delay slot insn
4346 If the jump clobbers the conditions then we use states 2 and 4.
4348 A similar thing can be done with conditional return insns.
4350 We also handle separating branches from sets of the condition code.
4351 This is done here because knowledge of the ccfsm state is required,
4352 we may not be outputting the branch. */
4354 /* arc_final_prescan_insn calls arc_ccfsm_advance to adjust arc_ccfsm_current,
4355 before letting final output INSN. */
4357 static void
4358 arc_ccfsm_advance (rtx_insn *insn, struct arc_ccfsm *state)
4360 /* BODY will hold the body of INSN. */
4361 register rtx body;
4363 /* This will be 1 if trying to repeat the trick (ie: do the `else' part of
4364 an if/then/else), and things need to be reversed. */
4365 int reverse = 0;
4367 /* If we start with a return insn, we only succeed if we find another one. */
4368 int seeking_return = 0;
4370 /* START_INSN will hold the insn from where we start looking. This is the
4371 first insn after the following code_label if REVERSE is true. */
4372 rtx_insn *start_insn = insn;
4374 /* Type of the jump_insn. Brcc insns don't affect ccfsm changes,
4375 since they don't rely on a cmp preceding the. */
4376 enum attr_type jump_insn_type;
4378 /* Allow -mdebug-ccfsm to turn this off so we can see how well it does.
4379 We can't do this in macro FINAL_PRESCAN_INSN because its called from
4380 final_scan_insn which has `optimize' as a local. */
4381 if (optimize < 2 || TARGET_NO_COND_EXEC)
4382 return;
4384 /* Ignore notes and labels. */
4385 if (!INSN_P (insn))
4386 return;
4387 body = PATTERN (insn);
4388 /* If in state 4, check if the target branch is reached, in order to
4389 change back to state 0. */
4390 if (state->state == 4)
4392 if (insn == state->target_insn)
4394 state->target_insn = NULL;
4395 state->state = 0;
4397 return;
4400 /* If in state 3, it is possible to repeat the trick, if this insn is an
4401 unconditional branch to a label, and immediately following this branch
4402 is the previous target label which is only used once, and the label this
4403 branch jumps to is not too far off. Or in other words "we've done the
4404 `then' part, see if we can do the `else' part." */
4405 if (state->state == 3)
4407 if (simplejump_p (insn))
4409 start_insn = next_nonnote_insn (start_insn);
4410 if (GET_CODE (start_insn) == BARRIER)
4412 /* ??? Isn't this always a barrier? */
4413 start_insn = next_nonnote_insn (start_insn);
4415 if (GET_CODE (start_insn) == CODE_LABEL
4416 && CODE_LABEL_NUMBER (start_insn) == state->target_label
4417 && LABEL_NUSES (start_insn) == 1)
4418 reverse = TRUE;
4419 else
4420 return;
4422 else if (GET_CODE (body) == SIMPLE_RETURN)
4424 start_insn = next_nonnote_insn (start_insn);
4425 if (GET_CODE (start_insn) == BARRIER)
4426 start_insn = next_nonnote_insn (start_insn);
4427 if (GET_CODE (start_insn) == CODE_LABEL
4428 && CODE_LABEL_NUMBER (start_insn) == state->target_label
4429 && LABEL_NUSES (start_insn) == 1)
4431 reverse = TRUE;
4432 seeking_return = 1;
4434 else
4435 return;
4437 else
4438 return;
4441 if (GET_CODE (insn) != JUMP_INSN
4442 || GET_CODE (PATTERN (insn)) == ADDR_VEC
4443 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4444 return;
4446 /* We can't predicate BRCC or loop ends.
4447 Also, when generating PIC code, and considering a medium range call,
4448 we can't predicate the call. */
4449 jump_insn_type = get_attr_type (insn);
4450 if (jump_insn_type == TYPE_BRCC
4451 || jump_insn_type == TYPE_BRCC_NO_DELAY_SLOT
4452 || jump_insn_type == TYPE_LOOP_END
4453 || (jump_insn_type == TYPE_CALL && !get_attr_predicable (insn)))
4454 return;
4456 /* This jump might be paralleled with a clobber of the condition codes,
4457 the jump should always come first. */
4458 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4459 body = XVECEXP (body, 0, 0);
4461 if (reverse
4462 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
4463 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
4465 int insns_skipped = 0, fail = FALSE, succeed = FALSE;
4466 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
4467 int then_not_else = TRUE;
4468 /* Nonzero if next insn must be the target label. */
4469 int next_must_be_target_label_p;
4470 rtx_insn *this_insn = start_insn;
4471 rtx label = 0;
4473 /* Register the insn jumped to. */
4474 if (reverse)
4476 if (!seeking_return)
4477 label = XEXP (SET_SRC (body), 0);
4479 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
4480 label = XEXP (XEXP (SET_SRC (body), 1), 0);
4481 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
4483 label = XEXP (XEXP (SET_SRC (body), 2), 0);
4484 then_not_else = FALSE;
4486 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == SIMPLE_RETURN)
4487 seeking_return = 1;
4488 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == SIMPLE_RETURN)
4490 seeking_return = 1;
4491 then_not_else = FALSE;
4493 else
4494 gcc_unreachable ();
4496 /* If this is a non-annulled branch with a delay slot, there is
4497 no need to conditionalize the delay slot. */
4498 if ((GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) == SEQUENCE)
4499 && state->state == 0 && !INSN_ANNULLED_BRANCH_P (insn))
4501 this_insn = NEXT_INSN (this_insn);
4503 /* See how many insns this branch skips, and what kind of insns. If all
4504 insns are okay, and the label or unconditional branch to the same
4505 label is not too far away, succeed. */
4506 for (insns_skipped = 0, next_must_be_target_label_p = FALSE;
4507 !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED;
4508 insns_skipped++)
4510 rtx scanbody;
4512 this_insn = next_nonnote_insn (this_insn);
4513 if (!this_insn)
4514 break;
4516 if (next_must_be_target_label_p)
4518 if (GET_CODE (this_insn) == BARRIER)
4519 continue;
4520 if (GET_CODE (this_insn) == CODE_LABEL
4521 && this_insn == label)
4523 state->state = 1;
4524 succeed = TRUE;
4526 else
4527 fail = TRUE;
4528 break;
4531 switch (GET_CODE (this_insn))
4533 case CODE_LABEL:
4534 /* Succeed if it is the target label, otherwise fail since
4535 control falls in from somewhere else. */
4536 if (this_insn == label)
4538 state->state = 1;
4539 succeed = TRUE;
4541 else
4542 fail = TRUE;
4543 break;
4545 case BARRIER:
4546 /* Succeed if the following insn is the target label.
4547 Otherwise fail.
4548 If return insns are used then the last insn in a function
4549 will be a barrier. */
4550 next_must_be_target_label_p = TRUE;
4551 break;
4553 case CALL_INSN:
4554 /* Can handle a call insn if there are no insns after it.
4555 IE: The next "insn" is the target label. We don't have to
4556 worry about delay slots as such insns are SEQUENCE's inside
4557 INSN's. ??? It is possible to handle such insns though. */
4558 if (get_attr_cond (this_insn) == COND_CANUSE)
4559 next_must_be_target_label_p = TRUE;
4560 else
4561 fail = TRUE;
4562 break;
4564 case JUMP_INSN:
4565 scanbody = PATTERN (this_insn);
4567 /* If this is an unconditional branch to the same label, succeed.
4568 If it is to another label, do nothing. If it is conditional,
4569 fail. */
4570 /* ??? Probably, the test for the SET and the PC are
4571 unnecessary. */
4573 if (GET_CODE (scanbody) == SET
4574 && GET_CODE (SET_DEST (scanbody)) == PC)
4576 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
4577 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
4579 state->state = 2;
4580 succeed = TRUE;
4582 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
4583 fail = TRUE;
4584 else if (get_attr_cond (this_insn) != COND_CANUSE)
4585 fail = TRUE;
4587 else if (GET_CODE (scanbody) == SIMPLE_RETURN
4588 && seeking_return)
4590 state->state = 2;
4591 succeed = TRUE;
4593 else if (GET_CODE (scanbody) == PARALLEL)
4595 if (get_attr_cond (this_insn) != COND_CANUSE)
4596 fail = TRUE;
4598 break;
4600 case INSN:
4601 scanbody = PATTERN (this_insn);
4603 /* We can only do this with insns that can use the condition
4604 codes (and don't set them). */
4605 if (GET_CODE (scanbody) == SET
4606 || GET_CODE (scanbody) == PARALLEL)
4608 if (get_attr_cond (this_insn) != COND_CANUSE)
4609 fail = TRUE;
4611 /* We can't handle other insns like sequences. */
4612 else
4613 fail = TRUE;
4614 break;
4616 default:
4617 break;
4621 if (succeed)
4623 if ((!seeking_return) && (state->state == 1 || reverse))
4624 state->target_label = CODE_LABEL_NUMBER (label);
4625 else if (seeking_return || state->state == 2)
4627 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
4629 this_insn = next_nonnote_insn (this_insn);
4631 gcc_assert (!this_insn ||
4632 (GET_CODE (this_insn) != BARRIER
4633 && GET_CODE (this_insn) != CODE_LABEL));
4635 if (!this_insn)
4637 /* Oh dear! we ran off the end, give up. */
4638 extract_insn_cached (insn);
4639 state->state = 0;
4640 state->target_insn = NULL;
4641 return;
4643 state->target_insn = this_insn;
4645 else
4646 gcc_unreachable ();
4648 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
4649 what it was. */
4650 if (!reverse)
4652 state->cond = XEXP (SET_SRC (body), 0);
4653 state->cc = get_arc_condition_code (XEXP (SET_SRC (body), 0));
4656 if (reverse || then_not_else)
4657 state->cc = ARC_INVERSE_CONDITION_CODE (state->cc);
4660 /* Restore recog_operand. Getting the attributes of other insns can
4661 destroy this array, but final.c assumes that it remains intact
4662 across this call; since the insn has been recognized already we
4663 call insn_extract direct. */
4664 extract_insn_cached (insn);
4668 /* Record that we are currently outputting label NUM with prefix PREFIX.
4669 It it's the label we're looking for, reset the ccfsm machinery.
4671 Called from ASM_OUTPUT_INTERNAL_LABEL. */
4673 static void
4674 arc_ccfsm_at_label (const char *prefix, int num, struct arc_ccfsm *state)
4676 if (state->state == 3 && state->target_label == num
4677 && !strcmp (prefix, "L"))
4679 state->state = 0;
4680 state->target_insn = NULL;
4684 /* We are considering a conditional branch with the condition COND.
4685 Check if we want to conditionalize a delay slot insn, and if so modify
4686 the ccfsm state accordingly.
4687 REVERSE says branch will branch when the condition is false. */
4688 void
4689 arc_ccfsm_record_condition (rtx cond, bool reverse, rtx_insn *jump,
4690 struct arc_ccfsm *state)
4692 rtx_insn *seq_insn = NEXT_INSN (PREV_INSN (jump));
4693 if (!state)
4694 state = &arc_ccfsm_current;
4696 gcc_assert (state->state == 0);
4697 if (seq_insn != jump)
4699 rtx insn = XVECEXP (PATTERN (seq_insn), 0, 1);
4701 if (!as_a<rtx_insn *> (insn)->deleted ()
4702 && INSN_ANNULLED_BRANCH_P (jump)
4703 && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (insn)))
4705 state->cond = cond;
4706 state->cc = get_arc_condition_code (cond);
4707 if (!reverse)
4708 arc_ccfsm_current.cc
4709 = ARC_INVERSE_CONDITION_CODE (state->cc);
4710 rtx pat = PATTERN (insn);
4711 if (GET_CODE (pat) == COND_EXEC)
4712 gcc_assert ((INSN_FROM_TARGET_P (insn)
4713 ? ARC_INVERSE_CONDITION_CODE (state->cc) : state->cc)
4714 == get_arc_condition_code (XEXP (pat, 0)));
4715 else
4716 state->state = 5;
4721 /* Update *STATE as we would when we emit INSN. */
4723 static void
4724 arc_ccfsm_post_advance (rtx_insn *insn, struct arc_ccfsm *state)
4726 enum attr_type type;
4728 if (LABEL_P (insn))
4729 arc_ccfsm_at_label ("L", CODE_LABEL_NUMBER (insn), state);
4730 else if (JUMP_P (insn)
4731 && GET_CODE (PATTERN (insn)) != ADDR_VEC
4732 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
4733 && ((type = get_attr_type (insn)) == TYPE_BRANCH
4734 || ((type == TYPE_UNCOND_BRANCH
4735 || type == TYPE_RETURN)
4736 && ARC_CCFSM_BRANCH_DELETED_P (state))))
4738 if (ARC_CCFSM_BRANCH_DELETED_P (state))
4739 ARC_CCFSM_RECORD_BRANCH_DELETED (state);
4740 else
4742 rtx src = SET_SRC (PATTERN (insn));
4743 arc_ccfsm_record_condition (XEXP (src, 0), XEXP (src, 1) == pc_rtx,
4744 insn, state);
4747 else if (arc_ccfsm_current.state == 5)
4748 arc_ccfsm_current.state = 0;
4751 /* Return true if the current insn, which is a conditional branch, is to be
4752 deleted. */
4754 bool
4755 arc_ccfsm_branch_deleted_p (void)
4757 return ARC_CCFSM_BRANCH_DELETED_P (&arc_ccfsm_current);
4760 /* Record a branch isn't output because subsequent insns can be
4761 conditionalized. */
4763 void
4764 arc_ccfsm_record_branch_deleted (void)
4766 ARC_CCFSM_RECORD_BRANCH_DELETED (&arc_ccfsm_current);
4769 /* During insn output, indicate if the current insn is predicated. */
4771 bool
4772 arc_ccfsm_cond_exec_p (void)
4774 return (cfun->machine->prescan_initialized
4775 && ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current));
4778 /* Like next_active_insn, but return NULL if we find an ADDR_(DIFF_)VEC,
4779 and look inside SEQUENCEs. */
4781 static rtx_insn *
4782 arc_next_active_insn (rtx_insn *insn, struct arc_ccfsm *statep)
4784 rtx pat;
4788 if (statep)
4789 arc_ccfsm_post_advance (insn, statep);
4790 insn = NEXT_INSN (insn);
4791 if (!insn || BARRIER_P (insn))
4792 return NULL;
4793 if (statep)
4794 arc_ccfsm_advance (insn, statep);
4796 while (NOTE_P (insn)
4797 || (cfun->machine->arc_reorg_started
4798 && LABEL_P (insn) && !label_to_alignment (insn))
4799 || (NONJUMP_INSN_P (insn)
4800 && (GET_CODE (PATTERN (insn)) == USE
4801 || GET_CODE (PATTERN (insn)) == CLOBBER)));
4802 if (!LABEL_P (insn))
4804 gcc_assert (INSN_P (insn));
4805 pat = PATTERN (insn);
4806 if (GET_CODE (pat) == ADDR_VEC || GET_CODE (pat) == ADDR_DIFF_VEC)
4807 return NULL;
4808 if (GET_CODE (pat) == SEQUENCE)
4809 return as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4811 return insn;
4814 /* When deciding if an insn should be output short, we want to know something
4815 about the following insns:
4816 - if another insn follows which we know we can output as a short insn
4817 before an alignment-sensitive point, we can output this insn short:
4818 the decision about the eventual alignment can be postponed.
4819 - if a to-be-aligned label comes next, we should output this insn such
4820 as to get / preserve 4-byte alignment.
4821 - if a likely branch without delay slot insn, or a call with an immediately
4822 following short insn comes next, we should out output this insn such as to
4823 get / preserve 2 mod 4 unalignment.
4824 - do the same for a not completely unlikely branch with a short insn
4825 following before any other branch / label.
4826 - in order to decide if we are actually looking at a branch, we need to
4827 call arc_ccfsm_advance.
4828 - in order to decide if we are looking at a short insn, we should know
4829 if it is conditionalized. To a first order of approximation this is
4830 the case if the state from arc_ccfsm_advance from before this insn
4831 indicates the insn is conditionalized. However, a further refinement
4832 could be to not conditionalize an insn if the destination register(s)
4833 is/are dead in the non-executed case. */
4834 /* Return non-zero if INSN should be output as a short insn. UNALIGN is
4835 zero if the current insn is aligned to a 4-byte-boundary, two otherwise.
4836 If CHECK_ATTR is greater than 0, check the iscompact attribute first. */
4839 arc_verify_short (rtx_insn *insn, int, int check_attr)
4841 enum attr_iscompact iscompact;
4842 struct machine_function *machine;
4844 if (check_attr > 0)
4846 iscompact = get_attr_iscompact (insn);
4847 if (iscompact == ISCOMPACT_FALSE)
4848 return 0;
4850 machine = cfun->machine;
4852 if (machine->force_short_suffix >= 0)
4853 return machine->force_short_suffix;
4855 return (get_attr_length (insn) & 2) != 0;
4858 /* When outputting an instruction (alternative) that can potentially be short,
4859 output the short suffix if the insn is in fact short, and update
4860 cfun->machine->unalign accordingly. */
4862 static void
4863 output_short_suffix (FILE *file)
4865 rtx_insn *insn = current_output_insn;
4867 if (arc_verify_short (insn, cfun->machine->unalign, 1))
4869 fprintf (file, "_s");
4870 cfun->machine->unalign ^= 2;
4872 /* Restore recog_operand. */
4873 extract_insn_cached (insn);
4876 /* Implement FINAL_PRESCAN_INSN. */
4878 void
4879 arc_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
4880 int noperands ATTRIBUTE_UNUSED)
4882 if (TARGET_DUMPISIZE)
4883 fprintf (asm_out_file, "\n; at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
4885 /* Output a nop if necessary to prevent a hazard.
4886 Don't do this for delay slots: inserting a nop would
4887 alter semantics, and the only time we would find a hazard is for a
4888 call function result - and in that case, the hazard is spurious to
4889 start with. */
4890 if (PREV_INSN (insn)
4891 && PREV_INSN (NEXT_INSN (insn)) == insn
4892 && arc_hazard (prev_real_insn (insn), insn))
4894 current_output_insn =
4895 emit_insn_before (gen_nop (), NEXT_INSN (PREV_INSN (insn)));
4896 final_scan_insn (current_output_insn, asm_out_file, optimize, 1, NULL);
4897 current_output_insn = insn;
4899 /* Restore extraction data which might have been clobbered by arc_hazard. */
4900 extract_constrain_insn_cached (insn);
4902 if (!cfun->machine->prescan_initialized)
4904 /* Clear lingering state from branch shortening. */
4905 memset (&arc_ccfsm_current, 0, sizeof arc_ccfsm_current);
4906 cfun->machine->prescan_initialized = 1;
4908 arc_ccfsm_advance (insn, &arc_ccfsm_current);
4910 cfun->machine->size_reason = 0;
4913 /* Given FROM and TO register numbers, say whether this elimination is allowed.
4914 Frame pointer elimination is automatically handled.
4916 All eliminations are permissible. If we need a frame
4917 pointer, we must eliminate ARG_POINTER_REGNUM into
4918 FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
4920 static bool
4921 arc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
4923 return ((to == FRAME_POINTER_REGNUM) || !arc_frame_pointer_needed ());
4926 /* Define the offset between two registers, one to be eliminated, and
4927 the other its replacement, at the start of a routine. */
4930 arc_initial_elimination_offset (int from, int to)
4932 if (!cfun->machine->frame_info.initialized)
4933 arc_compute_frame_size ();
4935 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
4937 return (cfun->machine->frame_info.extra_size
4938 + cfun->machine->frame_info.reg_size);
4941 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
4943 return (cfun->machine->frame_info.total_size
4944 - cfun->machine->frame_info.pretend_size);
4947 if ((from == FRAME_POINTER_REGNUM) && (to == STACK_POINTER_REGNUM))
4949 return (cfun->machine->frame_info.total_size
4950 - (cfun->machine->frame_info.pretend_size
4951 + cfun->machine->frame_info.extra_size
4952 + cfun->machine->frame_info.reg_size));
4955 gcc_unreachable ();
4958 static bool
4959 arc_frame_pointer_required (void)
4961 return cfun->calls_alloca || crtl->calls_eh_return;
4965 /* Return the destination address of a branch. */
4968 branch_dest (rtx branch)
4970 rtx pat = PATTERN (branch);
4971 rtx dest = (GET_CODE (pat) == PARALLEL
4972 ? SET_SRC (XVECEXP (pat, 0, 0)) : SET_SRC (pat));
4973 int dest_uid;
4975 if (GET_CODE (dest) == IF_THEN_ELSE)
4976 dest = XEXP (dest, XEXP (dest, 1) == pc_rtx ? 2 : 1);
4978 dest = XEXP (dest, 0);
4979 dest_uid = INSN_UID (dest);
4981 return INSN_ADDRESSES (dest_uid);
4985 /* Implement TARGET_ENCODE_SECTION_INFO hook. */
4987 static void
4988 arc_encode_section_info (tree decl, rtx rtl, int first)
4990 /* For sdata, SYMBOL_FLAG_LOCAL and SYMBOL_FLAG_FUNCTION.
4991 This clears machine specific flags, so has to come first. */
4992 default_encode_section_info (decl, rtl, first);
4994 /* Check if it is a function, and whether it has the
4995 [long/medium/short]_call attribute specified. */
4996 if (TREE_CODE (decl) == FUNCTION_DECL)
4998 rtx symbol = XEXP (rtl, 0);
4999 int flags = SYMBOL_REF_FLAGS (symbol);
5001 tree attr = (TREE_TYPE (decl) != error_mark_node
5002 ? TYPE_ATTRIBUTES (TREE_TYPE (decl)) : NULL_TREE);
5003 tree long_call_attr = lookup_attribute ("long_call", attr);
5004 tree medium_call_attr = lookup_attribute ("medium_call", attr);
5005 tree short_call_attr = lookup_attribute ("short_call", attr);
5007 if (long_call_attr != NULL_TREE)
5008 flags |= SYMBOL_FLAG_LONG_CALL;
5009 else if (medium_call_attr != NULL_TREE)
5010 flags |= SYMBOL_FLAG_MEDIUM_CALL;
5011 else if (short_call_attr != NULL_TREE)
5012 flags |= SYMBOL_FLAG_SHORT_CALL;
5014 SYMBOL_REF_FLAGS (symbol) = flags;
5016 else if (TREE_CODE (decl) == VAR_DECL)
5018 rtx symbol = XEXP (rtl, 0);
5020 tree attr = (TREE_TYPE (decl) != error_mark_node
5021 ? DECL_ATTRIBUTES (decl) : NULL_TREE);
5023 tree sec_attr = lookup_attribute ("section", attr);
5024 if (sec_attr)
5026 const char *sec_name
5027 = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (sec_attr)));
5028 if (strcmp (sec_name, ".cmem") == 0
5029 || strcmp (sec_name, ".cmem_shared") == 0
5030 || strcmp (sec_name, ".cmem_private") == 0)
5031 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_CMEM;
5036 /* This is how to output a definition of an internal numbered label where
5037 PREFIX is the class of label and NUM is the number within the class. */
5039 static void arc_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
5041 if (cfun)
5042 arc_ccfsm_at_label (prefix, labelno, &arc_ccfsm_current);
5043 default_internal_label (stream, prefix, labelno);
5046 /* Set the cpu type and print out other fancy things,
5047 at the top of the file. */
5049 static void arc_file_start (void)
5051 default_file_start ();
5052 fprintf (asm_out_file, "\t.cpu %s\n", arc_cpu_string);
5055 /* Cost functions. */
5057 /* Compute a (partial) cost for rtx X. Return true if the complete
5058 cost has been computed, and false if subexpressions should be
5059 scanned. In either case, *TOTAL contains the cost result. */
5061 static bool
5062 arc_rtx_costs (rtx x, machine_mode mode, int outer_code,
5063 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
5065 int code = GET_CODE (x);
5067 switch (code)
5069 /* Small integers are as cheap as registers. */
5070 case CONST_INT:
5072 bool nolimm = false; /* Can we do without long immediate? */
5073 bool fast = false; /* Is the result available immediately? */
5074 bool condexec = false; /* Does this allow conditiobnal execution? */
5075 bool compact = false; /* Is a 16 bit opcode available? */
5076 /* CONDEXEC also implies that we can have an unconditional
5077 3-address operation. */
5079 nolimm = compact = condexec = false;
5080 if (UNSIGNED_INT6 (INTVAL (x)))
5081 nolimm = condexec = compact = true;
5082 else
5084 if (SMALL_INT (INTVAL (x)))
5085 nolimm = fast = true;
5086 switch (outer_code)
5088 case AND: /* bclr, bmsk, ext[bw] */
5089 if (satisfies_constraint_Ccp (x) /* bclr */
5090 || satisfies_constraint_C1p (x) /* bmsk */)
5091 nolimm = fast = condexec = compact = true;
5092 break;
5093 case IOR: /* bset */
5094 if (satisfies_constraint_C0p (x)) /* bset */
5095 nolimm = fast = condexec = compact = true;
5096 break;
5097 case XOR:
5098 if (satisfies_constraint_C0p (x)) /* bxor */
5099 nolimm = fast = condexec = true;
5100 break;
5101 case SET:
5102 if (satisfies_constraint_Crr (x)) /* ror b,u6 */
5103 nolimm = true;
5104 default:
5105 break;
5108 /* FIXME: Add target options to attach a small cost if
5109 condexec / compact is not true. */
5110 if (nolimm)
5112 *total = 0;
5113 return true;
5116 /* FALLTHRU */
5118 /* 4 byte values can be fetched as immediate constants -
5119 let's give that the cost of an extra insn. */
5120 case CONST:
5121 case LABEL_REF:
5122 case SYMBOL_REF:
5123 *total = COSTS_N_INSNS (1);
5124 return true;
5126 case CONST_DOUBLE:
5128 rtx first, second;
5130 if (TARGET_DPFP)
5132 *total = COSTS_N_INSNS (1);
5133 return true;
5135 split_double (x, &first, &second);
5136 *total = COSTS_N_INSNS (!SMALL_INT (INTVAL (first))
5137 + !SMALL_INT (INTVAL (second)));
5138 return true;
5141 /* Encourage synth_mult to find a synthetic multiply when reasonable.
5142 If we need more than 12 insns to do a multiply, then go out-of-line,
5143 since the call overhead will be < 10% of the cost of the multiply. */
5144 case ASHIFT:
5145 case ASHIFTRT:
5146 case LSHIFTRT:
5147 if (TARGET_BARREL_SHIFTER)
5149 /* If we want to shift a constant, we need a LIMM. */
5150 /* ??? when the optimizers want to know if a constant should be
5151 hoisted, they ask for the cost of the constant. OUTER_CODE is
5152 insufficient context for shifts since we don't know which operand
5153 we are looking at. */
5154 if (CONSTANT_P (XEXP (x, 0)))
5156 *total += (COSTS_N_INSNS (2)
5157 + rtx_cost (XEXP (x, 1), mode, (enum rtx_code) code,
5158 0, speed));
5159 return true;
5161 *total = COSTS_N_INSNS (1);
5163 else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5164 *total = COSTS_N_INSNS (16);
5165 else
5167 *total = COSTS_N_INSNS (INTVAL (XEXP ((x), 1)));
5168 /* ??? want_to_gcse_p can throw negative shift counts at us,
5169 and then panics when it gets a negative cost as result.
5170 Seen for gcc.c-torture/compile/20020710-1.c -Os . */
5171 if (*total < 0)
5172 *total = 0;
5174 return false;
5176 case DIV:
5177 case UDIV:
5178 if (speed)
5179 *total = COSTS_N_INSNS(30);
5180 else
5181 *total = COSTS_N_INSNS(1);
5182 return false;
5184 case MULT:
5185 if ((TARGET_DPFP && GET_MODE (x) == DFmode))
5186 *total = COSTS_N_INSNS (1);
5187 else if (speed)
5188 *total= arc_multcost;
5189 /* We do not want synth_mult sequences when optimizing
5190 for size. */
5191 else if (TARGET_MUL64_SET || TARGET_ARC700_MPY)
5192 *total = COSTS_N_INSNS (1);
5193 else
5194 *total = COSTS_N_INSNS (2);
5195 return false;
5196 case PLUS:
5197 if ((GET_CODE (XEXP (x, 0)) == ASHIFT
5198 && _1_2_3_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
5199 || (GET_CODE (XEXP (x, 0)) == MULT
5200 && _2_4_8_operand (XEXP (XEXP (x, 0), 1), VOIDmode)))
5202 *total += (rtx_cost (XEXP (x, 1), mode, PLUS, 0, speed)
5203 + rtx_cost (XEXP (XEXP (x, 0), 0), mode, PLUS, 1, speed));
5204 return true;
5206 return false;
5207 case MINUS:
5208 if ((GET_CODE (XEXP (x, 1)) == ASHIFT
5209 && _1_2_3_operand (XEXP (XEXP (x, 1), 1), VOIDmode))
5210 || (GET_CODE (XEXP (x, 1)) == MULT
5211 && _2_4_8_operand (XEXP (XEXP (x, 1), 1), VOIDmode)))
5213 *total += (rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed)
5214 + rtx_cost (XEXP (XEXP (x, 1), 0), mode, PLUS, 1, speed));
5215 return true;
5217 return false;
5218 case COMPARE:
5220 rtx op0 = XEXP (x, 0);
5221 rtx op1 = XEXP (x, 1);
5223 if (GET_CODE (op0) == ZERO_EXTRACT && op1 == const0_rtx
5224 && XEXP (op0, 1) == const1_rtx)
5226 /* btst / bbit0 / bbit1:
5227 Small integers and registers are free; everything else can
5228 be put in a register. */
5229 mode = GET_MODE (XEXP (op0, 0));
5230 *total = (rtx_cost (XEXP (op0, 0), mode, SET, 1, speed)
5231 + rtx_cost (XEXP (op0, 2), mode, SET, 1, speed));
5232 return true;
5234 if (GET_CODE (op0) == AND && op1 == const0_rtx
5235 && satisfies_constraint_C1p (XEXP (op0, 1)))
5237 /* bmsk.f */
5238 *total = rtx_cost (XEXP (op0, 0), VOIDmode, SET, 1, speed);
5239 return true;
5241 /* add.f */
5242 if (GET_CODE (op1) == NEG)
5244 /* op0 might be constant, the inside of op1 is rather
5245 unlikely to be so. So swapping the operands might lower
5246 the cost. */
5247 mode = GET_MODE (op0);
5248 *total = (rtx_cost (op0, mode, PLUS, 1, speed)
5249 + rtx_cost (XEXP (op1, 0), mode, PLUS, 0, speed));
5251 return false;
5253 case EQ: case NE:
5254 if (outer_code == IF_THEN_ELSE
5255 && GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
5256 && XEXP (x, 1) == const0_rtx
5257 && XEXP (XEXP (x, 0), 1) == const1_rtx)
5259 /* btst / bbit0 / bbit1:
5260 Small integers and registers are free; everything else can
5261 be put in a register. */
5262 rtx op0 = XEXP (x, 0);
5264 mode = GET_MODE (XEXP (op0, 0));
5265 *total = (rtx_cost (XEXP (op0, 0), mode, SET, 1, speed)
5266 + rtx_cost (XEXP (op0, 2), mode, SET, 1, speed));
5267 return true;
5269 /* Fall through. */
5270 /* scc_insn expands into two insns. */
5271 case GTU: case GEU: case LEU:
5272 if (mode == SImode)
5273 *total += COSTS_N_INSNS (1);
5274 return false;
5275 case LTU: /* might use adc. */
5276 if (mode == SImode)
5277 *total += COSTS_N_INSNS (1) - 1;
5278 return false;
5279 default:
5280 return false;
5284 /* Return true if ADDR is a valid pic address.
5285 A valid pic address on arc should look like
5286 const (unspec (SYMBOL_REF/LABEL) (ARC_UNSPEC_GOTOFF/ARC_UNSPEC_GOT)) */
5288 bool
5289 arc_legitimate_pic_addr_p (rtx addr)
5291 if (GET_CODE (addr) != CONST)
5292 return false;
5294 addr = XEXP (addr, 0);
5297 if (GET_CODE (addr) == PLUS)
5299 if (GET_CODE (XEXP (addr, 1)) != CONST_INT)
5300 return false;
5301 addr = XEXP (addr, 0);
5304 if (GET_CODE (addr) != UNSPEC
5305 || XVECLEN (addr, 0) != 1)
5306 return false;
5308 /* Must be one of @GOT, @GOTOFF, @GOTOFFPC, @tlsgd, tlsie. */
5309 if (XINT (addr, 1) != ARC_UNSPEC_GOT
5310 && XINT (addr, 1) != ARC_UNSPEC_GOTOFF
5311 && XINT (addr, 1) != ARC_UNSPEC_GOTOFFPC
5312 && XINT (addr, 1) != UNSPEC_TLS_GD
5313 && XINT (addr, 1) != UNSPEC_TLS_IE)
5314 return false;
5316 if (GET_CODE (XVECEXP (addr, 0, 0)) != SYMBOL_REF
5317 && GET_CODE (XVECEXP (addr, 0, 0)) != LABEL_REF)
5318 return false;
5320 return true;
5325 /* Return true if OP contains a symbol reference. */
5327 static bool
5328 symbolic_reference_mentioned_p (rtx op)
5330 register const char *fmt;
5331 register int i;
5333 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5334 return true;
5336 fmt = GET_RTX_FORMAT (GET_CODE (op));
5337 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5339 if (fmt[i] == 'E')
5341 register int j;
5343 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5344 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5345 return true;
5348 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5349 return true;
5352 return false;
5355 /* Return true if OP contains a SYMBOL_REF that is not wrapped in an unspec.
5356 If SKIP_LOCAL is true, skip symbols that bind locally.
5357 This is used further down in this file, and, without SKIP_LOCAL,
5358 in the addsi3 / subsi3 expanders when generating PIC code. */
5360 bool
5361 arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local)
5363 register const char *fmt;
5364 register int i;
5366 if (GET_CODE(op) == UNSPEC)
5367 return false;
5369 if (GET_CODE (op) == SYMBOL_REF)
5371 if (SYMBOL_REF_TLS_MODEL (op))
5372 return true;
5373 if (!flag_pic)
5374 return false;
5375 tree decl = SYMBOL_REF_DECL (op);
5376 return !skip_local || !decl || !default_binds_local_p (decl);
5379 fmt = GET_RTX_FORMAT (GET_CODE (op));
5380 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5382 if (fmt[i] == 'E')
5384 register int j;
5386 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5387 if (arc_raw_symbolic_reference_mentioned_p (XVECEXP (op, i, j),
5388 skip_local))
5389 return true;
5392 else if (fmt[i] == 'e'
5393 && arc_raw_symbolic_reference_mentioned_p (XEXP (op, i),
5394 skip_local))
5395 return true;
5398 return false;
5401 /* Get the thread pointer. */
5403 static rtx
5404 arc_get_tp (void)
5406 /* If arc_tp_regno has been set, we can use that hard register
5407 directly as a base register. */
5408 if (arc_tp_regno != -1)
5409 return gen_rtx_REG (Pmode, arc_tp_regno);
5411 /* Otherwise, call __read_tp. Copy the result to a pseudo to avoid
5412 conflicts with function arguments / results. */
5413 rtx reg = gen_reg_rtx (Pmode);
5414 emit_insn (gen_tls_load_tp_soft ());
5415 emit_move_insn (reg, gen_rtx_REG (Pmode, R0_REG));
5416 return reg;
5419 /* Helper to be used by TLS Global dynamic model. */
5421 static rtx
5422 arc_emit_call_tls_get_addr (rtx sym, int reloc, rtx eqv)
5424 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
5425 rtx call_fusage = NULL_RTX;
5427 start_sequence ();
5429 rtx x = arc_unspec_offset (sym, reloc);
5430 emit_move_insn (r0, x);
5431 use_reg (&call_fusage, r0);
5433 gcc_assert (reloc == UNSPEC_TLS_GD);
5434 rtx call_insn = emit_call_insn (gen_tls_gd_get_addr (sym));
5435 /* Should we set RTL_CONST_CALL_P? We read memory, but not in a
5436 way that the application should care. */
5437 RTL_PURE_CALL_P (call_insn) = 1;
5438 add_function_usage_to (call_insn, call_fusage);
5440 rtx_insn *insns = get_insns ();
5441 end_sequence ();
5443 rtx dest = gen_reg_rtx (Pmode);
5444 emit_libcall_block (insns, dest, r0, eqv);
5445 return dest;
5448 #define DTPOFF_ZERO_SYM ".tdata"
5450 /* Return a legitimized address for ADDR,
5451 which is a SYMBOL_REF with tls_model MODEL. */
5453 static rtx
5454 arc_legitimize_tls_address (rtx addr, enum tls_model model)
5456 if (!flag_pic && model == TLS_MODEL_LOCAL_DYNAMIC)
5457 model = TLS_MODEL_LOCAL_EXEC;
5459 switch (model)
5461 case TLS_MODEL_LOCAL_DYNAMIC:
5462 rtx base;
5463 tree decl;
5464 const char *base_name;
5465 rtvec v;
5467 decl = SYMBOL_REF_DECL (addr);
5468 base_name = DTPOFF_ZERO_SYM;
5469 if (decl && bss_initializer_p (decl))
5470 base_name = ".tbss";
5472 base = gen_rtx_SYMBOL_REF (Pmode, base_name);
5473 if (strcmp (base_name, DTPOFF_ZERO_SYM) == 0)
5475 if (!flag_pic)
5476 goto local_exec;
5477 v = gen_rtvec (1, addr);
5479 else
5480 v = gen_rtvec (2, addr, base);
5481 addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_TLS_OFF);
5482 addr = gen_rtx_CONST (Pmode, addr);
5483 base = arc_legitimize_tls_address (base, TLS_MODEL_GLOBAL_DYNAMIC);
5484 return gen_rtx_PLUS (Pmode, force_reg (Pmode, base), addr);
5486 case TLS_MODEL_GLOBAL_DYNAMIC:
5487 return arc_emit_call_tls_get_addr (addr, UNSPEC_TLS_GD, addr);
5489 case TLS_MODEL_INITIAL_EXEC:
5490 addr = arc_unspec_offset (addr, UNSPEC_TLS_IE);
5491 addr = copy_to_mode_reg (Pmode, gen_const_mem (Pmode, addr));
5492 return gen_rtx_PLUS (Pmode, arc_get_tp (), addr);
5494 case TLS_MODEL_LOCAL_EXEC:
5495 local_exec:
5496 addr = arc_unspec_offset (addr, UNSPEC_TLS_OFF);
5497 return gen_rtx_PLUS (Pmode, arc_get_tp (), addr);
5498 default:
5499 gcc_unreachable ();
5503 /* Legitimize a pic address reference in ORIG.
5504 The return value is the legitimated address.
5505 If OLDX is non-zero, it is the target to assign the address to first. */
5507 static rtx
5508 arc_legitimize_pic_address (rtx orig, rtx oldx)
5510 rtx addr = orig;
5511 rtx pat = orig;
5512 rtx base;
5514 if (oldx == orig)
5515 oldx = NULL;
5517 if (GET_CODE (addr) == LABEL_REF)
5518 ; /* Do nothing. */
5519 else if (GET_CODE (addr) == SYMBOL_REF)
5521 enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
5522 if (model != 0)
5523 return arc_legitimize_tls_address (addr, model);
5524 else if (!flag_pic)
5525 return orig;
5526 else if (CONSTANT_POOL_ADDRESS_P (addr) || SYMBOL_REF_LOCAL_P (addr))
5527 return arc_unspec_offset (addr, ARC_UNSPEC_GOTOFFPC);
5529 /* This symbol must be referenced via a load from the Global
5530 Offset Table (@GOTPC). */
5531 pat = arc_unspec_offset (addr, ARC_UNSPEC_GOT);
5532 pat = gen_const_mem (Pmode, pat);
5534 if (oldx == NULL)
5535 oldx = gen_reg_rtx (Pmode);
5537 emit_move_insn (oldx, pat);
5538 pat = oldx;
5540 else
5542 if (GET_CODE (addr) == CONST)
5544 addr = XEXP (addr, 0);
5545 if (GET_CODE (addr) == UNSPEC)
5547 /* Check that the unspec is one of the ones we generate? */
5548 return orig;
5550 /* fwprop is placing in the REG_EQUIV notes constant pic
5551 unspecs expressions. Then, loop may use these notes for
5552 optimizations resulting in complex patterns that are not
5553 supported by the current implementation. The following
5554 two if-cases are simplifying the complex patters to
5555 simpler ones. */
5556 else if (GET_CODE (addr) == MINUS)
5558 rtx op0 = XEXP (addr, 0);
5559 rtx op1 = XEXP (addr, 1);
5560 gcc_assert (oldx);
5561 gcc_assert (GET_CODE (op1) == UNSPEC);
5563 emit_move_insn (oldx,
5564 gen_rtx_CONST (SImode,
5565 arc_legitimize_pic_address (op1,
5566 NULL_RTX)));
5567 emit_insn (gen_rtx_SET (oldx, gen_rtx_MINUS (SImode, op0, oldx)));
5568 return oldx;
5571 else if (GET_CODE (addr) != PLUS)
5573 rtx tmp = XEXP (addr, 0);
5574 enum rtx_code code = GET_CODE (addr);
5576 /* It only works for UNARY operations. */
5577 gcc_assert (UNARY_P (addr));
5578 gcc_assert (GET_CODE (tmp) == UNSPEC);
5579 gcc_assert (oldx);
5581 emit_move_insn
5582 (oldx,
5583 gen_rtx_CONST (SImode,
5584 arc_legitimize_pic_address (tmp,
5585 NULL_RTX)));
5587 emit_insn (gen_rtx_SET (oldx,
5588 gen_rtx_fmt_ee (code, SImode,
5589 oldx, const0_rtx)));
5591 return oldx;
5593 else
5595 gcc_assert (GET_CODE (addr) == PLUS);
5596 if (GET_CODE (XEXP (addr, 0)) == UNSPEC)
5597 return orig;
5601 if (GET_CODE (addr) == PLUS)
5603 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5605 base = arc_legitimize_pic_address (op0, oldx);
5606 pat = arc_legitimize_pic_address (op1,
5607 base == oldx ? NULL_RTX : oldx);
5609 if (base == op0 && pat == op1)
5610 return orig;
5612 if (GET_CODE (pat) == CONST_INT)
5613 pat = plus_constant (Pmode, base, INTVAL (pat));
5614 else
5616 if (GET_CODE (pat) == PLUS && CONSTANT_P (XEXP (pat, 1)))
5618 base = gen_rtx_PLUS (Pmode, base, XEXP (pat, 0));
5619 pat = XEXP (pat, 1);
5621 pat = gen_rtx_PLUS (Pmode, base, pat);
5626 return pat;
5629 /* Output address constant X to FILE, taking PIC into account. */
5631 static void
5632 arc_output_pic_addr_const (FILE * file, rtx x, int code)
5634 char buf[256];
5636 restart:
5637 switch (GET_CODE (x))
5639 case PC:
5640 if (flag_pic)
5641 putc ('.', file);
5642 else
5643 gcc_unreachable ();
5644 break;
5646 case SYMBOL_REF:
5647 output_addr_const (file, x);
5649 /* Local functions do not get references through the PLT. */
5650 if (code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5651 fputs ("@plt", file);
5652 break;
5654 case LABEL_REF:
5655 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
5656 assemble_name (file, buf);
5657 break;
5659 case CODE_LABEL:
5660 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5661 assemble_name (file, buf);
5662 break;
5664 case CONST_INT:
5665 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5666 break;
5668 case CONST:
5669 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5670 break;
5672 case CONST_DOUBLE:
5673 if (GET_MODE (x) == VOIDmode)
5675 /* We can use %d if the number is one word and positive. */
5676 if (CONST_DOUBLE_HIGH (x))
5677 fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
5678 CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
5679 else if (CONST_DOUBLE_LOW (x) < 0)
5680 fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
5681 else
5682 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5684 else
5685 /* We can't handle floating point constants;
5686 PRINT_OPERAND must handle them. */
5687 output_operand_lossage ("floating constant misused");
5688 break;
5690 case PLUS:
5691 /* FIXME: Not needed here. */
5692 /* Some assemblers need integer constants to appear last (eg masm). */
5693 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5695 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5696 fprintf (file, "+");
5697 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5699 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5701 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5702 if (INTVAL (XEXP (x, 1)) >= 0)
5703 fprintf (file, "+");
5704 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5706 else
5707 gcc_unreachable();
5708 break;
5710 case MINUS:
5711 /* Avoid outputting things like x-x or x+5-x,
5712 since some assemblers can't handle that. */
5713 x = simplify_subtraction (x);
5714 if (GET_CODE (x) != MINUS)
5715 goto restart;
5717 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5718 fprintf (file, "-");
5719 if (GET_CODE (XEXP (x, 1)) == CONST_INT
5720 && INTVAL (XEXP (x, 1)) < 0)
5722 fprintf (file, "(");
5723 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5724 fprintf (file, ")");
5726 else
5727 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5728 break;
5730 case ZERO_EXTEND:
5731 case SIGN_EXTEND:
5732 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5733 break;
5736 case UNSPEC:
5737 const char *suffix;
5738 bool pcrel; pcrel = false;
5739 rtx base; base = NULL;
5740 gcc_assert (XVECLEN (x, 0) >= 1);
5741 switch (XINT (x, 1))
5743 case ARC_UNSPEC_GOT:
5744 suffix = "@gotpc", pcrel = true;
5745 break;
5746 case ARC_UNSPEC_GOTOFF:
5747 suffix = "@gotoff";
5748 break;
5749 case ARC_UNSPEC_GOTOFFPC:
5750 suffix = "@pcl", pcrel = true;
5751 break;
5752 case ARC_UNSPEC_PLT:
5753 suffix = "@plt";
5754 break;
5755 case UNSPEC_TLS_GD:
5756 suffix = "@tlsgd", pcrel = true;
5757 break;
5758 case UNSPEC_TLS_IE:
5759 suffix = "@tlsie", pcrel = true;
5760 break;
5761 case UNSPEC_TLS_OFF:
5762 if (XVECLEN (x, 0) == 2)
5763 base = XVECEXP (x, 0, 1);
5764 if (SYMBOL_REF_TLS_MODEL (XVECEXP (x, 0, 0)) == TLS_MODEL_LOCAL_EXEC
5765 || (!flag_pic && !base))
5766 suffix = "@tpoff";
5767 else
5768 suffix = "@dtpoff";
5769 break;
5770 default:
5771 suffix = "@invalid";
5772 output_operand_lossage ("invalid UNSPEC as operand: %d", XINT (x,1));
5773 break;
5775 if (pcrel)
5776 fputs ("pcl,", file);
5777 arc_output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5778 fputs (suffix, file);
5779 if (base)
5780 arc_output_pic_addr_const (file, base, code);
5781 break;
5783 default:
5784 output_operand_lossage ("invalid expression as operand");
5788 #define SYMBOLIC_CONST(X) \
5789 (GET_CODE (X) == SYMBOL_REF \
5790 || GET_CODE (X) == LABEL_REF \
5791 || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
5793 /* Emit insns to move operands[1] into operands[0]. */
5795 static void
5796 prepare_pic_move (rtx *operands, machine_mode)
5798 if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1])
5799 && flag_pic)
5800 operands[1] = force_reg (Pmode, operands[1]);
5801 else
5803 rtx temp = (reload_in_progress ? operands[0]
5804 : flag_pic? gen_reg_rtx (Pmode) : NULL_RTX);
5805 operands[1] = arc_legitimize_pic_address (operands[1], temp);
5810 /* The function returning the number of words, at the beginning of an
5811 argument, must be put in registers. The returned value must be
5812 zero for arguments that are passed entirely in registers or that
5813 are entirely pushed on the stack.
5815 On some machines, certain arguments must be passed partially in
5816 registers and partially in memory. On these machines, typically
5817 the first N words of arguments are passed in registers, and the
5818 rest on the stack. If a multi-word argument (a `double' or a
5819 structure) crosses that boundary, its first few words must be
5820 passed in registers and the rest must be pushed. This function
5821 tells the compiler when this occurs, and how many of the words
5822 should go in registers.
5824 `FUNCTION_ARG' for these arguments should return the first register
5825 to be used by the caller for this argument; likewise
5826 `FUNCTION_INCOMING_ARG', for the called function.
5828 The function is used to implement macro FUNCTION_ARG_PARTIAL_NREGS. */
5830 /* If REGNO is the least arg reg available then what is the total number of arg
5831 regs available. */
5832 #define GPR_REST_ARG_REGS(REGNO) \
5833 ((REGNO) <= MAX_ARC_PARM_REGS ? MAX_ARC_PARM_REGS - (REGNO) : 0 )
5835 /* Since arc parm regs are contiguous. */
5836 #define ARC_NEXT_ARG_REG(REGNO) ( (REGNO) + 1 )
5838 /* Implement TARGET_ARG_PARTIAL_BYTES. */
5840 static int
5841 arc_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
5842 tree type, bool named ATTRIBUTE_UNUSED)
5844 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5845 int bytes = (mode == BLKmode
5846 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode));
5847 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5848 int arg_num = *cum;
5849 int ret;
5851 arg_num = ROUND_ADVANCE_CUM (arg_num, mode, type);
5852 ret = GPR_REST_ARG_REGS (arg_num);
5854 /* ICEd at function.c:2361, and ret is copied to data->partial */
5855 ret = (ret >= words ? 0 : ret * UNITS_PER_WORD);
5857 return ret;
5860 /* This function is used to control a function argument is passed in a
5861 register, and which register.
5863 The arguments are CUM, of type CUMULATIVE_ARGS, which summarizes
5864 (in a way defined by INIT_CUMULATIVE_ARGS and FUNCTION_ARG_ADVANCE)
5865 all of the previous arguments so far passed in registers; MODE, the
5866 machine mode of the argument; TYPE, the data type of the argument
5867 as a tree node or 0 if that is not known (which happens for C
5868 support library functions); and NAMED, which is 1 for an ordinary
5869 argument and 0 for nameless arguments that correspond to `...' in
5870 the called function's prototype.
5872 The returned value should either be a `reg' RTX for the hard
5873 register in which to pass the argument, or zero to pass the
5874 argument on the stack.
5876 For machines like the Vax and 68000, where normally all arguments
5877 are pushed, zero suffices as a definition.
5879 The usual way to make the ANSI library `stdarg.h' work on a machine
5880 where some arguments are usually passed in registers, is to cause
5881 nameless arguments to be passed on the stack instead. This is done
5882 by making the function return 0 whenever NAMED is 0.
5884 You may use the macro `MUST_PASS_IN_STACK (MODE, TYPE)' in the
5885 definition of this function to determine if this argument is of a
5886 type that must be passed in the stack. If `REG_PARM_STACK_SPACE'
5887 is not defined and the function returns non-zero for such an
5888 argument, the compiler will abort. If `REG_PARM_STACK_SPACE' is
5889 defined, the argument will be computed in the stack and then loaded
5890 into a register.
5892 The function is used to implement macro FUNCTION_ARG. */
5893 /* On the ARC the first MAX_ARC_PARM_REGS args are normally in registers
5894 and the rest are pushed. */
5896 static rtx
5897 arc_function_arg (cumulative_args_t cum_v,
5898 machine_mode mode,
5899 const_tree type ATTRIBUTE_UNUSED,
5900 bool named ATTRIBUTE_UNUSED)
5902 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5903 int arg_num = *cum;
5904 rtx ret;
5905 const char *debstr ATTRIBUTE_UNUSED;
5907 arg_num = ROUND_ADVANCE_CUM (arg_num, mode, type);
5908 /* Return a marker for use in the call instruction. */
5909 if (mode == VOIDmode)
5911 ret = const0_rtx;
5912 debstr = "<0>";
5914 else if (GPR_REST_ARG_REGS (arg_num) > 0)
5916 ret = gen_rtx_REG (mode, arg_num);
5917 debstr = reg_names [arg_num];
5919 else
5921 ret = NULL_RTX;
5922 debstr = "memory";
5924 return ret;
5927 /* The function to update the summarizer variable *CUM to advance past
5928 an argument in the argument list. The values MODE, TYPE and NAMED
5929 describe that argument. Once this is done, the variable *CUM is
5930 suitable for analyzing the *following* argument with
5931 `FUNCTION_ARG', etc.
5933 This function need not do anything if the argument in question was
5934 passed on the stack. The compiler knows how to track the amount of
5935 stack space used for arguments without any special help.
5937 The function is used to implement macro FUNCTION_ARG_ADVANCE. */
5938 /* For the ARC: the cum set here is passed on to function_arg where we
5939 look at its value and say which reg to use. Strategy: advance the
5940 regnumber here till we run out of arg regs, then set *cum to last
5941 reg. In function_arg, since *cum > last arg reg we would return 0
5942 and thus the arg will end up on the stack. For straddling args of
5943 course function_arg_partial_nregs will come into play. */
5945 static void
5946 arc_function_arg_advance (cumulative_args_t cum_v,
5947 machine_mode mode,
5948 const_tree type,
5949 bool named ATTRIBUTE_UNUSED)
5951 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5952 int bytes = (mode == BLKmode
5953 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode));
5954 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5955 int i;
5957 if (words)
5958 *cum = ROUND_ADVANCE_CUM (*cum, mode, type);
5959 for (i = 0; i < words; i++)
5960 *cum = ARC_NEXT_ARG_REG (*cum);
5964 /* Define how to find the value returned by a function.
5965 VALTYPE is the data type of the value (as a tree).
5966 If the precise function being called is known, FN_DECL_OR_TYPE is its
5967 FUNCTION_DECL; otherwise, FN_DECL_OR_TYPE is its type. */
5969 static rtx
5970 arc_function_value (const_tree valtype,
5971 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
5972 bool outgoing ATTRIBUTE_UNUSED)
5974 machine_mode mode = TYPE_MODE (valtype);
5975 int unsignedp ATTRIBUTE_UNUSED;
5977 unsignedp = TYPE_UNSIGNED (valtype);
5978 if (INTEGRAL_TYPE_P (valtype) || TREE_CODE (valtype) == OFFSET_TYPE)
5979 PROMOTE_MODE (mode, unsignedp, valtype);
5980 return gen_rtx_REG (mode, 0);
5983 /* Returns the return address that is used by builtin_return_address. */
5986 arc_return_addr_rtx (int count, ATTRIBUTE_UNUSED rtx frame)
5988 if (count != 0)
5989 return const0_rtx;
5991 return get_hard_reg_initial_val (Pmode , RETURN_ADDR_REGNUM);
5994 /* Determine if a given RTX is a valid constant. We already know this
5995 satisfies CONSTANT_P. */
5997 bool
5998 arc_legitimate_constant_p (machine_mode mode, rtx x)
6000 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
6001 return false;
6003 if (!flag_pic && mode != Pmode)
6004 return true;
6006 switch (GET_CODE (x))
6008 case CONST:
6009 if (flag_pic)
6011 if (arc_legitimate_pic_addr_p (x))
6012 return true;
6014 return arc_legitimate_constant_p (mode, XEXP (x, 0));
6016 case SYMBOL_REF:
6017 if (SYMBOL_REF_TLS_MODEL (x))
6018 return false;
6019 /* Fall through. */
6020 case LABEL_REF:
6021 if (flag_pic)
6022 return false;
6023 /* Fall through. */
6024 case CONST_INT:
6025 case CONST_DOUBLE:
6026 return true;
6028 case NEG:
6029 return arc_legitimate_constant_p (mode, XEXP (x, 0));
6031 case PLUS:
6032 case MINUS:
6034 bool t1 = arc_legitimate_constant_p (mode, XEXP (x, 0));
6035 bool t2 = arc_legitimate_constant_p (mode, XEXP (x, 1));
6037 return (t1 && t2);
6040 case CONST_VECTOR:
6041 switch (mode)
6043 case E_V2HImode:
6044 return TARGET_PLUS_DMPY;
6045 case E_V2SImode:
6046 case E_V4HImode:
6047 return TARGET_PLUS_QMACW;
6048 default:
6049 return false;
6052 case UNSPEC:
6053 switch (XINT (x, 1))
6055 case UNSPEC_TLS_GD:
6056 case UNSPEC_TLS_OFF:
6057 case UNSPEC_TLS_IE:
6058 return true;
6059 default:
6060 /* Any other unspec ending here are pic related, hence the above
6061 constant pic address checking returned false. */
6062 return false;
6064 /* Fall through. */
6066 default:
6067 fatal_insn ("unrecognized supposed constant", x);
6070 gcc_unreachable ();
6073 static bool
6074 arc_legitimate_address_p (machine_mode mode, rtx x, bool strict)
6076 if (RTX_OK_FOR_BASE_P (x, strict))
6077 return true;
6078 if (legitimate_offset_address_p (mode, x, TARGET_INDEXED_LOADS, strict))
6079 return true;
6080 if (legitimate_scaled_address_p (mode, x, strict))
6081 return true;
6082 if (LEGITIMATE_SMALL_DATA_ADDRESS_P (x))
6083 return true;
6084 if (GET_CODE (x) == CONST_INT && LARGE_INT (INTVAL (x)))
6085 return true;
6087 /* When we compile for size avoid const (@sym + offset)
6088 addresses. */
6089 if (!flag_pic && optimize_size && !reload_completed
6090 && (GET_CODE (x) == CONST)
6091 && (GET_CODE (XEXP (x, 0)) == PLUS)
6092 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
6093 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) == 0
6094 && !SYMBOL_REF_FUNCTION_P (XEXP (XEXP (x, 0), 0)))
6096 rtx addend = XEXP (XEXP (x, 0), 1);
6097 gcc_assert (CONST_INT_P (addend));
6098 HOST_WIDE_INT offset = INTVAL (addend);
6100 /* Allow addresses having a large offset to pass. Anyhow they
6101 will end in a limm. */
6102 return !(offset > -1024 && offset < 1020);
6105 if ((GET_MODE_SIZE (mode) != 16) && CONSTANT_P (x))
6107 return arc_legitimate_constant_p (mode, x);
6109 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == PRE_INC
6110 || GET_CODE (x) == POST_DEC || GET_CODE (x) == POST_INC)
6111 && RTX_OK_FOR_BASE_P (XEXP (x, 0), strict))
6112 return true;
6113 /* We're restricted here by the `st' insn. */
6114 if ((GET_CODE (x) == PRE_MODIFY || GET_CODE (x) == POST_MODIFY)
6115 && GET_CODE (XEXP ((x), 1)) == PLUS
6116 && rtx_equal_p (XEXP ((x), 0), XEXP (XEXP (x, 1), 0))
6117 && legitimate_offset_address_p (QImode, XEXP (x, 1),
6118 TARGET_AUTO_MODIFY_REG, strict))
6119 return true;
6120 return false;
6123 /* Return true iff ADDR (a legitimate address expression)
6124 has an effect that depends on the machine mode it is used for. */
6126 static bool
6127 arc_mode_dependent_address_p (const_rtx addr, addr_space_t)
6129 /* SYMBOL_REF is not mode dependent: it is either a small data reference,
6130 which is valid for loads and stores, or a limm offset, which is valid for
6131 loads. Scaled indices are scaled by the access mode. */
6132 if (GET_CODE (addr) == PLUS
6133 && GET_CODE (XEXP ((addr), 0)) == MULT)
6134 return true;
6135 return false;
6138 /* Determine if it's legal to put X into the constant pool. */
6140 static bool
6141 arc_cannot_force_const_mem (machine_mode mode, rtx x)
6143 return !arc_legitimate_constant_p (mode, x);
6146 /* IDs for all the ARC builtins. */
6148 enum arc_builtin_id
6150 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6151 ARC_BUILTIN_ ## NAME,
6152 #include "builtins.def"
6153 #undef DEF_BUILTIN
6155 ARC_BUILTIN_COUNT
6158 struct GTY(()) arc_builtin_description
6160 enum insn_code icode;
6161 int n_args;
6162 tree fndecl;
6165 static GTY(()) struct arc_builtin_description
6166 arc_bdesc[ARC_BUILTIN_COUNT] =
6168 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6169 { (enum insn_code) CODE_FOR_ ## ICODE, N_ARGS, NULL_TREE },
6170 #include "builtins.def"
6171 #undef DEF_BUILTIN
6174 /* Transform UP into lowercase and write the result to LO.
6175 You must provide enough space for LO. Return LO. */
6177 static char*
6178 arc_tolower (char *lo, const char *up)
6180 char *lo0 = lo;
6182 for (; *up; up++, lo++)
6183 *lo = TOLOWER (*up);
6185 *lo = '\0';
6187 return lo0;
6190 /* Implement `TARGET_BUILTIN_DECL'. */
6192 static tree
6193 arc_builtin_decl (unsigned id, bool initialize_p ATTRIBUTE_UNUSED)
6195 if (id < ARC_BUILTIN_COUNT)
6196 return arc_bdesc[id].fndecl;
6198 return error_mark_node;
6201 static void
6202 arc_init_builtins (void)
6204 tree V4HI_type_node;
6205 tree V2SI_type_node;
6206 tree V2HI_type_node;
6208 /* Vector types based on HS SIMD elements. */
6209 V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
6210 V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
6211 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
6213 tree pcvoid_type_node
6214 = build_pointer_type (build_qualified_type (void_type_node,
6215 TYPE_QUAL_CONST));
6216 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node,
6217 V8HImode);
6219 tree void_ftype_void
6220 = build_function_type_list (void_type_node, NULL_TREE);
6221 tree int_ftype_int
6222 = build_function_type_list (integer_type_node, integer_type_node,
6223 NULL_TREE);
6224 tree int_ftype_pcvoid_int
6225 = build_function_type_list (integer_type_node, pcvoid_type_node,
6226 integer_type_node, NULL_TREE);
6227 tree void_ftype_usint_usint
6228 = build_function_type_list (void_type_node, long_unsigned_type_node,
6229 long_unsigned_type_node, NULL_TREE);
6230 tree int_ftype_int_int
6231 = build_function_type_list (integer_type_node, integer_type_node,
6232 integer_type_node, NULL_TREE);
6233 tree usint_ftype_usint
6234 = build_function_type_list (long_unsigned_type_node,
6235 long_unsigned_type_node, NULL_TREE);
6236 tree void_ftype_usint
6237 = build_function_type_list (void_type_node, long_unsigned_type_node,
6238 NULL_TREE);
6239 tree int_ftype_void
6240 = build_function_type_list (integer_type_node, void_type_node,
6241 NULL_TREE);
6242 tree void_ftype_int
6243 = build_function_type_list (void_type_node, integer_type_node,
6244 NULL_TREE);
6245 tree int_ftype_short
6246 = build_function_type_list (integer_type_node, short_integer_type_node,
6247 NULL_TREE);
6249 /* Old ARC SIMD types. */
6250 tree v8hi_ftype_v8hi_v8hi
6251 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6252 V8HI_type_node, NULL_TREE);
6253 tree v8hi_ftype_v8hi_int
6254 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6255 integer_type_node, NULL_TREE);
6256 tree v8hi_ftype_v8hi_int_int
6257 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6258 integer_type_node, integer_type_node,
6259 NULL_TREE);
6260 tree void_ftype_v8hi_int_int
6261 = build_function_type_list (void_type_node, V8HI_type_node,
6262 integer_type_node, integer_type_node,
6263 NULL_TREE);
6264 tree void_ftype_v8hi_int_int_int
6265 = build_function_type_list (void_type_node, V8HI_type_node,
6266 integer_type_node, integer_type_node,
6267 integer_type_node, NULL_TREE);
6268 tree v8hi_ftype_int_int
6269 = build_function_type_list (V8HI_type_node, integer_type_node,
6270 integer_type_node, NULL_TREE);
6271 tree void_ftype_int_int
6272 = build_function_type_list (void_type_node, integer_type_node,
6273 integer_type_node, NULL_TREE);
6274 tree v8hi_ftype_v8hi
6275 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6276 NULL_TREE);
6277 /* ARCv2 SIMD types. */
6278 tree long_ftype_v4hi_v4hi
6279 = build_function_type_list (long_long_integer_type_node,
6280 V4HI_type_node, V4HI_type_node, NULL_TREE);
6281 tree int_ftype_v2hi_v2hi
6282 = build_function_type_list (integer_type_node,
6283 V2HI_type_node, V2HI_type_node, NULL_TREE);
6284 tree v2si_ftype_v2hi_v2hi
6285 = build_function_type_list (V2SI_type_node,
6286 V2HI_type_node, V2HI_type_node, NULL_TREE);
6287 tree v2hi_ftype_v2hi_v2hi
6288 = build_function_type_list (V2HI_type_node,
6289 V2HI_type_node, V2HI_type_node, NULL_TREE);
6290 tree v2si_ftype_v2si_v2si
6291 = build_function_type_list (V2SI_type_node,
6292 V2SI_type_node, V2SI_type_node, NULL_TREE);
6293 tree v4hi_ftype_v4hi_v4hi
6294 = build_function_type_list (V4HI_type_node,
6295 V4HI_type_node, V4HI_type_node, NULL_TREE);
6296 tree long_ftype_v2si_v2hi
6297 = build_function_type_list (long_long_integer_type_node,
6298 V2SI_type_node, V2HI_type_node, NULL_TREE);
6300 /* Add the builtins. */
6301 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6303 int id = ARC_BUILTIN_ ## NAME; \
6304 const char *Name = "__builtin_arc_" #NAME; \
6305 char *name = (char*) alloca (1 + strlen (Name)); \
6307 gcc_assert (id < ARC_BUILTIN_COUNT); \
6308 if (MASK) \
6309 arc_bdesc[id].fndecl \
6310 = add_builtin_function (arc_tolower(name, Name), TYPE, id, \
6311 BUILT_IN_MD, NULL, NULL_TREE); \
6313 #include "builtins.def"
6314 #undef DEF_BUILTIN
6317 /* Helper to expand __builtin_arc_aligned (void* val, int
6318 alignval). */
6320 static rtx
6321 arc_expand_builtin_aligned (tree exp)
6323 tree arg0 = CALL_EXPR_ARG (exp, 0);
6324 tree arg1 = CALL_EXPR_ARG (exp, 1);
6325 fold (arg1);
6326 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6327 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6329 if (!CONST_INT_P (op1))
6331 /* If we can't fold the alignment to a constant integer
6332 whilst optimizing, this is probably a user error. */
6333 if (optimize)
6334 warning (0, "__builtin_arc_aligned with non-constant alignment");
6336 else
6338 HOST_WIDE_INT alignTest = INTVAL (op1);
6339 /* Check alignTest is positive, and a power of two. */
6340 if (alignTest <= 0 || alignTest != (alignTest & -alignTest))
6342 error ("invalid alignment value for __builtin_arc_aligned");
6343 return NULL_RTX;
6346 if (CONST_INT_P (op0))
6348 HOST_WIDE_INT pnt = INTVAL (op0);
6350 if ((pnt & (alignTest - 1)) == 0)
6351 return const1_rtx;
6353 else
6355 unsigned align = get_pointer_alignment (arg0);
6356 unsigned numBits = alignTest * BITS_PER_UNIT;
6358 if (align && align >= numBits)
6359 return const1_rtx;
6360 /* Another attempt to ascertain alignment. Check the type
6361 we are pointing to. */
6362 if (POINTER_TYPE_P (TREE_TYPE (arg0))
6363 && TYPE_ALIGN (TREE_TYPE (TREE_TYPE (arg0))) >= numBits)
6364 return const1_rtx;
6368 /* Default to false. */
6369 return const0_rtx;
6372 /* Helper arc_expand_builtin, generates a pattern for the given icode
6373 and arguments. */
6375 static rtx_insn *
6376 apply_GEN_FCN (enum insn_code icode, rtx *arg)
6378 switch (insn_data[icode].n_generator_args)
6380 case 0:
6381 return GEN_FCN (icode) ();
6382 case 1:
6383 return GEN_FCN (icode) (arg[0]);
6384 case 2:
6385 return GEN_FCN (icode) (arg[0], arg[1]);
6386 case 3:
6387 return GEN_FCN (icode) (arg[0], arg[1], arg[2]);
6388 case 4:
6389 return GEN_FCN (icode) (arg[0], arg[1], arg[2], arg[3]);
6390 case 5:
6391 return GEN_FCN (icode) (arg[0], arg[1], arg[2], arg[3], arg[4]);
6392 default:
6393 gcc_unreachable ();
6397 /* Expand an expression EXP that calls a built-in function,
6398 with result going to TARGET if that's convenient
6399 (and in mode MODE if that's convenient).
6400 SUBTARGET may be used as the target for computing one of EXP's operands.
6401 IGNORE is nonzero if the value is to be ignored. */
6403 static rtx
6404 arc_expand_builtin (tree exp,
6405 rtx target,
6406 rtx subtarget ATTRIBUTE_UNUSED,
6407 machine_mode mode ATTRIBUTE_UNUSED,
6408 int ignore ATTRIBUTE_UNUSED)
6410 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6411 unsigned int id = DECL_FUNCTION_CODE (fndecl);
6412 const struct arc_builtin_description *d = &arc_bdesc[id];
6413 int i, j, n_args = call_expr_nargs (exp);
6414 rtx pat = NULL_RTX;
6415 rtx xop[5];
6416 enum insn_code icode = d->icode;
6417 machine_mode tmode = insn_data[icode].operand[0].mode;
6418 int nonvoid;
6419 tree arg0;
6420 tree arg1;
6421 tree arg2;
6422 tree arg3;
6423 rtx op0;
6424 rtx op1;
6425 rtx op2;
6426 rtx op3;
6427 rtx op4;
6428 machine_mode mode0;
6429 machine_mode mode1;
6430 machine_mode mode2;
6431 machine_mode mode3;
6432 machine_mode mode4;
6434 if (id >= ARC_BUILTIN_COUNT)
6435 internal_error ("bad builtin fcode");
6437 /* 1st part: Expand special builtins. */
6438 switch (id)
6440 case ARC_BUILTIN_NOP:
6441 emit_insn (gen_nopv ());
6442 return NULL_RTX;
6444 case ARC_BUILTIN_RTIE:
6445 case ARC_BUILTIN_SYNC:
6446 case ARC_BUILTIN_BRK:
6447 case ARC_BUILTIN_SWI:
6448 case ARC_BUILTIN_UNIMP_S:
6449 gcc_assert (icode != 0);
6450 emit_insn (GEN_FCN (icode) (const1_rtx));
6451 return NULL_RTX;
6453 case ARC_BUILTIN_ALIGNED:
6454 return arc_expand_builtin_aligned (exp);
6456 case ARC_BUILTIN_CLRI:
6457 target = gen_reg_rtx (SImode);
6458 emit_insn (gen_clri (target, const1_rtx));
6459 return target;
6461 case ARC_BUILTIN_TRAP_S:
6462 case ARC_BUILTIN_SLEEP:
6463 arg0 = CALL_EXPR_ARG (exp, 0);
6464 fold (arg0);
6465 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6467 if (!CONST_INT_P (op0) || !satisfies_constraint_L (op0))
6469 error ("builtin operand should be an unsigned 6-bit value");
6470 return NULL_RTX;
6472 gcc_assert (icode != 0);
6473 emit_insn (GEN_FCN (icode) (op0));
6474 return NULL_RTX;
6476 case ARC_BUILTIN_VDORUN:
6477 case ARC_BUILTIN_VDIRUN:
6478 arg0 = CALL_EXPR_ARG (exp, 0);
6479 arg1 = CALL_EXPR_ARG (exp, 1);
6480 op0 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6481 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6483 target = gen_rtx_REG (SImode, (id == ARC_BUILTIN_VDIRUN) ? 131 : 139);
6485 mode0 = insn_data[icode].operand[1].mode;
6486 mode1 = insn_data[icode].operand[2].mode;
6488 if (!insn_data[icode].operand[1].predicate (op0, mode0))
6489 op0 = copy_to_mode_reg (mode0, op0);
6491 if (!insn_data[icode].operand[2].predicate (op1, mode1))
6492 op1 = copy_to_mode_reg (mode1, op1);
6494 pat = GEN_FCN (icode) (target, op0, op1);
6495 if (!pat)
6496 return NULL_RTX;
6498 emit_insn (pat);
6499 return NULL_RTX;
6501 case ARC_BUILTIN_VDIWR:
6502 case ARC_BUILTIN_VDOWR:
6503 arg0 = CALL_EXPR_ARG (exp, 0);
6504 arg1 = CALL_EXPR_ARG (exp, 1);
6505 op0 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6506 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6508 if (!CONST_INT_P (op0)
6509 || !(UNSIGNED_INT3 (INTVAL (op0))))
6510 error ("operand 1 should be an unsigned 3-bit immediate");
6512 mode1 = insn_data[icode].operand[1].mode;
6514 if (icode == CODE_FOR_vdiwr_insn)
6515 target = gen_rtx_REG (SImode,
6516 ARC_FIRST_SIMD_DMA_CONFIG_IN_REG + INTVAL (op0));
6517 else if (icode == CODE_FOR_vdowr_insn)
6518 target = gen_rtx_REG (SImode,
6519 ARC_FIRST_SIMD_DMA_CONFIG_OUT_REG + INTVAL (op0));
6520 else
6521 gcc_unreachable ();
6523 if (!insn_data[icode].operand[2].predicate (op1, mode1))
6524 op1 = copy_to_mode_reg (mode1, op1);
6526 pat = GEN_FCN (icode) (target, op1);
6527 if (!pat)
6528 return NULL_RTX;
6530 emit_insn (pat);
6531 return NULL_RTX;
6533 case ARC_BUILTIN_VASRW:
6534 case ARC_BUILTIN_VSR8:
6535 case ARC_BUILTIN_VSR8AW:
6536 arg0 = CALL_EXPR_ARG (exp, 0);
6537 arg1 = CALL_EXPR_ARG (exp, 1);
6538 op0 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6539 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6540 op2 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6542 target = gen_reg_rtx (V8HImode);
6543 mode0 = insn_data[icode].operand[1].mode;
6544 mode1 = insn_data[icode].operand[2].mode;
6546 if (!insn_data[icode].operand[1].predicate (op0, mode0))
6547 op0 = copy_to_mode_reg (mode0, op0);
6549 if ((!insn_data[icode].operand[2].predicate (op1, mode1))
6550 || !(UNSIGNED_INT3 (INTVAL (op1))))
6551 error ("operand 2 should be an unsigned 3-bit value (I0-I7)");
6553 pat = GEN_FCN (icode) (target, op0, op1, op2);
6554 if (!pat)
6555 return NULL_RTX;
6557 emit_insn (pat);
6558 return target;
6560 case ARC_BUILTIN_VLD32WH:
6561 case ARC_BUILTIN_VLD32WL:
6562 case ARC_BUILTIN_VLD64:
6563 case ARC_BUILTIN_VLD32:
6564 rtx src_vreg;
6565 icode = d->icode;
6566 arg0 = CALL_EXPR_ARG (exp, 0); /* source vreg. */
6567 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6568 arg2 = CALL_EXPR_ARG (exp, 2); /* u8. */
6570 src_vreg = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6571 op0 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6572 op1 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6573 op2 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6575 /* target <- src vreg. */
6576 emit_insn (gen_move_insn (target, src_vreg));
6578 /* target <- vec_concat: target, mem (Ib, u8). */
6579 mode0 = insn_data[icode].operand[3].mode;
6580 mode1 = insn_data[icode].operand[1].mode;
6582 if ((!insn_data[icode].operand[3].predicate (op0, mode0))
6583 || !(UNSIGNED_INT3 (INTVAL (op0))))
6584 error ("operand 1 should be an unsigned 3-bit value (I0-I7)");
6586 if ((!insn_data[icode].operand[1].predicate (op1, mode1))
6587 || !(UNSIGNED_INT8 (INTVAL (op1))))
6588 error ("operand 2 should be an unsigned 8-bit value");
6590 pat = GEN_FCN (icode) (target, op1, op2, op0);
6591 if (!pat)
6592 return NULL_RTX;
6594 emit_insn (pat);
6595 return target;
6597 case ARC_BUILTIN_VLD64W:
6598 case ARC_BUILTIN_VLD128:
6599 arg0 = CALL_EXPR_ARG (exp, 0); /* dest vreg. */
6600 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6602 op0 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6603 op1 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6604 op2 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6606 /* target <- src vreg. */
6607 target = gen_reg_rtx (V8HImode);
6609 /* target <- vec_concat: target, mem (Ib, u8). */
6610 mode0 = insn_data[icode].operand[1].mode;
6611 mode1 = insn_data[icode].operand[2].mode;
6612 mode2 = insn_data[icode].operand[3].mode;
6614 if ((!insn_data[icode].operand[2].predicate (op1, mode1))
6615 || !(UNSIGNED_INT3 (INTVAL (op1))))
6616 error ("operand 1 should be an unsigned 3-bit value (I0-I7)");
6618 if ((!insn_data[icode].operand[3].predicate (op2, mode2))
6619 || !(UNSIGNED_INT8 (INTVAL (op2))))
6620 error ("operand 2 should be an unsigned 8-bit value");
6622 pat = GEN_FCN (icode) (target, op0, op1, op2);
6624 if (!pat)
6625 return NULL_RTX;
6627 emit_insn (pat);
6628 return target;
6630 case ARC_BUILTIN_VST128:
6631 case ARC_BUILTIN_VST64:
6632 arg0 = CALL_EXPR_ARG (exp, 0); /* src vreg. */
6633 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6634 arg2 = CALL_EXPR_ARG (exp, 2); /* u8. */
6636 op0 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6637 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6638 op2 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6639 op3 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6641 mode0 = insn_data[icode].operand[0].mode;
6642 mode1 = insn_data[icode].operand[1].mode;
6643 mode2 = insn_data[icode].operand[2].mode;
6644 mode3 = insn_data[icode].operand[3].mode;
6646 if ((!insn_data[icode].operand[1].predicate (op1, mode1))
6647 || !(UNSIGNED_INT3 (INTVAL (op1))))
6648 error ("operand 2 should be an unsigned 3-bit value (I0-I7)");
6650 if ((!insn_data[icode].operand[2].predicate (op2, mode2))
6651 || !(UNSIGNED_INT8 (INTVAL (op2))))
6652 error ("operand 3 should be an unsigned 8-bit value");
6654 if (!insn_data[icode].operand[3].predicate (op3, mode3))
6655 op3 = copy_to_mode_reg (mode3, op3);
6657 pat = GEN_FCN (icode) (op0, op1, op2, op3);
6658 if (!pat)
6659 return NULL_RTX;
6661 emit_insn (pat);
6662 return NULL_RTX;
6664 case ARC_BUILTIN_VST16_N:
6665 case ARC_BUILTIN_VST32_N:
6666 arg0 = CALL_EXPR_ARG (exp, 0); /* source vreg. */
6667 arg1 = CALL_EXPR_ARG (exp, 1); /* u3. */
6668 arg2 = CALL_EXPR_ARG (exp, 2); /* [I]0-7. */
6669 arg3 = CALL_EXPR_ARG (exp, 3); /* u8. */
6671 op0 = expand_expr (arg3, NULL_RTX, SImode, EXPAND_NORMAL);
6672 op1 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6673 op2 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6674 op3 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6675 op4 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6677 mode0 = insn_data[icode].operand[0].mode;
6678 mode2 = insn_data[icode].operand[2].mode;
6679 mode3 = insn_data[icode].operand[3].mode;
6680 mode4 = insn_data[icode].operand[4].mode;
6682 /* Do some correctness checks for the operands. */
6683 if ((!insn_data[icode].operand[0].predicate (op0, mode0))
6684 || !(UNSIGNED_INT8 (INTVAL (op0))))
6685 error ("operand 4 should be an unsigned 8-bit value (0-255)");
6687 if ((!insn_data[icode].operand[2].predicate (op2, mode2))
6688 || !(UNSIGNED_INT3 (INTVAL (op2))))
6689 error ("operand 3 should be an unsigned 3-bit value (I0-I7)");
6691 if (!insn_data[icode].operand[3].predicate (op3, mode3))
6692 op3 = copy_to_mode_reg (mode3, op3);
6694 if ((!insn_data[icode].operand[4].predicate (op4, mode4))
6695 || !(UNSIGNED_INT3 (INTVAL (op4))))
6696 error ("operand 2 should be an unsigned 3-bit value (subreg 0-7)");
6697 else if (icode == CODE_FOR_vst32_n_insn
6698 && ((INTVAL (op4) % 2) != 0))
6699 error ("operand 2 should be an even 3-bit value (subreg 0,2,4,6)");
6701 pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
6702 if (!pat)
6703 return NULL_RTX;
6705 emit_insn (pat);
6706 return NULL_RTX;
6708 default:
6709 break;
6712 /* 2nd part: Expand regular builtins. */
6713 if (icode == 0)
6714 internal_error ("bad builtin fcode");
6716 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6717 j = 0;
6719 if (nonvoid)
6721 if (target == NULL_RTX
6722 || GET_MODE (target) != tmode
6723 || !insn_data[icode].operand[0].predicate (target, tmode))
6725 target = gen_reg_rtx (tmode);
6727 xop[j++] = target;
6730 gcc_assert (n_args <= 4);
6731 for (i = 0; i < n_args; i++, j++)
6733 tree arg = CALL_EXPR_ARG (exp, i);
6734 machine_mode mode = insn_data[icode].operand[j].mode;
6735 rtx op = expand_expr (arg, NULL_RTX, mode, EXPAND_NORMAL);
6736 machine_mode opmode = GET_MODE (op);
6737 char c = insn_data[icode].operand[j].constraint[0];
6739 /* SIMD extension requires exact immediate operand match. */
6740 if ((id > ARC_BUILTIN_SIMD_BEGIN)
6741 && (id < ARC_BUILTIN_SIMD_END)
6742 && (c != 'v')
6743 && (c != 'r'))
6745 if (!CONST_INT_P (op))
6746 error ("builtin requires an immediate for operand %d", j);
6747 switch (c)
6749 case 'L':
6750 if (!satisfies_constraint_L (op))
6751 error ("operand %d should be a 6 bit unsigned immediate", j);
6752 break;
6753 case 'P':
6754 if (!satisfies_constraint_P (op))
6755 error ("operand %d should be a 8 bit unsigned immediate", j);
6756 break;
6757 case 'K':
6758 if (!satisfies_constraint_K (op))
6759 error ("operand %d should be a 3 bit unsigned immediate", j);
6760 break;
6761 default:
6762 error ("unknown builtin immediate operand type for operand %d",
6767 if (CONST_INT_P (op))
6768 opmode = mode;
6770 if ((opmode == SImode) && (mode == HImode))
6772 opmode = HImode;
6773 op = gen_lowpart (HImode, op);
6776 /* In case the insn wants input operands in modes different from
6777 the result, abort. */
6778 gcc_assert (opmode == mode || opmode == VOIDmode);
6780 if (!insn_data[icode].operand[i + nonvoid].predicate (op, mode))
6781 op = copy_to_mode_reg (mode, op);
6783 xop[j] = op;
6786 pat = apply_GEN_FCN (icode, xop);
6787 if (pat == NULL_RTX)
6788 return NULL_RTX;
6790 emit_insn (pat);
6792 if (nonvoid)
6793 return target;
6794 else
6795 return const0_rtx;
6798 /* Returns true if the operands[opno] is a valid compile-time constant to be
6799 used as register number in the code for builtins. Else it flags an error
6800 and returns false. */
6802 bool
6803 check_if_valid_regno_const (rtx *operands, int opno)
6806 switch (GET_CODE (operands[opno]))
6808 case SYMBOL_REF :
6809 case CONST :
6810 case CONST_INT :
6811 return true;
6812 default:
6813 error ("register number must be a compile-time constant. Try giving higher optimization levels");
6814 break;
6816 return false;
6819 /* Check that after all the constant folding, whether the operand to
6820 __builtin_arc_sleep is an unsigned int of 6 bits. If not, flag an error. */
6822 bool
6823 check_if_valid_sleep_operand (rtx *operands, int opno)
6825 switch (GET_CODE (operands[opno]))
6827 case CONST :
6828 case CONST_INT :
6829 if( UNSIGNED_INT6 (INTVAL (operands[opno])))
6830 return true;
6831 /* FALLTHRU */
6832 default:
6833 fatal_error (input_location,
6834 "operand for sleep instruction must be an unsigned 6 bit compile-time constant");
6835 break;
6837 return false;
6840 /* Return true if it is ok to make a tail-call to DECL. */
6842 static bool
6843 arc_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
6844 tree exp ATTRIBUTE_UNUSED)
6846 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
6847 if (ARC_INTERRUPT_P (arc_compute_function_type (cfun)))
6848 return false;
6850 /* Everything else is ok. */
6851 return true;
6854 /* Output code to add DELTA to the first argument, and then jump
6855 to FUNCTION. Used for C++ multiple inheritance. */
6857 static void
6858 arc_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
6859 HOST_WIDE_INT delta,
6860 HOST_WIDE_INT vcall_offset,
6861 tree function)
6863 int mi_delta = delta;
6864 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
6865 int shift = 0;
6866 int this_regno
6867 = aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ? 1 : 0;
6868 rtx fnaddr;
6870 if (mi_delta < 0)
6871 mi_delta = - mi_delta;
6873 /* Add DELTA. When possible use a plain add, otherwise load it into
6874 a register first. */
6876 while (mi_delta != 0)
6878 if ((mi_delta & (3 << shift)) == 0)
6879 shift += 2;
6880 else
6882 asm_fprintf (file, "\t%s\t%s, %s, %d\n",
6883 mi_op, reg_names[this_regno], reg_names[this_regno],
6884 mi_delta & (0xff << shift));
6885 mi_delta &= ~(0xff << shift);
6886 shift += 8;
6890 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6891 if (vcall_offset != 0)
6893 /* ld r12,[this] --> temp = *this
6894 add r12,r12,vcall_offset --> temp = *(*this + vcall_offset)
6895 ld r12,[r12]
6896 add this,this,r12 --> this+ = *(*this + vcall_offset) */
6897 asm_fprintf (file, "\tld\t%s, [%s]\n",
6898 ARC_TEMP_SCRATCH_REG, reg_names[this_regno]);
6899 asm_fprintf (file, "\tadd\t%s, %s, " HOST_WIDE_INT_PRINT_DEC "\n",
6900 ARC_TEMP_SCRATCH_REG, ARC_TEMP_SCRATCH_REG, vcall_offset);
6901 asm_fprintf (file, "\tld\t%s, [%s]\n",
6902 ARC_TEMP_SCRATCH_REG, ARC_TEMP_SCRATCH_REG);
6903 asm_fprintf (file, "\tadd\t%s, %s, %s\n", reg_names[this_regno],
6904 reg_names[this_regno], ARC_TEMP_SCRATCH_REG);
6907 fnaddr = XEXP (DECL_RTL (function), 0);
6909 if (arc_is_longcall_p (fnaddr))
6911 if (flag_pic)
6913 asm_fprintf (file, "\tld\t%s, [pcl, @",
6914 ARC_TEMP_SCRATCH_REG);
6915 assemble_name (file, XSTR (fnaddr, 0));
6916 fputs ("@gotpc]\n", file);
6917 asm_fprintf (file, "\tj\t[%s]", ARC_TEMP_SCRATCH_REG);
6919 else
6921 fputs ("\tj\t@", file);
6922 assemble_name (file, XSTR (fnaddr, 0));
6925 else
6927 fputs ("\tb\t@", file);
6928 assemble_name (file, XSTR (fnaddr, 0));
6929 if (flag_pic)
6930 fputs ("@plt\n", file);
6932 fputc ('\n', file);
6935 /* Return true if a 32 bit "long_call" should be generated for
6936 this calling SYM_REF. We generate a long_call if the function:
6938 a. has an __attribute__((long call))
6939 or b. the -mlong-calls command line switch has been specified
6941 However we do not generate a long call if the function has an
6942 __attribute__ ((short_call)) or __attribute__ ((medium_call))
6944 This function will be called by C fragments contained in the machine
6945 description file. */
6947 bool
6948 arc_is_longcall_p (rtx sym_ref)
6950 if (GET_CODE (sym_ref) != SYMBOL_REF)
6951 return false;
6953 return (SYMBOL_REF_LONG_CALL_P (sym_ref)
6954 || (TARGET_LONG_CALLS_SET
6955 && !SYMBOL_REF_SHORT_CALL_P (sym_ref)
6956 && !SYMBOL_REF_MEDIUM_CALL_P (sym_ref)));
6960 /* Likewise for short calls. */
6962 bool
6963 arc_is_shortcall_p (rtx sym_ref)
6965 if (GET_CODE (sym_ref) != SYMBOL_REF)
6966 return false;
6968 return (SYMBOL_REF_SHORT_CALL_P (sym_ref)
6969 || (!TARGET_LONG_CALLS_SET && !TARGET_MEDIUM_CALLS
6970 && !SYMBOL_REF_LONG_CALL_P (sym_ref)
6971 && !SYMBOL_REF_MEDIUM_CALL_P (sym_ref)));
6975 /* Worker function for TARGET_RETURN_IN_MEMORY. */
6977 static bool
6978 arc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6980 if (AGGREGATE_TYPE_P (type) || TREE_ADDRESSABLE (type))
6981 return true;
6982 else
6984 HOST_WIDE_INT size = int_size_in_bytes (type);
6985 return (size == -1 || size > (TARGET_V2 ? 16 : 8));
6990 /* This was in rtlanal.c, and can go in there when we decide we want
6991 to submit the change for inclusion in the GCC tree. */
6992 /* Like note_stores, but allow the callback to have side effects on the rtl
6993 (like the note_stores of yore):
6994 Call FUN on each register or MEM that is stored into or clobbered by X.
6995 (X would be the pattern of an insn). DATA is an arbitrary pointer,
6996 ignored by note_stores, but passed to FUN.
6997 FUN may alter parts of the RTL.
6999 FUN receives three arguments:
7000 1. the REG, MEM, CC0 or PC being stored in or clobbered,
7001 2. the SET or CLOBBER rtx that does the store,
7002 3. the pointer DATA provided to note_stores.
7004 If the item being stored in or clobbered is a SUBREG of a hard register,
7005 the SUBREG will be passed. */
7007 /* For now. */ static
7008 void
7009 walk_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
7011 int i;
7013 if (GET_CODE (x) == COND_EXEC)
7014 x = COND_EXEC_CODE (x);
7016 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
7018 rtx dest = SET_DEST (x);
7020 while ((GET_CODE (dest) == SUBREG
7021 && (!REG_P (SUBREG_REG (dest))
7022 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
7023 || GET_CODE (dest) == ZERO_EXTRACT
7024 || GET_CODE (dest) == STRICT_LOW_PART)
7025 dest = XEXP (dest, 0);
7027 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
7028 each of whose first operand is a register. */
7029 if (GET_CODE (dest) == PARALLEL)
7031 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
7032 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
7033 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
7035 else
7036 (*fun) (dest, x, data);
7039 else if (GET_CODE (x) == PARALLEL)
7040 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7041 walk_stores (XVECEXP (x, 0, i), fun, data);
7044 static bool
7045 arc_pass_by_reference (cumulative_args_t ca_v ATTRIBUTE_UNUSED,
7046 machine_mode mode ATTRIBUTE_UNUSED,
7047 const_tree type,
7048 bool named ATTRIBUTE_UNUSED)
7050 return (type != 0
7051 && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
7052 || TREE_ADDRESSABLE (type)));
7055 /* Implement TARGET_CAN_USE_DOLOOP_P. */
7057 static bool
7058 arc_can_use_doloop_p (const widest_int &,
7059 const widest_int &iterations_max,
7060 unsigned int loop_depth, bool entered_at_top)
7062 /* Considering limitations in the hardware, only use doloop
7063 for innermost loops which must be entered from the top. */
7064 if (loop_depth > 1 || !entered_at_top)
7065 return false;
7067 /* Check for lp_count width boundary. */
7068 if (arc_lpcwidth != 32
7069 && (wi::gtu_p (iterations_max, ((1 << arc_lpcwidth) - 1))
7070 || wi::eq_p (iterations_max, 0)))
7071 return false;
7072 return true;
7075 /* NULL if INSN insn is valid within a low-overhead loop. Otherwise
7076 return why doloop cannot be applied. */
7078 static const char *
7079 arc_invalid_within_doloop (const rtx_insn *insn)
7081 if (CALL_P (insn))
7082 return "Function call in the loop.";
7084 /* FIXME! add here all the ZOL exceptions. */
7085 return NULL;
7088 /* Return true if a load instruction (CONSUMER) uses the same address as a
7089 store instruction (PRODUCER). This function is used to avoid st/ld
7090 address hazard in ARC700 cores. */
7091 bool
7092 arc_store_addr_hazard_p (rtx_insn* producer, rtx_insn* consumer)
7094 rtx in_set, out_set;
7095 rtx out_addr, in_addr;
7097 if (!producer)
7098 return false;
7100 if (!consumer)
7101 return false;
7103 /* Peel the producer and the consumer for the address. */
7104 out_set = single_set (producer);
7105 if (out_set)
7107 out_addr = SET_DEST (out_set);
7108 if (!out_addr)
7109 return false;
7110 if (GET_CODE (out_addr) == ZERO_EXTEND
7111 || GET_CODE (out_addr) == SIGN_EXTEND)
7112 out_addr = XEXP (out_addr, 0);
7114 if (!MEM_P (out_addr))
7115 return false;
7117 in_set = single_set (consumer);
7118 if (in_set)
7120 in_addr = SET_SRC (in_set);
7121 if (!in_addr)
7122 return false;
7123 if (GET_CODE (in_addr) == ZERO_EXTEND
7124 || GET_CODE (in_addr) == SIGN_EXTEND)
7125 in_addr = XEXP (in_addr, 0);
7127 if (!MEM_P (in_addr))
7128 return false;
7129 /* Get rid of the MEM and check if the addresses are
7130 equivalent. */
7131 in_addr = XEXP (in_addr, 0);
7132 out_addr = XEXP (out_addr, 0);
7134 return exp_equiv_p (in_addr, out_addr, 0, true);
7137 return false;
7140 /* The same functionality as arc_hazard. It is called in machine
7141 reorg before any other optimization. Hence, the NOP size is taken
7142 into account when doing branch shortening. */
7144 static void
7145 workaround_arc_anomaly (void)
7147 rtx_insn *insn, *succ0;
7149 /* For any architecture: call arc_hazard here. */
7150 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7152 succ0 = next_real_insn (insn);
7153 if (arc_hazard (insn, succ0))
7155 emit_insn_before (gen_nopv (), succ0);
7159 if (TARGET_ARC700)
7161 rtx_insn *succ1;
7163 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7165 succ0 = next_real_insn (insn);
7166 if (arc_store_addr_hazard_p (insn, succ0))
7168 emit_insn_after (gen_nopv (), insn);
7169 emit_insn_after (gen_nopv (), insn);
7170 continue;
7173 /* Avoid adding nops if the instruction between the ST and LD is
7174 a call or jump. */
7175 succ1 = next_real_insn (succ0);
7176 if (succ0 && !JUMP_P (succ0) && !CALL_P (succ0)
7177 && arc_store_addr_hazard_p (insn, succ1))
7178 emit_insn_after (gen_nopv (), insn);
7183 /* A callback for the hw-doloop pass. Called when a loop we have discovered
7184 turns out not to be optimizable; we have to split the loop_end pattern into
7185 a subtract and a test. */
7187 static void
7188 hwloop_fail (hwloop_info loop)
7190 rtx test;
7191 rtx insn = loop->loop_end;
7193 if (TARGET_V2
7194 && (loop->length && (loop->length <= ARC_MAX_LOOP_LENGTH))
7195 && REG_P (loop->iter_reg))
7197 /* TARGET_V2 has dbnz instructions. */
7198 test = gen_dbnz (loop->iter_reg, loop->start_label);
7199 insn = emit_jump_insn_before (test, loop->loop_end);
7201 else if (REG_P (loop->iter_reg) && (REGNO (loop->iter_reg) == LP_COUNT))
7203 /* We have the lp_count as loop iterator, try to use it. */
7204 emit_insn_before (gen_loop_fail (), loop->loop_end);
7205 test = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_ZNmode, CC_REG),
7206 const0_rtx);
7207 test = gen_rtx_IF_THEN_ELSE (VOIDmode, test,
7208 gen_rtx_LABEL_REF (Pmode, loop->start_label),
7209 pc_rtx);
7210 insn = emit_jump_insn_before (gen_rtx_SET (pc_rtx, test),
7211 loop->loop_end);
7213 else
7215 emit_insn_before (gen_addsi3 (loop->iter_reg,
7216 loop->iter_reg,
7217 constm1_rtx),
7218 loop->loop_end);
7219 test = gen_rtx_NE (VOIDmode, loop->iter_reg, const0_rtx);
7220 insn = emit_jump_insn_before (gen_cbranchsi4 (test,
7221 loop->iter_reg,
7222 const0_rtx,
7223 loop->start_label),
7224 loop->loop_end);
7226 JUMP_LABEL (insn) = loop->start_label;
7227 LABEL_NUSES (loop->start_label)++;
7228 delete_insn (loop->loop_end);
7231 /* Optimize LOOP. */
7233 static bool
7234 hwloop_optimize (hwloop_info loop)
7236 int i;
7237 edge entry_edge;
7238 basic_block entry_bb, bb;
7239 rtx iter_reg, end_label;
7240 rtx_insn *insn, *seq, *entry_after, *last_insn;
7241 unsigned int length;
7242 bool need_fix = false;
7243 rtx lp_reg = gen_rtx_REG (SImode, LP_COUNT);
7245 if (loop->depth > 1)
7247 if (dump_file)
7248 fprintf (dump_file, ";; loop %d is not innermost\n",
7249 loop->loop_no);
7250 return false;
7253 if (!loop->incoming_dest)
7255 if (dump_file)
7256 fprintf (dump_file, ";; loop %d has more than one entry\n",
7257 loop->loop_no);
7258 return false;
7261 if (loop->incoming_dest != loop->head)
7263 if (dump_file)
7264 fprintf (dump_file, ";; loop %d is not entered from head\n",
7265 loop->loop_no);
7266 return false;
7269 if (loop->has_call || loop->has_asm)
7271 if (dump_file)
7272 fprintf (dump_file, ";; loop %d has invalid insn\n",
7273 loop->loop_no);
7274 return false;
7277 /* Scan all the blocks to make sure they don't use iter_reg. */
7278 if (loop->iter_reg_used || loop->iter_reg_used_outside)
7280 if (dump_file)
7281 fprintf (dump_file, ";; loop %d uses iterator\n",
7282 loop->loop_no);
7283 return false;
7286 /* Check if start_label appears before doloop_end. */
7287 length = 0;
7288 for (insn = loop->start_label;
7289 insn && insn != loop->loop_end;
7290 insn = NEXT_INSN (insn))
7291 length += NONDEBUG_INSN_P (insn) ? get_attr_length (insn) : 0;
7293 if (!insn)
7295 if (dump_file)
7296 fprintf (dump_file, ";; loop %d start_label not before loop_end\n",
7297 loop->loop_no);
7298 return false;
7301 loop->length = length;
7302 if (loop->length > ARC_MAX_LOOP_LENGTH)
7304 if (dump_file)
7305 fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
7306 return false;
7308 else if (!loop->length)
7310 if (dump_file)
7311 fprintf (dump_file, ";; loop %d is empty\n", loop->loop_no);
7312 return false;
7315 /* Check if we use a register or not. */
7316 if (!REG_P (loop->iter_reg))
7318 if (dump_file)
7319 fprintf (dump_file, ";; loop %d iterator is MEM\n",
7320 loop->loop_no);
7321 return false;
7324 /* Check if loop register is lpcount. */
7325 if (REG_P (loop->iter_reg) && (REGNO (loop->iter_reg)) != LP_COUNT)
7327 if (dump_file)
7328 fprintf (dump_file, ";; loop %d doesn't use lp_count as loop"
7329 " iterator\n",
7330 loop->loop_no);
7331 /* This loop doesn't use the lp_count, check though if we can
7332 fix it. */
7333 if (TEST_HARD_REG_BIT (loop->regs_set_in_loop, LP_COUNT)
7334 /* In very unique cases we may have LP_COUNT alive. */
7335 || (loop->incoming_src
7336 && REGNO_REG_SET_P (df_get_live_out (loop->incoming_src),
7337 LP_COUNT)))
7338 return false;
7339 else
7340 need_fix = true;
7343 /* Check for control like instruction as the last instruction of a
7344 ZOL. */
7345 bb = loop->tail;
7346 last_insn = PREV_INSN (loop->loop_end);
7348 while (1)
7350 for (; last_insn != BB_HEAD (bb);
7351 last_insn = PREV_INSN (last_insn))
7352 if (NONDEBUG_INSN_P (last_insn))
7353 break;
7355 if (last_insn != BB_HEAD (bb))
7356 break;
7358 if (single_pred_p (bb)
7359 && single_pred_edge (bb)->flags & EDGE_FALLTHRU
7360 && single_pred (bb) != ENTRY_BLOCK_PTR_FOR_FN (cfun))
7362 bb = single_pred (bb);
7363 last_insn = BB_END (bb);
7364 continue;
7366 else
7368 last_insn = NULL;
7369 break;
7373 if (!last_insn)
7375 if (dump_file)
7376 fprintf (dump_file, ";; loop %d has no last instruction\n",
7377 loop->loop_no);
7378 return false;
7381 if ((TARGET_ARC600_FAMILY || TARGET_HS)
7382 && INSN_P (last_insn)
7383 && (JUMP_P (last_insn) || CALL_P (last_insn)
7384 || GET_CODE (PATTERN (last_insn)) == SEQUENCE
7385 /* At this stage we can have (insn (clobber (mem:BLK
7386 (reg)))) instructions, ignore them. */
7387 || (GET_CODE (PATTERN (last_insn)) != CLOBBER
7388 && (get_attr_type (last_insn) == TYPE_BRCC
7389 || get_attr_type (last_insn) == TYPE_BRCC_NO_DELAY_SLOT))))
7391 if (loop->length + 2 > ARC_MAX_LOOP_LENGTH)
7393 if (dump_file)
7394 fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
7395 return false;
7397 if (dump_file)
7398 fprintf (dump_file, ";; loop %d has a control like last insn;"
7399 "add a nop\n",
7400 loop->loop_no);
7402 last_insn = emit_insn_after (gen_nopv (), last_insn);
7405 if (LABEL_P (last_insn))
7407 if (dump_file)
7408 fprintf (dump_file, ";; loop %d has a label as last insn;"
7409 "add a nop\n",
7410 loop->loop_no);
7411 last_insn = emit_insn_after (gen_nopv (), last_insn);
7414 /* SAVE_NOTE is used by haifa scheduler. However, we are after it
7415 and we can use it to indicate the last ZOL instruction cannot be
7416 part of a delay slot. */
7417 add_reg_note (last_insn, REG_SAVE_NOTE, GEN_INT (2));
7419 loop->last_insn = last_insn;
7421 /* Get the loop iteration register. */
7422 iter_reg = loop->iter_reg;
7424 gcc_assert (REG_P (iter_reg));
7426 entry_edge = NULL;
7428 FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
7429 if (entry_edge->flags & EDGE_FALLTHRU)
7430 break;
7432 if (entry_edge == NULL)
7434 if (dump_file)
7435 fprintf (dump_file, ";; loop %d has no fallthru edge jumping"
7436 "into the loop\n",
7437 loop->loop_no);
7438 return false;
7440 /* The loop is good. */
7441 end_label = gen_label_rtx ();
7442 loop->end_label = end_label;
7444 /* Place the zero_cost_loop_start instruction before the loop. */
7445 entry_bb = entry_edge->src;
7447 start_sequence ();
7449 if (need_fix)
7451 /* The loop uses a R-register, but the lp_count is free, thus
7452 use lp_count. */
7453 emit_insn (gen_movsi (lp_reg, iter_reg));
7454 SET_HARD_REG_BIT (loop->regs_set_in_loop, LP_COUNT);
7455 iter_reg = lp_reg;
7456 if (dump_file)
7458 fprintf (dump_file, ";; fix loop %d to use lp_count\n",
7459 loop->loop_no);
7463 insn = emit_insn (gen_arc_lp (iter_reg,
7464 loop->start_label,
7465 loop->end_label));
7467 seq = get_insns ();
7468 end_sequence ();
7470 entry_after = BB_END (entry_bb);
7471 if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1
7472 || !entry_after)
7474 basic_block new_bb;
7475 edge e;
7476 edge_iterator ei;
7478 emit_insn_before (seq, BB_HEAD (loop->head));
7479 seq = emit_label_before (gen_label_rtx (), seq);
7480 new_bb = create_basic_block (seq, insn, entry_bb);
7481 FOR_EACH_EDGE (e, ei, loop->incoming)
7483 if (!(e->flags & EDGE_FALLTHRU))
7484 redirect_edge_and_branch_force (e, new_bb);
7485 else
7486 redirect_edge_succ (e, new_bb);
7489 make_edge (new_bb, loop->head, 0);
7491 else
7493 #if 0
7494 while (DEBUG_INSN_P (entry_after)
7495 || (NOTE_P (entry_after)
7496 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK
7497 /* Make sure we don't split a call and its corresponding
7498 CALL_ARG_LOCATION note. */
7499 && NOTE_KIND (entry_after) != NOTE_INSN_CALL_ARG_LOCATION))
7500 entry_after = NEXT_INSN (entry_after);
7501 #endif
7502 entry_after = next_nonnote_insn_bb (entry_after);
7504 gcc_assert (entry_after);
7505 emit_insn_before (seq, entry_after);
7508 delete_insn (loop->loop_end);
7509 /* Insert the loop end label before the last instruction of the
7510 loop. */
7511 emit_label_after (end_label, loop->last_insn);
7512 /* Make sure we mark the begining and end label as used. */
7513 LABEL_NUSES (loop->end_label)++;
7514 LABEL_NUSES (loop->start_label)++;
7516 return true;
7519 /* A callback for the hw-doloop pass. This function examines INSN; if
7520 it is a loop_end pattern we recognize, return the reg rtx for the
7521 loop counter. Otherwise, return NULL_RTX. */
7523 static rtx
7524 hwloop_pattern_reg (rtx_insn *insn)
7526 rtx reg;
7528 if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
7529 return NULL_RTX;
7531 reg = SET_DEST (XVECEXP (PATTERN (insn), 0, 1));
7532 if (!REG_P (reg))
7533 return NULL_RTX;
7534 return reg;
7537 static struct hw_doloop_hooks arc_doloop_hooks =
7539 hwloop_pattern_reg,
7540 hwloop_optimize,
7541 hwloop_fail
7544 /* Run from machine_dependent_reorg, this pass looks for doloop_end insns
7545 and tries to rewrite the RTL of these loops so that proper Blackfin
7546 hardware loops are generated. */
7548 static void
7549 arc_reorg_loops (void)
7551 reorg_loops (true, &arc_doloop_hooks);
7554 static int arc_reorg_in_progress = 0;
7556 /* ARC's machince specific reorg function. */
7558 static void
7559 arc_reorg (void)
7561 rtx_insn *insn;
7562 rtx pattern;
7563 rtx pc_target;
7564 long offset;
7565 int changed;
7567 cfun->machine->arc_reorg_started = 1;
7568 arc_reorg_in_progress = 1;
7570 compute_bb_for_insn ();
7572 df_analyze ();
7574 /* Doloop optimization. */
7575 arc_reorg_loops ();
7577 workaround_arc_anomaly ();
7579 /* FIXME: should anticipate ccfsm action, generate special patterns for
7580 to-be-deleted branches that have no delay slot and have at least the
7581 length of the size increase forced on other insns that are conditionalized.
7582 This can also have an insn_list inside that enumerates insns which are
7583 not actually conditionalized because the destinations are dead in the
7584 not-execute case.
7585 Could also tag branches that we want to be unaligned if they get no delay
7586 slot, or even ones that we don't want to do delay slot sheduling for
7587 because we can unalign them.
7589 However, there are cases when conditional execution is only possible after
7590 delay slot scheduling:
7592 - If a delay slot is filled with a nocond/set insn from above, the previous
7593 basic block can become elegible for conditional execution.
7594 - If a delay slot is filled with a nocond insn from the fall-through path,
7595 the branch with that delay slot can become eligble for conditional
7596 execution (however, with the same sort of data flow analysis that dbr
7597 does, we could have figured out before that we don't need to
7598 conditionalize this insn.)
7599 - If a delay slot insn is filled with an insn from the target, the
7600 target label gets its uses decremented (even deleted if falling to zero),
7601 thus possibly creating more condexec opportunities there.
7602 Therefore, we should still be prepared to apply condexec optimization on
7603 non-prepared branches if the size increase of conditionalized insns is no
7604 more than the size saved from eliminating the branch. An invocation option
7605 could also be used to reserve a bit of extra size for condbranches so that
7606 this'll work more often (could also test in arc_reorg if the block is
7607 'close enough' to be eligible for condexec to make this likely, and
7608 estimate required size increase). */
7609 /* Generate BRcc insns, by combining cmp and Bcc insns wherever possible. */
7610 if (TARGET_NO_BRCC_SET)
7611 return;
7615 init_insn_lengths();
7616 changed = 0;
7618 if (optimize > 1 && !TARGET_NO_COND_EXEC)
7620 arc_ifcvt ();
7621 unsigned int flags = pass_data_arc_ifcvt.todo_flags_finish;
7622 df_finish_pass ((flags & TODO_df_verify) != 0);
7624 if (dump_file)
7626 fprintf (dump_file, ";; After if conversion:\n\n");
7627 print_rtl (dump_file, get_insns ());
7631 /* Call shorten_branches to calculate the insn lengths. */
7632 shorten_branches (get_insns());
7633 cfun->machine->ccfsm_current_insn = NULL_RTX;
7635 if (!INSN_ADDRESSES_SET_P())
7636 fatal_error (input_location, "Insn addresses not set after shorten_branches");
7638 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7640 rtx label;
7641 enum attr_type insn_type;
7643 /* If a non-jump insn (or a casesi jump table), continue. */
7644 if (GET_CODE (insn) != JUMP_INSN ||
7645 GET_CODE (PATTERN (insn)) == ADDR_VEC
7646 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
7647 continue;
7649 /* If we already have a brcc, note if it is suitable for brcc_s.
7650 Be a bit generous with the brcc_s range so that we can take
7651 advantage of any code shortening from delay slot scheduling. */
7652 if (recog_memoized (insn) == CODE_FOR_cbranchsi4_scratch)
7654 rtx pat = PATTERN (insn);
7655 rtx op = XEXP (SET_SRC (XVECEXP (pat, 0, 0)), 0);
7656 rtx *ccp = &XEXP (XVECEXP (pat, 0, 1), 0);
7658 offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
7659 if ((offset >= -140 && offset < 140)
7660 && rtx_equal_p (XEXP (op, 1), const0_rtx)
7661 && compact_register_operand (XEXP (op, 0), VOIDmode)
7662 && equality_comparison_operator (op, VOIDmode))
7663 PUT_MODE (*ccp, CC_Zmode);
7664 else if (GET_MODE (*ccp) == CC_Zmode)
7665 PUT_MODE (*ccp, CC_ZNmode);
7666 continue;
7668 if ((insn_type = get_attr_type (insn)) == TYPE_BRCC
7669 || insn_type == TYPE_BRCC_NO_DELAY_SLOT)
7670 continue;
7672 /* OK. so we have a jump insn. */
7673 /* We need to check that it is a bcc. */
7674 /* Bcc => set (pc) (if_then_else ) */
7675 pattern = PATTERN (insn);
7676 if (GET_CODE (pattern) != SET
7677 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
7678 || ANY_RETURN_P (XEXP (SET_SRC (pattern), 1)))
7679 continue;
7681 /* Now check if the jump is beyond the s9 range. */
7682 if (CROSSING_JUMP_P (insn))
7683 continue;
7684 offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
7686 if(offset > 253 || offset < -254)
7687 continue;
7689 pc_target = SET_SRC (pattern);
7691 /* Avoid FPU instructions. */
7692 if ((GET_MODE (XEXP (XEXP (pc_target, 0), 0)) == CC_FPUmode)
7693 || (GET_MODE (XEXP (XEXP (pc_target, 0), 0)) == CC_FPU_UNEQmode))
7694 continue;
7696 /* Now go back and search for the set cc insn. */
7698 label = XEXP (pc_target, 1);
7701 rtx pat;
7702 rtx_insn *scan, *link_insn = NULL;
7704 for (scan = PREV_INSN (insn);
7705 scan && GET_CODE (scan) != CODE_LABEL;
7706 scan = PREV_INSN (scan))
7708 if (! INSN_P (scan))
7709 continue;
7710 pat = PATTERN (scan);
7711 if (GET_CODE (pat) == SET
7712 && cc_register (SET_DEST (pat), VOIDmode))
7714 link_insn = scan;
7715 break;
7718 if (!link_insn)
7719 continue;
7720 else
7721 /* Check if this is a data dependency. */
7723 rtx op, cc_clob_rtx, op0, op1, brcc_insn, note;
7724 rtx cmp0, cmp1;
7726 /* Ok this is the set cc. copy args here. */
7727 op = XEXP (pc_target, 0);
7729 op0 = cmp0 = XEXP (SET_SRC (pat), 0);
7730 op1 = cmp1 = XEXP (SET_SRC (pat), 1);
7731 if (GET_CODE (op0) == ZERO_EXTRACT
7732 && XEXP (op0, 1) == const1_rtx
7733 && (GET_CODE (op) == EQ
7734 || GET_CODE (op) == NE))
7736 /* btst / b{eq,ne} -> bbit{0,1} */
7737 op0 = XEXP (cmp0, 0);
7738 op1 = XEXP (cmp0, 2);
7740 else if (!register_operand (op0, VOIDmode)
7741 || !general_operand (op1, VOIDmode))
7742 continue;
7743 /* Be careful not to break what cmpsfpx_raw is
7744 trying to create for checking equality of
7745 single-precision floats. */
7746 else if (TARGET_SPFP
7747 && GET_MODE (op0) == SFmode
7748 && GET_MODE (op1) == SFmode)
7749 continue;
7751 /* None of the two cmp operands should be set between the
7752 cmp and the branch. */
7753 if (reg_set_between_p (op0, link_insn, insn))
7754 continue;
7756 if (reg_set_between_p (op1, link_insn, insn))
7757 continue;
7759 /* Since the MODE check does not work, check that this is
7760 CC reg's last set location before insn, and also no
7761 instruction between the cmp and branch uses the
7762 condition codes. */
7763 if ((reg_set_between_p (SET_DEST (pat), link_insn, insn))
7764 || (reg_used_between_p (SET_DEST (pat), link_insn, insn)))
7765 continue;
7767 /* CC reg should be dead after insn. */
7768 if (!find_regno_note (insn, REG_DEAD, CC_REG))
7769 continue;
7771 op = gen_rtx_fmt_ee (GET_CODE (op),
7772 GET_MODE (op), cmp0, cmp1);
7773 /* If we create a LIMM where there was none before,
7774 we only benefit if we can avoid a scheduling bubble
7775 for the ARC600. Otherwise, we'd only forgo chances
7776 at short insn generation, and risk out-of-range
7777 branches. */
7778 if (!brcc_nolimm_operator (op, VOIDmode)
7779 && !long_immediate_operand (op1, VOIDmode)
7780 && (TARGET_ARC700
7781 || next_active_insn (link_insn) != insn))
7782 continue;
7784 /* Emit bbit / brcc (or brcc_s if possible).
7785 CC_Zmode indicates that brcc_s is possible. */
7787 if (op0 != cmp0)
7788 cc_clob_rtx = gen_rtx_REG (CC_ZNmode, CC_REG);
7789 else if ((offset >= -140 && offset < 140)
7790 && rtx_equal_p (op1, const0_rtx)
7791 && compact_register_operand (op0, VOIDmode)
7792 && (GET_CODE (op) == EQ
7793 || GET_CODE (op) == NE))
7794 cc_clob_rtx = gen_rtx_REG (CC_Zmode, CC_REG);
7795 else
7796 cc_clob_rtx = gen_rtx_REG (CCmode, CC_REG);
7798 brcc_insn
7799 = gen_rtx_IF_THEN_ELSE (VOIDmode, op, label, pc_rtx);
7800 brcc_insn = gen_rtx_SET (pc_rtx, brcc_insn);
7801 cc_clob_rtx = gen_rtx_CLOBBER (VOIDmode, cc_clob_rtx);
7802 brcc_insn
7803 = gen_rtx_PARALLEL
7804 (VOIDmode, gen_rtvec (2, brcc_insn, cc_clob_rtx));
7805 brcc_insn = emit_jump_insn_before (brcc_insn, insn);
7807 JUMP_LABEL (brcc_insn) = JUMP_LABEL (insn);
7808 note = find_reg_note (insn, REG_BR_PROB, 0);
7809 if (note)
7811 XEXP (note, 1) = REG_NOTES (brcc_insn);
7812 REG_NOTES (brcc_insn) = note;
7814 note = find_reg_note (link_insn, REG_DEAD, op0);
7815 if (note)
7817 remove_note (link_insn, note);
7818 XEXP (note, 1) = REG_NOTES (brcc_insn);
7819 REG_NOTES (brcc_insn) = note;
7821 note = find_reg_note (link_insn, REG_DEAD, op1);
7822 if (note)
7824 XEXP (note, 1) = REG_NOTES (brcc_insn);
7825 REG_NOTES (brcc_insn) = note;
7828 changed = 1;
7830 /* Delete the bcc insn. */
7831 set_insn_deleted (insn);
7833 /* Delete the cmp insn. */
7834 set_insn_deleted (link_insn);
7839 /* Clear out insn_addresses. */
7840 INSN_ADDRESSES_FREE ();
7842 } while (changed);
7844 if (INSN_ADDRESSES_SET_P())
7845 fatal_error (input_location, "insn addresses not freed");
7847 arc_reorg_in_progress = 0;
7850 /* Check if the operands are valid for BRcc.d generation
7851 Valid Brcc.d patterns are
7852 Brcc.d b, c, s9
7853 Brcc.d b, u6, s9
7855 For cc={GT, LE, GTU, LEU}, u6=63 can not be allowed,
7856 since they are encoded by the assembler as {GE, LT, HS, LS} 64, which
7857 does not have a delay slot
7859 Assumed precondition: Second operand is either a register or a u6 value. */
7861 bool
7862 valid_brcc_with_delay_p (rtx *operands)
7864 if (optimize_size && GET_MODE (operands[4]) == CC_Zmode)
7865 return false;
7866 return brcc_nolimm_operator (operands[0], VOIDmode);
7869 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
7870 access DECL using %gp_rel(...)($gp). */
7872 static bool
7873 arc_in_small_data_p (const_tree decl)
7875 HOST_WIDE_INT size;
7877 /* Only variables are going into small data area. */
7878 if (TREE_CODE (decl) != VAR_DECL)
7879 return false;
7881 if (TARGET_NO_SDATA_SET)
7882 return false;
7884 /* Disable sdata references to weak variables. */
7885 if (DECL_WEAK (decl))
7886 return false;
7888 /* Don't put constants into the small data section: we want them to
7889 be in ROM rather than RAM. */
7890 if (TREE_READONLY (decl))
7891 return false;
7893 /* To ensure -mvolatile-cache works ld.di does not have a
7894 gp-relative variant. */
7895 if (!TARGET_VOLATILE_CACHE_SET
7896 && TREE_THIS_VOLATILE (decl))
7897 return false;
7899 if (DECL_SECTION_NAME (decl) != 0)
7901 const char *name = DECL_SECTION_NAME (decl);
7902 if (strcmp (name, ".sdata") == 0
7903 || strcmp (name, ".sbss") == 0)
7904 return true;
7906 /* If it's not public, there's no need to put it in the small data
7907 section. */
7908 else if (TREE_PUBLIC (decl))
7910 size = int_size_in_bytes (TREE_TYPE (decl));
7911 return (size > 0 && size <= g_switch_value);
7913 return false;
7916 /* Return true if X is a small data address that can be rewritten
7917 as a gp+symref. */
7919 static bool
7920 arc_rewrite_small_data_p (const_rtx x)
7922 if (GET_CODE (x) == CONST)
7923 x = XEXP (x, 0);
7925 if (GET_CODE (x) == PLUS)
7927 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7928 x = XEXP (x, 0);
7931 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
7933 gcc_assert (SYMBOL_REF_TLS_MODEL (x) == 0);
7934 return true;
7936 return false;
7939 /* If possible, rewrite OP so that it refers to small data using
7940 explicit relocations. */
7942 static rtx
7943 arc_rewrite_small_data_1 (rtx op)
7945 rtx rgp = gen_rtx_REG (Pmode, SDATA_BASE_REGNUM);
7946 op = copy_insn (op);
7947 subrtx_ptr_iterator::array_type array;
7948 FOR_EACH_SUBRTX_PTR (iter, array, &op, ALL)
7950 rtx *loc = *iter;
7951 if (arc_rewrite_small_data_p (*loc))
7953 *loc = gen_rtx_PLUS (Pmode, rgp, *loc);
7954 iter.skip_subrtxes ();
7956 else if (GET_CODE (*loc) == PLUS
7957 && rtx_equal_p (XEXP (*loc, 0), rgp))
7958 iter.skip_subrtxes ();
7960 return op;
7964 arc_rewrite_small_data (rtx op)
7966 op = arc_rewrite_small_data_1 (op);
7968 /* Check if we fit small data constraints. */
7969 if (MEM_P (op)
7970 && !LEGITIMATE_SMALL_DATA_ADDRESS_P (XEXP (op, 0)))
7972 rtx addr = XEXP (op, 0);
7973 rtx tmp = gen_reg_rtx (Pmode);
7974 emit_move_insn (tmp, addr);
7975 op = replace_equiv_address_nv (op, tmp);
7977 return op;
7980 /* Return true if OP refers to small data symbols directly, not through
7981 a PLUS. */
7983 bool
7984 small_data_pattern (rtx op, machine_mode)
7986 if (GET_CODE (op) == SEQUENCE)
7987 return false;
7989 rtx rgp = gen_rtx_REG (Pmode, SDATA_BASE_REGNUM);
7990 subrtx_iterator::array_type array;
7991 FOR_EACH_SUBRTX (iter, array, op, ALL)
7993 const_rtx x = *iter;
7994 if (GET_CODE (x) == PLUS
7995 && rtx_equal_p (XEXP (x, 0), rgp))
7996 iter.skip_subrtxes ();
7997 else if (arc_rewrite_small_data_p (x))
7998 return true;
8000 return false;
8003 /* Return true if OP is an acceptable memory operand for ARCompact
8004 16-bit gp-relative load instructions.
8005 op shd look like : [r26, symref@sda]
8006 i.e. (mem (plus (reg 26) (symref with smalldata flag set))
8008 /* volatile cache option still to be handled. */
8010 bool
8011 compact_sda_memory_operand (rtx op, machine_mode mode, bool short_p)
8013 rtx addr;
8014 int size;
8015 tree decl = NULL_TREE;
8016 int align = 0;
8017 int mask = 0;
8019 /* Eliminate non-memory operations. */
8020 if (GET_CODE (op) != MEM)
8021 return false;
8023 if (mode == VOIDmode)
8024 mode = GET_MODE (op);
8026 size = GET_MODE_SIZE (mode);
8028 /* dword operations really put out 2 instructions, so eliminate them. */
8029 if (size > UNITS_PER_WORD)
8030 return false;
8032 /* Decode the address now. */
8033 addr = XEXP (op, 0);
8035 if (!LEGITIMATE_SMALL_DATA_ADDRESS_P (addr))
8036 return false;
8038 if (!short_p || size == 1)
8039 return true;
8041 /* Now check for the alignment, the short loads using gp require the
8042 addresses to be aligned. */
8043 if (GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
8044 decl = SYMBOL_REF_DECL (XEXP (addr, 1));
8045 else if (GET_CODE (XEXP (XEXP (XEXP (addr, 1), 0), 0)) == SYMBOL_REF)
8046 decl = SYMBOL_REF_DECL (XEXP (XEXP (XEXP (addr, 1), 0), 0));
8047 if (decl)
8048 align = DECL_ALIGN (decl);
8049 align = align / BITS_PER_UNIT;
8051 switch (mode)
8053 case E_HImode:
8054 mask = 1;
8055 break;
8056 default:
8057 mask = 3;
8058 break;
8061 if (align && ((align & mask) == 0))
8062 return true;
8063 return false;
8066 /* Implement ASM_OUTPUT_ALIGNED_DECL_LOCAL. */
8068 void
8069 arc_asm_output_aligned_decl_local (FILE * stream, tree decl, const char * name,
8070 unsigned HOST_WIDE_INT size,
8071 unsigned HOST_WIDE_INT align,
8072 unsigned HOST_WIDE_INT globalize_p)
8074 int in_small_data = arc_in_small_data_p (decl);
8076 if (in_small_data)
8077 switch_to_section (get_named_section (NULL, ".sbss", 0));
8078 /* named_section (0,".sbss",0); */
8079 else
8080 switch_to_section (bss_section);
8082 if (globalize_p)
8083 (*targetm.asm_out.globalize_label) (stream, name);
8085 ASM_OUTPUT_ALIGN (stream, floor_log2 ((align) / BITS_PER_UNIT));
8086 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8087 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8088 ASM_OUTPUT_LABEL (stream, name);
8090 if (size != 0)
8091 ASM_OUTPUT_SKIP (stream, size);
8094 static bool
8095 arc_preserve_reload_p (rtx in)
8097 return (GET_CODE (in) == PLUS
8098 && RTX_OK_FOR_BASE_P (XEXP (in, 0), true)
8099 && CONST_INT_P (XEXP (in, 1))
8100 && !((INTVAL (XEXP (in, 1)) & 511)));
8104 arc_register_move_cost (machine_mode,
8105 enum reg_class from_class, enum reg_class to_class)
8107 /* The ARC600 has no bypass for extension registers, hence a nop might be
8108 needed to be inserted after a write so that reads are safe. */
8109 if (TARGET_ARC600)
8111 if (to_class == MPY_WRITABLE_CORE_REGS)
8112 return 3;
8113 /* Instructions modifying LP_COUNT need 4 additional cycles before
8114 the register will actually contain the value. */
8115 else if (to_class == LPCOUNT_REG)
8116 return 6;
8117 else if (to_class == WRITABLE_CORE_REGS)
8118 return 6;
8121 /* Using lp_count as scratch reg is a VERY bad idea. */
8122 if (from_class == LPCOUNT_REG)
8123 return 1000;
8124 if (to_class == LPCOUNT_REG)
8125 return 6;
8127 /* Force an attempt to 'mov Dy,Dx' to spill. */
8128 if ((TARGET_ARC700 || TARGET_EM) && TARGET_DPFP
8129 && from_class == DOUBLE_REGS && to_class == DOUBLE_REGS)
8130 return 100;
8132 return 2;
8135 /* Emit code for an addsi3 instruction with OPERANDS.
8136 COND_P indicates if this will use conditional execution.
8137 Return the length of the instruction.
8138 If OUTPUT_P is false, don't actually output the instruction, just return
8139 its length. */
8141 arc_output_addsi (rtx *operands, bool cond_p, bool output_p)
8143 char format[35];
8145 int match = operands_match_p (operands[0], operands[1]);
8146 int match2 = operands_match_p (operands[0], operands[2]);
8147 int intval = (REG_P (operands[2]) ? 1
8148 : CONST_INT_P (operands[2]) ? INTVAL (operands[2]) : 0xbadc057);
8149 int neg_intval = -intval;
8150 int short_0 = satisfies_constraint_Rcq (operands[0]);
8151 int short_p = (!cond_p && short_0 && satisfies_constraint_Rcq (operands[1]));
8152 int ret = 0;
8154 #define REG_H_P(OP) (REG_P (OP) && ((TARGET_V2 && REGNO (OP) <= 31 \
8155 && REGNO (OP) != 30) \
8156 || !TARGET_V2))
8158 #define ADDSI_OUTPUT1(FORMAT) do {\
8159 if (output_p) \
8160 output_asm_insn (FORMAT, operands);\
8161 return ret; \
8162 } while (0)
8163 #define ADDSI_OUTPUT(LIST) do {\
8164 if (output_p) \
8165 sprintf LIST;\
8166 ADDSI_OUTPUT1 (format);\
8167 return ret; \
8168 } while (0)
8170 /* First try to emit a 16 bit insn. */
8171 ret = 2;
8172 if (!cond_p
8173 /* If we are actually about to output this insn, don't try a 16 bit
8174 variant if we already decided that we don't want that
8175 (I.e. we upsized this insn to align some following insn.)
8176 E.g. add_s r0,sp,70 is 16 bit, but add r0,sp,70 requires a LIMM -
8177 but add1 r0,sp,35 doesn't. */
8178 && (!output_p || (get_attr_length (current_output_insn) & 2)))
8180 /* Generate add_s a,b,c; add_s b,b,u7; add_s c,b,u3; add_s b,b,h
8181 patterns. */
8182 if (short_p
8183 && ((REG_H_P (operands[2])
8184 && (match || satisfies_constraint_Rcq (operands[2])))
8185 || (CONST_INT_P (operands[2])
8186 && ((unsigned) intval <= (match ? 127 : 7)))))
8187 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;1");
8189 /* Generate add_s b,b,h patterns. */
8190 if (short_0 && match2 && REG_H_P (operands[1]))
8191 ADDSI_OUTPUT1 ("add%? %0,%2,%1 ;2");
8193 /* Generate add_s b,sp,u7; add_s sp,sp,u7 patterns. */
8194 if ((short_0 || REGNO (operands[0]) == STACK_POINTER_REGNUM)
8195 && REGNO (operands[1]) == STACK_POINTER_REGNUM && !(intval & ~124))
8196 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;3");
8198 if ((short_p && (unsigned) neg_intval <= (match ? 31 : 7))
8199 || (REGNO (operands[0]) == STACK_POINTER_REGNUM
8200 && match && !(neg_intval & ~124)))
8201 ADDSI_OUTPUT1 ("sub%? %0,%1,%n2 ;4");
8203 /* Generate add_s h,h,s3 patterns. */
8204 if (REG_H_P (operands[0]) && match && TARGET_V2
8205 && CONST_INT_P (operands[2]) && ((intval>= -1) && (intval <= 6)))
8206 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;5");
8208 /* Generate add_s r0,b,u6; add_s r1,b,u6 patterns. */
8209 if (TARGET_CODE_DENSITY && REG_P (operands[0]) && REG_P (operands[1])
8210 && ((REGNO (operands[0]) == 0) || (REGNO (operands[0]) == 1))
8211 && satisfies_constraint_Rcq (operands[1])
8212 && satisfies_constraint_L (operands[2]))
8213 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;6");
8216 /* Now try to emit a 32 bit insn without long immediate. */
8217 ret = 4;
8218 if (!match && match2 && REG_P (operands[1]))
8219 ADDSI_OUTPUT1 ("add%? %0,%2,%1");
8220 if (match || !cond_p)
8222 int limit = (match && !cond_p) ? 0x7ff : 0x3f;
8223 int range_factor = neg_intval & intval;
8224 int shift;
8226 if (intval == (HOST_WIDE_INT) (HOST_WIDE_INT_M1U << 31))
8227 ADDSI_OUTPUT1 ("bxor%? %0,%1,31");
8229 /* If we can use a straight add / sub instead of a {add,sub}[123] of
8230 same size, do, so - the insn latency is lower. */
8231 /* -0x800 is a 12-bit constant for add /add3 / sub / sub3, but
8232 0x800 is not. */
8233 if ((intval >= 0 && intval <= limit)
8234 || (intval == -0x800 && limit == 0x7ff))
8235 ADDSI_OUTPUT1 ("add%? %0,%1,%2");
8236 else if ((intval < 0 && neg_intval <= limit)
8237 || (intval == 0x800 && limit == 0x7ff))
8238 ADDSI_OUTPUT1 ("sub%? %0,%1,%n2");
8239 shift = range_factor >= 8 ? 3 : (range_factor >> 1);
8240 gcc_assert (shift == 0 || shift == 1 || shift == 2 || shift == 3);
8241 gcc_assert ((((1 << shift) - 1) & intval) == 0);
8242 if (((intval < 0 && intval != -0x4000)
8243 /* sub[123] is slower than add_s / sub, only use it if it
8244 avoids a long immediate. */
8245 && neg_intval <= limit << shift)
8246 || (intval == 0x4000 && limit == 0x7ff))
8247 ADDSI_OUTPUT ((format, "sub%d%%? %%0,%%1,%d",
8248 shift, neg_intval >> shift));
8249 else if ((intval >= 0 && intval <= limit << shift)
8250 || (intval == -0x4000 && limit == 0x7ff))
8251 ADDSI_OUTPUT ((format, "add%d%%? %%0,%%1,%d", shift, intval >> shift));
8253 /* Try to emit a 16 bit opcode with long immediate. */
8254 ret = 6;
8255 if (short_p && match)
8256 ADDSI_OUTPUT1 ("add%? %0,%1,%S2");
8258 /* We have to use a 32 bit opcode, and with a long immediate. */
8259 ret = 8;
8260 ADDSI_OUTPUT1 (intval < 0 ? "sub%? %0,%1,%n2" : "add%? %0,%1,%S2");
8263 /* Emit code for an commutative_cond_exec instruction with OPERANDS.
8264 Return the length of the instruction.
8265 If OUTPUT_P is false, don't actually output the instruction, just return
8266 its length. */
8268 arc_output_commutative_cond_exec (rtx *operands, bool output_p)
8270 enum rtx_code commutative_op = GET_CODE (operands[3]);
8271 const char *pat = NULL;
8273 /* Canonical rtl should not have a constant in the first operand position. */
8274 gcc_assert (!CONSTANT_P (operands[1]));
8276 switch (commutative_op)
8278 case AND:
8279 if (satisfies_constraint_C1p (operands[2]))
8280 pat = "bmsk%? %0,%1,%Z2";
8281 else if (satisfies_constraint_C2p (operands[2]))
8283 operands[2] = GEN_INT ((~INTVAL (operands[2])));
8284 pat = "bmskn%? %0,%1,%Z2";
8286 else if (satisfies_constraint_Ccp (operands[2]))
8287 pat = "bclr%? %0,%1,%M2";
8288 else if (satisfies_constraint_CnL (operands[2]))
8289 pat = "bic%? %0,%1,%n2-1";
8290 break;
8291 case IOR:
8292 if (satisfies_constraint_C0p (operands[2]))
8293 pat = "bset%? %0,%1,%z2";
8294 break;
8295 case XOR:
8296 if (satisfies_constraint_C0p (operands[2]))
8297 pat = "bxor%? %0,%1,%z2";
8298 break;
8299 case PLUS:
8300 return arc_output_addsi (operands, true, output_p);
8301 default: break;
8303 if (output_p)
8304 output_asm_insn (pat ? pat : "%O3.%d5 %0,%1,%2", operands);
8305 if (pat || REG_P (operands[2]) || satisfies_constraint_L (operands[2]))
8306 return 4;
8307 return 8;
8310 /* Helper function of arc_expand_movmem. ADDR points to a chunk of memory.
8311 Emit code and return an potentially modified address such that offsets
8312 up to SIZE are can be added to yield a legitimate address.
8313 if REUSE is set, ADDR is a register that may be modified. */
8315 static rtx
8316 force_offsettable (rtx addr, HOST_WIDE_INT size, bool reuse)
8318 rtx base = addr;
8319 rtx offs = const0_rtx;
8321 if (GET_CODE (base) == PLUS)
8323 offs = XEXP (base, 1);
8324 base = XEXP (base, 0);
8326 if (!REG_P (base)
8327 || (REGNO (base) != STACK_POINTER_REGNUM
8328 && REGNO_PTR_FRAME_P (REGNO (base)))
8329 || !CONST_INT_P (offs) || !SMALL_INT (INTVAL (offs))
8330 || !SMALL_INT (INTVAL (offs) + size))
8332 if (reuse)
8333 emit_insn (gen_add2_insn (addr, offs));
8334 else
8335 addr = copy_to_mode_reg (Pmode, addr);
8337 return addr;
8340 /* Like move_by_pieces, but take account of load latency, and actual
8341 offset ranges. Return true on success. */
8343 bool
8344 arc_expand_movmem (rtx *operands)
8346 rtx dst = operands[0];
8347 rtx src = operands[1];
8348 rtx dst_addr, src_addr;
8349 HOST_WIDE_INT size;
8350 int align = INTVAL (operands[3]);
8351 unsigned n_pieces;
8352 int piece = align;
8353 rtx store[2];
8354 rtx tmpx[2];
8355 int i;
8357 if (!CONST_INT_P (operands[2]))
8358 return false;
8359 size = INTVAL (operands[2]);
8360 /* move_by_pieces_ninsns is static, so we can't use it. */
8361 if (align >= 4)
8363 if (TARGET_LL64)
8364 n_pieces = (size + 4) / 8U + ((size >> 1) & 1) + (size & 1);
8365 else
8366 n_pieces = (size + 2) / 4U + (size & 1);
8368 else if (align == 2)
8369 n_pieces = (size + 1) / 2U;
8370 else
8371 n_pieces = size;
8372 if (n_pieces >= (unsigned int) (optimize_size ? 3 : 15))
8373 return false;
8374 /* Force 32 bit aligned and larger datum to use 64 bit transfers, if
8375 possible. */
8376 if (TARGET_LL64 && (piece >= 4) && (size >= 8))
8377 piece = 8;
8378 else if (piece > 4)
8379 piece = 4;
8380 dst_addr = force_offsettable (XEXP (operands[0], 0), size, 0);
8381 src_addr = force_offsettable (XEXP (operands[1], 0), size, 0);
8382 store[0] = store[1] = NULL_RTX;
8383 tmpx[0] = tmpx[1] = NULL_RTX;
8384 for (i = 0; size > 0; i ^= 1, size -= piece)
8386 rtx tmp;
8387 machine_mode mode;
8389 while (piece > size)
8390 piece >>= 1;
8391 mode = smallest_int_mode_for_size (piece * BITS_PER_UNIT);
8392 /* If we don't re-use temporaries, the scheduler gets carried away,
8393 and the register pressure gets unnecessarily high. */
8394 if (0 && tmpx[i] && GET_MODE (tmpx[i]) == mode)
8395 tmp = tmpx[i];
8396 else
8397 tmpx[i] = tmp = gen_reg_rtx (mode);
8398 dst_addr = force_offsettable (dst_addr, piece, 1);
8399 src_addr = force_offsettable (src_addr, piece, 1);
8400 if (store[i])
8401 emit_insn (store[i]);
8402 emit_move_insn (tmp, change_address (src, mode, src_addr));
8403 store[i] = gen_move_insn (change_address (dst, mode, dst_addr), tmp);
8404 dst_addr = plus_constant (Pmode, dst_addr, piece);
8405 src_addr = plus_constant (Pmode, src_addr, piece);
8407 if (store[i])
8408 emit_insn (store[i]);
8409 if (store[i^1])
8410 emit_insn (store[i^1]);
8411 return true;
8414 /* Prepare operands for move in MODE. Return true iff the move has
8415 been emitted. */
8417 bool
8418 prepare_move_operands (rtx *operands, machine_mode mode)
8420 /* We used to do this only for MODE_INT Modes, but addresses to floating
8421 point variables may well be in the small data section. */
8422 if (!TARGET_NO_SDATA_SET && small_data_pattern (operands[0], Pmode))
8423 operands[0] = arc_rewrite_small_data (operands[0]);
8425 if (mode == SImode && SYMBOLIC_CONST (operands[1]))
8427 prepare_pic_move (operands, SImode);
8429 /* Disable any REG_EQUALs associated with the symref
8430 otherwise the optimization pass undoes the work done
8431 here and references the variable directly. */
8434 if (GET_CODE (operands[0]) != MEM
8435 && !TARGET_NO_SDATA_SET
8436 && small_data_pattern (operands[1], Pmode))
8438 /* This is to take care of address calculations involving sdata
8439 variables. */
8440 operands[1] = arc_rewrite_small_data (operands[1]);
8442 emit_insn (gen_rtx_SET (operands[0],operands[1]));
8443 /* ??? This note is useless, since it only restates the set itself.
8444 We should rather use the original SYMBOL_REF. However, there is
8445 the problem that we are lying to the compiler about these
8446 SYMBOL_REFs to start with. symbol@sda should be encoded specially
8447 so that we can tell it apart from an actual symbol. */
8448 set_unique_reg_note (get_last_insn (), REG_EQUAL, operands[1]);
8450 /* Take care of the REG_EQUAL note that will be attached to mark the
8451 output reg equal to the initial symbol_ref after this code is
8452 executed. */
8453 emit_move_insn (operands[0], operands[0]);
8454 return true;
8457 if (MEM_P (operands[0])
8458 && !(reload_in_progress || reload_completed))
8460 operands[1] = force_reg (mode, operands[1]);
8461 if (!move_dest_operand (operands[0], mode))
8463 rtx addr = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
8464 /* This is like change_address_1 (operands[0], mode, 0, 1) ,
8465 except that we can't use that function because it is static. */
8466 rtx pat = change_address (operands[0], mode, addr);
8467 MEM_COPY_ATTRIBUTES (pat, operands[0]);
8468 operands[0] = pat;
8470 if (!cse_not_expected)
8472 rtx pat = XEXP (operands[0], 0);
8474 pat = arc_legitimize_address_0 (pat, pat, mode);
8475 if (pat)
8477 pat = change_address (operands[0], mode, pat);
8478 MEM_COPY_ATTRIBUTES (pat, operands[0]);
8479 operands[0] = pat;
8484 if (MEM_P (operands[1]) && !cse_not_expected)
8486 rtx pat = XEXP (operands[1], 0);
8488 pat = arc_legitimize_address_0 (pat, pat, mode);
8489 if (pat)
8491 pat = change_address (operands[1], mode, pat);
8492 MEM_COPY_ATTRIBUTES (pat, operands[1]);
8493 operands[1] = pat;
8497 return false;
8500 /* Prepare OPERANDS for an extension using CODE to OMODE.
8501 Return true iff the move has been emitted. */
8503 bool
8504 prepare_extend_operands (rtx *operands, enum rtx_code code,
8505 machine_mode omode)
8507 if (!TARGET_NO_SDATA_SET && small_data_pattern (operands[1], Pmode))
8509 /* This is to take care of address calculations involving sdata
8510 variables. */
8511 operands[1]
8512 = gen_rtx_fmt_e (code, omode, arc_rewrite_small_data (operands[1]));
8513 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8514 set_unique_reg_note (get_last_insn (), REG_EQUAL, operands[1]);
8516 /* Take care of the REG_EQUAL note that will be attached to mark the
8517 output reg equal to the initial extension after this code is
8518 executed. */
8519 emit_move_insn (operands[0], operands[0]);
8520 return true;
8522 return false;
8525 /* Output a library call to a function called FNAME that has been arranged
8526 to be local to any dso. */
8528 const char *
8529 arc_output_libcall (const char *fname)
8531 unsigned len = strlen (fname);
8532 static char buf[64];
8534 gcc_assert (len < sizeof buf - 35);
8535 if (TARGET_LONG_CALLS_SET
8536 || (TARGET_MEDIUM_CALLS && arc_ccfsm_cond_exec_p ()))
8538 if (flag_pic)
8539 sprintf (buf, "add r12,pcl,@%s@pcl\n\tjl%%!%%* [r12]", fname);
8540 else
8541 sprintf (buf, "jl%%! @%s", fname);
8543 else
8544 sprintf (buf, "bl%%!%%* @%s", fname);
8545 return buf;
8548 /* Return the SImode highpart of the DImode value IN. */
8551 disi_highpart (rtx in)
8553 return simplify_gen_subreg (SImode, in, DImode, TARGET_BIG_ENDIAN ? 0 : 4);
8556 /* Return length adjustment for INSN.
8557 For ARC600:
8558 A write to a core reg greater or equal to 32 must not be immediately
8559 followed by a use. Anticipate the length requirement to insert a nop
8560 between PRED and SUCC to prevent a hazard. */
8562 static int
8563 arc600_corereg_hazard (rtx_insn *pred, rtx_insn *succ)
8565 if (!TARGET_ARC600)
8566 return 0;
8567 if (GET_CODE (PATTERN (pred)) == SEQUENCE)
8568 pred = as_a <rtx_sequence *> (PATTERN (pred))->insn (1);
8569 if (GET_CODE (PATTERN (succ)) == SEQUENCE)
8570 succ = as_a <rtx_sequence *> (PATTERN (succ))->insn (0);
8571 if (recog_memoized (pred) == CODE_FOR_mulsi_600
8572 || recog_memoized (pred) == CODE_FOR_umul_600
8573 || recog_memoized (pred) == CODE_FOR_mac_600
8574 || recog_memoized (pred) == CODE_FOR_mul64_600
8575 || recog_memoized (pred) == CODE_FOR_mac64_600
8576 || recog_memoized (pred) == CODE_FOR_umul64_600
8577 || recog_memoized (pred) == CODE_FOR_umac64_600)
8578 return 0;
8579 subrtx_iterator::array_type array;
8580 FOR_EACH_SUBRTX (iter, array, PATTERN (pred), NONCONST)
8582 const_rtx x = *iter;
8583 switch (GET_CODE (x))
8585 case SET: case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
8586 break;
8587 default:
8588 /* This is also fine for PRE/POST_MODIFY, because they
8589 contain a SET. */
8590 continue;
8592 rtx dest = XEXP (x, 0);
8593 /* Check if this sets a an extension register. N.B. we use 61 for the
8594 condition codes, which is definitely not an extension register. */
8595 if (REG_P (dest) && REGNO (dest) >= 32 && REGNO (dest) < 61
8596 /* Check if the same register is used by the PAT. */
8597 && (refers_to_regno_p
8598 (REGNO (dest),
8599 REGNO (dest) + (GET_MODE_SIZE (GET_MODE (dest)) + 3) / 4U,
8600 PATTERN (succ), 0)))
8601 return 4;
8603 return 0;
8606 /* Given a rtx, check if it is an assembly instruction or not. */
8608 static int
8609 arc_asm_insn_p (rtx x)
8611 int i, j;
8613 if (x == 0)
8614 return 0;
8616 switch (GET_CODE (x))
8618 case ASM_OPERANDS:
8619 case ASM_INPUT:
8620 return 1;
8622 case SET:
8623 return arc_asm_insn_p (SET_SRC (x));
8625 case PARALLEL:
8626 j = 0;
8627 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8628 j += arc_asm_insn_p (XVECEXP (x, 0, i));
8629 if ( j > 0)
8630 return 1;
8631 break;
8633 default:
8634 break;
8637 return 0;
8640 /* For ARC600:
8641 A write to a core reg greater or equal to 32 must not be immediately
8642 followed by a use. Anticipate the length requirement to insert a nop
8643 between PRED and SUCC to prevent a hazard. */
8646 arc_hazard (rtx_insn *pred, rtx_insn *succ)
8648 if (!pred || !INSN_P (pred) || !succ || !INSN_P (succ))
8649 return 0;
8651 if (TARGET_ARC600)
8652 return arc600_corereg_hazard (pred, succ);
8654 return 0;
8657 /* Return length adjustment for INSN. */
8660 arc_adjust_insn_length (rtx_insn *insn, int len, bool)
8662 if (!INSN_P (insn))
8663 return len;
8664 /* We already handle sequences by ignoring the delay sequence flag. */
8665 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8666 return len;
8668 /* Check for return with but one preceding insn since function
8669 start / call. */
8670 if (TARGET_PAD_RETURN
8671 && JUMP_P (insn)
8672 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8673 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8674 && get_attr_type (insn) == TYPE_RETURN)
8676 rtx_insn *prev = prev_active_insn (insn);
8678 if (!prev || !(prev = prev_active_insn (prev))
8679 || ((NONJUMP_INSN_P (prev)
8680 && GET_CODE (PATTERN (prev)) == SEQUENCE)
8681 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
8682 NON_SIBCALL)
8683 : CALL_ATTR (prev, NON_SIBCALL)))
8684 return len + 4;
8686 if (TARGET_ARC600)
8688 rtx_insn *succ = next_real_insn (insn);
8690 /* One the ARC600, a write to an extension register must be separated
8691 from a read. */
8692 if (succ && INSN_P (succ))
8693 len += arc600_corereg_hazard (insn, succ);
8696 /* Restore extracted operands - otherwise splitters like the addsi3_mixed one
8697 can go awry. */
8698 extract_constrain_insn_cached (insn);
8700 return len;
8703 /* Values for length_sensitive. */
8704 enum
8706 ARC_LS_NONE,// Jcc
8707 ARC_LS_25, // 25 bit offset, B
8708 ARC_LS_21, // 21 bit offset, Bcc
8709 ARC_LS_U13,// 13 bit unsigned offset, LP
8710 ARC_LS_10, // 10 bit offset, B_s, Beq_s, Bne_s
8711 ARC_LS_9, // 9 bit offset, BRcc
8712 ARC_LS_8, // 8 bit offset, BRcc_s
8713 ARC_LS_U7, // 7 bit unsigned offset, LPcc
8714 ARC_LS_7 // 7 bit offset, Bcc_s
8717 /* While the infrastructure patch is waiting for review, duplicate the
8718 struct definitions, to allow this file to compile. */
8719 #if 1
8720 typedef struct
8722 unsigned align_set;
8723 /* Cost as a branch / call target or call return address. */
8724 int target_cost;
8725 int fallthrough_cost;
8726 int branch_cost;
8727 int length;
8728 /* 0 for not length sensitive, 1 for largest offset range,
8729 * 2 for next smaller etc. */
8730 unsigned length_sensitive : 8;
8731 bool enabled;
8732 } insn_length_variant_t;
8734 typedef struct insn_length_parameters_s
8736 int align_unit_log;
8737 int align_base_log;
8738 int max_variants;
8739 int (*get_variants) (rtx_insn *, int, bool, bool, insn_length_variant_t *);
8740 } insn_length_parameters_t;
8742 static void
8743 arc_insn_length_parameters (insn_length_parameters_t *ilp) ATTRIBUTE_UNUSED;
8744 #endif
8746 static int
8747 arc_get_insn_variants (rtx_insn *insn, int len, bool, bool target_p,
8748 insn_length_variant_t *ilv)
8750 if (!NONDEBUG_INSN_P (insn))
8751 return 0;
8752 enum attr_type type;
8753 /* shorten_branches doesn't take optimize_size into account yet for the
8754 get_variants mechanism, so turn this off for now. */
8755 if (optimize_size)
8756 return 0;
8757 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
8759 /* The interaction of a short delay slot insn with a short branch is
8760 too weird for shorten_branches to piece together, so describe the
8761 entire SEQUENCE. */
8762 rtx_insn *inner;
8763 if (TARGET_UPSIZE_DBR
8764 && get_attr_length (pat->insn (1)) <= 2
8765 && (((type = get_attr_type (inner = pat->insn (0)))
8766 == TYPE_UNCOND_BRANCH)
8767 || type == TYPE_BRANCH)
8768 && get_attr_delay_slot_filled (inner) == DELAY_SLOT_FILLED_YES)
8770 int n_variants
8771 = arc_get_insn_variants (inner, get_attr_length (inner), true,
8772 target_p, ilv+1);
8773 /* The short variant gets split into a higher-cost aligned
8774 and a lower cost unaligned variant. */
8775 gcc_assert (n_variants);
8776 gcc_assert (ilv[1].length_sensitive == ARC_LS_7
8777 || ilv[1].length_sensitive == ARC_LS_10);
8778 gcc_assert (ilv[1].align_set == 3);
8779 ilv[0] = ilv[1];
8780 ilv[0].align_set = 1;
8781 ilv[0].branch_cost += 1;
8782 ilv[1].align_set = 2;
8783 n_variants++;
8784 for (int i = 0; i < n_variants; i++)
8785 ilv[i].length += 2;
8786 /* In case an instruction with aligned size is wanted, and
8787 the short variants are unavailable / too expensive, add
8788 versions of long branch + long delay slot. */
8789 for (int i = 2, end = n_variants; i < end; i++, n_variants++)
8791 ilv[n_variants] = ilv[i];
8792 ilv[n_variants].length += 2;
8794 return n_variants;
8796 return 0;
8798 insn_length_variant_t *first_ilv = ilv;
8799 type = get_attr_type (insn);
8800 bool delay_filled
8801 = (get_attr_delay_slot_filled (insn) == DELAY_SLOT_FILLED_YES);
8802 int branch_align_cost = delay_filled ? 0 : 1;
8803 int branch_unalign_cost = delay_filled ? 0 : TARGET_UNALIGN_BRANCH ? 0 : 1;
8804 /* If the previous instruction is an sfunc call, this insn is always
8805 a target, even though the middle-end is unaware of this. */
8806 bool force_target = false;
8807 rtx_insn *prev = prev_active_insn (insn);
8808 if (prev && arc_next_active_insn (prev, 0) == insn
8809 && ((NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
8810 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
8811 NON_SIBCALL)
8812 : (CALL_ATTR (prev, NON_SIBCALL)
8813 && NEXT_INSN (PREV_INSN (prev)) == prev)))
8814 force_target = true;
8816 switch (type)
8818 case TYPE_BRCC:
8819 /* Short BRCC only comes in no-delay-slot version, and without limm */
8820 if (!delay_filled)
8822 ilv->align_set = 3;
8823 ilv->length = 2;
8824 ilv->branch_cost = 1;
8825 ilv->enabled = (len == 2);
8826 ilv->length_sensitive = ARC_LS_8;
8827 ilv++;
8829 /* Fall through. */
8830 case TYPE_BRCC_NO_DELAY_SLOT:
8831 /* doloop_fallback* patterns are TYPE_BRCC_NO_DELAY_SLOT for
8832 (delay slot) scheduling purposes, but they are longer. */
8833 if (GET_CODE (PATTERN (insn)) == PARALLEL
8834 && GET_CODE (XVECEXP (PATTERN (insn), 0, 1)) == SET)
8835 return 0;
8836 /* Standard BRCC: 4 bytes, or 8 bytes with limm. */
8837 ilv->length = ((type == TYPE_BRCC) ? 4 : 8);
8838 ilv->align_set = 3;
8839 ilv->branch_cost = branch_align_cost;
8840 ilv->enabled = (len <= ilv->length);
8841 ilv->length_sensitive = ARC_LS_9;
8842 if ((target_p || force_target)
8843 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8845 ilv[1] = *ilv;
8846 ilv->align_set = 1;
8847 ilv++;
8848 ilv->align_set = 2;
8849 ilv->target_cost = 1;
8850 ilv->branch_cost = branch_unalign_cost;
8852 ilv++;
8854 rtx op, op0;
8855 op = XEXP (SET_SRC (XVECEXP (PATTERN (insn), 0, 0)), 0);
8856 op0 = XEXP (op, 0);
8858 if (GET_CODE (op0) == ZERO_EXTRACT
8859 && satisfies_constraint_L (XEXP (op0, 2)))
8860 op0 = XEXP (op0, 0);
8861 if (satisfies_constraint_Rcq (op0))
8863 ilv->length = ((type == TYPE_BRCC) ? 6 : 10);
8864 ilv->align_set = 3;
8865 ilv->branch_cost = 1 + branch_align_cost;
8866 ilv->fallthrough_cost = 1;
8867 ilv->enabled = true;
8868 ilv->length_sensitive = ARC_LS_21;
8869 if (!delay_filled && TARGET_UNALIGN_BRANCH)
8871 ilv[1] = *ilv;
8872 ilv->align_set = 1;
8873 ilv++;
8874 ilv->align_set = 2;
8875 ilv->branch_cost = 1 + branch_unalign_cost;
8877 ilv++;
8879 ilv->length = ((type == TYPE_BRCC) ? 8 : 12);
8880 ilv->align_set = 3;
8881 ilv->branch_cost = 1 + branch_align_cost;
8882 ilv->fallthrough_cost = 1;
8883 ilv->enabled = true;
8884 ilv->length_sensitive = ARC_LS_21;
8885 if ((target_p || force_target)
8886 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8888 ilv[1] = *ilv;
8889 ilv->align_set = 1;
8890 ilv++;
8891 ilv->align_set = 2;
8892 ilv->target_cost = 1;
8893 ilv->branch_cost = 1 + branch_unalign_cost;
8895 ilv++;
8896 break;
8898 case TYPE_SFUNC:
8899 ilv->length = 12;
8900 goto do_call;
8901 case TYPE_CALL_NO_DELAY_SLOT:
8902 ilv->length = 8;
8903 goto do_call;
8904 case TYPE_CALL:
8905 ilv->length = 4;
8906 ilv->length_sensitive
8907 = GET_CODE (PATTERN (insn)) == COND_EXEC ? ARC_LS_21 : ARC_LS_25;
8908 do_call:
8909 ilv->align_set = 3;
8910 ilv->fallthrough_cost = branch_align_cost;
8911 ilv->enabled = true;
8912 if ((target_p || force_target)
8913 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8915 ilv[1] = *ilv;
8916 ilv->align_set = 1;
8917 ilv++;
8918 ilv->align_set = 2;
8919 ilv->target_cost = 1;
8920 ilv->fallthrough_cost = branch_unalign_cost;
8922 ilv++;
8923 break;
8924 case TYPE_UNCOND_BRANCH:
8925 /* Strictly speaking, this should be ARC_LS_10 for equality comparisons,
8926 but that makes no difference at the moment. */
8927 ilv->length_sensitive = ARC_LS_7;
8928 ilv[1].length_sensitive = ARC_LS_25;
8929 goto do_branch;
8930 case TYPE_BRANCH:
8931 ilv->length_sensitive = ARC_LS_10;
8932 ilv[1].length_sensitive = ARC_LS_21;
8933 do_branch:
8934 ilv->align_set = 3;
8935 ilv->length = 2;
8936 ilv->branch_cost = branch_align_cost;
8937 ilv->enabled = (len == ilv->length);
8938 ilv++;
8939 ilv->length = 4;
8940 ilv->align_set = 3;
8941 ilv->branch_cost = branch_align_cost;
8942 ilv->enabled = true;
8943 if ((target_p || force_target)
8944 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8946 ilv[1] = *ilv;
8947 ilv->align_set = 1;
8948 ilv++;
8949 ilv->align_set = 2;
8950 ilv->target_cost = 1;
8951 ilv->branch_cost = branch_unalign_cost;
8953 ilv++;
8954 break;
8955 case TYPE_JUMP:
8956 return 0;
8957 default:
8958 /* For every short insn, there is generally also a long insn.
8959 trap_s is an exception. */
8960 if ((len & 2) == 0 || recog_memoized (insn) == CODE_FOR_trap_s)
8961 return 0;
8962 ilv->align_set = 3;
8963 ilv->length = len;
8964 ilv->enabled = 1;
8965 ilv++;
8966 ilv->align_set = 3;
8967 ilv->length = len + 2;
8968 ilv->enabled = 1;
8969 if (target_p || force_target)
8971 ilv[1] = *ilv;
8972 ilv->align_set = 1;
8973 ilv++;
8974 ilv->align_set = 2;
8975 ilv->target_cost = 1;
8977 ilv++;
8979 /* If the previous instruction is an sfunc call, this insn is always
8980 a target, even though the middle-end is unaware of this.
8981 Therefore, if we have a call predecessor, transfer the target cost
8982 to the fallthrough and branch costs. */
8983 if (force_target)
8985 for (insn_length_variant_t *p = first_ilv; p < ilv; p++)
8987 p->fallthrough_cost += p->target_cost;
8988 p->branch_cost += p->target_cost;
8989 p->target_cost = 0;
8993 return ilv - first_ilv;
8996 static void
8997 arc_insn_length_parameters (insn_length_parameters_t *ilp)
8999 ilp->align_unit_log = 1;
9000 ilp->align_base_log = 1;
9001 ilp->max_variants = 7;
9002 ilp->get_variants = arc_get_insn_variants;
9005 /* Return a copy of COND from *STATEP, inverted if that is indicated by the
9006 CC field of *STATEP. */
9008 static rtx
9009 arc_get_ccfsm_cond (struct arc_ccfsm *statep, bool reverse)
9011 rtx cond = statep->cond;
9012 int raw_cc = get_arc_condition_code (cond);
9013 if (reverse)
9014 raw_cc = ARC_INVERSE_CONDITION_CODE (raw_cc);
9016 if (statep->cc == raw_cc)
9017 return copy_rtx (cond);
9019 gcc_assert (ARC_INVERSE_CONDITION_CODE (raw_cc) == statep->cc);
9021 machine_mode ccm = GET_MODE (XEXP (cond, 0));
9022 enum rtx_code code = reverse_condition (GET_CODE (cond));
9023 if (code == UNKNOWN || ccm == CC_FP_GTmode || ccm == CC_FP_GEmode)
9024 code = reverse_condition_maybe_unordered (GET_CODE (cond));
9026 return gen_rtx_fmt_ee (code, GET_MODE (cond),
9027 copy_rtx (XEXP (cond, 0)), copy_rtx (XEXP (cond, 1)));
9030 /* Return version of PAT conditionalized with COND, which is part of INSN.
9031 ANNULLED indicates if INSN is an annulled delay-slot insn.
9032 Register further changes if necessary. */
9033 static rtx
9034 conditionalize_nonjump (rtx pat, rtx cond, rtx insn, bool annulled)
9036 /* For commutative operators, we generally prefer to have
9037 the first source match the destination. */
9038 if (GET_CODE (pat) == SET)
9040 rtx src = SET_SRC (pat);
9042 if (COMMUTATIVE_P (src))
9044 rtx src0 = XEXP (src, 0);
9045 rtx src1 = XEXP (src, 1);
9046 rtx dst = SET_DEST (pat);
9048 if (rtx_equal_p (src1, dst) && !rtx_equal_p (src0, dst)
9049 /* Leave add_n alone - the canonical form is to
9050 have the complex summand first. */
9051 && REG_P (src0))
9052 pat = gen_rtx_SET (dst,
9053 gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src),
9054 src1, src0));
9058 /* dwarf2out.c:dwarf2out_frame_debug_expr doesn't know
9059 what to do with COND_EXEC. */
9060 if (RTX_FRAME_RELATED_P (insn))
9062 /* If this is the delay slot insn of an anulled branch,
9063 dwarf2out.c:scan_trace understands the anulling semantics
9064 without the COND_EXEC. */
9065 gcc_assert (annulled);
9066 rtx note = alloc_reg_note (REG_FRAME_RELATED_EXPR, pat,
9067 REG_NOTES (insn));
9068 validate_change (insn, &REG_NOTES (insn), note, 1);
9070 pat = gen_rtx_COND_EXEC (VOIDmode, cond, pat);
9071 return pat;
9074 /* Use the ccfsm machinery to do if conversion. */
9076 static unsigned
9077 arc_ifcvt (void)
9079 struct arc_ccfsm *statep = &cfun->machine->ccfsm_current;
9081 memset (statep, 0, sizeof *statep);
9082 for (rtx_insn *insn = get_insns (); insn; insn = next_insn (insn))
9084 arc_ccfsm_advance (insn, statep);
9086 switch (statep->state)
9088 case 0:
9089 break;
9090 case 1: case 2:
9092 /* Deleted branch. */
9093 arc_ccfsm_post_advance (insn, statep);
9094 gcc_assert (!IN_RANGE (statep->state, 1, 2));
9095 rtx_insn *seq = NEXT_INSN (PREV_INSN (insn));
9096 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
9098 rtx slot = XVECEXP (PATTERN (seq), 0, 1);
9099 rtx pat = PATTERN (slot);
9100 if (INSN_ANNULLED_BRANCH_P (insn))
9102 rtx cond
9103 = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (slot));
9104 pat = gen_rtx_COND_EXEC (VOIDmode, cond, pat);
9106 if (!validate_change (seq, &PATTERN (seq), pat, 0))
9107 gcc_unreachable ();
9108 PUT_CODE (slot, NOTE);
9109 NOTE_KIND (slot) = NOTE_INSN_DELETED;
9111 else
9113 set_insn_deleted (insn);
9115 continue;
9117 case 3:
9118 if (LABEL_P (insn)
9119 && statep->target_label == CODE_LABEL_NUMBER (insn))
9121 arc_ccfsm_post_advance (insn, statep);
9122 if (--LABEL_NUSES (insn) == 0)
9123 delete_insn (insn);
9124 continue;
9126 /* Fall through. */
9127 case 4: case 5:
9128 if (!NONDEBUG_INSN_P (insn))
9129 break;
9131 /* Conditionalized insn. */
9133 rtx_insn *prev, *pprev;
9134 rtx *patp, pat, cond;
9135 bool annulled; annulled = false;
9137 /* If this is a delay slot insn in a non-annulled branch,
9138 don't conditionalize it. N.B., this should be fine for
9139 conditional return too. However, don't do this for
9140 unconditional branches, as these would be encountered when
9141 processing an 'else' part. */
9142 prev = PREV_INSN (insn);
9143 pprev = PREV_INSN (prev);
9144 if (pprev && NEXT_INSN (NEXT_INSN (pprev)) == NEXT_INSN (insn)
9145 && JUMP_P (prev) && get_attr_cond (prev) == COND_USE)
9147 if (!INSN_ANNULLED_BRANCH_P (prev))
9148 break;
9149 annulled = true;
9152 patp = &PATTERN (insn);
9153 pat = *patp;
9154 cond = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (insn));
9155 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9157 /* ??? don't conditionalize if all side effects are dead
9158 in the not-execute case. */
9160 pat = conditionalize_nonjump (pat, cond, insn, annulled);
9162 else if (simplejump_p (insn))
9164 patp = &SET_SRC (pat);
9165 pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, *patp, pc_rtx);
9167 else if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
9169 pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, pat, pc_rtx);
9170 pat = gen_rtx_SET (pc_rtx, pat);
9172 else
9173 gcc_unreachable ();
9174 validate_change (insn, patp, pat, 1);
9175 if (!apply_change_group ())
9176 gcc_unreachable ();
9177 if (JUMP_P (insn))
9179 rtx_insn *next = next_nonnote_insn (insn);
9180 if (GET_CODE (next) == BARRIER)
9181 delete_insn (next);
9182 if (statep->state == 3)
9183 continue;
9185 break;
9186 default:
9187 gcc_unreachable ();
9189 arc_ccfsm_post_advance (insn, statep);
9191 return 0;
9194 /* Find annulled delay insns and convert them to use the appropriate predicate.
9195 This allows branch shortening to size up these insns properly. */
9197 static unsigned
9198 arc_predicate_delay_insns (void)
9200 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9202 rtx pat, jump, dlay, src, cond, *patp;
9203 int reverse;
9205 if (!NONJUMP_INSN_P (insn)
9206 || GET_CODE (pat = PATTERN (insn)) != SEQUENCE)
9207 continue;
9208 jump = XVECEXP (pat, 0, 0);
9209 dlay = XVECEXP (pat, 0, 1);
9210 if (!JUMP_P (jump) || !INSN_ANNULLED_BRANCH_P (jump))
9211 continue;
9212 /* If the branch insn does the annulling, leave the delay insn alone. */
9213 if (!TARGET_AT_DBR_CONDEXEC && !INSN_FROM_TARGET_P (dlay))
9214 continue;
9215 /* ??? Could also leave DLAY un-conditionalized if its target is dead
9216 on the other path. */
9217 gcc_assert (GET_CODE (PATTERN (jump)) == SET);
9218 gcc_assert (SET_DEST (PATTERN (jump)) == pc_rtx);
9219 src = SET_SRC (PATTERN (jump));
9220 gcc_assert (GET_CODE (src) == IF_THEN_ELSE);
9221 cond = XEXP (src, 0);
9222 if (XEXP (src, 2) == pc_rtx)
9223 reverse = 0;
9224 else if (XEXP (src, 1) == pc_rtx)
9225 reverse = 1;
9226 else
9227 gcc_unreachable ();
9228 if (reverse != !INSN_FROM_TARGET_P (dlay))
9230 machine_mode ccm = GET_MODE (XEXP (cond, 0));
9231 enum rtx_code code = reverse_condition (GET_CODE (cond));
9232 if (code == UNKNOWN || ccm == CC_FP_GTmode || ccm == CC_FP_GEmode)
9233 code = reverse_condition_maybe_unordered (GET_CODE (cond));
9235 cond = gen_rtx_fmt_ee (code, GET_MODE (cond),
9236 copy_rtx (XEXP (cond, 0)),
9237 copy_rtx (XEXP (cond, 1)));
9239 else
9240 cond = copy_rtx (cond);
9241 patp = &PATTERN (dlay);
9242 pat = *patp;
9243 pat = conditionalize_nonjump (pat, cond, dlay, true);
9244 validate_change (dlay, patp, pat, 1);
9245 if (!apply_change_group ())
9246 gcc_unreachable ();
9248 return 0;
9251 /* For ARC600: If a write to a core reg >=32 appears in a delay slot
9252 (other than of a forward brcc), it creates a hazard when there is a read
9253 of the same register at the branch target. We can't know what is at the
9254 branch target of calls, and for branches, we don't really know before the
9255 end of delay slot scheduling, either. Not only can individual instruction
9256 be hoisted out into a delay slot, a basic block can also be emptied this
9257 way, and branch and/or fall through targets be redirected. Hence we don't
9258 want such writes in a delay slot. */
9260 /* Return nonzreo iff INSN writes to an extension core register. */
9263 arc_write_ext_corereg (rtx insn)
9265 subrtx_iterator::array_type array;
9266 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
9268 const_rtx x = *iter;
9269 switch (GET_CODE (x))
9271 case SET: case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
9272 break;
9273 default:
9274 /* This is also fine for PRE/POST_MODIFY, because they
9275 contain a SET. */
9276 continue;
9278 const_rtx dest = XEXP (x, 0);
9279 if (REG_P (dest) && REGNO (dest) >= 32 && REGNO (dest) < 61)
9280 return 1;
9282 return 0;
9285 /* This is like the hook, but returns NULL when it can't / won't generate
9286 a legitimate address. */
9288 static rtx
9289 arc_legitimize_address_0 (rtx x, rtx oldx ATTRIBUTE_UNUSED,
9290 machine_mode mode)
9292 rtx addr, inner;
9294 if (flag_pic && SYMBOLIC_CONST (x))
9295 (x) = arc_legitimize_pic_address (x, 0);
9296 addr = x;
9297 if (GET_CODE (addr) == CONST)
9298 addr = XEXP (addr, 0);
9299 if (GET_CODE (addr) == PLUS
9300 && CONST_INT_P (XEXP (addr, 1))
9301 && ((GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
9302 && !SYMBOL_REF_FUNCTION_P (XEXP (addr, 0)))
9303 || (REG_P (XEXP (addr, 0))
9304 && (INTVAL (XEXP (addr, 1)) & 252))))
9306 HOST_WIDE_INT offs, upper;
9307 int size = GET_MODE_SIZE (mode);
9309 offs = INTVAL (XEXP (addr, 1));
9310 upper = (offs + 256 * size) & ~511 * size;
9311 inner = plus_constant (Pmode, XEXP (addr, 0), upper);
9312 #if 0 /* ??? this produces worse code for EEMBC idctrn01 */
9313 if (GET_CODE (x) == CONST)
9314 inner = gen_rtx_CONST (Pmode, inner);
9315 #endif
9316 addr = plus_constant (Pmode, force_reg (Pmode, inner), offs - upper);
9317 x = addr;
9319 else if (GET_CODE (addr) == SYMBOL_REF && !SYMBOL_REF_FUNCTION_P (addr))
9320 x = force_reg (Pmode, x);
9321 if (memory_address_p ((machine_mode) mode, x))
9322 return x;
9323 return NULL_RTX;
9326 static rtx
9327 arc_legitimize_address (rtx orig_x, rtx oldx, machine_mode mode)
9329 if (GET_CODE (orig_x) == SYMBOL_REF)
9331 enum tls_model model = SYMBOL_REF_TLS_MODEL (orig_x);
9332 if (model != 0)
9333 return arc_legitimize_tls_address (orig_x, model);
9336 rtx new_x = arc_legitimize_address_0 (orig_x, oldx, mode);
9338 if (new_x)
9339 return new_x;
9340 return orig_x;
9343 static rtx
9344 arc_delegitimize_address_0 (rtx x)
9346 rtx u, gp, p;
9348 if (GET_CODE (x) == CONST && GET_CODE (u = XEXP (x, 0)) == UNSPEC)
9350 if (XINT (u, 1) == ARC_UNSPEC_GOT
9351 || XINT (u, 1) == ARC_UNSPEC_GOTOFFPC)
9352 return XVECEXP (u, 0, 0);
9354 else if (GET_CODE (x) == CONST && GET_CODE (p = XEXP (x, 0)) == PLUS
9355 && GET_CODE (u = XEXP (p, 0)) == UNSPEC
9356 && (XINT (u, 1) == ARC_UNSPEC_GOT
9357 || XINT (u, 1) == ARC_UNSPEC_GOTOFFPC))
9358 return gen_rtx_CONST
9359 (GET_MODE (x),
9360 gen_rtx_PLUS (GET_MODE (p), XVECEXP (u, 0, 0), XEXP (p, 1)));
9361 else if (GET_CODE (x) == PLUS
9362 && ((REG_P (gp = XEXP (x, 0))
9363 && REGNO (gp) == PIC_OFFSET_TABLE_REGNUM)
9364 || (GET_CODE (gp) == CONST
9365 && GET_CODE (u = XEXP (gp, 0)) == UNSPEC
9366 && XINT (u, 1) == ARC_UNSPEC_GOT
9367 && GET_CODE (XVECEXP (u, 0, 0)) == SYMBOL_REF
9368 && !strcmp (XSTR (XVECEXP (u, 0, 0), 0), "_DYNAMIC")))
9369 && GET_CODE (XEXP (x, 1)) == CONST
9370 && GET_CODE (u = XEXP (XEXP (x, 1), 0)) == UNSPEC
9371 && XINT (u, 1) == ARC_UNSPEC_GOTOFF)
9372 return XVECEXP (u, 0, 0);
9373 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
9374 && ((REG_P (gp = XEXP (XEXP (x, 0), 1))
9375 && REGNO (gp) == PIC_OFFSET_TABLE_REGNUM)
9376 || (GET_CODE (gp) == CONST
9377 && GET_CODE (u = XEXP (gp, 0)) == UNSPEC
9378 && XINT (u, 1) == ARC_UNSPEC_GOT
9379 && GET_CODE (XVECEXP (u, 0, 0)) == SYMBOL_REF
9380 && !strcmp (XSTR (XVECEXP (u, 0, 0), 0), "_DYNAMIC")))
9381 && GET_CODE (XEXP (x, 1)) == CONST
9382 && GET_CODE (u = XEXP (XEXP (x, 1), 0)) == UNSPEC
9383 && XINT (u, 1) == ARC_UNSPEC_GOTOFF)
9384 return gen_rtx_PLUS (GET_MODE (x), XEXP (XEXP (x, 0), 0),
9385 XVECEXP (u, 0, 0));
9386 else if (GET_CODE (x) == PLUS
9387 && (u = arc_delegitimize_address_0 (XEXP (x, 1))))
9388 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), u);
9389 return NULL_RTX;
9392 static rtx
9393 arc_delegitimize_address (rtx x)
9395 rtx orig_x = x = delegitimize_mem_from_attrs (x);
9396 if (GET_CODE (x) == MEM)
9397 x = XEXP (x, 0);
9398 x = arc_delegitimize_address_0 (x);
9399 if (x)
9401 if (MEM_P (orig_x))
9402 x = replace_equiv_address_nv (orig_x, x);
9403 return x;
9405 return orig_x;
9408 /* Return a REG rtx for acc1. N.B. the gcc-internal representation may
9409 differ from the hardware register number in order to allow the generic
9410 code to correctly split the concatenation of acc1 and acc2. */
9413 gen_acc1 (void)
9415 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 56: 57);
9418 /* Return a REG rtx for acc2. N.B. the gcc-internal representation may
9419 differ from the hardware register number in order to allow the generic
9420 code to correctly split the concatenation of acc1 and acc2. */
9423 gen_acc2 (void)
9425 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 57: 56);
9428 /* Return a REG rtx for mlo. N.B. the gcc-internal representation may
9429 differ from the hardware register number in order to allow the generic
9430 code to correctly split the concatenation of mhi and mlo. */
9433 gen_mlo (void)
9435 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 59: 58);
9438 /* Return a REG rtx for mhi. N.B. the gcc-internal representation may
9439 differ from the hardware register number in order to allow the generic
9440 code to correctly split the concatenation of mhi and mlo. */
9443 gen_mhi (void)
9445 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 58: 59);
9448 /* FIXME: a parameter should be added, and code added to final.c,
9449 to reproduce this functionality in shorten_branches. */
9450 #if 0
9451 /* Return nonzero iff BRANCH should be unaligned if possible by upsizing
9452 a previous instruction. */
9454 arc_unalign_branch_p (rtx branch)
9456 rtx note;
9458 if (!TARGET_UNALIGN_BRANCH)
9459 return 0;
9460 /* Do not do this if we have a filled delay slot. */
9461 if (get_attr_delay_slot_filled (branch) == DELAY_SLOT_FILLED_YES
9462 && !NEXT_INSN (branch)->deleted ())
9463 return 0;
9464 note = find_reg_note (branch, REG_BR_PROB, 0);
9465 return (!note
9466 || (arc_unalign_prob_threshold && !br_prob_note_reliable_p (note))
9467 || INTVAL (XEXP (note, 0)) < arc_unalign_prob_threshold);
9469 #endif
9471 /* When estimating sizes during arc_reorg, when optimizing for speed, there
9472 are three reasons why we need to consider branches to be length 6:
9473 - annull-false delay slot insns are implemented using conditional execution,
9474 thus preventing short insn formation where used.
9475 - for ARC600: annul-true delay slot insns are implemented where possible
9476 using conditional execution, preventing short insn formation where used.
9477 - for ARC700: likely or somewhat likely taken branches are made long and
9478 unaligned if possible to avoid branch penalty. */
9480 bool
9481 arc_branch_size_unknown_p (void)
9483 return !optimize_size && arc_reorg_in_progress;
9486 /* We are about to output a return insn. Add padding if necessary to avoid
9487 a mispredict. A return could happen immediately after the function
9488 start, but after a call we know that there will be at least a blink
9489 restore. */
9491 void
9492 arc_pad_return (void)
9494 rtx_insn *insn = current_output_insn;
9495 rtx_insn *prev = prev_active_insn (insn);
9496 int want_long;
9498 if (!prev)
9500 fputs ("\tnop_s\n", asm_out_file);
9501 cfun->machine->unalign ^= 2;
9502 want_long = 1;
9504 /* If PREV is a sequence, we know it must be a branch / jump or a tailcall,
9505 because after a call, we'd have to restore blink first. */
9506 else if (GET_CODE (PATTERN (prev)) == SEQUENCE)
9507 return;
9508 else
9510 want_long = (get_attr_length (prev) == 2);
9511 prev = prev_active_insn (prev);
9513 if (!prev
9514 || ((NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
9515 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
9516 NON_SIBCALL)
9517 : CALL_ATTR (prev, NON_SIBCALL)))
9519 if (want_long)
9520 cfun->machine->size_reason
9521 = "call/return and return/return must be 6 bytes apart to avoid mispredict";
9522 else if (TARGET_UNALIGN_BRANCH && cfun->machine->unalign)
9524 cfun->machine->size_reason
9525 = "Long unaligned jump avoids non-delay slot penalty";
9526 want_long = 1;
9528 /* Disgorge delay insn, if there is any, and it may be moved. */
9529 if (final_sequence
9530 /* ??? Annulled would be OK if we can and do conditionalize
9531 the delay slot insn accordingly. */
9532 && !INSN_ANNULLED_BRANCH_P (insn)
9533 && (get_attr_cond (insn) != COND_USE
9534 || !reg_set_p (gen_rtx_REG (CCmode, CC_REG),
9535 XVECEXP (final_sequence, 0, 1))))
9537 prev = as_a <rtx_insn *> (XVECEXP (final_sequence, 0, 1));
9538 gcc_assert (!prev_real_insn (insn)
9539 || !arc_hazard (prev_real_insn (insn), prev));
9540 cfun->machine->force_short_suffix = !want_long;
9541 rtx save_pred = current_insn_predicate;
9542 final_scan_insn (prev, asm_out_file, optimize, 1, NULL);
9543 cfun->machine->force_short_suffix = -1;
9544 prev->set_deleted ();
9545 current_output_insn = insn;
9546 current_insn_predicate = save_pred;
9548 else if (want_long)
9549 fputs ("\tnop\n", asm_out_file);
9550 else
9552 fputs ("\tnop_s\n", asm_out_file);
9553 cfun->machine->unalign ^= 2;
9556 return;
9559 /* The usual; we set up our machine_function data. */
9561 static struct machine_function *
9562 arc_init_machine_status (void)
9564 struct machine_function *machine;
9565 machine = ggc_cleared_alloc<machine_function> ();
9566 machine->fn_type = ARC_FUNCTION_UNKNOWN;
9567 machine->force_short_suffix = -1;
9569 return machine;
9572 /* Implements INIT_EXPANDERS. We just set up to call the above
9573 function. */
9575 void
9576 arc_init_expanders (void)
9578 init_machine_status = arc_init_machine_status;
9581 /* Check if OP is a proper parallel of a millicode call pattern. OFFSET
9582 indicates a number of elements to ignore - that allows to have a
9583 sibcall pattern that starts with (return). LOAD_P is zero for store
9584 multiple (for prologues), and one for load multiples (for epilogues),
9585 and two for load multiples where no final clobber of blink is required.
9586 We also skip the first load / store element since this is supposed to
9587 be checked in the instruction pattern. */
9590 arc_check_millicode (rtx op, int offset, int load_p)
9592 int len = XVECLEN (op, 0) - offset;
9593 int i;
9595 if (load_p == 2)
9597 if (len < 2 || len > 13)
9598 return 0;
9599 load_p = 1;
9601 else
9603 rtx elt = XVECEXP (op, 0, --len);
9605 if (GET_CODE (elt) != CLOBBER
9606 || !REG_P (XEXP (elt, 0))
9607 || REGNO (XEXP (elt, 0)) != RETURN_ADDR_REGNUM
9608 || len < 3 || len > 13)
9609 return 0;
9611 for (i = 1; i < len; i++)
9613 rtx elt = XVECEXP (op, 0, i + offset);
9614 rtx reg, mem, addr;
9616 if (GET_CODE (elt) != SET)
9617 return 0;
9618 mem = XEXP (elt, load_p);
9619 reg = XEXP (elt, 1-load_p);
9620 if (!REG_P (reg) || REGNO (reg) != 13U+i || !MEM_P (mem))
9621 return 0;
9622 addr = XEXP (mem, 0);
9623 if (GET_CODE (addr) != PLUS
9624 || !rtx_equal_p (stack_pointer_rtx, XEXP (addr, 0))
9625 || !CONST_INT_P (XEXP (addr, 1)) || INTVAL (XEXP (addr, 1)) != i*4)
9626 return 0;
9628 return 1;
9631 /* Accessor functions for cfun->machine->unalign. */
9634 arc_get_unalign (void)
9636 return cfun->machine->unalign;
9639 void
9640 arc_clear_unalign (void)
9642 if (cfun)
9643 cfun->machine->unalign = 0;
9646 void
9647 arc_toggle_unalign (void)
9649 cfun->machine->unalign ^= 2;
9652 /* Operands 0..2 are the operands of a addsi which uses a 12 bit
9653 constant in operand 2, but which would require a LIMM because of
9654 operand mismatch.
9655 operands 3 and 4 are new SET_SRCs for operands 0. */
9657 void
9658 split_addsi (rtx *operands)
9660 int val = INTVAL (operands[2]);
9662 /* Try for two short insns first. Lengths being equal, we prefer
9663 expansions with shorter register lifetimes. */
9664 if (val > 127 && val <= 255
9665 && satisfies_constraint_Rcq (operands[0]))
9667 operands[3] = operands[2];
9668 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[1]);
9670 else
9672 operands[3] = operands[1];
9673 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[2]);
9677 /* Operands 0..2 are the operands of a subsi which uses a 12 bit
9678 constant in operand 1, but which would require a LIMM because of
9679 operand mismatch.
9680 operands 3 and 4 are new SET_SRCs for operands 0. */
9682 void
9683 split_subsi (rtx *operands)
9685 int val = INTVAL (operands[1]);
9687 /* Try for two short insns first. Lengths being equal, we prefer
9688 expansions with shorter register lifetimes. */
9689 if (satisfies_constraint_Rcq (operands[0])
9690 && satisfies_constraint_Rcq (operands[2]))
9692 if (val >= -31 && val <= 127)
9694 operands[3] = gen_rtx_NEG (SImode, operands[2]);
9695 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[1]);
9696 return;
9698 else if (val >= 0 && val < 255)
9700 operands[3] = operands[1];
9701 operands[4] = gen_rtx_MINUS (SImode, operands[0], operands[2]);
9702 return;
9705 /* If the destination is not an ARCompact16 register, we might
9706 still have a chance to make a short insn if the source is;
9707 we need to start with a reg-reg move for this. */
9708 operands[3] = operands[2];
9709 operands[4] = gen_rtx_MINUS (SImode, operands[1], operands[0]);
9712 /* Handle DOUBLE_REGS uses.
9713 Operand 0: destination register
9714 Operand 1: source register */
9716 static bool
9717 arc_process_double_reg_moves (rtx *operands)
9719 rtx dest = operands[0];
9720 rtx src = operands[1];
9722 enum usesDxState { none, srcDx, destDx, maxDx };
9723 enum usesDxState state = none;
9725 if (refers_to_regno_p (40, 44, src, 0))
9726 state = srcDx;
9727 if (refers_to_regno_p (40, 44, dest, 0))
9729 /* Via arc_register_move_cost, we should never see D,D moves. */
9730 gcc_assert (state == none);
9731 state = destDx;
9734 if (state == none)
9735 return false;
9737 if (state == srcDx)
9739 /* Without the LR insn, we need to split this into a
9740 sequence of insns which will use the DEXCLx and DADDHxy
9741 insns to be able to read the Dx register in question. */
9742 if (TARGET_DPFP_DISABLE_LRSR)
9744 /* gen *movdf_insn_nolrsr */
9745 rtx set = gen_rtx_SET (dest, src);
9746 rtx use1 = gen_rtx_USE (VOIDmode, const1_rtx);
9747 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, use1)));
9749 else
9751 /* When we have 'mov D, r' or 'mov D, D' then get the target
9752 register pair for use with LR insn. */
9753 rtx destHigh = simplify_gen_subreg (SImode, dest, DFmode,
9754 TARGET_BIG_ENDIAN ? 0 : 4);
9755 rtx destLow = simplify_gen_subreg (SImode, dest, DFmode,
9756 TARGET_BIG_ENDIAN ? 4 : 0);
9758 /* Produce the two LR insns to get the high and low parts. */
9759 emit_insn (gen_rtx_SET (destHigh,
9760 gen_rtx_UNSPEC_VOLATILE (Pmode,
9761 gen_rtvec (1, src),
9762 VUNSPEC_ARC_LR_HIGH)));
9763 emit_insn (gen_rtx_SET (destLow,
9764 gen_rtx_UNSPEC_VOLATILE (Pmode,
9765 gen_rtvec (1, src),
9766 VUNSPEC_ARC_LR)));
9769 else if (state == destDx)
9771 /* When we have 'mov r, D' or 'mov D, D' and we have access to the
9772 LR insn get the target register pair. */
9773 rtx srcHigh = simplify_gen_subreg (SImode, src, DFmode,
9774 TARGET_BIG_ENDIAN ? 0 : 4);
9775 rtx srcLow = simplify_gen_subreg (SImode, src, DFmode,
9776 TARGET_BIG_ENDIAN ? 4 : 0);
9778 emit_insn (gen_dexcl_2op (dest, srcHigh, srcLow));
9780 else
9781 gcc_unreachable ();
9783 return true;
9786 /* operands 0..1 are the operands of a 64 bit move instruction.
9787 split it into two moves with operands 2/3 and 4/5. */
9789 void
9790 arc_split_move (rtx *operands)
9792 machine_mode mode = GET_MODE (operands[0]);
9793 int i;
9794 int swap = 0;
9795 rtx xop[4];
9797 if (TARGET_DPFP)
9799 if (arc_process_double_reg_moves (operands))
9800 return;
9803 if (TARGET_LL64
9804 && ((memory_operand (operands[0], mode)
9805 && even_register_operand (operands[1], mode))
9806 || (memory_operand (operands[1], mode)
9807 && even_register_operand (operands[0], mode))))
9809 emit_move_insn (operands[0], operands[1]);
9810 return;
9813 if (TARGET_PLUS_QMACW
9814 && GET_CODE (operands[1]) == CONST_VECTOR)
9816 HOST_WIDE_INT intval0, intval1;
9817 if (GET_MODE (operands[1]) == V2SImode)
9819 intval0 = INTVAL (XVECEXP (operands[1], 0, 0));
9820 intval1 = INTVAL (XVECEXP (operands[1], 0, 1));
9822 else
9824 intval1 = INTVAL (XVECEXP (operands[1], 0, 3)) << 16;
9825 intval1 |= INTVAL (XVECEXP (operands[1], 0, 2)) & 0xFFFF;
9826 intval0 = INTVAL (XVECEXP (operands[1], 0, 1)) << 16;
9827 intval0 |= INTVAL (XVECEXP (operands[1], 0, 0)) & 0xFFFF;
9829 xop[0] = gen_rtx_REG (SImode, REGNO (operands[0]));
9830 xop[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
9831 xop[2] = GEN_INT (trunc_int_for_mode (intval0, SImode));
9832 xop[1] = GEN_INT (trunc_int_for_mode (intval1, SImode));
9833 emit_move_insn (xop[0], xop[2]);
9834 emit_move_insn (xop[3], xop[1]);
9835 return;
9838 for (i = 0; i < 2; i++)
9840 if (MEM_P (operands[i]) && auto_inc_p (XEXP (operands[i], 0)))
9842 rtx addr = XEXP (operands[i], 0);
9843 rtx r, o;
9844 enum rtx_code code;
9846 gcc_assert (!reg_overlap_mentioned_p (operands[0], addr));
9847 switch (GET_CODE (addr))
9849 case PRE_DEC: o = GEN_INT (-8); goto pre_modify;
9850 case PRE_INC: o = GEN_INT (8); goto pre_modify;
9851 case PRE_MODIFY: o = XEXP (XEXP (addr, 1), 1);
9852 pre_modify:
9853 code = PRE_MODIFY;
9854 break;
9855 case POST_DEC: o = GEN_INT (-8); goto post_modify;
9856 case POST_INC: o = GEN_INT (8); goto post_modify;
9857 case POST_MODIFY: o = XEXP (XEXP (addr, 1), 1);
9858 post_modify:
9859 code = POST_MODIFY;
9860 swap = 2;
9861 break;
9862 default:
9863 gcc_unreachable ();
9865 r = XEXP (addr, 0);
9866 xop[0+i] = adjust_automodify_address_nv
9867 (operands[i], SImode,
9868 gen_rtx_fmt_ee (code, Pmode, r,
9869 gen_rtx_PLUS (Pmode, r, o)),
9871 xop[2+i] = adjust_automodify_address_nv
9872 (operands[i], SImode, plus_constant (Pmode, r, 4), 4);
9874 else
9876 xop[0+i] = operand_subword (operands[i], 0, 0, mode);
9877 xop[2+i] = operand_subword (operands[i], 1, 0, mode);
9880 if (reg_overlap_mentioned_p (xop[0], xop[3]))
9882 swap = 2;
9883 gcc_assert (!reg_overlap_mentioned_p (xop[2], xop[1]));
9886 emit_move_insn (xop[0 + swap], xop[1 + swap]);
9887 emit_move_insn (xop[2 - swap], xop[3 - swap]);
9891 /* Select between the instruction output templates s_tmpl (for short INSNs)
9892 and l_tmpl (for long INSNs). */
9894 const char *
9895 arc_short_long (rtx_insn *insn, const char *s_tmpl, const char *l_tmpl)
9897 int is_short = arc_verify_short (insn, cfun->machine->unalign, -1);
9899 extract_constrain_insn_cached (insn);
9900 return is_short ? s_tmpl : l_tmpl;
9903 /* Searches X for any reference to REGNO, returning the rtx of the
9904 reference found if any. Otherwise, returns NULL_RTX. */
9907 arc_regno_use_in (unsigned int regno, rtx x)
9909 const char *fmt;
9910 int i, j;
9911 rtx tem;
9913 if (REG_P (x) && refers_to_regno_p (regno, x))
9914 return x;
9916 fmt = GET_RTX_FORMAT (GET_CODE (x));
9917 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9919 if (fmt[i] == 'e')
9921 if ((tem = regno_use_in (regno, XEXP (x, i))))
9922 return tem;
9924 else if (fmt[i] == 'E')
9925 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9926 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
9927 return tem;
9930 return NULL_RTX;
9933 /* Return the integer value of the "type" attribute for INSN, or -1 if
9934 INSN can't have attributes. */
9937 arc_attr_type (rtx_insn *insn)
9939 if (NONJUMP_INSN_P (insn)
9940 ? (GET_CODE (PATTERN (insn)) == USE
9941 || GET_CODE (PATTERN (insn)) == CLOBBER)
9942 : JUMP_P (insn)
9943 ? (GET_CODE (PATTERN (insn)) == ADDR_VEC
9944 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
9945 : !CALL_P (insn))
9946 return -1;
9947 return get_attr_type (insn);
9950 /* Return true if insn sets the condition codes. */
9952 bool
9953 arc_sets_cc_p (rtx_insn *insn)
9955 if (NONJUMP_INSN_P (insn))
9956 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
9957 insn = seq->insn (seq->len () - 1);
9958 return arc_attr_type (insn) == TYPE_COMPARE;
9961 /* Return true if INSN is an instruction with a delay slot we may want
9962 to fill. */
9964 bool
9965 arc_need_delay (rtx_insn *insn)
9967 rtx_insn *next;
9969 if (!flag_delayed_branch)
9970 return false;
9971 /* The return at the end of a function needs a delay slot. */
9972 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
9973 && (!(next = next_active_insn (insn))
9974 || ((!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) != SEQUENCE)
9975 && arc_attr_type (next) == TYPE_RETURN))
9976 && (!TARGET_PAD_RETURN
9977 || (prev_active_insn (insn)
9978 && prev_active_insn (prev_active_insn (insn))
9979 && prev_active_insn (prev_active_insn (prev_active_insn (insn))))))
9980 return true;
9981 if (NONJUMP_INSN_P (insn)
9982 ? (GET_CODE (PATTERN (insn)) == USE
9983 || GET_CODE (PATTERN (insn)) == CLOBBER
9984 || GET_CODE (PATTERN (insn)) == SEQUENCE)
9985 : JUMP_P (insn)
9986 ? (GET_CODE (PATTERN (insn)) == ADDR_VEC
9987 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
9988 : !CALL_P (insn))
9989 return false;
9990 return num_delay_slots (insn) != 0;
9993 /* Return true if the scheduling pass(es) has/have already run,
9994 i.e. where possible, we should try to mitigate high latencies
9995 by different instruction selection. */
9997 bool
9998 arc_scheduling_not_expected (void)
10000 return cfun->machine->arc_reorg_started;
10004 arc_label_align (rtx_insn *label)
10006 /* Code has a minimum p2 alignment of 1, which we must restore after an
10007 ADDR_DIFF_VEC. */
10008 if (align_labels_log < 1)
10010 rtx_insn *next = next_nonnote_nondebug_insn (label);
10011 if (INSN_P (next) && recog_memoized (next) >= 0)
10012 return 1;
10014 return align_labels_log;
10017 /* Return true if LABEL is in executable code. */
10019 bool
10020 arc_text_label (rtx_insn *label)
10022 rtx_insn *next;
10024 /* ??? We use deleted labels like they were still there, see
10025 gcc.c-torture/compile/20000326-2.c . */
10026 gcc_assert (GET_CODE (label) == CODE_LABEL
10027 || (GET_CODE (label) == NOTE
10028 && NOTE_KIND (label) == NOTE_INSN_DELETED_LABEL));
10029 next = next_nonnote_insn (label);
10030 if (next)
10031 return (!JUMP_TABLE_DATA_P (next)
10032 || GET_CODE (PATTERN (next)) != ADDR_VEC);
10033 else if (!PREV_INSN (label))
10034 /* ??? sometimes text labels get inserted very late, see
10035 gcc.dg/torture/stackalign/comp-goto-1.c */
10036 return true;
10037 return false;
10040 /* Without this, gcc.dg/tree-prof/bb-reorg.c fails to assemble
10041 when compiling with -O2 -freorder-blocks-and-partition -fprofile-use
10042 -D_PROFILE_USE; delay branch scheduling then follows a crossing jump
10043 to redirect two breqs. */
10045 static bool
10046 arc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
10048 /* ??? get_attr_type is declared to take an rtx. */
10049 union { const rtx_insn *c; rtx_insn *r; } u;
10051 u.c = follower;
10052 if (CROSSING_JUMP_P (followee))
10053 switch (get_attr_type (u.r))
10055 case TYPE_BRANCH:
10056 if (get_attr_length (u.r) != 2)
10057 break;
10058 case TYPE_BRCC:
10059 case TYPE_BRCC_NO_DELAY_SLOT:
10060 return false;
10061 default:
10062 return true;
10064 return true;
10067 /* Return the register number of the register holding the return address
10068 for a function of type TYPE. */
10071 arc_return_address_register (unsigned int fn_type)
10073 int regno = 0;
10075 if (ARC_INTERRUPT_P (fn_type))
10077 if (((fn_type & ARC_FUNCTION_ILINK1) | ARC_FUNCTION_FIRQ) != 0)
10078 regno = ILINK1_REGNUM;
10079 else if ((fn_type & ARC_FUNCTION_ILINK2) != 0)
10080 regno = ILINK2_REGNUM;
10081 else
10082 gcc_unreachable ();
10084 else if (ARC_NORMAL_P (fn_type) || ARC_NAKED_P (fn_type))
10085 regno = RETURN_ADDR_REGNUM;
10087 gcc_assert (regno != 0);
10088 return regno;
10091 /* Implement EPILOGUE_USES.
10092 Return true if REGNO should be added to the deemed uses of the epilogue.
10094 We have to make sure all the register restore instructions are
10095 known to be live in interrupt functions, plus the blink register if
10096 it is clobbered by the isr. */
10098 bool
10099 arc_epilogue_uses (int regno)
10101 unsigned int fn_type;
10103 if (regno == arc_tp_regno)
10104 return true;
10106 fn_type = arc_compute_function_type (cfun);
10107 if (reload_completed)
10109 if (ARC_INTERRUPT_P (cfun->machine->fn_type))
10111 if (!fixed_regs[regno])
10112 return true;
10113 return ((regno == arc_return_address_register (fn_type))
10114 || (regno == RETURN_ADDR_REGNUM));
10116 else
10117 return regno == RETURN_ADDR_REGNUM;
10119 else
10120 return regno == arc_return_address_register (fn_type);
10123 /* Helper for EH_USES macro. */
10125 bool
10126 arc_eh_uses (int regno)
10128 if (regno == arc_tp_regno)
10129 return true;
10130 return false;
10133 #ifndef TARGET_NO_LRA
10134 #define TARGET_NO_LRA !TARGET_LRA
10135 #endif
10137 static bool
10138 arc_lra_p (void)
10140 return !TARGET_NO_LRA;
10143 /* ??? Should we define TARGET_REGISTER_PRIORITY? We might perfer to use
10144 Rcq registers, because some insn are shorter with them. OTOH we already
10145 have separate alternatives for this purpose, and other insns don't
10146 mind, so maybe we should rather prefer the other registers?
10147 We need more data, and we can only get that if we allow people to
10148 try all options. */
10149 static int
10150 arc_register_priority (int r)
10152 switch (arc_lra_priority_tag)
10154 case ARC_LRA_PRIORITY_NONE:
10155 return 0;
10156 case ARC_LRA_PRIORITY_NONCOMPACT:
10157 return ((((r & 7) ^ 4) - 4) & 15) != r;
10158 case ARC_LRA_PRIORITY_COMPACT:
10159 return ((((r & 7) ^ 4) - 4) & 15) == r;
10160 default:
10161 gcc_unreachable ();
10165 static reg_class_t
10166 arc_spill_class (reg_class_t /* orig_class */, machine_mode)
10168 return GENERAL_REGS;
10171 bool
10172 arc_legitimize_reload_address (rtx *p, machine_mode mode, int opnum,
10173 int itype)
10175 rtx x = *p;
10176 enum reload_type type = (enum reload_type) itype;
10178 if (GET_CODE (x) == PLUS
10179 && CONST_INT_P (XEXP (x, 1))
10180 && (RTX_OK_FOR_BASE_P (XEXP (x, 0), true)
10181 || (REG_P (XEXP (x, 0))
10182 && reg_equiv_constant (REGNO (XEXP (x, 0))))))
10184 int scale = GET_MODE_SIZE (mode);
10185 int shift;
10186 rtx index_rtx = XEXP (x, 1);
10187 HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
10188 rtx reg, sum, sum2;
10190 if (scale > 4)
10191 scale = 4;
10192 if ((scale-1) & offset)
10193 scale = 1;
10194 shift = scale >> 1;
10195 offset_base
10196 = ((offset + (256 << shift))
10197 & ((HOST_WIDE_INT)((unsigned HOST_WIDE_INT) -512 << shift)));
10198 /* Sometimes the normal form does not suit DImode. We
10199 could avoid that by using smaller ranges, but that
10200 would give less optimized code when SImode is
10201 prevalent. */
10202 if (GET_MODE_SIZE (mode) + offset - offset_base <= (256 << shift))
10204 int regno;
10206 reg = XEXP (x, 0);
10207 regno = REGNO (reg);
10208 sum2 = sum = plus_constant (Pmode, reg, offset_base);
10210 if (reg_equiv_constant (regno))
10212 sum2 = plus_constant (Pmode, reg_equiv_constant (regno),
10213 offset_base);
10214 if (GET_CODE (sum2) == PLUS)
10215 sum2 = gen_rtx_CONST (Pmode, sum2);
10217 *p = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
10218 push_reload (sum2, NULL_RTX, &XEXP (*p, 0), NULL,
10219 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum,
10220 type);
10221 return true;
10224 /* We must re-recognize what we created before. */
10225 else if (GET_CODE (x) == PLUS
10226 && GET_CODE (XEXP (x, 0)) == PLUS
10227 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10228 && REG_P (XEXP (XEXP (x, 0), 0))
10229 && CONST_INT_P (XEXP (x, 1)))
10231 /* Because this address is so complex, we know it must have
10232 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
10233 it is already unshared, and needs no further unsharing. */
10234 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10235 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
10236 return true;
10238 return false;
10241 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
10243 static bool
10244 arc_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
10245 unsigned int align,
10246 enum by_pieces_operation op,
10247 bool speed_p)
10249 /* Let the movmem expander handle small block moves. */
10250 if (op == MOVE_BY_PIECES)
10251 return false;
10253 return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
10256 /* Emit a (pre) memory barrier around an atomic sequence according to
10257 MODEL. */
10259 static void
10260 arc_pre_atomic_barrier (enum memmodel model)
10262 if (need_atomic_barrier_p (model, true))
10263 emit_insn (gen_memory_barrier ());
10266 /* Emit a (post) memory barrier around an atomic sequence according to
10267 MODEL. */
10269 static void
10270 arc_post_atomic_barrier (enum memmodel model)
10272 if (need_atomic_barrier_p (model, false))
10273 emit_insn (gen_memory_barrier ());
10276 /* Expand a compare and swap pattern. */
10278 static void
10279 emit_unlikely_jump (rtx insn)
10281 rtx_insn *jump = emit_jump_insn (insn);
10282 add_reg_br_prob_note (jump, profile_probability::very_unlikely ());
10285 /* Expand code to perform a 8 or 16-bit compare and swap by doing
10286 32-bit compare and swap on the word containing the byte or
10287 half-word. The difference between a weak and a strong CAS is that
10288 the weak version may simply fail. The strong version relies on two
10289 loops, one checks if the SCOND op is succsfully or not, the other
10290 checks if the 32 bit accessed location which contains the 8 or 16
10291 bit datum is not changed by other thread. The first loop is
10292 implemented by the atomic_compare_and_swapsi_1 pattern. The second
10293 loops is implemented by this routine. */
10295 static void
10296 arc_expand_compare_and_swap_qh (rtx bool_result, rtx result, rtx mem,
10297 rtx oldval, rtx newval, rtx weak,
10298 rtx mod_s, rtx mod_f)
10300 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10301 rtx addr = gen_reg_rtx (Pmode);
10302 rtx off = gen_reg_rtx (SImode);
10303 rtx oldv = gen_reg_rtx (SImode);
10304 rtx newv = gen_reg_rtx (SImode);
10305 rtx oldvalue = gen_reg_rtx (SImode);
10306 rtx newvalue = gen_reg_rtx (SImode);
10307 rtx res = gen_reg_rtx (SImode);
10308 rtx resv = gen_reg_rtx (SImode);
10309 rtx memsi, val, mask, end_label, loop_label, cc, x;
10310 machine_mode mode;
10311 bool is_weak = (weak != const0_rtx);
10313 /* Truncate the address. */
10314 emit_insn (gen_rtx_SET (addr,
10315 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10317 /* Compute the datum offset. */
10318 emit_insn (gen_rtx_SET (off,
10319 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10320 if (TARGET_BIG_ENDIAN)
10321 emit_insn (gen_rtx_SET (off,
10322 gen_rtx_MINUS (SImode,
10323 (GET_MODE (mem) == QImode) ?
10324 GEN_INT (3) : GEN_INT (2), off)));
10326 /* Normal read from truncated address. */
10327 memsi = gen_rtx_MEM (SImode, addr);
10328 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10329 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10331 val = copy_to_reg (memsi);
10333 /* Convert the offset in bits. */
10334 emit_insn (gen_rtx_SET (off,
10335 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10337 /* Get the proper mask. */
10338 if (GET_MODE (mem) == QImode)
10339 mask = force_reg (SImode, GEN_INT (0xff));
10340 else
10341 mask = force_reg (SImode, GEN_INT (0xffff));
10343 emit_insn (gen_rtx_SET (mask,
10344 gen_rtx_ASHIFT (SImode, mask, off)));
10346 /* Prepare the old and new values. */
10347 emit_insn (gen_rtx_SET (val,
10348 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10349 val)));
10351 oldval = gen_lowpart (SImode, oldval);
10352 emit_insn (gen_rtx_SET (oldv,
10353 gen_rtx_ASHIFT (SImode, oldval, off)));
10355 newval = gen_lowpart_common (SImode, newval);
10356 emit_insn (gen_rtx_SET (newv,
10357 gen_rtx_ASHIFT (SImode, newval, off)));
10359 emit_insn (gen_rtx_SET (oldv,
10360 gen_rtx_AND (SImode, oldv, mask)));
10362 emit_insn (gen_rtx_SET (newv,
10363 gen_rtx_AND (SImode, newv, mask)));
10365 if (!is_weak)
10367 end_label = gen_label_rtx ();
10368 loop_label = gen_label_rtx ();
10369 emit_label (loop_label);
10372 /* Make the old and new values. */
10373 emit_insn (gen_rtx_SET (oldvalue,
10374 gen_rtx_IOR (SImode, oldv, val)));
10376 emit_insn (gen_rtx_SET (newvalue,
10377 gen_rtx_IOR (SImode, newv, val)));
10379 /* Try an 32bit atomic compare and swap. It clobbers the CC
10380 register. */
10381 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue,
10382 weak, mod_s, mod_f));
10384 /* Regardless of the weakness of the operation, a proper boolean
10385 result needs to be provided. */
10386 x = gen_rtx_REG (CC_Zmode, CC_REG);
10387 x = gen_rtx_EQ (SImode, x, const0_rtx);
10388 emit_insn (gen_rtx_SET (bool_result, x));
10390 if (!is_weak)
10392 /* Check the results: if the atomic op is successfully the goto
10393 to end label. */
10394 x = gen_rtx_REG (CC_Zmode, CC_REG);
10395 x = gen_rtx_EQ (VOIDmode, x, const0_rtx);
10396 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10397 gen_rtx_LABEL_REF (Pmode, end_label), pc_rtx);
10398 emit_jump_insn (gen_rtx_SET (pc_rtx, x));
10400 /* Wait for the right moment when the accessed 32-bit location
10401 is stable. */
10402 emit_insn (gen_rtx_SET (resv,
10403 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10404 res)));
10405 mode = SELECT_CC_MODE (NE, resv, val);
10406 cc = gen_rtx_REG (mode, CC_REG);
10407 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, resv, val)));
10409 /* Set the new value of the 32 bit location, proper masked. */
10410 emit_insn (gen_rtx_SET (val, resv));
10412 /* Try again if location is unstable. Fall through if only
10413 scond op failed. */
10414 x = gen_rtx_NE (VOIDmode, cc, const0_rtx);
10415 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10416 gen_rtx_LABEL_REF (Pmode, loop_label), pc_rtx);
10417 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10419 emit_label (end_label);
10422 /* End: proper return the result for the given mode. */
10423 emit_insn (gen_rtx_SET (res,
10424 gen_rtx_AND (SImode, res, mask)));
10426 emit_insn (gen_rtx_SET (res,
10427 gen_rtx_LSHIFTRT (SImode, res, off)));
10429 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
10432 /* Helper function used by "atomic_compare_and_swap" expand
10433 pattern. */
10435 void
10436 arc_expand_compare_and_swap (rtx operands[])
10438 rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
10439 machine_mode mode;
10441 bval = operands[0];
10442 rval = operands[1];
10443 mem = operands[2];
10444 oldval = operands[3];
10445 newval = operands[4];
10446 is_weak = operands[5];
10447 mod_s = operands[6];
10448 mod_f = operands[7];
10449 mode = GET_MODE (mem);
10451 if (reg_overlap_mentioned_p (rval, oldval))
10452 oldval = copy_to_reg (oldval);
10454 if (mode == SImode)
10456 emit_insn (gen_atomic_compare_and_swapsi_1 (rval, mem, oldval, newval,
10457 is_weak, mod_s, mod_f));
10458 x = gen_rtx_REG (CC_Zmode, CC_REG);
10459 x = gen_rtx_EQ (SImode, x, const0_rtx);
10460 emit_insn (gen_rtx_SET (bval, x));
10462 else
10464 arc_expand_compare_and_swap_qh (bval, rval, mem, oldval, newval,
10465 is_weak, mod_s, mod_f);
10469 /* Helper function used by the "atomic_compare_and_swapsi_1"
10470 pattern. */
10472 void
10473 arc_split_compare_and_swap (rtx operands[])
10475 rtx rval, mem, oldval, newval;
10476 machine_mode mode;
10477 enum memmodel mod_s, mod_f;
10478 bool is_weak;
10479 rtx label1, label2, x, cond;
10481 rval = operands[0];
10482 mem = operands[1];
10483 oldval = operands[2];
10484 newval = operands[3];
10485 is_weak = (operands[4] != const0_rtx);
10486 mod_s = (enum memmodel) INTVAL (operands[5]);
10487 mod_f = (enum memmodel) INTVAL (operands[6]);
10488 mode = GET_MODE (mem);
10490 /* ARC atomic ops work only with 32-bit aligned memories. */
10491 gcc_assert (mode == SImode);
10493 arc_pre_atomic_barrier (mod_s);
10495 label1 = NULL_RTX;
10496 if (!is_weak)
10498 label1 = gen_label_rtx ();
10499 emit_label (label1);
10501 label2 = gen_label_rtx ();
10503 /* Load exclusive. */
10504 emit_insn (gen_arc_load_exclusivesi (rval, mem));
10506 /* Check if it is oldval. */
10507 mode = SELECT_CC_MODE (NE, rval, oldval);
10508 cond = gen_rtx_REG (mode, CC_REG);
10509 emit_insn (gen_rtx_SET (cond, gen_rtx_COMPARE (mode, rval, oldval)));
10511 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10512 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10513 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
10514 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10516 /* Exclusively store new item. Store clobbers CC reg. */
10517 emit_insn (gen_arc_store_exclusivesi (mem, newval));
10519 if (!is_weak)
10521 /* Check the result of the store. */
10522 cond = gen_rtx_REG (CC_Zmode, CC_REG);
10523 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10524 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10525 gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
10526 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10529 if (mod_f != MEMMODEL_RELAXED)
10530 emit_label (label2);
10532 arc_post_atomic_barrier (mod_s);
10534 if (mod_f == MEMMODEL_RELAXED)
10535 emit_label (label2);
10538 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
10539 to perform. MEM is the memory on which to operate. VAL is the second
10540 operand of the binary operator. BEFORE and AFTER are optional locations to
10541 return the value of MEM either before of after the operation. MODEL_RTX
10542 is a CONST_INT containing the memory model to use. */
10544 void
10545 arc_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
10546 rtx orig_before, rtx orig_after, rtx model_rtx)
10548 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
10549 machine_mode mode = GET_MODE (mem);
10550 rtx label, x, cond;
10551 rtx before = orig_before, after = orig_after;
10553 /* ARC atomic ops work only with 32-bit aligned memories. */
10554 gcc_assert (mode == SImode);
10556 arc_pre_atomic_barrier (model);
10558 label = gen_label_rtx ();
10559 emit_label (label);
10560 label = gen_rtx_LABEL_REF (VOIDmode, label);
10562 if (before == NULL_RTX)
10563 before = gen_reg_rtx (mode);
10565 if (after == NULL_RTX)
10566 after = gen_reg_rtx (mode);
10568 /* Load exclusive. */
10569 emit_insn (gen_arc_load_exclusivesi (before, mem));
10571 switch (code)
10573 case NOT:
10574 x = gen_rtx_AND (mode, before, val);
10575 emit_insn (gen_rtx_SET (after, x));
10576 x = gen_rtx_NOT (mode, after);
10577 emit_insn (gen_rtx_SET (after, x));
10578 break;
10580 case MINUS:
10581 if (CONST_INT_P (val))
10583 val = GEN_INT (-INTVAL (val));
10584 code = PLUS;
10587 /* FALLTHRU. */
10588 default:
10589 x = gen_rtx_fmt_ee (code, mode, before, val);
10590 emit_insn (gen_rtx_SET (after, x));
10591 break;
10594 /* Exclusively store new item. Store clobbers CC reg. */
10595 emit_insn (gen_arc_store_exclusivesi (mem, after));
10597 /* Check the result of the store. */
10598 cond = gen_rtx_REG (CC_Zmode, CC_REG);
10599 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10600 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10601 label, pc_rtx);
10602 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10604 arc_post_atomic_barrier (model);
10607 /* Implement TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P. */
10609 static bool
10610 arc_no_speculation_in_delay_slots_p ()
10612 return true;
10615 /* Return a parallel of registers to represent where to find the
10616 register pieces if required, otherwise NULL_RTX. */
10618 static rtx
10619 arc_dwarf_register_span (rtx rtl)
10621 machine_mode mode = GET_MODE (rtl);
10622 unsigned regno;
10623 rtx p;
10625 if (GET_MODE_SIZE (mode) != 8)
10626 return NULL_RTX;
10628 p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
10629 regno = REGNO (rtl);
10630 XVECEXP (p, 0, 0) = gen_rtx_REG (SImode, regno);
10631 XVECEXP (p, 0, 1) = gen_rtx_REG (SImode, regno + 1);
10633 return p;
10636 /* Return true if OP is an acceptable memory operand for ARCompact
10637 16-bit load instructions of MODE.
10639 AV2SHORT: TRUE if address needs to fit into the new ARCv2 short
10640 non scaled instructions.
10642 SCALED: TRUE if address can be scaled. */
10644 bool
10645 compact_memory_operand_p (rtx op, machine_mode mode,
10646 bool av2short, bool scaled)
10648 rtx addr, plus0, plus1;
10649 int size, off;
10651 /* Eliminate non-memory operations. */
10652 if (GET_CODE (op) != MEM)
10653 return 0;
10655 /* .di instructions have no 16-bit form. */
10656 if (MEM_VOLATILE_P (op) && !TARGET_VOLATILE_CACHE_SET)
10657 return false;
10659 if (mode == VOIDmode)
10660 mode = GET_MODE (op);
10662 size = GET_MODE_SIZE (mode);
10664 /* dword operations really put out 2 instructions, so eliminate
10665 them. */
10666 if (size > UNITS_PER_WORD)
10667 return false;
10669 /* Decode the address now. */
10670 addr = XEXP (op, 0);
10671 switch (GET_CODE (addr))
10673 case REG:
10674 return (REGNO (addr) >= FIRST_PSEUDO_REGISTER
10675 || COMPACT_GP_REG_P (REGNO (addr))
10676 || (SP_REG_P (REGNO (addr)) && (size != 2)));
10677 case PLUS:
10678 plus0 = XEXP (addr, 0);
10679 plus1 = XEXP (addr, 1);
10681 if ((GET_CODE (plus0) == REG)
10682 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10683 || COMPACT_GP_REG_P (REGNO (plus0)))
10684 && ((GET_CODE (plus1) == REG)
10685 && ((REGNO (plus1) >= FIRST_PSEUDO_REGISTER)
10686 || COMPACT_GP_REG_P (REGNO (plus1)))))
10688 return !av2short;
10691 if ((GET_CODE (plus0) == REG)
10692 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10693 || (COMPACT_GP_REG_P (REGNO (plus0)) && !av2short)
10694 || (IN_RANGE (REGNO (plus0), 0, 31) && av2short))
10695 && (GET_CODE (plus1) == CONST_INT))
10697 bool valid = false;
10699 off = INTVAL (plus1);
10701 /* Negative offset is not supported in 16-bit load/store insns. */
10702 if (off < 0)
10703 return 0;
10705 /* Only u5 immediates allowed in code density instructions. */
10706 if (av2short)
10708 switch (size)
10710 case 1:
10711 return false;
10712 case 2:
10713 /* This is an ldh_s.x instruction, check the u6
10714 immediate. */
10715 if (COMPACT_GP_REG_P (REGNO (plus0)))
10716 valid = true;
10717 break;
10718 case 4:
10719 /* Only u5 immediates allowed in 32bit access code
10720 density instructions. */
10721 if (REGNO (plus0) <= 31)
10722 return ((off < 32) && (off % 4 == 0));
10723 break;
10724 default:
10725 return false;
10728 else
10729 if (COMPACT_GP_REG_P (REGNO (plus0)))
10730 valid = true;
10732 if (valid)
10735 switch (size)
10737 case 1:
10738 return (off < 32);
10739 case 2:
10740 /* The 6-bit constant get shifted to fit the real
10741 5-bits field. Check also for the alignment. */
10742 return ((off < 64) && (off % 2 == 0));
10743 case 4:
10744 return ((off < 128) && (off % 4 == 0));
10745 default:
10746 return false;
10751 if (REG_P (plus0) && CONST_INT_P (plus1)
10752 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10753 || SP_REG_P (REGNO (plus0)))
10754 && !av2short)
10756 off = INTVAL (plus1);
10757 return ((size != 2) && (off >= 0 && off < 128) && (off % 4 == 0));
10760 if ((GET_CODE (plus0) == MULT)
10761 && (GET_CODE (XEXP (plus0, 0)) == REG)
10762 && ((REGNO (XEXP (plus0, 0)) >= FIRST_PSEUDO_REGISTER)
10763 || COMPACT_GP_REG_P (REGNO (XEXP (plus0, 0))))
10764 && (GET_CODE (plus1) == REG)
10765 && ((REGNO (plus1) >= FIRST_PSEUDO_REGISTER)
10766 || COMPACT_GP_REG_P (REGNO (plus1))))
10767 return scaled;
10768 default:
10769 break ;
10770 /* TODO: 'gp' and 'pcl' are to supported as base address operand
10771 for 16-bit load instructions. */
10773 return false;
10776 /* Return the frame pointer value to be backed up in the setjmp buffer. */
10778 static rtx
10779 arc_builtin_setjmp_frame_value (void)
10781 /* We always want to preserve whatever value is currently in the frame
10782 pointer register. For frames that are using the frame pointer the new
10783 value of the frame pointer register will have already been computed
10784 (as part of the prologue). For frames that are not using the frame
10785 pointer it is important that we backup whatever value is in the frame
10786 pointer register, as earlier (more outer) frames may have placed a
10787 value into the frame pointer register. It might be tempting to try
10788 and use `frame_pointer_rtx` here, however, this is not what we want.
10789 For frames that are using the frame pointer this will give the
10790 correct value. However, for frames that are not using the frame
10791 pointer this will still give the value that _would_ have been the
10792 frame pointer value for this frame (if the use of the frame pointer
10793 had not been removed). We really do want the raw frame pointer
10794 register value. */
10795 return gen_raw_REG (Pmode, FRAME_POINTER_REGNUM);
10798 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
10799 anchors for small data: the GP register acts as an anchor in that
10800 case. We also don't want to use them for PC-relative accesses,
10801 where the PC acts as an anchor. Prohibit also TLS symbols to use
10802 anchors. */
10804 static bool
10805 arc_use_anchors_for_symbol_p (const_rtx symbol)
10807 if (SYMBOL_REF_TLS_MODEL (symbol))
10808 return false;
10810 if (flag_pic)
10811 return false;
10813 if (SYMBOL_REF_SMALL_P (symbol))
10814 return false;
10816 return default_use_anchors_for_symbol_p (symbol);
10819 /* Return true if SUBST can't safely replace its equivalent during RA. */
10820 static bool
10821 arc_cannot_substitute_mem_equiv_p (rtx)
10823 /* If SUBST is mem[base+index], the address may not fit ISA,
10824 thus return true. */
10825 return true;
10828 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
10829 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P arc_use_anchors_for_symbol_p
10831 #undef TARGET_CONSTANT_ALIGNMENT
10832 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
10834 #undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P
10835 #define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P arc_cannot_substitute_mem_equiv_p
10837 struct gcc_target targetm = TARGET_INITIALIZER;
10839 #include "gt-arc.h"