1 /* Subroutines used for code generation on the Synopsys DesignWare ARC cpu.
2 Copyright (C) 1994-2018 Free Software Foundation, Inc.
4 Sources derived from work done by Sankhya Technologies (www.sankhya.com) on
5 behalf of Synopsys Inc.
7    Position Independent Code support added, Code cleaned up,
8 Comments and Support For ARC700 instructions added by
9 Saurabh Verma (saurabh.verma@codito.com)
10 Ramana Radhakrishnan(ramana.radhakrishnan@codito.com)
12 Fixing ABI inconsistencies, optimizations for ARC600 / ARC700 pipelines,
13 profiling support added by Joern Rennecke <joern.rennecke@embecosm.com>
15 This file is part of GCC.
17 GCC is free software; you can redistribute it and/or modify
18 it under the terms of the GNU General Public License as published by
19 the Free Software Foundation; either version 3, or (at your option)
20 any later version.
22 GCC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING3. If not see
29 <http://www.gnu.org/licenses/>. */
31 #define IN_TARGET_CODE 1
33 #include "config.h"
34 #include "system.h"
35 #include "coretypes.h"
36 #include "memmodel.h"
37 #include "backend.h"
38 #include "target.h"
39 #include "rtl.h"
40 #include "tree.h"
41 #include "cfghooks.h"
42 #include "df.h"
43 #include "tm_p.h"
44 #include "stringpool.h"
45 #include "attribs.h"
46 #include "optabs.h"
47 #include "regs.h"
48 #include "emit-rtl.h"
49 #include "recog.h"
50 #include "diagnostic.h"
51 #include "fold-const.h"
52 #include "varasm.h"
53 #include "stor-layout.h"
54 #include "calls.h"
55 #include "output.h"
56 #include "insn-attr.h"
57 #include "flags.h"
58 #include "explow.h"
59 #include "expr.h"
60 #include "langhooks.h"
61 #include "tm-constrs.h"
62 #include "reload.h" /* For operands_match_p */
63 #include "cfgrtl.h"
64 #include "tree-pass.h"
65 #include "context.h"
66 #include "builtins.h"
67 #include "rtl-iter.h"
68 #include "alias.h"
69 #include "opts.h"
70 #include "hw-doloop.h"
72 /* Which cpu we're compiling for (ARC600, ARC601, ARC700). */
73 static char arc_cpu_name[10] = "";
74 static const char *arc_cpu_string = arc_cpu_name;
76 /* Track which regs are set fixed/call saved/call used from command line.  */
77 HARD_REG_SET overrideregs;
79 /* Maximum size of a loop. */
80 #define ARC_MAX_LOOP_LENGTH 4095
82 /* ??? Loads can handle any constant, stores can only handle small ones. */
83 /* OTOH, LIMMs cost extra, so their usefulness is limited. */
84 #define RTX_OK_FOR_OFFSET_P(MODE, X) \
85 (GET_CODE (X) == CONST_INT \
86 && SMALL_INT_RANGE (INTVAL (X), (GET_MODE_SIZE (MODE) - 1) & -4, \
87 (INTVAL (X) & (GET_MODE_SIZE (MODE) - 1) & 3 \
88 ? 0 \
89 : -(-GET_MODE_SIZE (MODE) | -4) >> 1)))
91 #define LEGITIMATE_SMALL_DATA_OFFSET_P(X) \
92 (GET_CODE (X) == CONST \
93 && GET_CODE (XEXP ((X), 0)) == PLUS \
94 && GET_CODE (XEXP (XEXP ((X), 0), 0)) == SYMBOL_REF \
95 && SYMBOL_REF_SMALL_P (XEXP (XEXP ((X), 0), 0)) \
96 && GET_CODE (XEXP(XEXP ((X), 0), 1)) == CONST_INT \
97 && INTVAL (XEXP (XEXP ((X), 0), 1)) <= g_switch_value)
99 #define LEGITIMATE_SMALL_DATA_ADDRESS_P(X) \
100 (GET_CODE (X) == PLUS \
101 && REG_P (XEXP ((X), 0)) \
102 && REGNO (XEXP ((X), 0)) == SDATA_BASE_REGNUM \
103 && ((GET_CODE (XEXP ((X), 1)) == SYMBOL_REF \
104 && SYMBOL_REF_SMALL_P (XEXP ((X), 1))) \
105 || LEGITIMATE_SMALL_DATA_OFFSET_P (XEXP ((X), 1))))
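/* For illustration (the register and symbol names below are made up), the two
   address shapes accepted by LEGITIMATE_SMALL_DATA_ADDRESS_P are roughly
       (plus (reg gp) (symbol_ref "foo"))                              and
       (plus (reg gp) (const (plus (symbol_ref "foo") (const_int 4))))
   i.e. a gp-relative small-data symbol, optionally with a constant offset
   that stays below the -G / g_switch_value threshold.  */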
107 /* Array of valid operand punctuation characters. */
108 char arc_punct_chars[256];
110 /* State used by arc_ccfsm_advance to implement conditional execution. */
111 struct GTY (()) arc_ccfsm
113 int state;
114 int cc;
115 rtx cond;
116 rtx_insn *target_insn;
117 int target_label;
120 /* Status of the IRQ_CTRL_AUX register. */
121 typedef struct irq_ctrl_saved_t
123 /* Last register number used by IRQ_CTRL_SAVED aux_reg. */
124 short irq_save_last_reg;
125 /* True if BLINK is automatically saved. */
126 bool irq_save_blink;
127 /* True if LPCOUNT is automatically saved. */
128 bool irq_save_lpcount;
129 } irq_ctrl_saved_t;
130 static irq_ctrl_saved_t irq_ctrl_saved;
132 #define ARC_AUTOBLINK_IRQ_P(FNTYPE) \
133 ((ARC_INTERRUPT_P (FNTYPE) \
134 && irq_ctrl_saved.irq_save_blink) \
135 || (ARC_FAST_INTERRUPT_P (FNTYPE) \
136 && rgf_banked_register_count > 8))
138 #define ARC_AUTOFP_IRQ_P(FNTYPE) \
139 ((ARC_INTERRUPT_P (FNTYPE) \
140 && (irq_ctrl_saved.irq_save_last_reg > 26)) \
141 || (ARC_FAST_INTERRUPT_P (FNTYPE) \
142 && rgf_banked_register_count > 8))
144 #define ARC_AUTO_IRQ_P(FNTYPE) \
145 (ARC_INTERRUPT_P (FNTYPE) && !ARC_FAST_INTERRUPT_P (FNTYPE) \
146 && (irq_ctrl_saved.irq_save_blink \
147 || (irq_ctrl_saved.irq_save_last_reg >= 0)))
149 /* Number of registers in second bank for FIRQ support. */
150 static int rgf_banked_register_count;
152 #define arc_ccfsm_current cfun->machine->ccfsm_current
154 #define ARC_CCFSM_BRANCH_DELETED_P(STATE) \
155 ((STATE)->state == 1 || (STATE)->state == 2)
157 /* Indicate we're conditionalizing insns now. */
158 #define ARC_CCFSM_RECORD_BRANCH_DELETED(STATE) \
159 ((STATE)->state += 2)
161 #define ARC_CCFSM_COND_EXEC_P(STATE) \
162 ((STATE)->state == 3 || (STATE)->state == 4 || (STATE)->state == 5 \
163 || current_insn_predicate)
165 /* Check if INSN has a 16 bit opcode considering struct arc_ccfsm *STATE. */
166 #define CCFSM_ISCOMPACT(INSN,STATE) \
167 (ARC_CCFSM_COND_EXEC_P (STATE) \
168 ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
169 || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
170 : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
172 /* Likewise, but also consider that INSN might be in a delay slot of JUMP. */
173 #define CCFSM_DBR_ISCOMPACT(INSN,JUMP,STATE) \
174 ((ARC_CCFSM_COND_EXEC_P (STATE) \
175 || (JUMP_P (JUMP) \
176 && INSN_ANNULLED_BRANCH_P (JUMP) \
177 && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (INSN)))) \
178 ? (get_attr_iscompact (INSN) == ISCOMPACT_TRUE \
179 || get_attr_iscompact (INSN) == ISCOMPACT_TRUE_LIMM) \
180 : get_attr_iscompact (INSN) != ISCOMPACT_FALSE)
182 /* The maximum number of insns skipped which will be conditionalised if
183 possible. */
184 /* When optimizing for speed:
185 Let p be the probability that the potentially skipped insns need to
186 be executed, pn the cost of a correctly predicted non-taken branch,
187 mt the cost of a mis/non-predicted taken branch,
188 mn mispredicted non-taken, pt correctly predicted taken ;
189 costs expressed in numbers of instructions like the ones considered
190 skipping.
191 Unfortunately we don't have a measure of predictability - this
192 is linked to probability only in that in the no-eviction-scenario
193 there is a lower bound 1 - 2 * min (p, 1-p), and a somewhat larger
194 value that can be assumed *if* the distribution is perfectly random.
195    A predictability of 1 is perfectly plausible no matter what p is,
196 because the decision could be dependent on an invocation parameter
197 of the program.
198 For large p, we want MAX_INSNS_SKIPPED == pn/(1-p) + mt - pn
199 For small p, we want MAX_INSNS_SKIPPED == pt
201 When optimizing for size:
202    We want to skip the insn unless we could use 16 bit opcodes for the
203    non-conditionalized insn to balance the branch length or more.
204    Performance can be the tie-breaker.  */
205 /* If the potentially-skipped insns are likely to be executed, we'll
206 generally save one non-taken branch
208 this to be no less than the 1/p */
209 #define MAX_INSNS_SKIPPED 3
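/* A rough worked example of the trade-off above (the numbers are purely
   illustrative, not measured): with pn = 1, mt = 3 and p = 0.75, the large-p
   formula gives pn/(1-p) + mt - pn = 1/0.25 + 3 - 1 = 6, while for small p
   the bound is just pt (about 1).  MAX_INSNS_SKIPPED == 3 sits between those
   two regimes.  */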
211 /* A nop is needed between a 4 byte insn that sets the condition codes and
212 a branch that uses them (the same isn't true for an 8 byte insn that sets
213 the condition codes). Set by arc_ccfsm_advance. Used by
214 arc_print_operand. */
216 static int get_arc_condition_code (rtx);
218 static tree arc_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
219 static tree arc_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
221 /* Initialized arc_attribute_table to NULL since arc does not have any
222 machine specific supported attributes. */
223 const struct attribute_spec arc_attribute_table[] =
225 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
226 affects_type_identity, handler, exclude } */
227 { "interrupt", 1, 1, true, false, false, true,
228 arc_handle_interrupt_attribute, NULL },
229 /* Function calls made to this symbol must be done indirectly, because
230 it may lie outside of the 21/25 bit addressing range of a normal function
231 call. */
232 { "long_call", 0, 0, false, true, true, false, NULL, NULL },
233 /* Whereas these functions are always known to reside within the 25 bit
234 addressing range of unconditionalized bl. */
235 { "medium_call", 0, 0, false, true, true, false, NULL, NULL },
236 /* And these functions are always known to reside within the 21 bit
237 addressing range of blcc. */
238 { "short_call", 0, 0, false, true, true, false, NULL, NULL },
239   /* Functions which do not have the prologue and epilogue generated
240      by the compiler.  */
241 { "naked", 0, 0, true, false, false, false, arc_handle_fndecl_attribute,
242 NULL },
243 { NULL, 0, 0, false, false, false, false, NULL, NULL }
245 static int arc_comp_type_attributes (const_tree, const_tree);
246 static void arc_file_start (void);
247 static void arc_internal_label (FILE *, const char *, unsigned long);
248 static void arc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
249 tree);
250 static int arc_address_cost (rtx, machine_mode, addr_space_t, bool);
251 static void arc_encode_section_info (tree decl, rtx rtl, int first);
253 static void arc_init_builtins (void);
254 static rtx arc_expand_builtin (tree, rtx, rtx, machine_mode, int);
256 static int branch_dest (rtx);
258 static void arc_output_pic_addr_const (FILE *, rtx, int);
259 static bool arc_function_ok_for_sibcall (tree, tree);
260 static rtx arc_function_value (const_tree, const_tree, bool);
261 const char * output_shift (rtx *);
262 static void arc_reorg (void);
263 static bool arc_in_small_data_p (const_tree);
265 static void arc_init_reg_tables (void);
266 static bool arc_return_in_memory (const_tree, const_tree);
267 static bool arc_vector_mode_supported_p (machine_mode);
269 static bool arc_can_use_doloop_p (const widest_int &, const widest_int &,
270 unsigned int, bool);
271 static const char *arc_invalid_within_doloop (const rtx_insn *);
273 static void output_short_suffix (FILE *file);
275 static bool arc_frame_pointer_required (void);
277 static bool arc_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
278 unsigned int,
279 enum by_pieces_operation op,
280 bool);
282 /* Globally visible information about currently selected cpu. */
283 const arc_cpu_t *arc_selected_cpu;
285 static bool
286 legitimate_scaled_address_p (machine_mode mode, rtx op, bool strict)
288 if (GET_CODE (op) != PLUS)
289 return false;
291 if (GET_CODE (XEXP (op, 0)) != MULT)
292 return false;
294 /* Check multiplication operands. */
295 if (!RTX_OK_FOR_INDEX_P (XEXP (XEXP (op, 0), 0), strict))
296 return false;
298 if (!CONST_INT_P (XEXP (XEXP (op, 0), 1)))
299 return false;
301 switch (GET_MODE_SIZE (mode))
303 case 2:
304 if (INTVAL (XEXP (XEXP (op, 0), 1)) != 2)
305 return false;
306 break;
307 case 8:
308 if (!TARGET_LL64)
309 return false;
310 /* Fall through. */
311 case 4:
312 if (INTVAL (XEXP (XEXP (op, 0), 1)) != 4)
313 return false;
314 /* Fall through. */
315 default:
316 return false;
319 /* Check the base. */
320 if (RTX_OK_FOR_BASE_P (XEXP (op, 1), (strict)))
321 return true;
323 if (flag_pic)
325 if (CONST_INT_P (XEXP (op, 1)))
326 return true;
327 return false;
329 if (CONSTANT_P (XEXP (op, 1)))
331       /* Scaled addresses for sdata are handled in other places.  */
332 if (GET_CODE (XEXP (op, 1)) == SYMBOL_REF
333 && SYMBOL_REF_SMALL_P (XEXP (op, 1)))
334 return false;
335 return true;
338 return false;
341 /* Check for constructions like REG + OFFS, where OFFS can be a
342    register, an immediate or a long immediate.  */
344 static bool
345 legitimate_offset_address_p (machine_mode mode, rtx x, bool index, bool strict)
347 if (GET_CODE (x) != PLUS)
348 return false;
350 if (!RTX_OK_FOR_BASE_P (XEXP (x, 0), (strict)))
351 return false;
353 /* Check for: [Rx + small offset] or [Rx + Ry]. */
354 if (((index && RTX_OK_FOR_INDEX_P (XEXP (x, 1), (strict))
355 && GET_MODE_SIZE ((mode)) <= 4)
356 || RTX_OK_FOR_OFFSET_P (mode, XEXP (x, 1))))
357 return true;
359 /* Check for [Rx + symbol]. */
360 if (!flag_pic
361 && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
362 /* Avoid this type of address for double or larger modes. */
363 && (GET_MODE_SIZE (mode) <= 4)
364 /* Avoid small data which ends in something like GP +
365 symb@sda. */
366 && (!SYMBOL_REF_SMALL_P (XEXP (x, 1))))
367 return true;
369 return false;
372 /* Implements target hook vector_mode_supported_p. */
374 static bool
375 arc_vector_mode_supported_p (machine_mode mode)
377 switch (mode)
379 case E_V2HImode:
380 return TARGET_PLUS_DMPY;
381 case E_V4HImode:
382 case E_V2SImode:
383 return TARGET_PLUS_QMACW;
384 case E_V4SImode:
385 case E_V8HImode:
386 return TARGET_SIMD_SET;
388 default:
389 return false;
393 /* Implements target hook TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
395 static machine_mode
396 arc_preferred_simd_mode (scalar_mode mode)
398 switch (mode)
400 case E_HImode:
401 return TARGET_PLUS_QMACW ? V4HImode : V2HImode;
402 case E_SImode:
403 return V2SImode;
405 default:
406 return word_mode;
410 /* Implements target hook
411 TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */
413 static void
414 arc_autovectorize_vector_sizes (vector_sizes *sizes)
416 if (TARGET_PLUS_QMACW)
418 sizes->quick_push (8);
419 sizes->quick_push (4);
423 /* TARGET_PRESERVE_RELOAD_P is still awaiting patch re-evaluation / review. */
424 static bool arc_preserve_reload_p (rtx in) ATTRIBUTE_UNUSED;
425 static rtx arc_delegitimize_address (rtx);
426 static bool arc_can_follow_jump (const rtx_insn *follower,
427 const rtx_insn *followee);
429 static rtx frame_insn (rtx);
430 static void arc_function_arg_advance (cumulative_args_t, machine_mode,
431 const_tree, bool);
432 static rtx arc_legitimize_address_0 (rtx, rtx, machine_mode mode);
434 static void arc_finalize_pic (void);
436 /* Initialize the GCC target structure.  */
437 #undef TARGET_COMP_TYPE_ATTRIBUTES
438 #define TARGET_COMP_TYPE_ATTRIBUTES arc_comp_type_attributes
439 #undef TARGET_ASM_FILE_START
440 #define TARGET_ASM_FILE_START arc_file_start
441 #undef TARGET_ATTRIBUTE_TABLE
442 #define TARGET_ATTRIBUTE_TABLE arc_attribute_table
443 #undef TARGET_ASM_INTERNAL_LABEL
444 #define TARGET_ASM_INTERNAL_LABEL arc_internal_label
445 #undef TARGET_RTX_COSTS
446 #define TARGET_RTX_COSTS arc_rtx_costs
447 #undef TARGET_ADDRESS_COST
448 #define TARGET_ADDRESS_COST arc_address_cost
450 #undef TARGET_ENCODE_SECTION_INFO
451 #define TARGET_ENCODE_SECTION_INFO arc_encode_section_info
453 #undef TARGET_CANNOT_FORCE_CONST_MEM
454 #define TARGET_CANNOT_FORCE_CONST_MEM arc_cannot_force_const_mem
456 #undef TARGET_INIT_BUILTINS
457 #define TARGET_INIT_BUILTINS arc_init_builtins
459 #undef TARGET_EXPAND_BUILTIN
460 #define TARGET_EXPAND_BUILTIN arc_expand_builtin
462 #undef TARGET_BUILTIN_DECL
463 #define TARGET_BUILTIN_DECL arc_builtin_decl
465 #undef TARGET_ASM_OUTPUT_MI_THUNK
466 #define TARGET_ASM_OUTPUT_MI_THUNK arc_output_mi_thunk
468 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
469 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
471 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
472 #define TARGET_FUNCTION_OK_FOR_SIBCALL arc_function_ok_for_sibcall
474 #undef TARGET_MACHINE_DEPENDENT_REORG
475 #define TARGET_MACHINE_DEPENDENT_REORG arc_reorg
477 #undef TARGET_IN_SMALL_DATA_P
478 #define TARGET_IN_SMALL_DATA_P arc_in_small_data_p
480 #undef TARGET_PROMOTE_FUNCTION_MODE
481 #define TARGET_PROMOTE_FUNCTION_MODE \
482 default_promote_function_mode_always_promote
484 #undef TARGET_PROMOTE_PROTOTYPES
485 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
487 #undef TARGET_RETURN_IN_MEMORY
488 #define TARGET_RETURN_IN_MEMORY arc_return_in_memory
489 #undef TARGET_PASS_BY_REFERENCE
490 #define TARGET_PASS_BY_REFERENCE arc_pass_by_reference
492 #undef TARGET_SETUP_INCOMING_VARARGS
493 #define TARGET_SETUP_INCOMING_VARARGS arc_setup_incoming_varargs
495 #undef TARGET_ARG_PARTIAL_BYTES
496 #define TARGET_ARG_PARTIAL_BYTES arc_arg_partial_bytes
498 #undef TARGET_MUST_PASS_IN_STACK
499 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
501 #undef TARGET_FUNCTION_VALUE
502 #define TARGET_FUNCTION_VALUE arc_function_value
504 #undef TARGET_SCHED_ADJUST_PRIORITY
505 #define TARGET_SCHED_ADJUST_PRIORITY arc_sched_adjust_priority
507 #undef TARGET_VECTOR_MODE_SUPPORTED_P
508 #define TARGET_VECTOR_MODE_SUPPORTED_P arc_vector_mode_supported_p
510 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
511 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE arc_preferred_simd_mode
513 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
514 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES arc_autovectorize_vector_sizes
516 #undef TARGET_CAN_USE_DOLOOP_P
517 #define TARGET_CAN_USE_DOLOOP_P arc_can_use_doloop_p
519 #undef TARGET_INVALID_WITHIN_DOLOOP
520 #define TARGET_INVALID_WITHIN_DOLOOP arc_invalid_within_doloop
522 #undef TARGET_PRESERVE_RELOAD_P
523 #define TARGET_PRESERVE_RELOAD_P arc_preserve_reload_p
525 #undef TARGET_CAN_FOLLOW_JUMP
526 #define TARGET_CAN_FOLLOW_JUMP arc_can_follow_jump
528 #undef TARGET_DELEGITIMIZE_ADDRESS
529 #define TARGET_DELEGITIMIZE_ADDRESS arc_delegitimize_address
531 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
532 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
533 arc_use_by_pieces_infrastructure_p
535 /* Usually, we will be able to scale anchor offsets.
536 When this fails, we want LEGITIMIZE_ADDRESS to kick in. */
537 #undef TARGET_MIN_ANCHOR_OFFSET
538 #define TARGET_MIN_ANCHOR_OFFSET (-1024)
539 #undef TARGET_MAX_ANCHOR_OFFSET
540 #define TARGET_MAX_ANCHOR_OFFSET (1020)
542 #undef TARGET_SECONDARY_RELOAD
543 #define TARGET_SECONDARY_RELOAD arc_secondary_reload
545 #define TARGET_OPTION_OVERRIDE arc_override_options
547 #define TARGET_CONDITIONAL_REGISTER_USAGE arc_conditional_register_usage
549 #define TARGET_TRAMPOLINE_INIT arc_initialize_trampoline
551 #define TARGET_CAN_ELIMINATE arc_can_eliminate
553 #define TARGET_FRAME_POINTER_REQUIRED arc_frame_pointer_required
555 #define TARGET_FUNCTION_ARG arc_function_arg
557 #define TARGET_FUNCTION_ARG_ADVANCE arc_function_arg_advance
559 #define TARGET_LEGITIMATE_CONSTANT_P arc_legitimate_constant_p
561 #define TARGET_LEGITIMATE_ADDRESS_P arc_legitimate_address_p
563 #define TARGET_MODE_DEPENDENT_ADDRESS_P arc_mode_dependent_address_p
565 #define TARGET_LEGITIMIZE_ADDRESS arc_legitimize_address
567 #define TARGET_ADJUST_INSN_LENGTH arc_adjust_insn_length
569 #define TARGET_INSN_LENGTH_PARAMETERS arc_insn_length_parameters
571 #undef TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P
572 #define TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P \
573 arc_no_speculation_in_delay_slots_p
575 #undef TARGET_LRA_P
576 #define TARGET_LRA_P arc_lra_p
577 #define TARGET_REGISTER_PRIORITY arc_register_priority
578 /* Stores with scaled offsets have different displacement ranges. */
579 #define TARGET_DIFFERENT_ADDR_DISPLACEMENT_P hook_bool_void_true
580 #define TARGET_SPILL_CLASS arc_spill_class
582 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
583 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arc_allocate_stack_slots_for_args
585 #undef TARGET_WARN_FUNC_RETURN
586 #define TARGET_WARN_FUNC_RETURN arc_warn_func_return
588 #include "target-def.h"
590 #undef TARGET_ASM_ALIGNED_HI_OP
591 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
592 #undef TARGET_ASM_ALIGNED_SI_OP
593 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
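/* With the directives above, a 2 byte aligned data item such as 0x1234 is
   emitted as "\t.hword\t0x1234" and a 4 byte one as "\t.word\t..." in the
   assembly output (the values here are only illustrative).  */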
595 #ifdef HAVE_AS_TLS
596 #undef TARGET_HAVE_TLS
597 #define TARGET_HAVE_TLS HAVE_AS_TLS
598 #endif
600 #undef TARGET_DWARF_REGISTER_SPAN
601 #define TARGET_DWARF_REGISTER_SPAN arc_dwarf_register_span
603 #undef TARGET_HARD_REGNO_NREGS
604 #define TARGET_HARD_REGNO_NREGS arc_hard_regno_nregs
605 #undef TARGET_HARD_REGNO_MODE_OK
606 #define TARGET_HARD_REGNO_MODE_OK arc_hard_regno_mode_ok
608 #undef TARGET_MODES_TIEABLE_P
609 #define TARGET_MODES_TIEABLE_P arc_modes_tieable_p
610 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
611 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE arc_builtin_setjmp_frame_value
613 /* Try to keep the (mov:DF _, reg) as early as possible so
614 that the d<add/sub/mul>h-lr insns appear together and can
615 use the peephole2 pattern. */
617 static int
618 arc_sched_adjust_priority (rtx_insn *insn, int priority)
620 rtx set = single_set (insn);
621 if (set
622 && GET_MODE (SET_SRC(set)) == DFmode
623 && GET_CODE (SET_SRC(set)) == REG)
625 /* Incrementing priority by 20 (empirically derived). */
626 return priority + 20;
629 return priority;
632 /* For ARC base register + offset addressing, the validity of the
633 address is mode-dependent for most of the offset range, as the
634 offset can be scaled by the access size.
635 We don't expose these as mode-dependent addresses in the
636 mode_dependent_address_p target hook, because that would disable
637 lots of optimizations, and most uses of these addresses are for 32
638 or 64 bit accesses anyways, which are fine.
639 However, that leaves some addresses for 8 / 16 bit values not
640 properly reloaded by the generic code, which is why we have to
641 schedule secondary reloads for these. */
643 static reg_class_t
644 arc_secondary_reload (bool in_p,
645 rtx x,
646 reg_class_t cl,
647 machine_mode mode,
648 secondary_reload_info *sri)
650 enum rtx_code code = GET_CODE (x);
652 if (cl == DOUBLE_REGS)
653 return GENERAL_REGS;
655 /* The loop counter register can be stored, but not loaded directly. */
656 if ((cl == LPCOUNT_REG || cl == WRITABLE_CORE_REGS)
657 && in_p && MEM_P (x))
658 return GENERAL_REGS;
660 /* If we have a subreg (reg), where reg is a pseudo (that will end in
661 a memory location), then we may need a scratch register to handle
662 the fp/sp+largeoffset address. */
663 if (code == SUBREG)
665 rtx addr = NULL_RTX;
666 x = SUBREG_REG (x);
668 if (REG_P (x))
670 int regno = REGNO (x);
671 if (regno >= FIRST_PSEUDO_REGISTER)
672 regno = reg_renumber[regno];
674 if (regno != -1)
675 return NO_REGS;
677 /* It is a pseudo that ends in a stack location. */
678 if (reg_equiv_mem (REGNO (x)))
680 /* Get the equivalent address and check the range of the
681 offset. */
682 rtx mem = reg_equiv_mem (REGNO (x));
683 addr = find_replacement (&XEXP (mem, 0));
686 else
688 gcc_assert (MEM_P (x));
689 addr = XEXP (x, 0);
690 addr = simplify_rtx (addr);
692 if (addr && GET_CODE (addr) == PLUS
693 && CONST_INT_P (XEXP (addr, 1))
694 && (!RTX_OK_FOR_OFFSET_P (mode, XEXP (addr, 1))))
696 switch (mode)
698 case E_QImode:
699 sri->icode =
700 in_p ? CODE_FOR_reload_qi_load : CODE_FOR_reload_qi_store;
701 break;
702 case E_HImode:
703 sri->icode =
704 in_p ? CODE_FOR_reload_hi_load : CODE_FOR_reload_hi_store;
705 break;
706 default:
707 break;
711 return NO_REGS;
714 /* Convert reloads using offsets that are too large to use indirect
715 addressing. */
717 void
718 arc_secondary_reload_conv (rtx reg, rtx mem, rtx scratch, bool store_p)
720 rtx addr;
722 gcc_assert (GET_CODE (mem) == MEM);
723 addr = XEXP (mem, 0);
725   /* Large offset: use a move.  FIXME: ld ops accept limms as
726      offsets.  Hence, the following move insn is not required.  */
727 emit_move_insn (scratch, addr);
728 mem = replace_equiv_address_nv (mem, scratch);
730 /* Now create the move. */
731 if (store_p)
732 emit_insn (gen_rtx_SET (mem, reg));
733 else
734 emit_insn (gen_rtx_SET (reg, mem));
736 return;
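/* For example (the register numbers and the offset are made up), a QImode
   reload from [fp + 1600], whose offset does not fit the ldb offset field,
   is rewritten by the function above into roughly
       add  r12,fp,1600
       ldb  r0,[r12]
   where r12 stands for the scratch register supplied by the reload
   pattern.  */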
739 static unsigned arc_ifcvt (void);
741 namespace {
743 const pass_data pass_data_arc_ifcvt =
745 RTL_PASS,
746 "arc_ifcvt", /* name */
747 OPTGROUP_NONE, /* optinfo_flags */
748 TV_IFCVT2, /* tv_id */
749 0, /* properties_required */
750 0, /* properties_provided */
751 0, /* properties_destroyed */
752 0, /* todo_flags_start */
753 TODO_df_finish /* todo_flags_finish */
756 class pass_arc_ifcvt : public rtl_opt_pass
758 public:
759 pass_arc_ifcvt(gcc::context *ctxt)
760 : rtl_opt_pass(pass_data_arc_ifcvt, ctxt)
763 /* opt_pass methods: */
764 opt_pass * clone () { return new pass_arc_ifcvt (m_ctxt); }
765 virtual unsigned int execute (function *) { return arc_ifcvt (); }
768 } // anon namespace
770 rtl_opt_pass *
771 make_pass_arc_ifcvt (gcc::context *ctxt)
773 return new pass_arc_ifcvt (ctxt);
776 static unsigned arc_predicate_delay_insns (void);
778 namespace {
780 const pass_data pass_data_arc_predicate_delay_insns =
782 RTL_PASS,
783 "arc_predicate_delay_insns", /* name */
784 OPTGROUP_NONE, /* optinfo_flags */
785 TV_IFCVT2, /* tv_id */
786 0, /* properties_required */
787 0, /* properties_provided */
788 0, /* properties_destroyed */
789 0, /* todo_flags_start */
790 TODO_df_finish /* todo_flags_finish */
793 class pass_arc_predicate_delay_insns : public rtl_opt_pass
795 public:
796 pass_arc_predicate_delay_insns(gcc::context *ctxt)
797 : rtl_opt_pass(pass_data_arc_predicate_delay_insns, ctxt)
800 /* opt_pass methods: */
801 virtual unsigned int execute (function *)
803 return arc_predicate_delay_insns ();
807 } // anon namespace
809 rtl_opt_pass *
810 make_pass_arc_predicate_delay_insns (gcc::context *ctxt)
812 return new pass_arc_predicate_delay_insns (ctxt);
815 /* Called by OVERRIDE_OPTIONS to initialize various things. */
817 static void
818 arc_init (void)
820 if (TARGET_V2)
822       /* If we have the multiplier, then use it.  */
823 if (TARGET_MPYW || TARGET_MULTI)
824 arc_multcost = COSTS_N_INSNS (1);
826 /* Note: arc_multcost is only used in rtx_cost if speed is true. */
827 if (arc_multcost < 0)
828 switch (arc_tune)
830 case TUNE_ARC700_4_2_STD:
831 /* latency 7;
832 max throughput (1 multiply + 4 other insns) / 5 cycles. */
833 arc_multcost = COSTS_N_INSNS (4);
834 if (TARGET_NOMPY_SET)
835 arc_multcost = COSTS_N_INSNS (30);
836 break;
837 case TUNE_ARC700_4_2_XMAC:
838 /* latency 5;
839 max throughput (1 multiply + 2 other insns) / 3 cycles. */
840 arc_multcost = COSTS_N_INSNS (3);
841 if (TARGET_NOMPY_SET)
842 arc_multcost = COSTS_N_INSNS (30);
843 break;
844 case TUNE_ARC600:
845 if (TARGET_MUL64_SET)
847 arc_multcost = COSTS_N_INSNS (4);
848 break;
850 /* Fall through. */
851 default:
852 arc_multcost = COSTS_N_INSNS (30);
853 break;
856 /* MPY instructions valid only for ARC700 or ARCv2. */
857 if (TARGET_NOMPY_SET && TARGET_ARC600_FAMILY)
858 error ("-mno-mpy supported only for ARC700 or ARCv2");
860 if (!TARGET_DPFP && TARGET_DPFP_DISABLE_LRSR)
861 error ("-mno-dpfp-lrsr supported only with -mdpfp");
863 /* FPX-1. No fast and compact together. */
864 if ((TARGET_DPFP_FAST_SET && TARGET_DPFP_COMPACT_SET)
865 || (TARGET_SPFP_FAST_SET && TARGET_SPFP_COMPACT_SET))
866 error ("FPX fast and compact options cannot be specified together");
868 /* FPX-2. No fast-spfp for arc600 or arc601. */
869 if (TARGET_SPFP_FAST_SET && TARGET_ARC600_FAMILY)
870 error ("-mspfp_fast not available on ARC600 or ARC601");
872 /* FPX-4. No FPX extensions mixed with FPU extensions. */
873 if ((TARGET_DPFP_FAST_SET || TARGET_DPFP_COMPACT_SET || TARGET_SPFP)
874 && TARGET_HARD_FLOAT)
875 error ("No FPX/FPU mixing allowed");
877 /* Warn for unimplemented PIC in pre-ARC700 cores, and disable flag_pic. */
878 if (flag_pic && TARGET_ARC600_FAMILY)
880 warning (DK_WARNING,
881 "PIC is not supported for %s. Generating non-PIC code only..",
882 arc_cpu_string);
883 flag_pic = 0;
886 arc_init_reg_tables ();
888 /* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
889 memset (arc_punct_chars, 0, sizeof (arc_punct_chars));
890 arc_punct_chars['#'] = 1;
891 arc_punct_chars['*'] = 1;
892 arc_punct_chars['?'] = 1;
893 arc_punct_chars['!'] = 1;
894 arc_punct_chars['^'] = 1;
895 arc_punct_chars['&'] = 1;
896 arc_punct_chars['+'] = 1;
897 arc_punct_chars['_'] = 1;
899 if (optimize > 1 && !TARGET_NO_COND_EXEC)
901 /* There are two target-independent ifcvt passes, and arc_reorg may do
902 one or more arc_ifcvt calls. */
903 opt_pass *pass_arc_ifcvt_4 = make_pass_arc_ifcvt (g);
904 struct register_pass_info arc_ifcvt4_info
905 = { pass_arc_ifcvt_4, "dbr", 1, PASS_POS_INSERT_AFTER };
906 struct register_pass_info arc_ifcvt5_info
907 = { pass_arc_ifcvt_4->clone (), "shorten", 1, PASS_POS_INSERT_BEFORE };
909 register_pass (&arc_ifcvt4_info);
910 register_pass (&arc_ifcvt5_info);
913 if (flag_delayed_branch)
915 opt_pass *pass_arc_predicate_delay_insns
916 = make_pass_arc_predicate_delay_insns (g);
917 struct register_pass_info arc_predicate_delay_info
918 = { pass_arc_predicate_delay_insns, "dbr", 1, PASS_POS_INSERT_AFTER };
920 register_pass (&arc_predicate_delay_info);
924 /* Parse -mirq-ctrl-saved=RegisterRange, blink, lp_count.  The
925    register range is specified as two registers separated by a dash.
926    It always starts with r0, and its upper limit is the fp register.
927 blink and lp_count registers are optional. */
929 static void
930 irq_range (const char *cstr)
932 int i, first, last, blink, lpcount, xreg;
933 char *str, *dash, *comma;
935 i = strlen (cstr);
936 str = (char *) alloca (i + 1);
937 memcpy (str, cstr, i + 1);
938 blink = -1;
939 lpcount = -1;
941 dash = strchr (str, '-');
942 if (!dash)
944 warning (0, "value of -mirq-ctrl-saved must have form R0-REGx");
945 return;
947 *dash = '\0';
949 comma = strchr (dash + 1, ',');
950 if (comma)
951 *comma = '\0';
953 first = decode_reg_name (str);
954 if (first != 0)
956 warning (0, "first register must be R0");
957 return;
960 /* At this moment we do not have the register names initialized
961 accordingly. */
962 if (!strcmp (dash + 1, "ilink"))
963 last = 29;
964 else
965 last = decode_reg_name (dash + 1);
967 if (last < 0)
969 warning (0, "unknown register name: %s", dash + 1);
970 return;
973 if (!(last & 0x01))
975 warning (0, "last register name %s must be an odd register", dash + 1);
976 return;
979 *dash = '-';
981 if (first > last)
983 warning (0, "%s-%s is an empty range", str, dash + 1);
984 return;
987 while (comma)
989 *comma = ',';
990 str = comma + 1;
992 comma = strchr (str, ',');
993 if (comma)
994 *comma = '\0';
996 xreg = decode_reg_name (str);
997 switch (xreg)
999 case 31:
1000 blink = 31;
1001 break;
1003 case 60:
1004 lpcount = 60;
1005 break;
1007 default:
1008 warning (0, "unknown register name: %s", str);
1009 return;
1013 irq_ctrl_saved.irq_save_last_reg = last;
1014 irq_ctrl_saved.irq_save_blink = (blink == 31) || (last == 31);
1015 irq_ctrl_saved.irq_save_lpcount = (lpcount == 60);
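/* Examples of option strings the parser above accepts (the register choices
   are arbitrary):
       -mirq-ctrl-saved=r0-r13
       -mirq-ctrl-saved=r0-r27,blink,lp_count
   The range must start at r0 and end at an odd register (or ilink), and may
   optionally be followed by blink and/or lp_count.  */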
1018 /* Parse -mrgf-banked-regs=NUM option string. Valid values for NUM are 4,
1019 8, 16, or 32. */
1021 static void
1022 parse_mrgf_banked_regs_option (const char *arg)
1024 long int val;
1025 char *end_ptr;
1027 errno = 0;
1028 val = strtol (arg, &end_ptr, 10);
1029 if (errno != 0 || *arg == '\0' || *end_ptr != '\0'
1030 || (val != 0 && val != 4 && val != 8 && val != 16 && val != 32))
1032 error ("invalid number in -mrgf-banked-regs=%s "
1033 "valid values are 0, 4, 8, 16, or 32", arg);
1034 return;
1036 rgf_banked_register_count = (int) val;
1039 /* Check ARC options, generate derived target attributes. */
1041 static void
1042 arc_override_options (void)
1044 unsigned int i;
1045 cl_deferred_option *opt;
1046 vec<cl_deferred_option> *vopt
1047 = (vec<cl_deferred_option> *) arc_deferred_options;
1049 if (arc_cpu == PROCESSOR_NONE)
1050 arc_cpu = TARGET_CPU_DEFAULT;
1052 /* Set the default cpu options. */
1053 arc_selected_cpu = &arc_cpu_types[(int) arc_cpu];
1055 /* Set the architectures. */
1056 switch (arc_selected_cpu->arch_info->arch_id)
1058 case BASE_ARCH_em:
1059 arc_cpu_string = "EM";
1060 break;
1061 case BASE_ARCH_hs:
1062 arc_cpu_string = "HS";
1063 break;
1064 case BASE_ARCH_700:
1065 if (arc_selected_cpu->processor == PROCESSOR_nps400)
1066 arc_cpu_string = "NPS400";
1067 else
1068 arc_cpu_string = "ARC700";
1069 break;
1070 case BASE_ARCH_6xx:
1071 arc_cpu_string = "ARC600";
1072 break;
1073 default:
1074 gcc_unreachable ();
1077 irq_ctrl_saved.irq_save_last_reg = -1;
1078 irq_ctrl_saved.irq_save_blink = false;
1079 irq_ctrl_saved.irq_save_lpcount = false;
1081 rgf_banked_register_count = 0;
1083 /* Handle the deferred options. */
1084 if (vopt)
1085 FOR_EACH_VEC_ELT (*vopt, i, opt)
1087 switch (opt->opt_index)
1089 case OPT_mirq_ctrl_saved_:
1090 if (TARGET_V2)
1091 irq_range (opt->arg);
1092 else
1093 warning (0, "option -mirq-ctrl-saved valid only for ARC v2 processors");
1094 break;
1096 case OPT_mrgf_banked_regs_:
1097 if (TARGET_V2)
1098 parse_mrgf_banked_regs_option (opt->arg);
1099 else
1100 warning (0, "option -mrgf-banked-regs valid only for ARC v2 processors");
1101 break;
1103 default:
1104 gcc_unreachable();
1108 CLEAR_HARD_REG_SET (overrideregs);
1109 if (common_deferred_options)
1111 vec<cl_deferred_option> v =
1112 *((vec<cl_deferred_option> *) common_deferred_options);
1113 int reg, nregs, j;
1115 FOR_EACH_VEC_ELT (v, i, opt)
1117 switch (opt->opt_index)
1119 case OPT_ffixed_:
1120 case OPT_fcall_used_:
1121 case OPT_fcall_saved_:
1122 if ((reg = decode_reg_name_and_count (opt->arg, &nregs)) >= 0)
1123 for (j = reg; j < reg + nregs; j++)
1124 SET_HARD_REG_BIT (overrideregs, j);
1125 break;
1126 default:
1127 break;
1132   /* Set cpu flags according to the architecture/selected cpu.  The cpu
1133 specific flags are set in arc-common.c. The architecture forces
1134 the default hardware configurations in, regardless what command
1135 line options are saying. The CPU optional hw options can be
1136 turned on or off. */
1137 #define ARC_OPT(NAME, CODE, MASK, DOC) \
1138 do { \
1139 if ((arc_selected_cpu->flags & CODE) \
1140 && ((target_flags_explicit & MASK) == 0)) \
1141 target_flags |= MASK; \
1142 if (arc_selected_cpu->arch_info->dflags & CODE) \
1143 target_flags |= MASK; \
1144 } while (0);
1145 #define ARC_OPTX(NAME, CODE, VAR, VAL, DOC) \
1146 do { \
1147 if ((arc_selected_cpu->flags & CODE) \
1148 && (VAR == DEFAULT_##VAR)) \
1149 VAR = VAL; \
1150 if (arc_selected_cpu->arch_info->dflags & CODE) \
1151 VAR = VAL; \
1152 } while (0);
1154 #include "arc-options.def"
1156 #undef ARC_OPTX
1157 #undef ARC_OPT
1159 /* Check options against architecture options. Throw an error if
1160 option is not allowed. */
1161 #define ARC_OPTX(NAME, CODE, VAR, VAL, DOC) \
1162 do { \
1163 if ((VAR == VAL) \
1164 && (!(arc_selected_cpu->arch_info->flags & CODE))) \
1166 error ("%s is not available for %s architecture", \
1167 DOC, arc_selected_cpu->arch_info->name); \
1169 } while (0);
1170 #define ARC_OPT(NAME, CODE, MASK, DOC) \
1171 do { \
1172 if ((target_flags & MASK) \
1173 && (!(arc_selected_cpu->arch_info->flags & CODE))) \
1174 error ("%s is not available for %s architecture", \
1175 DOC, arc_selected_cpu->arch_info->name); \
1176 } while (0);
1178 #include "arc-options.def"
1180 #undef ARC_OPTX
1181 #undef ARC_OPT
1183 /* Set Tune option. */
1184 if (arc_tune == TUNE_NONE)
1185 arc_tune = (enum attr_tune) arc_selected_cpu->tune;
1187 if (arc_size_opt_level == 3)
1188 optimize_size = 1;
1190 /* Compact casesi is not a valid option for ARCv2 family. */
1191 if (TARGET_V2)
1193 if (TARGET_COMPACT_CASESI)
1195 warning (0, "compact-casesi is not applicable to ARCv2");
1196 TARGET_COMPACT_CASESI = 0;
1199 else if (optimize_size == 1
1200 && !global_options_set.x_TARGET_COMPACT_CASESI)
1201 TARGET_COMPACT_CASESI = 1;
1203 if (flag_pic)
1204 target_flags |= MASK_NO_SDATA_SET;
1206 if (flag_no_common == 255)
1207 flag_no_common = !TARGET_NO_SDATA_SET;
1209 /* TARGET_COMPACT_CASESI needs the "q" register class. */
1210 if (TARGET_MIXED_CODE)
1211 TARGET_Q_CLASS = 1;
1212 if (!TARGET_Q_CLASS)
1213 TARGET_COMPACT_CASESI = 0;
1214 if (TARGET_COMPACT_CASESI)
1215 TARGET_CASE_VECTOR_PC_RELATIVE = 1;
1217 /* Check for small data option */
1218 if (!global_options_set.x_g_switch_value && !TARGET_NO_SDATA_SET)
1219 g_switch_value = TARGET_LL64 ? 8 : 4;
1221 /* These need to be done at start up. It's convenient to do them here. */
1222 arc_init ();
1225 /* The condition codes of the ARC, and the inverse function. */
1226 /* For short branches, the "c" / "nc" names are not defined in the ARC
1227    Programmer's manual, so we have to use "lo" / "hs" instead.  */
1228 static const char *arc_condition_codes[] =
1230 "al", 0, "eq", "ne", "p", "n", "lo", "hs", "v", "nv",
1231 "gt", "le", "ge", "lt", "hi", "ls", "pnz", 0
1234 enum arc_cc_code_index
1236 ARC_CC_AL, ARC_CC_EQ = ARC_CC_AL+2, ARC_CC_NE, ARC_CC_P, ARC_CC_N,
1237 ARC_CC_C, ARC_CC_NC, ARC_CC_V, ARC_CC_NV,
1238 ARC_CC_GT, ARC_CC_LE, ARC_CC_GE, ARC_CC_LT, ARC_CC_HI, ARC_CC_LS, ARC_CC_PNZ,
1239 ARC_CC_LO = ARC_CC_C, ARC_CC_HS = ARC_CC_NC
1242 #define ARC_INVERSE_CONDITION_CODE(X) ((X) ^ 1)
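/* Because arc_condition_codes lists each condition next to its inverse,
   flipping the low bit of the index inverts the condition, e.g.
   ARC_CC_EQ <-> ARC_CC_NE, ARC_CC_LO <-> ARC_CC_HS,
   ARC_CC_GT <-> ARC_CC_LE, ARC_CC_GE <-> ARC_CC_LT.  */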
1244 /* Returns the index of the ARC condition code string in
1245 `arc_condition_codes'. COMPARISON should be an rtx like
1246 `(eq (...) (...))'. */
1248 static int
1249 get_arc_condition_code (rtx comparison)
1251 switch (GET_MODE (XEXP (comparison, 0)))
1253 case E_CCmode:
1254 case E_SImode: /* For BRcc. */
1255 switch (GET_CODE (comparison))
1257 case EQ : return ARC_CC_EQ;
1258 case NE : return ARC_CC_NE;
1259 case GT : return ARC_CC_GT;
1260 case LE : return ARC_CC_LE;
1261 case GE : return ARC_CC_GE;
1262 case LT : return ARC_CC_LT;
1263 case GTU : return ARC_CC_HI;
1264 case LEU : return ARC_CC_LS;
1265 case LTU : return ARC_CC_LO;
1266 case GEU : return ARC_CC_HS;
1267 default : gcc_unreachable ();
1269 case E_CC_ZNmode:
1270 switch (GET_CODE (comparison))
1272 case EQ : return ARC_CC_EQ;
1273 case NE : return ARC_CC_NE;
1274 case GE: return ARC_CC_P;
1275 case LT: return ARC_CC_N;
1276 case GT : return ARC_CC_PNZ;
1277 default : gcc_unreachable ();
1279 case E_CC_Zmode:
1280 switch (GET_CODE (comparison))
1282 case EQ : return ARC_CC_EQ;
1283 case NE : return ARC_CC_NE;
1284 default : gcc_unreachable ();
1286 case E_CC_Cmode:
1287 switch (GET_CODE (comparison))
1289 case LTU : return ARC_CC_C;
1290 case GEU : return ARC_CC_NC;
1291 default : gcc_unreachable ();
1293 case E_CC_FP_GTmode:
1294 if (TARGET_ARGONAUT_SET && TARGET_SPFP)
1295 switch (GET_CODE (comparison))
1297 case GT : return ARC_CC_N;
1298 case UNLE: return ARC_CC_P;
1299 default : gcc_unreachable ();
1301 else
1302 switch (GET_CODE (comparison))
1304 case GT : return ARC_CC_HI;
1305 case UNLE : return ARC_CC_LS;
1306 default : gcc_unreachable ();
1308 case E_CC_FP_GEmode:
1309 /* Same for FPX and non-FPX. */
1310 switch (GET_CODE (comparison))
1312 case GE : return ARC_CC_HS;
1313 case UNLT : return ARC_CC_LO;
1314 default : gcc_unreachable ();
1316 case E_CC_FP_UNEQmode:
1317 switch (GET_CODE (comparison))
1319 case UNEQ : return ARC_CC_EQ;
1320 case LTGT : return ARC_CC_NE;
1321 default : gcc_unreachable ();
1323 case E_CC_FP_ORDmode:
1324 switch (GET_CODE (comparison))
1326 case UNORDERED : return ARC_CC_C;
1327 case ORDERED : return ARC_CC_NC;
1328 default : gcc_unreachable ();
1330 case E_CC_FPXmode:
1331 switch (GET_CODE (comparison))
1333 case EQ : return ARC_CC_EQ;
1334 case NE : return ARC_CC_NE;
1335 case UNORDERED : return ARC_CC_C;
1336 case ORDERED : return ARC_CC_NC;
1337 case LTGT : return ARC_CC_HI;
1338 case UNEQ : return ARC_CC_LS;
1339 default : gcc_unreachable ();
1341 case E_CC_FPUmode:
1342 switch (GET_CODE (comparison))
1344 case EQ : return ARC_CC_EQ;
1345 case NE : return ARC_CC_NE;
1346 case GT : return ARC_CC_GT;
1347 case GE : return ARC_CC_GE;
1348 case LT : return ARC_CC_C;
1349 case LE : return ARC_CC_LS;
1350 case UNORDERED : return ARC_CC_V;
1351 case ORDERED : return ARC_CC_NV;
1352 case UNGT : return ARC_CC_HI;
1353 case UNGE : return ARC_CC_HS;
1354 case UNLT : return ARC_CC_LT;
1355 case UNLE : return ARC_CC_LE;
1356 /* UNEQ and LTGT do not have representation. */
1357 case LTGT : /* Fall through. */
1358 case UNEQ : /* Fall through. */
1359 default : gcc_unreachable ();
1361 case E_CC_FPU_UNEQmode:
1362 switch (GET_CODE (comparison))
1364 case LTGT : return ARC_CC_NE;
1365 case UNEQ : return ARC_CC_EQ;
1366 default : gcc_unreachable ();
1368 default : gcc_unreachable ();
1370 /*NOTREACHED*/
1371 return (42);
1374 /* Return true if COMPARISON has a short form that can accommodate OFFSET.  */
1376 bool
1377 arc_short_comparison_p (rtx comparison, int offset)
1379 gcc_assert (ARC_CC_NC == ARC_CC_HS);
1380 gcc_assert (ARC_CC_C == ARC_CC_LO);
1381 switch (get_arc_condition_code (comparison))
1383 case ARC_CC_EQ: case ARC_CC_NE:
1384 return offset >= -512 && offset <= 506;
1385 case ARC_CC_GT: case ARC_CC_LE: case ARC_CC_GE: case ARC_CC_LT:
1386 case ARC_CC_HI: case ARC_CC_LS: case ARC_CC_LO: case ARC_CC_HS:
1387 return offset >= -64 && offset <= 58;
1388 default:
1389 return false;
1393 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1394 return the mode to be used for the comparison. */
1396 machine_mode
1397 arc_select_cc_mode (enum rtx_code op, rtx x, rtx y)
1399 machine_mode mode = GET_MODE (x);
1400 rtx x1;
1402 /* For an operation that sets the condition codes as a side-effect, the
1403    C and V flags are not set as for cmp, so we can only use comparisons where
1404 this doesn't matter. (For LT and GE we can use "mi" and "pl"
1405 instead.) */
1406 /* ??? We could use "pnz" for greater than zero, however, we could then
1407 get into trouble because the comparison could not be reversed. */
1408 if (GET_MODE_CLASS (mode) == MODE_INT
1409 && y == const0_rtx
1410 && (op == EQ || op == NE
1411 || ((op == LT || op == GE) && GET_MODE_SIZE (GET_MODE (x)) <= 4)))
1412 return CC_ZNmode;
1414 /* add.f for if (a+b) */
1415 if (mode == SImode
1416 && GET_CODE (y) == NEG
1417 && (op == EQ || op == NE))
1418 return CC_ZNmode;
1420 /* Check if this is a test suitable for bxor.f . */
1421 if (mode == SImode && (op == EQ || op == NE) && CONST_INT_P (y)
1422 && ((INTVAL (y) - 1) & INTVAL (y)) == 0
1423 && INTVAL (y))
1424 return CC_Zmode;
1426 /* Check if this is a test suitable for add / bmsk.f . */
1427 if (mode == SImode && (op == EQ || op == NE) && CONST_INT_P (y)
1428 && GET_CODE (x) == AND && CONST_INT_P ((x1 = XEXP (x, 1)))
1429 && ((INTVAL (x1) + 1) & INTVAL (x1)) == 0
1430 && (~INTVAL (x1) | INTVAL (y)) < 0
1431 && (~INTVAL (x1) | INTVAL (y)) > -0x800)
1432 return CC_Zmode;
1434 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
1435 && GET_CODE (x) == PLUS
1436 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
1437 return CC_Cmode;
1439 if (TARGET_ARGONAUT_SET
1440 && ((mode == SFmode && TARGET_SPFP) || (mode == DFmode && TARGET_DPFP)))
1441 switch (op)
1443 case EQ: case NE: case UNEQ: case LTGT: case ORDERED: case UNORDERED:
1444 return CC_FPXmode;
1445 case LT: case UNGE: case GT: case UNLE:
1446 return CC_FP_GTmode;
1447 case LE: case UNGT: case GE: case UNLT:
1448 return CC_FP_GEmode;
1449 default: gcc_unreachable ();
1451 else if (TARGET_HARD_FLOAT
1452 && ((mode == SFmode && TARGET_FP_SP_BASE)
1453 || (mode == DFmode && TARGET_FP_DP_BASE)))
1454 switch (op)
1456 case EQ:
1457 case NE:
1458 case UNORDERED:
1459 case ORDERED:
1460 case UNLT:
1461 case UNLE:
1462 case UNGT:
1463 case UNGE:
1464 case LT:
1465 case LE:
1466 case GT:
1467 case GE:
1468 return CC_FPUmode;
1470 case LTGT:
1471 case UNEQ:
1472 return CC_FPU_UNEQmode;
1474 default:
1475 gcc_unreachable ();
1477 else if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_OPTFPE)
1479 switch (op)
1481 case EQ: case NE: return CC_Zmode;
1482 case LT: case UNGE:
1483 case GT: case UNLE: return CC_FP_GTmode;
1484 case LE: case UNGT:
1485 case GE: case UNLT: return CC_FP_GEmode;
1486 case UNEQ: case LTGT: return CC_FP_UNEQmode;
1487 case ORDERED: case UNORDERED: return CC_FP_ORDmode;
1488 default: gcc_unreachable ();
1491 return CCmode;
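/* A few illustrative inputs for arc_select_cc_mode (the operands shown are
   made up):
       (eq  (reg:SI r0) (const_int 0))                      -> CC_ZNmode
       (ltu (plus:SI (reg:SI r0) (reg:SI r1)) (reg:SI r1))  -> CC_Cmode
       (gt  (reg:SF f0) (reg:SF f1)) with hardware FP       -> CC_FPUmode  */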
1494 /* Vectors to keep interesting information about registers where it can easily
1495    be got.  We used to use the actual mode value as the bit number, but there
1496    are (or may be) more than 32 modes now.  Instead we use two tables: one
1497 indexed by hard register number, and one indexed by mode. */
1499 /* The purpose of arc_mode_class is to shrink the range of modes so that
1500 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
1501 mapped into one arc_mode_class mode. */
1503 enum arc_mode_class {
1504 C_MODE,
1505 S_MODE, D_MODE, T_MODE, O_MODE,
1506 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
1507 V_MODE
1510 /* Modes for condition codes. */
1511 #define C_MODES (1 << (int) C_MODE)
1513 /* Modes for single-word and smaller quantities. */
1514 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
1516 /* Modes for double-word and smaller quantities. */
1517 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
1519 /* Mode for 8-byte DF values only. */
1520 #define DF_MODES (1 << DF_MODE)
1522 /* Modes for quad-word and smaller quantities. */
1523 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
1525 /* Modes for 128-bit vectors. */
1526 #define V_MODES (1 << (int) V_MODE)
1528 /* Value is 1 if register/mode pair is acceptable on arc. */
1530 static unsigned int arc_hard_regno_modes[] = {
1531 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
1532 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
1533 T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, D_MODES,
1534 D_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1536 /* ??? Leave these as S_MODES for now. */
1537 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1538 DF_MODES, 0, DF_MODES, 0, S_MODES, S_MODES, S_MODES, S_MODES,
1539 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1540 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, C_MODES, S_MODES,
1542 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1543 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1544 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1545 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1547 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1548 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1549 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1550 V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES, V_MODES,
1552 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
1553 S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES
1556 static unsigned int arc_mode_class [NUM_MACHINE_MODES];
1558 enum reg_class arc_regno_reg_class[FIRST_PSEUDO_REGISTER];
1560 enum reg_class
1561 arc_preferred_reload_class (rtx, enum reg_class cl)
1563 if ((cl) == CHEAP_CORE_REGS || (cl) == WRITABLE_CORE_REGS)
1564 return GENERAL_REGS;
1565 return cl;
1568 /* Initialize the arc_mode_class array. */
1570 static void
1571 arc_init_reg_tables (void)
1573 int i;
1575 for (i = 0; i < NUM_MACHINE_MODES; i++)
1577 machine_mode m = (machine_mode) i;
1579 switch (GET_MODE_CLASS (m))
1581 case MODE_INT:
1582 case MODE_PARTIAL_INT:
1583 case MODE_COMPLEX_INT:
1584 if (GET_MODE_SIZE (m) <= 4)
1585 arc_mode_class[i] = 1 << (int) S_MODE;
1586 else if (GET_MODE_SIZE (m) == 8)
1587 arc_mode_class[i] = 1 << (int) D_MODE;
1588 else if (GET_MODE_SIZE (m) == 16)
1589 arc_mode_class[i] = 1 << (int) T_MODE;
1590 else if (GET_MODE_SIZE (m) == 32)
1591 arc_mode_class[i] = 1 << (int) O_MODE;
1592 else
1593 arc_mode_class[i] = 0;
1594 break;
1595 case MODE_FLOAT:
1596 case MODE_COMPLEX_FLOAT:
1597 if (GET_MODE_SIZE (m) <= 4)
1598 arc_mode_class[i] = 1 << (int) SF_MODE;
1599 else if (GET_MODE_SIZE (m) == 8)
1600 arc_mode_class[i] = 1 << (int) DF_MODE;
1601 else if (GET_MODE_SIZE (m) == 16)
1602 arc_mode_class[i] = 1 << (int) TF_MODE;
1603 else if (GET_MODE_SIZE (m) == 32)
1604 arc_mode_class[i] = 1 << (int) OF_MODE;
1605 else
1606 arc_mode_class[i] = 0;
1607 break;
1608 case MODE_VECTOR_INT:
1609 if (GET_MODE_SIZE (m) == 4)
1610 arc_mode_class[i] = (1 << (int) S_MODE);
1611 else if (GET_MODE_SIZE (m) == 8)
1612 arc_mode_class[i] = (1 << (int) D_MODE);
1613 else
1614 arc_mode_class[i] = (1 << (int) V_MODE);
1615 break;
1616 case MODE_CC:
1617 default:
1618 /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
1619 we must explicitly check for them here. */
1620 if (i == (int) CCmode || i == (int) CC_ZNmode || i == (int) CC_Zmode
1621 || i == (int) CC_Cmode
1622 || i == CC_FP_GTmode || i == CC_FP_GEmode || i == CC_FP_ORDmode
1623 || i == CC_FPUmode || i == CC_FPU_UNEQmode)
1624 arc_mode_class[i] = 1 << (int) C_MODE;
1625 else
1626 arc_mode_class[i] = 0;
1627 break;
1632 /* Core registers 56..59 are used for multiply extension options.
1633 The dsp option uses r56 and r57, these are then named acc1 and acc2.
1634 acc1 is the highpart, and acc2 the lowpart, so which register gets which
1635 number depends on endianness.
1636 The mul64 multiplier options use r57 for mlo, r58 for mmid and r59 for mhi.
1637 Because mlo / mhi form a 64 bit value, we use different gcc internal
1638 register numbers to make them form a register pair as the gcc internals
1639 know it. mmid gets number 57, if still available, and mlo / mhi get
1640 number 58 and 59, depending on endianness. We use DBX_REGISTER_NUMBER
1641 to map this back. */
1642 char rname56[5] = "r56";
1643 char rname57[5] = "r57";
1644 char rname58[5] = "r58";
1645 char rname59[5] = "r59";
1646 char rname29[7] = "ilink1";
1647 char rname30[7] = "ilink2";
1649 static void
1650 arc_conditional_register_usage (void)
1652 int regno;
1653 int i;
1654 int fix_start = 60, fix_end = 55;
1656 if (TARGET_V2)
1658 /* For ARCv2 the core register set is changed. */
1659 strcpy (rname29, "ilink");
1660 strcpy (rname30, "r30");
1662 if (!TEST_HARD_REG_BIT (overrideregs, 30))
1664 /* No user interference. Set the r30 to be used by the
1665 compiler. */
1666 call_used_regs[30] = 1;
1667 fixed_regs[30] = 0;
1669 arc_regno_reg_class[30] = WRITABLE_CORE_REGS;
1670 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], 30);
1671 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], 30);
1672 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], 30);
1673 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], 30);
1677 if (TARGET_MUL64_SET)
1679 fix_start = 57;
1680 fix_end = 59;
1682 /* We don't provide a name for mmed. In rtl / assembly resource lists,
1683    you are supposed to refer to it as mlo & mhi, e.g.
1684    (zero_extract:SI (reg:DI 58) (const_int 32) (16)) .
1685    In an actual asm instruction, you of course use mmed.
1686    The point of avoiding having a separate register for mmed is that
1687    this way, we don't have to carry clobbers of that reg around in every
1688    instruction that modifies mlo and/or mhi.  */
1689 strcpy (rname57, "");
1690 strcpy (rname58, TARGET_BIG_ENDIAN ? "mhi" : "mlo");
1691 strcpy (rname59, TARGET_BIG_ENDIAN ? "mlo" : "mhi");
1694 /* The nature of arc_tp_regno is actually something more like a global
1695 register, however globalize_reg requires a declaration.
1696 We use EPILOGUE_USES to compensate so that sets from
1697 __builtin_set_frame_pointer are not deleted. */
1698 if (arc_tp_regno != -1)
1699 fixed_regs[arc_tp_regno] = call_used_regs[arc_tp_regno] = 1;
1701 if (TARGET_MULMAC_32BY16_SET)
1703 fix_start = 56;
1704 fix_end = fix_end > 57 ? fix_end : 57;
1705 strcpy (rname56, TARGET_BIG_ENDIAN ? "acc1" : "acc2");
1706 strcpy (rname57, TARGET_BIG_ENDIAN ? "acc2" : "acc1");
1708 for (regno = fix_start; regno <= fix_end; regno++)
1710 if (!fixed_regs[regno])
1711 warning (0, "multiply option implies r%d is fixed", regno);
1712 fixed_regs [regno] = call_used_regs[regno] = 1;
1714 if (TARGET_Q_CLASS)
1716 if (optimize_size)
1718 reg_alloc_order[0] = 0;
1719 reg_alloc_order[1] = 1;
1720 reg_alloc_order[2] = 2;
1721 reg_alloc_order[3] = 3;
1722 reg_alloc_order[4] = 12;
1723 reg_alloc_order[5] = 13;
1724 reg_alloc_order[6] = 14;
1725 reg_alloc_order[7] = 15;
1726 reg_alloc_order[8] = 4;
1727 reg_alloc_order[9] = 5;
1728 reg_alloc_order[10] = 6;
1729 reg_alloc_order[11] = 7;
1730 reg_alloc_order[12] = 8;
1731 reg_alloc_order[13] = 9;
1732 reg_alloc_order[14] = 10;
1733 reg_alloc_order[15] = 11;
1735 else
1737 reg_alloc_order[2] = 12;
1738 reg_alloc_order[3] = 13;
1739 reg_alloc_order[4] = 14;
1740 reg_alloc_order[5] = 15;
1741 reg_alloc_order[6] = 1;
1742 reg_alloc_order[7] = 0;
1743 reg_alloc_order[8] = 4;
1744 reg_alloc_order[9] = 5;
1745 reg_alloc_order[10] = 6;
1746 reg_alloc_order[11] = 7;
1747 reg_alloc_order[12] = 8;
1748 reg_alloc_order[13] = 9;
1749 reg_alloc_order[14] = 10;
1750 reg_alloc_order[15] = 11;
1753 if (TARGET_SIMD_SET)
1755 int i;
1756 for (i = ARC_FIRST_SIMD_VR_REG; i <= ARC_LAST_SIMD_VR_REG; i++)
1757 reg_alloc_order [i] = i;
1758 for (i = ARC_FIRST_SIMD_DMA_CONFIG_REG;
1759 i <= ARC_LAST_SIMD_DMA_CONFIG_REG; i++)
1760 reg_alloc_order [i] = i;
1763 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1764 if (!call_used_regs[regno])
1765 CLEAR_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
1766 for (regno = 32; regno < 60; regno++)
1767 if (!fixed_regs[regno])
1768 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], regno);
1769 if (!TARGET_ARC600_FAMILY)
1771 for (regno = 32; regno <= 60; regno++)
1772 CLEAR_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], regno);
1774 /* If they have used -ffixed-lp_count, make sure it takes
1775 effect. */
1776 if (fixed_regs[LP_COUNT])
1778 CLEAR_HARD_REG_BIT (reg_class_contents[LPCOUNT_REG], LP_COUNT);
1779 CLEAR_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], LP_COUNT);
1780 CLEAR_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], LP_COUNT);
1782 /* Instead of taking out SF_MODE like below, forbid it outright. */
1783 arc_hard_regno_modes[60] = 0;
1785 else
1786 arc_hard_regno_modes[60] = 1 << (int) S_MODE;
1789 /* ARCHS has 64-bit data-path which makes use of the even-odd paired
1790 registers. */
1791 if (TARGET_HS)
1793 for (regno = 1; regno < 32; regno +=2)
1795 arc_hard_regno_modes[regno] = S_MODES;
1799 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1801 if (i < 29)
1803 if ((TARGET_Q_CLASS || TARGET_RRQ_CLASS)
1804 && ((i <= 3) || ((i >= 12) && (i <= 15))))
1805 arc_regno_reg_class[i] = ARCOMPACT16_REGS;
1806 else
1807 arc_regno_reg_class[i] = GENERAL_REGS;
1809 else if (i < 60)
1810 arc_regno_reg_class[i]
1811 = (fixed_regs[i]
1812 ? (TEST_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], i)
1813 ? CHEAP_CORE_REGS : ALL_CORE_REGS)
1814 : (((!TARGET_ARC600_FAMILY)
1815 && TEST_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], i))
1816 ? CHEAP_CORE_REGS : WRITABLE_CORE_REGS));
1817 else
1818 arc_regno_reg_class[i] = NO_REGS;
1821 /* ARCOMPACT16_REGS is empty, if TARGET_Q_CLASS / TARGET_RRQ_CLASS
1822 has not been activated. */
1823 if (!TARGET_Q_CLASS && !TARGET_RRQ_CLASS)
1824 CLEAR_HARD_REG_SET(reg_class_contents [ARCOMPACT16_REGS]);
1825 if (!TARGET_Q_CLASS)
1826 CLEAR_HARD_REG_SET(reg_class_contents [AC16_BASE_REGS]);
1828 gcc_assert (FIRST_PSEUDO_REGISTER >= 144);
1830 /* Handle Special Registers. */
1831 arc_regno_reg_class[29] = LINK_REGS; /* ilink1 register. */
1832 if (!TARGET_V2)
1833 arc_regno_reg_class[30] = LINK_REGS; /* ilink2 register. */
1834 arc_regno_reg_class[31] = LINK_REGS; /* blink register. */
1835 arc_regno_reg_class[60] = LPCOUNT_REG;
1836 arc_regno_reg_class[61] = NO_REGS; /* CC_REG: must be NO_REGS. */
1837 arc_regno_reg_class[62] = GENERAL_REGS;
1839 if (TARGET_DPFP)
1841 for (i = 40; i < 44; ++i)
1843 arc_regno_reg_class[i] = DOUBLE_REGS;
1845 /* Unless they want us to do 'mov d1, 0x00000000' make sure
1846 no attempt is made to use such a register as a destination
1847 operand in *movdf_insn. */
1848 if (!TARGET_ARGONAUT_SET)
1850 /* Make sure no 'c', 'w', 'W', or 'Rac' constraint is
1851 interpreted to mean they can use D1 or D2 in their insn. */
1852 CLEAR_HARD_REG_BIT(reg_class_contents[CHEAP_CORE_REGS ], i);
1853 CLEAR_HARD_REG_BIT(reg_class_contents[ALL_CORE_REGS ], i);
1854 CLEAR_HARD_REG_BIT(reg_class_contents[WRITABLE_CORE_REGS ], i);
1855 CLEAR_HARD_REG_BIT(reg_class_contents[MPY_WRITABLE_CORE_REGS], i);
1859 else
1861 /* Disable all DOUBLE_REGISTER settings,
1862 if not generating DPFP code. */
1863 arc_regno_reg_class[40] = ALL_REGS;
1864 arc_regno_reg_class[41] = ALL_REGS;
1865 arc_regno_reg_class[42] = ALL_REGS;
1866 arc_regno_reg_class[43] = ALL_REGS;
1868 fixed_regs[40] = 1;
1869 fixed_regs[41] = 1;
1870 fixed_regs[42] = 1;
1871 fixed_regs[43] = 1;
1873 arc_hard_regno_modes[40] = 0;
1874 arc_hard_regno_modes[42] = 0;
1876 CLEAR_HARD_REG_SET(reg_class_contents [DOUBLE_REGS]);
1879 if (TARGET_SIMD_SET)
1881 gcc_assert (ARC_FIRST_SIMD_VR_REG == 64);
1882 gcc_assert (ARC_LAST_SIMD_VR_REG == 127);
1884 for (i = ARC_FIRST_SIMD_VR_REG; i <= ARC_LAST_SIMD_VR_REG; i++)
1885 arc_regno_reg_class [i] = SIMD_VR_REGS;
1887 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_REG == 128);
1888 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_IN_REG == 128);
1889 gcc_assert (ARC_FIRST_SIMD_DMA_CONFIG_OUT_REG == 136);
1890 gcc_assert (ARC_LAST_SIMD_DMA_CONFIG_REG == 143);
1892 for (i = ARC_FIRST_SIMD_DMA_CONFIG_REG;
1893 i <= ARC_LAST_SIMD_DMA_CONFIG_REG; i++)
1894 arc_regno_reg_class [i] = SIMD_DMA_CONFIG_REGS;
1897 /* pc : r63 */
1898 arc_regno_reg_class[PROGRAM_COUNTER_REGNO] = GENERAL_REGS;
1900 /* ARCV2 accumulator. */
1901 if ((TARGET_V2
1902 && (TARGET_FP_DP_FUSED || TARGET_FP_SP_FUSED))
1903 || TARGET_PLUS_DMPY)
1905 arc_regno_reg_class[ACCL_REGNO] = WRITABLE_CORE_REGS;
1906 arc_regno_reg_class[ACCH_REGNO] = WRITABLE_CORE_REGS;
1907 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], ACCL_REGNO);
1908 SET_HARD_REG_BIT (reg_class_contents[WRITABLE_CORE_REGS], ACCH_REGNO);
1909 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], ACCL_REGNO);
1910 SET_HARD_REG_BIT (reg_class_contents[CHEAP_CORE_REGS], ACCH_REGNO);
1911 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], ACCL_REGNO);
1912 SET_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], ACCH_REGNO);
1913 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], ACCL_REGNO);
1914 SET_HARD_REG_BIT (reg_class_contents[MPY_WRITABLE_CORE_REGS], ACCH_REGNO);
1916 /* Allow the compiler to freely use them. */
1917 if (!TEST_HARD_REG_BIT (overrideregs, ACCL_REGNO))
1918 fixed_regs[ACCL_REGNO] = 0;
1919 if (!TEST_HARD_REG_BIT (overrideregs, ACCH_REGNO))
1920 fixed_regs[ACCH_REGNO] = 0;
1922 if (!fixed_regs[ACCH_REGNO] && !fixed_regs[ACCL_REGNO])
1923 arc_hard_regno_modes[ACC_REG_FIRST] = D_MODES;
1927 /* Implement TARGET_HARD_REGNO_NREGS. */
1929 static unsigned int
1930 arc_hard_regno_nregs (unsigned int regno, machine_mode mode)
1932 if (GET_MODE_SIZE (mode) == 16
1933 && regno >= ARC_FIRST_SIMD_VR_REG
1934 && regno <= ARC_LAST_SIMD_VR_REG)
1935 return 1;
1937 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
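/* Worked examples (illustrative, assuming UNITS_PER_WORD == 4):
   SImode in a core register needs CEIL (4, 4) = 1 hard register and
   DImode needs CEIL (8, 4) = 2, while a 16-byte vector mode living in
   one of the SIMD VR registers is special-cased above to 1.  */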
1940 /* Implement TARGET_HARD_REGNO_MODE_OK. */
1942 static bool
1943 arc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
1945 return (arc_hard_regno_modes[regno] & arc_mode_class[mode]) != 0;
1948 /* Implement TARGET_MODES_TIEABLE_P. Tie QI/HI/SI modes together. */
1950 static bool
1951 arc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
1953 return (GET_MODE_CLASS (mode1) == MODE_INT
1954 && GET_MODE_CLASS (mode2) == MODE_INT
1955 && GET_MODE_SIZE (mode1) <= UNITS_PER_WORD
1956 && GET_MODE_SIZE (mode2) <= UNITS_PER_WORD);
1959 /* Handle an "interrupt" attribute; arguments as in
1960 struct attribute_spec.handler. */
1962 static tree
1963 arc_handle_interrupt_attribute (tree *, tree name, tree args, int,
1964 bool *no_add_attrs)
1966 gcc_assert (args);
1968 tree value = TREE_VALUE (args);
1970 if (TREE_CODE (value) != STRING_CST)
1972 warning (OPT_Wattributes,
1973 "argument of %qE attribute is not a string constant",
1974 name);
1975 *no_add_attrs = true;
1977 else if (!TARGET_V2
1978 && strcmp (TREE_STRING_POINTER (value), "ilink1")
1979 && strcmp (TREE_STRING_POINTER (value), "ilink2"))
1981 warning (OPT_Wattributes,
1982 "argument of %qE attribute is not \"ilink1\" or \"ilink2\"",
1983 name);
1984 *no_add_attrs = true;
1986 else if (TARGET_V2
1987 && strcmp (TREE_STRING_POINTER (value), "ilink")
1988 && strcmp (TREE_STRING_POINTER (value), "firq"))
1990 warning (OPT_Wattributes,
1991 "argument of %qE attribute is not \"ilink\" or \"firq\"",
1992 name);
1993 *no_add_attrs = true;
1996 return NULL_TREE;
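/* Usage sketch (hypothetical user code, not part of this file):

     void __attribute__ ((interrupt ("ilink1"))) isr (void);       - for !TARGET_V2
     void __attribute__ ((interrupt ("firq"))) fast_isr (void);    - for TARGET_V2

   Any other string argument is rejected by the checks above with a
   -Wattributes warning.  */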
1999 static tree
2000 arc_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2001 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2003 if (TREE_CODE (*node) != FUNCTION_DECL)
2005 warning (OPT_Wattributes, "%qE attribute only applies to functions",
2006 name);
2007 *no_add_attrs = true;
2010 return NULL_TREE;
2013 /* Implement `TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS' */
2015 static bool
2016 arc_allocate_stack_slots_for_args (void)
2018 /* Naked functions should not allocate stack slots for arguments. */
2019 unsigned int fn_type = arc_compute_function_type (cfun);
2021 return !ARC_NAKED_P(fn_type);
2024 /* Implement `TARGET_WARN_FUNC_RETURN'. */
2026 static bool
2027 arc_warn_func_return (tree decl)
2029 struct function *func = DECL_STRUCT_FUNCTION (decl);
2030 unsigned int fn_type = arc_compute_function_type (func);
2032 return !ARC_NAKED_P (fn_type);
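/* Illustrative example for the two hooks above: a function declared as

     void __attribute__ ((naked)) low_level_entry (int a, int b);

   (hypothetical user code) is ARC_NAKED_P, so no stack slots are
   allocated for A and B and no missing-return warning is emitted; such a
   body is expected to consist of asm statements only.  */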
2035 /* Return zero if TYPE1 and TYPE2 are incompatible, one if they are compatible,
2036 and two if they are nearly compatible (which causes a warning to be
2037 generated). */
2039 static int
2040 arc_comp_type_attributes (const_tree type1,
2041 const_tree type2)
2043 int l1, l2, m1, m2, s1, s2;
2045 /* Check for mismatch of non-default calling convention. */
2046 if (TREE_CODE (type1) != FUNCTION_TYPE)
2047 return 1;
2049 /* Check for mismatched call attributes. */
2050 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2051 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2052 m1 = lookup_attribute ("medium_call", TYPE_ATTRIBUTES (type1)) != NULL;
2053 m2 = lookup_attribute ("medium_call", TYPE_ATTRIBUTES (type2)) != NULL;
2054 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2055 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2057 /* Only bother to check if an attribute is defined. */
2058 if (l1 | l2 | m1 | m2 | s1 | s2)
2060 /* If one type has an attribute, the other must have the same attribute. */
2061 if ((l1 != l2) || (m1 != m2) || (s1 != s2))
2062 return 0;
2064 /* Disallow mixed attributes. */
2065 if (l1 + m1 + s1 > 1)
2066 return 0;
2070 return 1;
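/* Illustrative example: the (hypothetical) declarations

     void f (void) __attribute__ ((long_call));
     void f (void) __attribute__ ((short_call));

   disagree in l1/l2 versus s1/s2, so the code above returns 0 and the
   types are treated as incompatible; two prototypes without any of the
   call-type attributes fall through and return 1.  */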
2073 /* Set the default attributes for TYPE. */
2075 void
2076 arc_set_default_type_attributes (tree type ATTRIBUTE_UNUSED)
2078 gcc_unreachable();
2081 /* Misc. utilities. */
2083 /* X and Y are two things to compare using CODE. Emit the compare insn and
2084 return the rtx for the cc reg in the proper mode. */
2087 gen_compare_reg (rtx comparison, machine_mode omode)
2089 enum rtx_code code = GET_CODE (comparison);
2090 rtx x = XEXP (comparison, 0);
2091 rtx y = XEXP (comparison, 1);
2092 rtx tmp, cc_reg;
2093 machine_mode mode, cmode;
2096 cmode = GET_MODE (x);
2097 if (cmode == VOIDmode)
2098 cmode = GET_MODE (y);
2099 gcc_assert (cmode == SImode || cmode == SFmode || cmode == DFmode);
2100 if (cmode == SImode)
2102 if (!register_operand (x, SImode))
2104 if (register_operand (y, SImode))
2106 tmp = x;
2107 x = y;
2108 y = tmp;
2109 code = swap_condition (code);
2111 else
2112 x = copy_to_mode_reg (SImode, x);
2114 if (GET_CODE (y) == SYMBOL_REF && flag_pic)
2115 y = copy_to_mode_reg (SImode, y);
2117 else
2119 x = force_reg (cmode, x);
2120 y = force_reg (cmode, y);
2122 mode = SELECT_CC_MODE (code, x, y);
2124 cc_reg = gen_rtx_REG (mode, CC_REG);
2126 /* ??? FIXME (x-y)==0, as done by both cmpsfpx_raw and
2127 cmpdfpx_raw, is not a correct comparison for floats:
2128 http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
2130 if (TARGET_ARGONAUT_SET
2131 && ((cmode == SFmode && TARGET_SPFP) || (cmode == DFmode && TARGET_DPFP)))
2133 switch (code)
2135 case NE: case EQ: case LT: case UNGE: case LE: case UNGT:
2136 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2137 break;
2138 case GT: case UNLE: case GE: case UNLT:
2139 code = swap_condition (code);
2140 tmp = x;
2141 x = y;
2142 y = tmp;
2143 break;
2144 default:
2145 gcc_unreachable ();
2147 if (cmode == SFmode)
2149 emit_insn (gen_cmpsfpx_raw (x, y));
2151 else /* DFmode */
2153 /* Dx regs are accepted directly by the insn. */
2154 emit_insn (gen_cmpdfpx_raw (x, y));
2157 if (mode != CC_FPXmode)
2158 emit_insn (gen_rtx_SET (cc_reg,
2159 gen_rtx_COMPARE (mode,
2160 gen_rtx_REG (CC_FPXmode, 61),
2161 const0_rtx)));
2163 else if (TARGET_FPX_QUARK && (cmode == SFmode))
2165 switch (code)
2167 case NE: case EQ: case GT: case UNLE: case GE: case UNLT:
2168 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2169 break;
2170 case LT: case UNGE: case LE: case UNGT:
2171 code = swap_condition (code);
2172 tmp = x;
2173 x = y;
2174 y = tmp;
2175 break;
2176 default:
2177 gcc_unreachable ();
2180 emit_insn (gen_cmp_quark (cc_reg,
2181 gen_rtx_COMPARE (mode, x, y)));
2183 else if (TARGET_HARD_FLOAT
2184 && ((cmode == SFmode && TARGET_FP_SP_BASE)
2185 || (cmode == DFmode && TARGET_FP_DP_BASE)))
2186 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
2187 else if (GET_MODE_CLASS (cmode) == MODE_FLOAT && TARGET_OPTFPE)
2189 rtx op0 = gen_rtx_REG (cmode, 0);
2190 rtx op1 = gen_rtx_REG (cmode, GET_MODE_SIZE (cmode) / UNITS_PER_WORD);
2191 bool swap = false;
2193 switch (code)
2195 case NE: case EQ: case GT: case UNLE: case GE: case UNLT:
2196 case UNEQ: case LTGT: case ORDERED: case UNORDERED:
2197 break;
2198 case LT: case UNGE: case LE: case UNGT:
2199 code = swap_condition (code);
2200 swap = true;
2201 break;
2202 default:
2203 gcc_unreachable ();
2205 if (currently_expanding_to_rtl)
2207 if (swap)
2209 tmp = x;
2210 x = y;
2211 y = tmp;
2213 emit_move_insn (op0, x);
2214 emit_move_insn (op1, y);
2216 else
2218 gcc_assert (rtx_equal_p (op0, x));
2219 gcc_assert (rtx_equal_p (op1, y));
2220 if (swap)
2222 op0 = y;
2223 op1 = x;
2226 emit_insn (gen_cmp_float (cc_reg, gen_rtx_COMPARE (mode, op0, op1)));
2228 else
2229 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
2230 return gen_rtx_fmt_ee (code, omode, cc_reg, const0_rtx);
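/* Illustrative use: called on a (gt ...) comparison of two SImode
   registers with OMODE == SImode, the code above emits the selected
   compare insn and returns something like
   (gt:SI (reg:CC 61) (const_int 0)), where the exact CC mode comes from
   SELECT_CC_MODE.  */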
2233 /* Return true if VALUE, a const_double, will fit in a limm (4 byte number).
2234 We assume the value can be either signed or unsigned. */
2236 bool
2237 arc_double_limm_p (rtx value)
2239 HOST_WIDE_INT low, high;
2241 gcc_assert (GET_CODE (value) == CONST_DOUBLE);
2243 if (TARGET_DPFP)
2244 return true;
2246 low = CONST_DOUBLE_LOW (value);
2247 high = CONST_DOUBLE_HIGH (value);
2249 if (low & 0x80000000)
2251 return (((unsigned HOST_WIDE_INT) low <= 0xffffffff && high == 0)
2252 || (((low & - (unsigned HOST_WIDE_INT) 0x80000000)
2253 == - (unsigned HOST_WIDE_INT) 0x80000000)
2254 && high == -1));
2256 else
2258 return (unsigned HOST_WIDE_INT) low <= 0x7fffffff && high == 0;
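/* Illustrative cases: low = 0x7fffffff with high = 0 fits (an ordinary
   positive 32-bit value), whereas low = 1 with high = 1 needs more than
   32 significant bits and does not; with TARGET_DPFP every CONST_DOUBLE
   is accepted outright.  */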
2262 /* Do any needed setup for a variadic function. For the ARC, we must
2263 create a register parameter block, and then copy any anonymous arguments
2264 in registers to memory.
2266 CUM has not been updated for the last named argument which has type TYPE
2267 and mode MODE, and we rely on this fact. */
2269 static void
2270 arc_setup_incoming_varargs (cumulative_args_t args_so_far,
2271 machine_mode mode, tree type,
2272 int *pretend_size, int no_rtl)
2274 int first_anon_arg;
2275 CUMULATIVE_ARGS next_cum;
2277 /* We must treat `__builtin_va_alist' as an anonymous arg. */
2279 next_cum = *get_cumulative_args (args_so_far);
2280 arc_function_arg_advance (pack_cumulative_args (&next_cum),
2281 mode, type, true);
2282 first_anon_arg = next_cum;
2284 if (FUNCTION_ARG_REGNO_P (first_anon_arg))
2286 /* First anonymous (unnamed) argument is in a reg. */
2288 /* Note that first_reg_offset < MAX_ARC_PARM_REGS. */
2289 int first_reg_offset = first_anon_arg;
2291 if (!no_rtl)
2293 rtx regblock
2294 = gen_rtx_MEM (BLKmode, plus_constant (Pmode, arg_pointer_rtx,
2295 FIRST_PARM_OFFSET (0)));
2296 move_block_from_reg (first_reg_offset, regblock,
2297 MAX_ARC_PARM_REGS - first_reg_offset);
2300 *pretend_size
2301 = ((MAX_ARC_PARM_REGS - first_reg_offset ) * UNITS_PER_WORD);
2305 /* Cost functions. */
2307 /* Provide the costs of an addressing mode that contains ADDR.
2308 If ADDR is not a valid address, its cost is irrelevant. */
2311 arc_address_cost (rtx addr, machine_mode, addr_space_t, bool speed)
2313 switch (GET_CODE (addr))
2315 case REG :
2316 return speed || satisfies_constraint_Rcq (addr) ? 0 : 1;
2317 case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
2318 case PRE_MODIFY: case POST_MODIFY:
2319 return !speed;
2321 case LABEL_REF :
2322 case SYMBOL_REF :
2323 case CONST :
2324 if (TARGET_NPS_CMEM && cmem_address (addr, SImode))
2325 return 0;
2326 /* Most likely needs a LIMM. */
2327 return COSTS_N_INSNS (1);
2329 case PLUS :
2331 register rtx plus0 = XEXP (addr, 0);
2332 register rtx plus1 = XEXP (addr, 1);
2334 if (GET_CODE (plus0) != REG
2335 && (GET_CODE (plus0) != MULT
2336 || !CONST_INT_P (XEXP (plus0, 1))
2337 || (INTVAL (XEXP (plus0, 1)) != 2
2338 && INTVAL (XEXP (plus0, 1)) != 4)))
2339 break;
2341 switch (GET_CODE (plus1))
2343 case CONST_INT :
2344 return (!RTX_OK_FOR_OFFSET_P (SImode, plus1)
2345 ? COSTS_N_INSNS (1)
2346 : speed
2348 : (satisfies_constraint_Rcq (plus0)
2349 && satisfies_constraint_O (plus1))
2351 : 1);
2352 case REG:
2353 return (speed < 1 ? 0
2354 : (satisfies_constraint_Rcq (plus0)
2355 && satisfies_constraint_Rcq (plus1))
2356 ? 0 : 1);
2357 case CONST :
2358 case SYMBOL_REF :
2359 case LABEL_REF :
2360 return COSTS_N_INSNS (1);
2361 default:
2362 break;
2364 break;
2366 default:
2367 break;
2370 return 4;
2373 /* Emit instruction X with the frame related bit set. */
2375 static rtx
2376 frame_insn (rtx x)
2378 x = emit_insn (x);
2379 RTX_FRAME_RELATED_P (x) = 1;
2380 return x;
2383 /* Emit a frame insn to move SRC to DST. */
2385 static rtx
2386 frame_move (rtx dst, rtx src)
2388 rtx tmp = gen_rtx_SET (dst, src);
2389 RTX_FRAME_RELATED_P (tmp) = 1;
2390 return frame_insn (tmp);
2393 /* Like frame_move, but add a REG_INC note for REG if ADDR contains an
2394 auto increment address, or is zero. */
2396 static rtx
2397 frame_move_inc (rtx dst, rtx src, rtx reg, rtx addr)
2399 rtx insn = frame_move (dst, src);
2401 if (!addr
2402 || GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == POST_INC
2403 || GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY)
2404 add_reg_note (insn, REG_INC, reg);
2405 return insn;
2408 /* Emit a frame insn which adjusts a frame address register REG by OFFSET. */
2410 static rtx
2411 frame_add (rtx reg, HOST_WIDE_INT offset)
2413 gcc_assert ((offset & 0x3) == 0);
2414 if (!offset)
2415 return NULL_RTX;
2416 return frame_move (reg, plus_constant (Pmode, reg, offset));
2419 /* Emit a frame insn which adjusts stack pointer by OFFSET. */
2421 static rtx
2422 frame_stack_add (HOST_WIDE_INT offset)
2424 return frame_add (stack_pointer_rtx, offset);
2427 /* Traditionally, we push saved registers first in the prologue,
2428 then we allocate the rest of the frame - and reverse in the epilogue.
2429 This has still its merits for ease of debugging, or saving code size
2430 or even execution time if the stack frame is so large that some accesses
2431 can't be encoded anymore with offsets in the instruction code when using
2432 a different scheme.
2433 Also, it would be a good starting point if we got instructions to help
2434 with register save/restore.
2436 However, often stack frames are small, and the pushing / popping has
2437 some costs:
2438 - the stack modification prevents a lot of scheduling.
2439 - frame allocation / deallocation needs extra instructions.
2440 - unless we know that we compile ARC700 user code, we need to put
2441 a memory barrier after frame allocation / before deallocation to
2442 prevent interrupts clobbering our data in the frame.
2443 In particular, we don't have any such guarantees for library functions,
2444 which, on the other hand, tend to have small frames.
2446 Thus, for small frames, we'd like to use a different scheme:
2447 - The frame is allocated in full with the first prologue instruction,
2448 and deallocated in full with the last epilogue instruction.
2449 Thus, the instructions in-between can be freely scheduled.
2450 - If the function has no outgoing arguments on the stack, we can allocate
2451 one register save slot at the top of the stack. This register can then
2452 be saved simultaneously with frame allocation, and restored with
2453 frame deallocation.
2454 This register can be picked depending on scheduling considerations,
2455 although some thought should go into having some set of registers
2456 to be potentially lingering after a call, and others to be available
2457 immediately - i.e. in the absence of interprocedural optimization, we
2458 can use an ABI-like convention for register allocation to reduce
2459 stalls after function return. */
2460 /* Function prologue/epilogue handlers. */
2462 /* ARCompact stack frames look like:
2464 Before call After call
2465 high +-----------------------+ +-----------------------+
2466 mem | reg parm save area | | reg parm save area |
2467 | only created for | | only created for |
2468 | variable arg fns | | variable arg fns |
2469 AP +-----------------------+ +-----------------------+
2470 | return addr register | | return addr register |
2471 | (if required) | | (if required) |
2472 +-----------------------+ +-----------------------+
2473 | | | |
2474 | reg save area | | reg save area |
2475 | | | |
2476 +-----------------------+ +-----------------------+
2477 | frame pointer | | frame pointer |
2478 | (if required) | | (if required) |
2479 FP +-----------------------+ +-----------------------+
2480 | | | |
2481 | local/temp variables | | local/temp variables |
2482 | | | |
2483 +-----------------------+ +-----------------------+
2484 | | | |
2485 | arguments on stack | | arguments on stack |
2486 | | | |
2487 SP +-----------------------+ +-----------------------+
2488 | reg parm save area |
2489 | only created for |
2490 | variable arg fns |
2491 AP +-----------------------+
2492 | return addr register |
2493 | (if required) |
2494 +-----------------------+
2496 | reg save area |
2498 +-----------------------+
2499 | frame pointer |
2500 | (if required) |
2501 FP +-----------------------+
2503 | local/temp variables |
2505 +-----------------------+
2507 | arguments on stack |
2508 low | |
2509 mem SP +-----------------------+
2511 Notes:
2512 1) The "reg parm save area" does not exist for non variable argument fns.
2513 The "reg parm save area" can be eliminated completely if we created our
2514 own va-arc.h, but that has tradeoffs as well (so it's not done). */
2516 /* Structure to be filled in by arc_compute_frame_size with register
2517 save masks, and offsets for the current function. */
2518 struct GTY (()) arc_frame_info
2520 unsigned int total_size; /* # bytes that the entire frame takes up. */
2521 unsigned int extra_size; /* # bytes of extra stuff. */
2522 unsigned int pretend_size; /* # bytes we push and pretend caller did. */
2523 unsigned int args_size; /* # bytes that outgoing arguments take up. */
2524 unsigned int reg_size; /* # bytes needed to store regs. */
2525 unsigned int var_size; /* # bytes that variables take up. */
2526 unsigned int reg_offset; /* Offset from new sp to store regs. */
2527 unsigned int gmask; /* Mask of saved gp registers. */
2528 int initialized; /* Nonzero if frame size already calculated. */
2529 short millicode_start_reg;
2530 short millicode_end_reg;
2531 bool save_return_addr;
2534 /* Defining data structures for per-function information. */
2536 typedef struct GTY (()) machine_function
2538 unsigned int fn_type;
2539 struct arc_frame_info frame_info;
2540 /* To keep track of unalignment caused by short insns. */
2541 int unalign;
2542 int force_short_suffix; /* Used when disgorging return delay slot insns. */
2543 const char *size_reason;
2544 struct arc_ccfsm ccfsm_current;
2545 /* Map from uid to ccfsm state during branch shortening. */
2546 rtx ccfsm_current_insn;
2547 char arc_reorg_started;
2548 char prescan_initialized;
2549 } machine_function;
2551 /* Type of function DECL.
2553 The result is cached. To reset the cache at the end of a function,
2554 call with DECL = NULL_TREE. */
2556 unsigned int
2557 arc_compute_function_type (struct function *fun)
2559 tree attr, decl = fun->decl;
2560 unsigned int fn_type = fun->machine->fn_type;
2562 if (fn_type != ARC_FUNCTION_UNKNOWN)
2563 return fn_type;
2565 /* Check if it is a naked function. */
2566 if (lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) != NULL_TREE)
2567 fn_type |= ARC_FUNCTION_NAKED;
2568 else
2569 fn_type |= ARC_FUNCTION_NORMAL;
2571 /* Now see if this is an interrupt handler. */
2572 attr = lookup_attribute ("interrupt", DECL_ATTRIBUTES (decl));
2573 if (attr != NULL_TREE)
2575 tree value, args = TREE_VALUE (attr);
2577 gcc_assert (list_length (args) == 1);
2578 value = TREE_VALUE (args);
2579 gcc_assert (TREE_CODE (value) == STRING_CST);
2581 if (!strcmp (TREE_STRING_POINTER (value), "ilink1")
2582 || !strcmp (TREE_STRING_POINTER (value), "ilink"))
2583 fn_type |= ARC_FUNCTION_ILINK1;
2584 else if (!strcmp (TREE_STRING_POINTER (value), "ilink2"))
2585 fn_type |= ARC_FUNCTION_ILINK2;
2586 else if (!strcmp (TREE_STRING_POINTER (value), "firq"))
2587 fn_type |= ARC_FUNCTION_FIRQ;
2588 else
2589 gcc_unreachable ();
2592 return fun->machine->fn_type = fn_type;
2595 #define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
2596 #define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
2598 /* Tell prologue and epilogue if register REGNO should be saved / restored.
2599 The return address and frame pointer are treated separately.
2600 Don't consider them here.
2601 Addition for pic: The gp register needs to be saved if the current
2602 function changes it to access gotoff variables.
2603 FIXME: This will not be needed if we used some arbitrary register
2604 instead of r26. */
2606 static bool
2607 arc_must_save_register (int regno, struct function *func)
2609 unsigned int fn_type = arc_compute_function_type (func);
2610 bool irq_auto_save_p = ((irq_ctrl_saved.irq_save_last_reg >= regno)
2611 && ARC_AUTO_IRQ_P (fn_type));
2612 bool firq_auto_save_p = ARC_FAST_INTERRUPT_P (fn_type);
2614 switch (rgf_banked_register_count)
2616 case 4:
2617 firq_auto_save_p &= (regno < 4);
2618 break;
2619 case 8:
2620 firq_auto_save_p &= ((regno < 4) || ((regno > 11) && (regno < 16)));
2621 break;
2622 case 16:
2623 firq_auto_save_p &= ((regno < 4) || ((regno > 9) && (regno < 16))
2624 || ((regno > 25) && (regno < 29))
2625 || ((regno > 29) && (regno < 32)));
2626 break;
2627 case 32:
2628 firq_auto_save_p &= (regno != 29) && (regno < 32);
2629 break;
2630 default:
2631 firq_auto_save_p = false;
2632 break;
2635 if ((regno) != RETURN_ADDR_REGNUM
2636 && (regno) != FRAME_POINTER_REGNUM
2637 && df_regs_ever_live_p (regno)
2638 && (!call_used_regs[regno]
2639 || ARC_INTERRUPT_P (fn_type))
2640 /* Do not emit code for auto saved regs. */
2641 && !irq_auto_save_p
2642 && !firq_auto_save_p)
2643 return true;
2645 if (flag_pic && crtl->uses_pic_offset_table
2646 && regno == PIC_OFFSET_TABLE_REGNUM)
2647 return true;
2649 return false;
2652 /* Return true if the return address must be saved in the current function,
2653 otherwise return false. */
2655 static bool
2656 arc_must_save_return_addr (struct function *func)
2658 if (func->machine->frame_info.save_return_addr)
2659 return true;
2661 return false;
2664 /* Helper function to wrap FRAME_POINTER_NEEDED. We do this as
2665 FRAME_POINTER_NEEDED will not be true until the IRA (Integrated
2666 Register Allocator) pass, while we want to get the frame size
2667 correct earlier than the IRA pass.
2669 When a function uses eh_return we must ensure that the fp register
2670 is saved and then restored so that the unwinder can restore the
2671 correct value for the frame we are going to jump to.
2673 To do this we force all frames that call eh_return to require a
2674 frame pointer (see arc_frame_pointer_required), this
2675 will ensure that the previous frame pointer is stored on entry to
2676 the function, and will then be reloaded at function exit.
2678 As the frame pointer is handled as a special case in our prologue
2679 and epilogue code it must not be saved and restored using the
2680 MUST_SAVE_REGISTER mechanism otherwise we run into issues where GCC
2681 believes that the function is not using a frame pointer and that
2682 the value in the fp register is the frame pointer, while the
2683 prologue and epilogue are busy saving and restoring the fp
2684 register.
2686 During compilation of a function the frame size is evaluated
2687 multiple times; it is not until the reload pass is complete that the
2688 frame size is considered fixed (it is at this point that space for
2689 all spills has been allocated). However the frame_pointer_needed
2690 variable is not set true until the register allocation pass, as a
2691 result in the early stages the frame size does not include space
2692 for the frame pointer to be spilled.
2694 The problem that this causes is that the rtl generated for
2695 EH_RETURN_HANDLER_RTX uses the details of the frame size to compute
2696 the offset from the frame pointer at which the return address
2697 lives. However, in early passes GCC has not yet realised we need a
2698 frame pointer, and so has not included space for the frame pointer
2699 in the frame size, and so gets the offset of the return address
2700 wrong. This should not be an issue as in later passes GCC has
2701 realised that the frame pointer needs to be spilled, and has
2702 increased the frame size. However, the rtl for the
2703 EH_RETURN_HANDLER_RTX is not regenerated to use the newer, larger
2704 offset, and the wrong smaller offset is used. */
2706 static bool
2707 arc_frame_pointer_needed (void)
2709 return (frame_pointer_needed || crtl->calls_eh_return);
2712 /* Return non-zero if there are registers to be saved or loaded using
2713 millicode thunks. We can only use consecutive sequences starting
2714 with r13, and not going beyond r25.
2715 GMASK is a bitmask of registers to save. This function sets
2716 FRAME->millicode_start_reg .. FRAME->millicode_end_reg to the range
2717 of registers to be saved / restored with a millicode call. */
2719 static int
2720 arc_compute_millicode_save_restore_regs (unsigned int gmask,
2721 struct arc_frame_info *frame)
2723 int regno;
2725 int start_reg = 13, end_reg = 25;
2727 for (regno = start_reg; regno <= end_reg && (gmask & (1L << regno));)
2728 regno++;
2729 end_reg = regno - 1;
2730 /* There is no point in using millicode thunks if we don't save/restore
2731 at least three registers. For non-leaf functions we also have the
2732 blink restore. */
2733 if (regno - start_reg >= 3 - (crtl->is_leaf == 0))
2735 frame->millicode_start_reg = 13;
2736 frame->millicode_end_reg = regno - 1;
2737 return 1;
2739 return 0;
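/* Illustrative example: a GMASK covering r13-r17 gives a consecutive run
   of five registers, so millicode_start_reg = 13, millicode_end_reg = 17
   and the function returns 1; a mask covering only r13-r14 in a leaf
   function saves fewer than three registers and the function returns 0.  */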
2742 /* Return the bytes needed to compute the frame pointer from the
2743 current stack pointer. */
2745 static unsigned int
2746 arc_compute_frame_size (void)
2748 int regno;
2749 unsigned int total_size, var_size, args_size, pretend_size, extra_size;
2750 unsigned int reg_size, reg_offset;
2751 unsigned int gmask;
2752 struct arc_frame_info *frame_info;
2753 int size;
2755 /* The answer might already be known. */
2756 if (cfun->machine->frame_info.initialized)
2757 return cfun->machine->frame_info.total_size;
2759 frame_info = &cfun->machine->frame_info;
2760 size = ARC_STACK_ALIGN (get_frame_size ());
2762 /* 1) Size of locals and temporaries. */
2763 var_size = size;
2765 /* 2) Size of outgoing arguments. */
2766 args_size = crtl->outgoing_args_size;
2768 /* 3) Calculate space needed for saved registers.
2769 ??? We ignore the extension registers for now. */
2771 /* See if this is an interrupt handler. Call used registers must be saved
2772 for them too. */
2774 reg_size = 0;
2775 gmask = 0;
2777 for (regno = 0; regno <= 31; regno++)
2779 if (arc_must_save_register (regno, cfun))
2781 reg_size += UNITS_PER_WORD;
2782 gmask |= 1L << regno;
2786 /* In a frame that calls __builtin_eh_return two data registers are
2787 used to pass values back to the exception handler.
2789 Ensure that these registers are spilled to the stack so that the
2790 exception throw code can find them, and update the saved values.
2791 The handling code will then consume these reloaded values to
2792 handle the exception. */
2793 if (crtl->calls_eh_return)
2794 for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
2796 reg_size += UNITS_PER_WORD;
2797 gmask |= 1 << regno;
2800 /* 4) Space for back trace data structure.
2801 <return addr reg size> (if required) + <fp size> (if required). */
2802 frame_info->save_return_addr
2803 = (!crtl->is_leaf || df_regs_ever_live_p (RETURN_ADDR_REGNUM)
2804 || crtl->calls_eh_return);
2805 /* Save blink also in a leaf function when millicode thunk calls are used. */
2806 if (optimize_size
2807 && !TARGET_NO_MILLICODE_THUNK_SET
2808 && !crtl->calls_eh_return)
2810 if (arc_compute_millicode_save_restore_regs (gmask, frame_info))
2811 frame_info->save_return_addr = true;
2814 extra_size = 0;
2815 if (arc_must_save_return_addr (cfun))
2816 extra_size = 4;
2817 if (arc_frame_pointer_needed ())
2818 extra_size += 4;
2820 /* 5) Space for variable arguments passed in registers */
2821 pretend_size = crtl->args.pretend_args_size;
2823 /* Ensure everything before the locals is aligned appropriately. */
2825 unsigned int extra_plus_reg_size;
2826 unsigned int extra_plus_reg_size_aligned;
2828 extra_plus_reg_size = extra_size + reg_size;
2829 extra_plus_reg_size_aligned = ARC_STACK_ALIGN(extra_plus_reg_size);
2830 reg_size = extra_plus_reg_size_aligned - extra_size;
2833 /* Compute total frame size. */
2834 total_size = var_size + args_size + extra_size + pretend_size + reg_size;
2836 /* It used to be the case that the alignment was forced at this
2837 point. However, that is dangerous, calculations based on
2838 total_size would be wrong. Given that this has never cropped up
2839 as an issue I've changed this to an assert for now. */
2840 gcc_assert (total_size == ARC_STACK_ALIGN (total_size));
2842 /* Compute offset of register save area from stack pointer:
2843 Frame: pretend_size <blink> reg_size <fp> var_size args_size <--sp
2845 reg_offset = (total_size - (pretend_size + reg_size + extra_size)
2846 + (arc_frame_pointer_needed () ? 4 : 0));
2848 /* Save computed information. */
2849 frame_info->total_size = total_size;
2850 frame_info->extra_size = extra_size;
2851 frame_info->pretend_size = pretend_size;
2852 frame_info->var_size = var_size;
2853 frame_info->args_size = args_size;
2854 frame_info->reg_size = reg_size;
2855 frame_info->reg_offset = reg_offset;
2856 frame_info->gmask = gmask;
2857 frame_info->initialized = reload_completed;
2859 /* Ok, we're done. */
2860 return total_size;
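/* A worked example with hypothetical numbers: 24 bytes of locals
   (var_size = 24), no outgoing arguments (args_size = 0), two call-saved
   registers (reg_size = 8), a saved blink plus a saved fp
   (extra_size = 8) and no pretend args give

     total_size = 24 + 0 + 8 + 0 + 8 = 40
     reg_offset = 40 - (0 + 8 + 8) + 4 = 28

   i.e. the register save area starts 28 bytes above the final sp, just
   above the slot holding the saved frame pointer.  */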
2863 /* Common code to save/restore registers. */
2864 /* BASE_REG is the base register to use for addressing and to adjust.
2865 GMASK is a bitmask of general purpose registers to save/restore.
2866 EPILOGUE_P is 0 for the prologue, 1 for the epilogue, 2 for a sibling-thunk epilogue.
2867 If *FIRST_OFFSET is non-zero, add it first to BASE_REG - preferably
2868 using a pre-modify for the first memory access. *FIRST_OFFSET is then
2869 zeroed. */
2871 static void
2872 arc_save_restore (rtx base_reg,
2873 unsigned int gmask, int epilogue_p, int *first_offset)
2875 unsigned int offset = 0;
2876 int regno;
2877 struct arc_frame_info *frame = &cfun->machine->frame_info;
2878 rtx sibthunk_insn = NULL_RTX;
2880 if (gmask)
2882 /* Millicode thunks implementation:
2883 Generates calls to millicode thunks for registers starting from r13, up to r25.
2884 Present limitations:
2885 - Only one range is supported. The remaining regs will have the ordinary
2886 st and ld instructions for stores and loads. Hence a gmask asking
2887 to store r13-r14, r16-r25 will only generate calls to store and
2888 load r13 to r14, while store and load insns will be generated for
2889 r16 to r25 in the prologue and epilogue respectively.
2891 - Presently the library only supports register ranges starting from r13.
2893 if (epilogue_p == 2 || frame->millicode_end_reg > 14)
2895 int start_call = frame->millicode_start_reg;
2896 int end_call = frame->millicode_end_reg;
2897 int n_regs = end_call - start_call + 1;
2898 int i = 0, r, off = 0;
2899 rtx insn;
2900 rtx ret_addr = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
2902 if (*first_offset)
2904 /* "reg_size" won't be more than 127 . */
2905 gcc_assert (epilogue_p || abs (*first_offset) <= 127);
2906 frame_add (base_reg, *first_offset);
2907 *first_offset = 0;
2909 insn = gen_rtx_PARALLEL
2910 (VOIDmode, rtvec_alloc ((epilogue_p == 2) + n_regs + 1));
2911 if (epilogue_p == 2)
2912 i += 2;
2913 else
2914 XVECEXP (insn, 0, n_regs) = gen_rtx_CLOBBER (VOIDmode, ret_addr);
2915 for (r = start_call; r <= end_call; r++, off += UNITS_PER_WORD, i++)
2917 rtx reg = gen_rtx_REG (SImode, r);
2918 rtx mem
2919 = gen_frame_mem (SImode, plus_constant (Pmode, base_reg, off));
2921 if (epilogue_p)
2922 XVECEXP (insn, 0, i) = gen_rtx_SET (reg, mem);
2923 else
2924 XVECEXP (insn, 0, i) = gen_rtx_SET (mem, reg);
2925 gmask = gmask & ~(1L << r);
2927 if (epilogue_p == 2)
2928 sibthunk_insn = insn;
2929 else
2931 insn = frame_insn (insn);
2932 for (r = start_call, off = 0;
2933 r <= end_call;
2934 r++, off += UNITS_PER_WORD)
2936 rtx reg = gen_rtx_REG (SImode, r);
2937 if (epilogue_p)
2938 add_reg_note (insn, REG_CFA_RESTORE, reg);
2939 else
2941 rtx mem = gen_rtx_MEM (SImode, plus_constant (Pmode,
2942 base_reg,
2943 off));
2945 add_reg_note (insn, REG_CFA_OFFSET,
2946 gen_rtx_SET (mem, reg));
2950 offset += off;
2953 for (regno = 0; regno <= 31; regno++)
2955 machine_mode mode = SImode;
2956 bool found = false;
2958 if (TARGET_LL64
2959 && (regno % 2 == 0)
2960 && ((gmask & (1L << regno)) != 0)
2961 && ((gmask & (1L << (regno+1))) != 0))
2963 found = true;
2964 mode = DImode;
2966 else if ((gmask & (1L << regno)) != 0)
2968 found = true;
2969 mode = SImode;
2972 if (found)
2974 rtx reg = gen_rtx_REG (mode, regno);
2975 rtx addr, mem;
2976 int cfa_adjust = *first_offset;
2978 if (*first_offset)
2980 gcc_assert (!offset);
2981 addr = plus_constant (Pmode, base_reg, *first_offset);
2982 addr = gen_rtx_PRE_MODIFY (Pmode, base_reg, addr);
2983 *first_offset = 0;
2985 else
2987 gcc_assert (SMALL_INT (offset));
2988 addr = plus_constant (Pmode, base_reg, offset);
2990 mem = gen_frame_mem (mode, addr);
2991 if (epilogue_p)
2993 rtx insn =
2994 frame_move_inc (reg, mem, base_reg, addr);
2995 add_reg_note (insn, REG_CFA_RESTORE, reg);
2996 if (cfa_adjust)
2998 enum reg_note note = REG_CFA_ADJUST_CFA;
2999 add_reg_note (insn, note,
3000 gen_rtx_SET (stack_pointer_rtx,
3001 plus_constant (Pmode,
3002 stack_pointer_rtx,
3003 cfa_adjust)));
3006 else
3007 frame_move_inc (mem, reg, base_reg, addr);
3008 offset += UNITS_PER_WORD;
3009 if (mode == DImode)
3011 offset += UNITS_PER_WORD;
3012 ++regno;
3014 } /* if */
3015 } /* for */
3016 }/* if */
3017 if (sibthunk_insn)
3019 int start_call = frame->millicode_start_reg;
3020 int end_call = frame->millicode_end_reg;
3021 int r;
3023 rtx r12 = gen_rtx_REG (Pmode, 12);
3025 frame_insn (gen_rtx_SET (r12, GEN_INT (offset)));
3026 XVECEXP (sibthunk_insn, 0, 0) = ret_rtx;
3027 XVECEXP (sibthunk_insn, 0, 1)
3028 = gen_rtx_SET (stack_pointer_rtx,
3029 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r12));
3030 sibthunk_insn = emit_jump_insn (sibthunk_insn);
3031 RTX_FRAME_RELATED_P (sibthunk_insn) = 1;
3033 /* Would be nice if we could do this earlier, when the PARALLEL
3034 is populated, but these need to be attached after the
3035 emit. */
3036 for (r = start_call; r <= end_call; r++)
3038 rtx reg = gen_rtx_REG (SImode, r);
3039 add_reg_note (sibthunk_insn, REG_CFA_RESTORE, reg);
3042 } /* arc_save_restore */
3044 /* Build dwarf information when the context is saved via AUX_IRQ_CTRL
3045 mechanism. */
3047 static void
3048 arc_dwarf_emit_irq_save_regs (void)
3050 rtx tmp, par, insn, reg;
3051 int i, offset, j;
3053 par = gen_rtx_SEQUENCE (VOIDmode,
3054 rtvec_alloc (irq_ctrl_saved.irq_save_last_reg + 1
3055 + irq_ctrl_saved.irq_save_blink
3056 + irq_ctrl_saved.irq_save_lpcount
3057 + 1));
3059 /* Build the stack adjustment note for unwind info. */
3060 j = 0;
3061 offset = UNITS_PER_WORD * (irq_ctrl_saved.irq_save_last_reg + 1
3062 + irq_ctrl_saved.irq_save_blink
3063 + irq_ctrl_saved.irq_save_lpcount);
3064 tmp = plus_constant (Pmode, stack_pointer_rtx, -1 * offset);
3065 tmp = gen_rtx_SET (stack_pointer_rtx, tmp);
3066 RTX_FRAME_RELATED_P (tmp) = 1;
3067 XVECEXP (par, 0, j++) = tmp;
3069 offset -= UNITS_PER_WORD;
3071 /* 1st goes LP_COUNT. */
3072 if (irq_ctrl_saved.irq_save_lpcount)
3074 reg = gen_rtx_REG (SImode, 60);
3075 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3076 tmp = gen_frame_mem (SImode, tmp);
3077 tmp = gen_rtx_SET (tmp, reg);
3078 RTX_FRAME_RELATED_P (tmp) = 1;
3079 XVECEXP (par, 0, j++) = tmp;
3080 offset -= UNITS_PER_WORD;
3083 /* 2nd goes BLINK. */
3084 if (irq_ctrl_saved.irq_save_blink)
3086 reg = gen_rtx_REG (SImode, 31);
3087 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3088 tmp = gen_frame_mem (SImode, tmp);
3089 tmp = gen_rtx_SET (tmp, reg);
3090 RTX_FRAME_RELATED_P (tmp) = 1;
3091 XVECEXP (par, 0, j++) = tmp;
3092 offset -= UNITS_PER_WORD;
3095 /* Build the parallel of the remaining registers recorded as saved
3096 for unwind. */
3097 for (i = irq_ctrl_saved.irq_save_last_reg; i >= 0; i--)
3099 reg = gen_rtx_REG (SImode, i);
3100 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
3101 tmp = gen_frame_mem (SImode, tmp);
3102 tmp = gen_rtx_SET (tmp, reg);
3103 RTX_FRAME_RELATED_P (tmp) = 1;
3104 XVECEXP (par, 0, j++) = tmp;
3105 offset -= UNITS_PER_WORD;
3108 /* Dummy insn used to anchor the dwarf info. */
3109 insn = emit_insn (gen_stack_irq_dwarf());
3110 add_reg_note (insn, REG_FRAME_RELATED_EXPR, par);
3111 RTX_FRAME_RELATED_P (insn) = 1;
3114 /* Set up the stack and frame pointer (if desired) for the function. */
3116 void
3117 arc_expand_prologue (void)
3119 int size;
3120 unsigned int gmask = cfun->machine->frame_info.gmask;
3121 /* unsigned int frame_pointer_offset;*/
3122 unsigned int frame_size_to_allocate;
3123 /* (FIXME: The first store will use a PRE_MODIFY; this will usually be r13.
3124 Change the stack layout so that we rather store a high register with the
3125 PRE_MODIFY, thus enabling more short insn generation.) */
3126 int first_offset = 0;
3127 unsigned int fn_type = arc_compute_function_type (cfun);
3129 /* Naked functions don't have a prologue. */
3130 if (ARC_NAKED_P (fn_type))
3131 return;
3133 /* Compute total frame size. */
3134 size = arc_compute_frame_size ();
3136 if (flag_stack_usage_info)
3137 current_function_static_stack_size = size;
3139 /* Keep track of frame size to be allocated. */
3140 frame_size_to_allocate = size;
3142 /* These cases shouldn't happen. Catch them now. */
3143 gcc_assert (!(size == 0 && gmask));
3145 /* Allocate space for register arguments if this is a variadic function. */
3146 if (cfun->machine->frame_info.pretend_size != 0)
3148 /* Ensure pretend_size is at most 8 * UNITS_PER_WORD (32 bytes). */
3149 gcc_assert (cfun->machine->frame_info.pretend_size <= 32);
3151 frame_stack_add (-(HOST_WIDE_INT)cfun->machine->frame_info.pretend_size);
3152 frame_size_to_allocate -= cfun->machine->frame_info.pretend_size;
3155 /* IRQ using automatic save mechanism will save the register before
3156 anything we do. */
3157 if (ARC_AUTO_IRQ_P (fn_type)
3158 && !ARC_FAST_INTERRUPT_P (fn_type))
3160 arc_dwarf_emit_irq_save_regs ();
3163 /* The home-grown ABI says link register is saved first. */
3164 if (arc_must_save_return_addr (cfun)
3165 && !ARC_AUTOBLINK_IRQ_P (fn_type))
3167 rtx ra = gen_rtx_REG (SImode, RETURN_ADDR_REGNUM);
3168 rtx mem = gen_frame_mem (Pmode,
3169 gen_rtx_PRE_DEC (Pmode,
3170 stack_pointer_rtx));
3172 frame_move_inc (mem, ra, stack_pointer_rtx, 0);
3173 frame_size_to_allocate -= UNITS_PER_WORD;
3176 /* Save any needed call-saved regs (and call-used if this is an
3177 interrupt handler) for ARCompact ISA. */
3178 if (cfun->machine->frame_info.reg_size)
3180 first_offset = -cfun->machine->frame_info.reg_size;
3181 /* N.B. FRAME_POINTER_MASK and RETURN_ADDR_MASK are cleared in gmask. */
3182 arc_save_restore (stack_pointer_rtx, gmask, 0, &first_offset);
3183 frame_size_to_allocate -= cfun->machine->frame_info.reg_size;
3186 /* In the case of millicode thunk, we need to restore the clobbered
3187 blink register. */
3188 if (cfun->machine->frame_info.millicode_end_reg > 0
3189 && arc_must_save_return_addr (cfun))
3191 HOST_WIDE_INT tmp = cfun->machine->frame_info.reg_size;
3192 emit_insn (gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
3193 gen_rtx_MEM (Pmode,
3194 plus_constant (Pmode,
3195 stack_pointer_rtx,
3196 tmp))));
3199 /* Save frame pointer if needed. First save the FP on stack, if not
3200 autosaved. */
3201 if (arc_frame_pointer_needed ()
3202 && !ARC_AUTOFP_IRQ_P (fn_type))
3204 rtx addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3205 GEN_INT (-UNITS_PER_WORD + first_offset));
3206 rtx mem = gen_frame_mem (Pmode, gen_rtx_PRE_MODIFY (Pmode,
3207 stack_pointer_rtx,
3208 addr));
3209 frame_move_inc (mem, frame_pointer_rtx, stack_pointer_rtx, 0);
3210 frame_size_to_allocate -= UNITS_PER_WORD;
3211 first_offset = 0;
3214 /* Emit mov fp,sp. */
3215 if (arc_frame_pointer_needed ())
3217 frame_move (frame_pointer_rtx, stack_pointer_rtx);
3220 /* ??? We don't handle the case where the saved regs are more than 252
3221 bytes away from sp. This can be handled by decrementing sp once, saving
3222 the regs, and then decrementing it again. The epilogue doesn't have this
3223 problem as the `ld' insn takes reg+limm values (though it would be more
3224 efficient to avoid reg+limm). */
3226 frame_size_to_allocate -= first_offset;
3227 /* Allocate the stack frame. */
3228 if (frame_size_to_allocate > 0)
3230 frame_stack_add ((HOST_WIDE_INT) 0 - frame_size_to_allocate);
3231 /* If the frame pointer is needed, emit a special barrier that
3232 will prevent the scheduler from moving stores to the frame
3233 before the stack adjustment. */
3234 if (arc_frame_pointer_needed ())
3235 emit_insn (gen_stack_tie (stack_pointer_rtx,
3236 hard_frame_pointer_rtx));
3239 /* Setup the gp register, if needed. */
3240 if (crtl->uses_pic_offset_table)
3241 arc_finalize_pic ();
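/* Summary of the prologue emitted above, in order (illustrative):
   1. allocate the pretend-args area, if any;
   2. store blink with a pre-decrement of sp, unless auto-saved by the IRQ
      hardware;
   3. store the call-saved registers covered by gmask;
   4. store fp, folding any outstanding offset into a PRE_MODIFY;
   5. mov fp,sp when a frame pointer is needed;
   6. allocate the remaining frame space, adding a stack tie when a frame
      pointer is in use;
   7. set up the PIC base register if the function uses it.  */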
3244 /* Do any necessary cleanup after a function to restore stack, frame,
3245 and regs. */
3247 void
3248 arc_expand_epilogue (int sibcall_p)
3250 int size;
3251 unsigned int fn_type = arc_compute_function_type (cfun);
3253 size = arc_compute_frame_size ();
3255 unsigned int pretend_size = cfun->machine->frame_info.pretend_size;
3256 unsigned int frame_size;
3257 unsigned int size_to_deallocate;
3258 int restored;
3259 int can_trust_sp_p = !cfun->calls_alloca;
3260 int first_offset = 0;
3261 int millicode_p = cfun->machine->frame_info.millicode_end_reg > 0;
3262 rtx insn;
3264 /* Naked functions don't have an epilogue. */
3265 if (ARC_NAKED_P (fn_type))
3266 return;
3268 size_to_deallocate = size;
3270 frame_size = size - (pretend_size +
3271 cfun->machine->frame_info.reg_size +
3272 cfun->machine->frame_info.extra_size);
3274 /* ??? There are lots of optimizations that can be done here.
3275 EG: Use fp to restore regs if it's closer.
3276 Maybe in time we'll do them all. For now, always restore regs from
3277 sp, but don't restore sp if we don't have to. */
3279 if (!can_trust_sp_p)
3280 gcc_assert (arc_frame_pointer_needed ());
3282 /* Restore stack pointer to the beginning of saved register area for
3283 ARCompact ISA. */
3284 if (frame_size)
3286 if (arc_frame_pointer_needed ())
3287 frame_move (stack_pointer_rtx, frame_pointer_rtx);
3288 else
3289 first_offset = frame_size;
3290 size_to_deallocate -= frame_size;
3292 else if (!can_trust_sp_p)
3293 frame_stack_add (-frame_size);
3296 /* Restore any saved registers. */
3297 if (arc_frame_pointer_needed ()
3298 && !ARC_AUTOFP_IRQ_P (fn_type))
3300 rtx addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
3302 insn = frame_move_inc (frame_pointer_rtx, gen_frame_mem (Pmode, addr),
3303 stack_pointer_rtx, 0);
3304 add_reg_note (insn, REG_CFA_RESTORE, frame_pointer_rtx);
3305 add_reg_note (insn, REG_CFA_DEF_CFA,
3306 plus_constant (SImode, stack_pointer_rtx,
3307 4));
3308 size_to_deallocate -= UNITS_PER_WORD;
3311 /* Load blink after the millicode thunk calls when optimizing for size. */
3312 if (millicode_p)
3314 int sibthunk_p = (!sibcall_p
3315 && fn_type == ARC_FUNCTION_NORMAL
3316 && !cfun->machine->frame_info.pretend_size);
3318 gcc_assert (!(cfun->machine->frame_info.gmask
3319 & (FRAME_POINTER_MASK | RETURN_ADDR_MASK)));
3320 arc_save_restore (stack_pointer_rtx,
3321 cfun->machine->frame_info.gmask,
3322 1 + sibthunk_p, &first_offset);
3323 if (sibthunk_p)
3324 return;
3326 /* If we are to restore registers, and first_offset would require
3327 a limm to be encoded in a PRE_MODIFY, yet we can add it with a
3328 fast add to the stack pointer, do this now. */
3329 if ((!SMALL_INT (first_offset)
3330 && cfun->machine->frame_info.gmask
3331 && ((TARGET_ARC700 && !optimize_size)
3332 ? first_offset <= 0x800
3333 : satisfies_constraint_C2a (GEN_INT (first_offset))))
3334 /* Also do this if we have both gprs and return
3335 address to restore, and they both would need a LIMM. */
3336 || (arc_must_save_return_addr (cfun)
3337 && !SMALL_INT ((cfun->machine->frame_info.reg_size + first_offset) >> 2)
3338 && cfun->machine->frame_info.gmask))
3340 frame_stack_add (first_offset);
3341 first_offset = 0;
3343 if (arc_must_save_return_addr (cfun)
3344 && !ARC_AUTOBLINK_IRQ_P (fn_type))
3346 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
3347 int ra_offs = cfun->machine->frame_info.reg_size + first_offset;
3348 rtx addr = plus_constant (Pmode, stack_pointer_rtx, ra_offs);
3349 HOST_WIDE_INT cfa_adjust = 0;
3351 /* If the load of blink would need a LIMM, but we can add
3352 the offset quickly to sp, do the latter. */
3353 if (!SMALL_INT (ra_offs >> 2)
3354 && !cfun->machine->frame_info.gmask
3355 && ((TARGET_ARC700 && !optimize_size)
3356 ? ra_offs <= 0x800
3357 : satisfies_constraint_C2a (GEN_INT (ra_offs))))
3359 size_to_deallocate -= ra_offs - first_offset;
3360 first_offset = 0;
3361 frame_stack_add (ra_offs);
3362 ra_offs = 0;
3363 addr = stack_pointer_rtx;
3365 /* See if we can combine the load of the return address with the
3366 final stack adjustment.
3367 We need a separate load if there are still registers to
3368 restore. We also want a separate load if the combined insn
3369 would need a limm, but a separate load doesn't. */
3370 if (ra_offs
3371 && !cfun->machine->frame_info.gmask
3372 && (SMALL_INT (ra_offs) || !SMALL_INT (ra_offs >> 2)))
3374 addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, addr);
3375 cfa_adjust = ra_offs;
3376 first_offset = 0;
3377 size_to_deallocate -= cfun->machine->frame_info.reg_size;
3379 else if (!ra_offs && size_to_deallocate == UNITS_PER_WORD)
3381 addr = gen_rtx_POST_INC (Pmode, addr);
3382 cfa_adjust = GET_MODE_SIZE (Pmode);
3383 size_to_deallocate = 0;
3386 insn = frame_move_inc (ra, gen_frame_mem (Pmode, addr),
3387 stack_pointer_rtx, addr);
3388 if (cfa_adjust)
3390 enum reg_note note = REG_CFA_ADJUST_CFA;
3392 add_reg_note (insn, note,
3393 gen_rtx_SET (stack_pointer_rtx,
3394 plus_constant (SImode, stack_pointer_rtx,
3395 cfa_adjust)));
3397 add_reg_note (insn, REG_CFA_RESTORE, ra);
3400 if (!millicode_p)
3402 if (cfun->machine->frame_info.reg_size)
3403 arc_save_restore (stack_pointer_rtx,
3404 /* The zeroing of these two bits is unnecessary, but leave this in for clarity. */
3405 cfun->machine->frame_info.gmask
3406 & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK), 1, &first_offset);
3409 /* The rest of this function does the following:
3410 ARCompact : handle epilogue_delay, restore sp (phase-2), return
3413 /* Keep track of how much of the stack pointer we've restored.
3414 It makes the following a lot more readable. */
3415 size_to_deallocate += first_offset;
3416 restored = size - size_to_deallocate;
3418 if (size > restored)
3419 frame_stack_add (size - restored);
3421 /* For frames that use __builtin_eh_return, the register defined by
3422 EH_RETURN_STACKADJ_RTX is set to 0 for all standard return paths.
3423 On eh_return paths however, the register is set to the value that
3424 should be added to the stack pointer in order to restore the
3425 correct stack pointer for the exception handling frame.
3427 For ARC we are going to use r2 for EH_RETURN_STACKADJ_RTX, add
3428 this onto the stack for eh_return frames. */
3429 if (crtl->calls_eh_return)
3430 emit_insn (gen_add2_insn (stack_pointer_rtx,
3431 EH_RETURN_STACKADJ_RTX));
3433 /* Emit the return instruction. */
3434 if (sibcall_p == FALSE)
3435 emit_jump_insn (gen_simple_return ());
3438 /* Return rtx for the location of the return address on the stack,
3439 suitable for use in __builtin_eh_return. The new return address
3440 will be written to this location in order to redirect the return to
3441 the exception handler. */
3444 arc_eh_return_address_location (void)
3446 rtx mem;
3447 int offset;
3448 struct arc_frame_info *afi;
3450 arc_compute_frame_size ();
3451 afi = &cfun->machine->frame_info;
3453 gcc_assert (crtl->calls_eh_return);
3454 gcc_assert (afi->save_return_addr);
3455 gcc_assert (afi->extra_size >= 4);
3457 /* The '-4' removes the size of the return address, which is
3458 included in the 'extra_size' field. */
3459 offset = afi->reg_size + afi->extra_size - 4;
3460 mem = gen_frame_mem (Pmode,
3461 plus_constant (Pmode, frame_pointer_rtx, offset));
3463 /* The following should not be needed, and is really a hack. The
3464 issue being worked around here is that the DSE (Dead Store
3465 Elimination) pass will remove this write to the stack as it sees
3466 a single store and no corresponding read. The read however
3467 occurs in the epilogue code, which is not added into the function
3468 rtl until a later pass. So, at the time of DSE, the decision to
3469 remove this store seems perfectly sensible. Marking the memory
3470 address as volatile obviously has the effect of preventing DSE
3471 from removing the store. */
3472 MEM_VOLATILE_P (mem) = 1;
3473 return mem;
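/* Hedged usage sketch: GCC's generic expansion of

     __builtin_eh_return (offset, handler);

   (as used e.g. by the unwinder) stores HANDLER into the MEM returned
   here, so the epilogue's reload of blink from the stack picks up the
   handler address instead of the original return address.  */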
3476 /* PIC */
3478 /* Helper to generate unspec constant. */
3480 static rtx
3481 arc_unspec_offset (rtx loc, int unspec)
3483 return gen_rtx_CONST (Pmode, gen_rtx_UNSPEC (Pmode, gen_rtvec (1, loc),
3484 unspec));
3487 /* Emit special PIC prologues and epilogues. */
3488 /* If the function has any GOTOFF relocations, then the GOTBASE
3489 register has to be setup in the prologue
3490 The instruction needed at the function start for setting up the
3491 GOTBASE register is
3492 add rdest, pc,
3493 ----------------------------------------------------------
3494 The rtl to be emitted for this should be:
3495 set (reg basereg)
3496 (plus (reg pc)
3497 (const (unspec (symref _DYNAMIC) 3)))
3498 ---------------------------------------------------------- */
3500 static void
3501 arc_finalize_pic (void)
3503 rtx pat;
3504 rtx baseptr_rtx = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
3506 if (crtl->uses_pic_offset_table == 0)
3507 return;
3509 gcc_assert (flag_pic != 0);
3511 pat = gen_rtx_SYMBOL_REF (Pmode, "_DYNAMIC");
3512 pat = arc_unspec_offset (pat, ARC_UNSPEC_GOT);
3513 pat = gen_rtx_SET (baseptr_rtx, pat);
3515 emit_insn (pat);
3518 /* !TARGET_BARREL_SHIFTER support. */
3519 /* Emit a shift insn to set OP0 to OP1 shifted by OP2; CODE specifies what
3520 kind of shift. */
3522 void
3523 emit_shift (enum rtx_code code, rtx op0, rtx op1, rtx op2)
3525 rtx shift = gen_rtx_fmt_ee (code, SImode, op1, op2);
3526 rtx pat
3527 = ((shift4_operator (shift, SImode) ? gen_shift_si3 : gen_shift_si3_loop)
3528 (op0, op1, op2, shift));
3529 emit_insn (pat);
3532 /* Output the assembler code for doing a shift.
3533 We go to a bit of trouble to generate efficient code as the ARC601 only has
3534 single bit shifts. This is taken from the h8300 port. We only have one
3535 mode of shifting and can't access individual bytes like the h8300 can, so
3536 this is greatly simplified (at the expense of not generating hyper-
3537 efficient code).
3539 This function is not used if the variable shift insns are present. */
3541 /* FIXME: This probably can be done using a define_split in arc.md.
3542 Alternately, generate rtx rather than output instructions. */
3544 const char *
3545 output_shift (rtx *operands)
3547 /* static int loopend_lab;*/
3548 rtx shift = operands[3];
3549 machine_mode mode = GET_MODE (shift);
3550 enum rtx_code code = GET_CODE (shift);
3551 const char *shift_one;
3553 gcc_assert (mode == SImode);
3555 switch (code)
3557 case ASHIFT: shift_one = "add %0,%1,%1"; break;
3558 case ASHIFTRT: shift_one = "asr %0,%1"; break;
3559 case LSHIFTRT: shift_one = "lsr %0,%1"; break;
3560 default: gcc_unreachable ();
3563 if (GET_CODE (operands[2]) != CONST_INT)
3565 output_asm_insn ("and.f lp_count,%2, 0x1f", operands);
3566 goto shiftloop;
3568 else
3570 int n;
3572 n = INTVAL (operands[2]);
3574 /* Only consider the lower 5 bits of the shift count. */
3575 n = n & 0x1f;
3577 /* First see if we can do them inline. */
3578 /* ??? We could get better scheduling & shorter code (using short insns)
3579 by using splitters. Alas, that'd be even more verbose. */
3580 if (code == ASHIFT && n <= 9 && n > 2
3581 && dest_reg_operand (operands[4], SImode))
3583 output_asm_insn ("mov %4,0\n\tadd3 %0,%4,%1", operands);
3584 for (n -=3 ; n >= 3; n -= 3)
3585 output_asm_insn ("add3 %0,%4,%0", operands);
3586 if (n == 2)
3587 output_asm_insn ("add2 %0,%4,%0", operands);
3588 else if (n)
3589 output_asm_insn ("add %0,%0,%0", operands);
3591 else if (n <= 4)
3593 while (--n >= 0)
3595 output_asm_insn (shift_one, operands);
3596 operands[1] = operands[0];
3599 /* See if we can use a rotate/and. */
3600 else if (n == BITS_PER_WORD - 1)
3602 switch (code)
3604 case ASHIFT :
3605 output_asm_insn ("and %0,%1,1\n\tror %0,%0", operands);
3606 break;
3607 case ASHIFTRT :
3608 /* The ARC doesn't have a rol insn. Use something else. */
3609 output_asm_insn ("add.f 0,%1,%1\n\tsbc %0,%0,%0", operands);
3610 break;
3611 case LSHIFTRT :
3612 /* The ARC doesn't have a rol insn. Use something else. */
3613 output_asm_insn ("add.f 0,%1,%1\n\trlc %0,0", operands);
3614 break;
3615 default:
3616 break;
3619 else if (n == BITS_PER_WORD - 2 && dest_reg_operand (operands[4], SImode))
3621 switch (code)
3623 case ASHIFT :
3624 output_asm_insn ("and %0,%1,3\n\tror %0,%0\n\tror %0,%0", operands);
3625 break;
3626 case ASHIFTRT :
3627 #if 1 /* Need some scheduling comparisons. */
3628 output_asm_insn ("add.f %4,%1,%1\n\tsbc %0,%0,%0\n\t"
3629 "add.f 0,%4,%4\n\trlc %0,%0", operands);
3630 #else
3631 output_asm_insn ("add.f %4,%1,%1\n\tbxor %0,%4,31\n\t"
3632 "sbc.f %0,%0,%4\n\trlc %0,%0", operands);
3633 #endif
3634 break;
3635 case LSHIFTRT :
3636 #if 1
3637 output_asm_insn ("add.f %4,%1,%1\n\trlc %0,0\n\t"
3638 "add.f 0,%4,%4\n\trlc %0,%0", operands);
3639 #else
3640 output_asm_insn ("add.f %0,%1,%1\n\trlc.f %0,0\n\t"
3641 "and %0,%0,1\n\trlc %0,%0", operands);
3642 #endif
3643 break;
3644 default:
3645 break;
3648 else if (n == BITS_PER_WORD - 3 && code == ASHIFT)
3649 output_asm_insn ("and %0,%1,7\n\tror %0,%0\n\tror %0,%0\n\tror %0,%0",
3650 operands);
3651 /* Must loop. */
3652 else
3654 operands[2] = GEN_INT (n);
3655 output_asm_insn ("mov.f lp_count, %2", operands);
3657 shiftloop:
3659 output_asm_insn ("lpnz\t2f", operands);
3660 output_asm_insn (shift_one, operands);
3661 output_asm_insn ("nop", operands);
3662 fprintf (asm_out_file, "2:\t%s end single insn loop\n",
3663 ASM_COMMENT_START);
3668 return "";
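/* Illustrative expansions of the constant cases above (operands[4] is
   the scratch register):
     x << 2   two single-bit "add %0,%1,%1" style shifts;
     x << 5   "mov %4,0; add3 %0,%4,%1; add2 %0,%4,%0" when a suitable
              scratch register is available;
     x << 31  "and %0,%1,1; ror %0,%0";
   other counts fall back to the lp_count zero-overhead loop.  */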
3671 /* Nested function support. */
3673 /* Output assembler code for a block containing the constant parts of
3674 a trampoline, leaving space for variable parts. A trampoline looks
3675 like this:
3677 ld_s r12,[pcl,8]
3678 ld r11,[pcl,12]
3679 j_s [r12]
3680 .word function's address
3681 .word static chain value
3685 static void
3686 arc_asm_trampoline_template (FILE *f)
3688 asm_fprintf (f, "\tld_s\t%s,[pcl,8]\n", ARC_TEMP_SCRATCH_REG);
3689 asm_fprintf (f, "\tld\t%s,[pcl,12]\n", reg_names[STATIC_CHAIN_REGNUM]);
3690 asm_fprintf (f, "\tj_s\t[%s]\n", ARC_TEMP_SCRATCH_REG);
3691 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
3692 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
3695 /* Emit RTL insns to initialize the variable parts of a trampoline.
3696 FNADDR is an RTX for the address of the function's pure code. CXT
3697 is an RTX for the static chain value for the function.
3699 The fastest trampoline to execute for trampolines within +-8KB of CTX
3700 would be:
3702 add2 r11,pcl,s12
3703 j [limm] 0x20200f80 limm
3705 and that would also be faster to write to the stack by computing
3706 the offset from CTX to TRAMP at compile time. However, it would
3707 really be better to get rid of the high cost of cache invalidation
3708 when generating trampolines, which requires that the code part of
3709 trampolines stays constant, and additionally either making sure
3710 that no executable code but trampolines is on the stack and that no
3711 icache entries linger for the area of the stack from before the stack
3712 was allocated, or allocating trampolines in trampoline-only cache
3713 lines, or allocating trampolines from a special pool of pre-allocated
3714 trampolines. */
3716 static void
3717 arc_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
3719 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3721 emit_block_move (tramp, assemble_trampoline_template (),
3722 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
3723 emit_move_insn (adjust_address (tramp, SImode, 8), fnaddr);
3724 emit_move_insn (adjust_address (tramp, SImode, 12), cxt);
3725 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
3726 LCT_NORMAL, VOIDmode, XEXP (tramp, 0), Pmode,
3727 plus_constant (Pmode, XEXP (tramp, 0), TRAMPOLINE_SIZE),
3728 Pmode);
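/* To summarize the layout that results from the code above: bytes 0-7 hold
   the fixed code emitted by arc_asm_trampoline_template, bytes 8-11 the
   function address and bytes 12-15 the static chain value, and
   __clear_cache is invoked over the whole TRAMPOLINE_SIZE block so that
   the freshly written code is visible to the instruction cache.  */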
3731 /* This is set briefly to 1 when we output a ".as" address modifier, and then
3732 reset when we output the scaled address. */
3733 static int output_scaled = 0;
3735 /* Print operand X (an rtx) in assembler syntax to file FILE.
3736 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
3737 For `%' followed by punctuation, CODE is the punctuation and X is null. */
3738 /* In final.c:output_asm_insn:
3739 'l' : label
3740 'a' : address
3741 'c' : constant address if CONSTANT_ADDRESS_P
3742 'n' : negative
3743 Here:
3744 'Z': log2(x+1)-1
3745 'z': log2
3746 'M': log2(~x)
3747 'p': bit Position of lsb
3748 's': size of bit field
3749 '#': condbranch delay slot suffix
3750 '*': jump delay slot suffix
3751 '?' : nonjump-insn suffix for conditional execution or short instruction
3752 '!' : jump / call suffix for conditional execution or short instruction
3753 '`': fold constant inside unary operator, re-recognize, and emit.
3756 'R': Second word
3758 'B': Branch comparison operand - suppress sda reference
3759 'H': Most significant word
3760 'L': Least significant word
3761 'A': ASCII decimal representation of floating point value
3762 'U': Load/store update or scaling indicator
3763 'V': cache bypass indicator for volatile
3767 'O': Operator
3768 'o': original symbol - no @ prepending. */
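/* A hypothetical example of how these codes combine in an output template
   (the template and operand numbers are illustrative, not taken from the
   md file): in "ld%U1%V1 %0,%1", the 'U' case below appends ".a", ".ab" or
   ".as" according to the addressing mode of operand 1, and the 'V' case
   appends ".di" when operand 1 is a volatile memory reference and
   TARGET_VOLATILE_CACHE_SET is false.  */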
3770 void
3771 arc_print_operand (FILE *file, rtx x, int code)
3773 switch (code)
3775 case 'Z':
3776 if (GET_CODE (x) == CONST_INT)
3777 fprintf (file, "%d",exact_log2(INTVAL (x) + 1) - 1 );
3778 else
3779 output_operand_lossage ("invalid operand to %%Z code");
3781 return;
3783 case 'z':
3784 if (GET_CODE (x) == CONST_INT)
3785 fprintf (file, "%d",exact_log2(INTVAL (x)) );
3786 else
3787 output_operand_lossage ("invalid operand to %%z code");
3789 return;
3791 case 'c':
3792 if (GET_CODE (x) == CONST_INT)
3793 fprintf (file, "%ld", INTVAL (x) );
3794 else
3795 output_operand_lossage ("invalid operands to %%c code");
3797 return;
3799 case 'M':
3800 if (GET_CODE (x) == CONST_INT)
3801 fprintf (file, "%d",exact_log2(~INTVAL (x)) );
3802 else
3803 output_operand_lossage ("invalid operand to %%M code");
3805 return;
3807 case 'p':
3808 if (GET_CODE (x) == CONST_INT)
3809 fprintf (file, "%d", exact_log2 (INTVAL (x) & -INTVAL (x)));
3810 else
3811 output_operand_lossage ("invalid operand to %%p code");
3812 return;
3814 case 's':
3815 if (GET_CODE (x) == CONST_INT)
3817 HOST_WIDE_INT i = INTVAL (x);
3818 HOST_WIDE_INT s = exact_log2 (i & -i);
3819 fprintf (file, "%d", exact_log2 (((0xffffffffUL & i) >> s) + 1));
3821 else
3822 output_operand_lossage ("invalid operand to %%s code");
3823 return;
3825 case '#' :
3826 /* Conditional branches depending on condition codes.
3827 Note that this is only for branches that were known to depend on
3828 condition codes before delay slot scheduling;
3829 out-of-range brcc / bbit expansions should use '*'.
3830 This distinction is important because of the different
3831 allowable delay slot insns and the output of the delay suffix
3832 for TARGET_AT_DBR_CONDEXEC. */
3833 case '*' :
3834 /* Unconditional branches / branches not depending on condition codes.
3835 This could also be a CALL_INSN.
3836 Output the appropriate delay slot suffix. */
3837 if (final_sequence && final_sequence->len () != 1)
3839 rtx_insn *jump = final_sequence->insn (0);
3840 rtx_insn *delay = final_sequence->insn (1);
3842 /* For TARGET_PAD_RETURN we might have grabbed the delay insn. */
3843 if (delay->deleted ())
3844 return;
3845 if (JUMP_P (jump) && INSN_ANNULLED_BRANCH_P (jump))
3846 fputs (INSN_FROM_TARGET_P (delay) ? ".d"
3847 : TARGET_AT_DBR_CONDEXEC && code == '#' ? ".d"
3848 : get_attr_type (jump) == TYPE_RETURN && code == '#' ? ""
3849 : ".nd",
3850 file);
3851 else
3852 fputs (".d", file);
3854 return;
3855 case '?' : /* with leading "." */
3856 case '!' : /* without leading "." */
3857 /* This insn can be conditionally executed. See if the ccfsm machinery
3858 says it should be conditionalized.
3859 If it shouldn't, we'll check the compact attribute if this insn
3860 has a short variant, which may be used depending on code size and
3861 alignment considerations. */
3862 if (current_insn_predicate)
3863 arc_ccfsm_current.cc
3864 = get_arc_condition_code (current_insn_predicate);
3865 if (ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current))
3867 /* Is this insn in a delay slot sequence? */
3868 if (!final_sequence || XVECLEN (final_sequence, 0) < 2
3869 || current_insn_predicate
3870 || CALL_P (final_sequence->insn (0))
3871 || simplejump_p (final_sequence->insn (0)))
3873 /* This insn isn't in a delay slot sequence, or conditionalized
3874 independently of its position in a delay slot. */
3875 fprintf (file, "%s%s",
3876 code == '?' ? "." : "",
3877 arc_condition_codes[arc_ccfsm_current.cc]);
3878 /* If this is a jump, there are still short variants. However,
3879 only beq_s / bne_s have the same offset range as b_s,
3880 and the only short conditional returns are jeq_s and jne_s. */
3881 if (code == '!'
3882 && (arc_ccfsm_current.cc == ARC_CC_EQ
3883 || arc_ccfsm_current.cc == ARC_CC_NE
3884 || 0 /* FIXME: check if branch in 7 bit range. */))
3885 output_short_suffix (file);
3887 else if (code == '!') /* Jump with delay slot. */
3888 fputs (arc_condition_codes[arc_ccfsm_current.cc], file);
3889 else /* An Instruction in a delay slot of a jump or call. */
3891 rtx jump = XVECEXP (final_sequence, 0, 0);
3892 rtx insn = XVECEXP (final_sequence, 0, 1);
3894 /* If the insn is annulled and is from the target path, we need
3895 to invert the condition test. */
3896 if (JUMP_P (jump) && INSN_ANNULLED_BRANCH_P (jump))
3898 if (INSN_FROM_TARGET_P (insn))
3899 fprintf (file, "%s%s",
3900 code == '?' ? "." : "",
3901 arc_condition_codes[ARC_INVERSE_CONDITION_CODE (arc_ccfsm_current.cc)]);
3902 else
3903 fprintf (file, "%s%s",
3904 code == '?' ? "." : "",
3905 arc_condition_codes[arc_ccfsm_current.cc]);
3906 if (arc_ccfsm_current.state == 5)
3907 arc_ccfsm_current.state = 0;
3909 else
3910 /* This insn is executed for either path, so don't
3911 conditionalize it at all. */
3912 output_short_suffix (file);
3916 else
3917 output_short_suffix (file);
3918 return;
3919 case '`' :
3920 /* FIXME: fold constant inside unary operator, re-recognize, and emit. */
3921 gcc_unreachable ();
3922 case 'd' :
3923 fputs (arc_condition_codes[get_arc_condition_code (x)], file);
3924 return;
3925 case 'D' :
3926 fputs (arc_condition_codes[ARC_INVERSE_CONDITION_CODE
3927 (get_arc_condition_code (x))],
3928 file);
3929 return;
3930 case 'R' :
3931 /* Write second word of DImode or DFmode reference,
3932 register or memory. */
3933 if (GET_CODE (x) == REG)
3934 fputs (reg_names[REGNO (x)+1], file);
3935 else if (GET_CODE (x) == MEM)
3937 fputc ('[', file);
3939 /* Handle possible auto-increment. For PRE_INC / PRE_DEC /
3940 PRE_MODIFY, we will have handled the first word already;
3941 For POST_INC / POST_DEC / POST_MODIFY, the access to the
3942 first word will be done later. In either case, the access
3943 to the first word will do the modify, and we only have
3944 to add an offset of four here. */
3945 if (GET_CODE (XEXP (x, 0)) == PRE_INC
3946 || GET_CODE (XEXP (x, 0)) == PRE_DEC
3947 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY
3948 || GET_CODE (XEXP (x, 0)) == POST_INC
3949 || GET_CODE (XEXP (x, 0)) == POST_DEC
3950 || GET_CODE (XEXP (x, 0)) == POST_MODIFY)
3951 output_address (VOIDmode,
3952 plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 4));
3953 else if (output_scaled)
3955 rtx addr = XEXP (x, 0);
3956 int size = GET_MODE_SIZE (GET_MODE (x));
3958 output_address (VOIDmode,
3959 plus_constant (Pmode, XEXP (addr, 0),
3960 ((INTVAL (XEXP (addr, 1)) + 4)
3961 >> (size == 2 ? 1 : 2))));
3962 output_scaled = 0;
3964 else
3965 output_address (VOIDmode,
3966 plus_constant (Pmode, XEXP (x, 0), 4));
3967 fputc (']', file);
3969 else
3970 output_operand_lossage ("invalid operand to %%R code");
3971 return;
3972 case 'S' :
3973 /* FIXME: remove %S option. */
3974 break;
3975 case 'B' /* Branch or other LIMM ref - must not use sda references. */ :
3976 if (CONSTANT_P (x))
3978 output_addr_const (file, x);
3979 return;
3981 break;
3982 case 'H' :
3983 case 'L' :
3984 if (GET_CODE (x) == REG)
3986 /* L = least significant word, H = most significant word. */
3987 if ((WORDS_BIG_ENDIAN != 0) ^ (code == 'L'))
3988 fputs (reg_names[REGNO (x)], file);
3989 else
3990 fputs (reg_names[REGNO (x)+1], file);
3992 else if (GET_CODE (x) == CONST_INT
3993 || GET_CODE (x) == CONST_DOUBLE)
3995 rtx first, second, word;
3997 split_double (x, &first, &second);
3999 if((WORDS_BIG_ENDIAN) == 0)
4000 word = (code == 'L' ? first : second);
4001 else
4002 word = (code == 'L' ? second : first);
4004 fprintf (file, "0x%08" PRIx32, ((uint32_t) INTVAL (word)));
4006 else
4007 output_operand_lossage ("invalid operand to %%H/%%L code");
4008 return;
4009 case 'A' :
4011 char str[30];
4013 gcc_assert (GET_CODE (x) == CONST_DOUBLE
4014 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT);
4016 real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x), sizeof (str), 0, 1);
4017 fprintf (file, "%s", str);
4018 return;
4020 case 'U' :
4021 /* Output a load/store with update indicator if appropriate. */
4022 if (GET_CODE (x) == MEM)
4024 rtx addr = XEXP (x, 0);
4025 switch (GET_CODE (addr))
4027 case PRE_INC: case PRE_DEC: case PRE_MODIFY:
4028 fputs (".a", file); break;
4029 case POST_INC: case POST_DEC: case POST_MODIFY:
4030 fputs (".ab", file); break;
4031 case PLUS:
4032 /* Are we using a scaled index? */
4033 if (GET_CODE (XEXP (addr, 0)) == MULT)
4034 fputs (".as", file);
4035 /* Can we use a scaled offset? */
4036 else if (CONST_INT_P (XEXP (addr, 1))
4037 && GET_MODE_SIZE (GET_MODE (x)) > 1
4038 && (!(INTVAL (XEXP (addr, 1))
4039 & (GET_MODE_SIZE (GET_MODE (x)) - 1) & 3))
4040 /* Does it make a difference? */
4041 && !SMALL_INT_RANGE(INTVAL (XEXP (addr, 1)),
4042 GET_MODE_SIZE (GET_MODE (x)) - 2, 0))
4044 fputs (".as", file);
4045 output_scaled = 1;
4047 else if (LEGITIMATE_SMALL_DATA_ADDRESS_P (addr)
4048 && GET_MODE_SIZE (GET_MODE (x)) > 1)
4050 tree decl = NULL_TREE;
4051 int align = 0;
4052 if (GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
4053 decl = SYMBOL_REF_DECL (XEXP (addr, 1));
4054 else if (GET_CODE (XEXP (XEXP (XEXP (addr, 1), 0), 0))
4055 == SYMBOL_REF)
4056 decl = SYMBOL_REF_DECL (XEXP (XEXP (XEXP (addr, 1), 0), 0));
4057 if (decl)
4058 align = DECL_ALIGN (decl);
4059 align = align / BITS_PER_UNIT;
4060 if ((GET_MODE_SIZE (GET_MODE (x)) == 2)
4061 && align && ((align & 1) == 0))
4062 fputs (".as", file);
4063 if ((GET_MODE_SIZE (GET_MODE (x)) >= 4)
4064 && align && ((align & 3) == 0))
4065 fputs (".as", file);
4067 break;
4068 case REG:
4069 break;
4070 default:
4071 gcc_assert (CONSTANT_P (addr)); break;
4074 else
4075 output_operand_lossage ("invalid operand to %%U code");
4076 return;
4077 case 'V' :
4078 /* Output cache bypass indicator for a load/store insn. Volatile memory
4079 refs are defined to use the cache bypass mechanism. */
4080 if (GET_CODE (x) == MEM)
4082 if (MEM_VOLATILE_P (x) && !TARGET_VOLATILE_CACHE_SET )
4083 fputs (".di", file);
4085 else
4086 output_operand_lossage ("invalid operand to %%V code");
4087 return;
4088 /* plt code. */
4089 case 'P':
4090 case 0 :
4091 /* Do nothing special. */
4092 break;
4093 case 'F':
4094 fputs (reg_names[REGNO (x)]+1, file);
4095 return;
4096 case '^':
4097 /* This punctuation character is needed because label references are
4098 printed in the output template using %l. This is a front end
4099 character, and when we want to emit a '@' before it, we have to use
4100 this '^'. */
4102 fputc('@',file);
4103 return;
4104 case 'O':
4105 /* Output an operator. */
4106 switch (GET_CODE (x))
4108 case PLUS: fputs ("add", file); return;
4109 case SS_PLUS: fputs ("adds", file); return;
4110 case AND: fputs ("and", file); return;
4111 case IOR: fputs ("or", file); return;
4112 case XOR: fputs ("xor", file); return;
4113 case MINUS: fputs ("sub", file); return;
4114 case SS_MINUS: fputs ("subs", file); return;
4115 case ASHIFT: fputs ("asl", file); return;
4116 case ASHIFTRT: fputs ("asr", file); return;
4117 case LSHIFTRT: fputs ("lsr", file); return;
4118 case ROTATERT: fputs ("ror", file); return;
4119 case MULT: fputs ("mpy", file); return;
4120 case ABS: fputs ("abs", file); return; /* Unconditional. */
4121 case NEG: fputs ("neg", file); return;
4122 case SS_NEG: fputs ("negs", file); return;
4123 case NOT: fputs ("not", file); return; /* Unconditional. */
4124 case ZERO_EXTEND:
4125 fputs ("ext", file); /* bmsk allows predication. */
4126 goto size_suffix;
4127 case SIGN_EXTEND: /* Unconditional. */
4128 fputs ("sex", file);
4129 size_suffix:
4130 switch (GET_MODE (XEXP (x, 0)))
4132 case E_QImode: fputs ("b", file); return;
4133 case E_HImode: fputs ("w", file); return;
4134 default: break;
4136 break;
4137 case SS_TRUNCATE:
4138 if (GET_MODE (x) != HImode)
4139 break;
4140 fputs ("sat16", file);
4141 default: break;
4143 output_operand_lossage ("invalid operand to %%O code"); return;
4144 case 'o':
4145 if (GET_CODE (x) == SYMBOL_REF)
4147 assemble_name (file, XSTR (x, 0));
4148 return;
4150 break;
4151 case '&':
4152 if (TARGET_ANNOTATE_ALIGN && cfun->machine->size_reason)
4153 fprintf (file, "; unalign: %d", cfun->machine->unalign);
4154 return;
4155 case '+':
4156 if (TARGET_V2)
4157 fputs ("m", file);
4158 else
4159 fputs ("h", file);
4160 return;
4161 case '_':
4162 if (TARGET_V2)
4163 fputs ("h", file);
4164 else
4165 fputs ("w", file);
4166 return;
4167 default :
4168 /* Unknown flag. */
4169 output_operand_lossage ("invalid operand output code");
4172 switch (GET_CODE (x))
4174 case REG :
4175 fputs (reg_names[REGNO (x)], file);
4176 break;
4177 case MEM :
4179 rtx addr = XEXP (x, 0);
4180 int size = GET_MODE_SIZE (GET_MODE (x));
4182 fputc ('[', file);
4184 switch (GET_CODE (addr))
4186 case PRE_INC: case POST_INC:
4187 output_address (VOIDmode,
4188 plus_constant (Pmode, XEXP (addr, 0), size)); break;
4189 case PRE_DEC: case POST_DEC:
4190 output_address (VOIDmode,
4191 plus_constant (Pmode, XEXP (addr, 0), -size));
4192 break;
4193 case PRE_MODIFY: case POST_MODIFY:
4194 output_address (VOIDmode, XEXP (addr, 1)); break;
4195 case PLUS:
4196 if (output_scaled)
4198 output_address (VOIDmode,
4199 plus_constant (Pmode, XEXP (addr, 0),
4200 (INTVAL (XEXP (addr, 1))
4201 >> (size == 2 ? 1 : 2))));
4202 output_scaled = 0;
4204 else
4205 output_address (VOIDmode, addr);
4206 break;
4207 default:
4208 if (flag_pic && CONSTANT_ADDRESS_P (addr))
4209 arc_output_pic_addr_const (file, addr, code);
4210 else
4211 output_address (VOIDmode, addr);
4212 break;
4214 fputc (']', file);
4215 break;
4217 case CONST_DOUBLE :
4218 /* We handle SFmode constants here as output_addr_const doesn't. */
4219 if (GET_MODE (x) == SFmode)
4221 long l;
4223 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
4224 fprintf (file, "0x%08lx", l);
4225 break;
4227 /* FALLTHRU */
4228 /* Let output_addr_const deal with it. */
4229 default :
4230 if (flag_pic
4231 || (GET_CODE (x) == CONST
4232 && GET_CODE (XEXP (x, 0)) == UNSPEC
4233 && (XINT (XEXP (x, 0), 1) == UNSPEC_TLS_OFF
4234 || XINT (XEXP (x, 0), 1) == UNSPEC_TLS_GD))
4235 || (GET_CODE (x) == CONST
4236 && GET_CODE (XEXP (x, 0)) == PLUS
4237 && GET_CODE (XEXP (XEXP (x, 0), 0)) == UNSPEC
4238 && (XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_TLS_OFF
4239 || XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_TLS_GD)))
4240 arc_output_pic_addr_const (file, x, code);
4241 else
4243 /* FIXME: Dirty way to handle @var@sda+const. Should be handled
4244 with asm_output_symbol_ref. */
4245 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4247 x = XEXP (x, 0);
4248 output_addr_const (file, XEXP (x, 0));
4249 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF && SYMBOL_REF_SMALL_P (XEXP (x, 0)))
4250 fprintf (file, "@sda");
4252 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4253 || INTVAL (XEXP (x, 1)) >= 0)
4254 fprintf (file, "+");
4255 output_addr_const (file, XEXP (x, 1));
4257 else
4258 output_addr_const (file, x);
4260 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
4261 fprintf (file, "@sda");
4262 break;
4266 /* Print a memory address as an operand to reference that memory location. */
4268 void
4269 arc_print_operand_address (FILE *file , rtx addr)
4271 register rtx base, index = 0;
4273 switch (GET_CODE (addr))
4275 case REG :
4276 fputs (reg_names[REGNO (addr)], file);
4277 break;
4278 case SYMBOL_REF :
4279 output_addr_const (file, addr);
4280 if (SYMBOL_REF_SMALL_P (addr))
4281 fprintf (file, "@sda");
4282 break;
4283 case PLUS :
4284 if (GET_CODE (XEXP (addr, 0)) == MULT)
4285 index = XEXP (XEXP (addr, 0), 0), base = XEXP (addr, 1);
4286 else if (CONST_INT_P (XEXP (addr, 0)))
4287 index = XEXP (addr, 0), base = XEXP (addr, 1);
4288 else
4289 base = XEXP (addr, 0), index = XEXP (addr, 1);
4291 gcc_assert (OBJECT_P (base));
4292 arc_print_operand_address (file, base);
4293 if (CONSTANT_P (base) && CONST_INT_P (index))
4294 fputc ('+', file);
4295 else
4296 fputc (',', file);
4297 gcc_assert (OBJECT_P (index));
4298 arc_print_operand_address (file, index);
4299 break;
4300 case CONST:
4302 rtx c = XEXP (addr, 0);
4304 if ((GET_CODE (c) == UNSPEC
4305 && (XINT (c, 1) == UNSPEC_TLS_OFF
4306 || XINT (c, 1) == UNSPEC_TLS_IE))
4307 || (GET_CODE (c) == PLUS
4308 && GET_CODE (XEXP (c, 0)) == UNSPEC
4309 && (XINT (XEXP (c, 0), 1) == UNSPEC_TLS_OFF
4310 || XINT (XEXP (c, 0), 1) == ARC_UNSPEC_GOTOFFPC)))
4312 arc_output_pic_addr_const (file, c, 0);
4313 break;
4315 gcc_assert (GET_CODE (c) == PLUS);
4316 gcc_assert (GET_CODE (XEXP (c, 0)) == SYMBOL_REF);
4317 gcc_assert (GET_CODE (XEXP (c, 1)) == CONST_INT);
4319 output_address (VOIDmode, XEXP (addr, 0));
4321 break;
4323 case PRE_INC :
4324 case PRE_DEC :
4325 /* We shouldn't get here as we've lost the mode of the memory object
4326 (which says how much to inc/dec by). */
4327 gcc_unreachable ();
4328 break;
4329 default :
4330 if (flag_pic)
4331 arc_output_pic_addr_const (file, addr, 0);
4332 else
4333 output_addr_const (file, addr);
4334 break;
4338 /* Conditional execution support.
4340 This is based on the ARM port but for now is much simpler.
4342 A finite state machine takes care of noticing whether or not instructions
4343 can be conditionally executed, and thus decrease execution time and code
4344 size by deleting branch instructions. The fsm is controlled by
4345 arc_ccfsm_advance (called by arc_final_prescan_insn), and controls the
4346 actions of PRINT_OPERAND. The patterns in the .md file for the branch
4347 insns also have a hand in this. */
4348 /* The way we leave dealing with non-annulled or annul-false delay slot
4349 insns to the consumer is awkward. */
4351 /* The state of the fsm controlling condition codes are:
4352 0: normal, do nothing special
4353 1: don't output this insn
4354 2: don't output this insn
4355 3: make insns conditional
4356 4: make insns conditional
4357 5: make insn conditional (only for outputting annulled delay slot insns)
4359 special value for cfun->machine->uid_ccfsm_state:
4360 6: return with but one insn before it since function start / call
4362 State transitions (state->state by whom, under what condition):
4363 0 -> 1 arc_ccfsm_advance, if insn is a conditional branch skipping over
4364 some instructions.
4365 0 -> 2 arc_ccfsm_advance, if insn is a conditional branch followed
4366 by zero or more non-jump insns and an unconditional branch with
4367 the same target label as the condbranch.
4368 1 -> 3 branch patterns, after having not output the conditional branch
4369 2 -> 4 branch patterns, after having not output the conditional branch
4370 0 -> 5 branch patterns, for annulled delay slot insn.
4371 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL, if the `target' label is reached
4372 (the target label has CODE_LABEL_NUMBER equal to
4373 arc_ccfsm_target_label).
4374 4 -> 0 arc_ccfsm_advance, if `target' unconditional branch is reached
4375 3 -> 1 arc_ccfsm_advance, finding an 'else' jump skipping over some insns.
4376 5 -> 0 when outputting the delay slot insn
4378 If the jump clobbers the conditions then we use states 2 and 4.
4380 A similar thing can be done with conditional return insns.
4382 We also handle separating branches from sets of the condition code.
4383 This is done here because knowledge of the ccfsm state is required,
4384 we may not be outputting the branch. */
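/* As an illustration (registers and label are hypothetical), a compare
   followed by a conditional branch around a single insn:

       cmp    r0,0
       beq    .L1
       add    r1,r1,1
   .L1:

   can be turned into

       cmp    r0,0
       add.ne r1,r1,1

   the fsm suppresses the branch (state 0 -> 1 -> 3) and the '?'/'!'
   operand codes then emit the recorded condition suffix on the formerly
   skipped insn.  */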
4386 /* arc_final_prescan_insn calls arc_ccfsm_advance to adjust arc_ccfsm_current,
4387 before letting final output INSN. */
4389 static void
4390 arc_ccfsm_advance (rtx_insn *insn, struct arc_ccfsm *state)
4392 /* BODY will hold the body of INSN. */
4393 register rtx body;
4395 /* This will be 1 if trying to repeat the trick (ie: do the `else' part of
4396 an if/then/else), and things need to be reversed. */
4397 int reverse = 0;
4399 /* If we start with a return insn, we only succeed if we find another one. */
4400 int seeking_return = 0;
4402 /* START_INSN will hold the insn from where we start looking. This is the
4403 first insn after the following code_label if REVERSE is true. */
4404 rtx_insn *start_insn = insn;
4406 /* Type of the jump_insn. Brcc insns don't affect ccfsm changes,
4407 since they don't rely on a cmp preceding them. */
4408 enum attr_type jump_insn_type;
4410 /* Allow -mdebug-ccfsm to turn this off so we can see how well it does.
4411 We can't do this in macro FINAL_PRESCAN_INSN because it's called from
4412 final_scan_insn which has `optimize' as a local. */
4413 if (optimize < 2 || TARGET_NO_COND_EXEC)
4414 return;
4416 /* Ignore notes and labels. */
4417 if (!INSN_P (insn))
4418 return;
4419 body = PATTERN (insn);
4420 /* If in state 4, check if the target branch is reached, in order to
4421 change back to state 0. */
4422 if (state->state == 4)
4424 if (insn == state->target_insn)
4426 state->target_insn = NULL;
4427 state->state = 0;
4429 return;
4432 /* If in state 3, it is possible to repeat the trick, if this insn is an
4433 unconditional branch to a label, and immediately following this branch
4434 is the previous target label which is only used once, and the label this
4435 branch jumps to is not too far off. Or in other words "we've done the
4436 `then' part, see if we can do the `else' part." */
4437 if (state->state == 3)
4439 if (simplejump_p (insn))
4441 start_insn = next_nonnote_insn (start_insn);
4442 if (GET_CODE (start_insn) == BARRIER)
4444 /* ??? Isn't this always a barrier? */
4445 start_insn = next_nonnote_insn (start_insn);
4447 if (GET_CODE (start_insn) == CODE_LABEL
4448 && CODE_LABEL_NUMBER (start_insn) == state->target_label
4449 && LABEL_NUSES (start_insn) == 1)
4450 reverse = TRUE;
4451 else
4452 return;
4454 else if (GET_CODE (body) == SIMPLE_RETURN)
4456 start_insn = next_nonnote_insn (start_insn);
4457 if (GET_CODE (start_insn) == BARRIER)
4458 start_insn = next_nonnote_insn (start_insn);
4459 if (GET_CODE (start_insn) == CODE_LABEL
4460 && CODE_LABEL_NUMBER (start_insn) == state->target_label
4461 && LABEL_NUSES (start_insn) == 1)
4463 reverse = TRUE;
4464 seeking_return = 1;
4466 else
4467 return;
4469 else
4470 return;
4473 if (GET_CODE (insn) != JUMP_INSN
4474 || GET_CODE (PATTERN (insn)) == ADDR_VEC
4475 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4476 return;
4478 /* We can't predicate BRCC or loop ends.
4479 Also, when generating PIC code, and considering a medium range call,
4480 we can't predicate the call. */
4481 jump_insn_type = get_attr_type (insn);
4482 if (jump_insn_type == TYPE_BRCC
4483 || jump_insn_type == TYPE_BRCC_NO_DELAY_SLOT
4484 || jump_insn_type == TYPE_LOOP_END
4485 || (jump_insn_type == TYPE_CALL && !get_attr_predicable (insn)))
4486 return;
4488 /* This jump might be paralleled with a clobber of the condition codes,
4489 the jump should always come first. */
4490 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4491 body = XVECEXP (body, 0, 0);
4493 if (reverse
4494 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
4495 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
4497 int insns_skipped = 0, fail = FALSE, succeed = FALSE;
4498 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
4499 int then_not_else = TRUE;
4500 /* Nonzero if next insn must be the target label. */
4501 int next_must_be_target_label_p;
4502 rtx_insn *this_insn = start_insn;
4503 rtx label = 0;
4505 /* Register the insn jumped to. */
4506 if (reverse)
4508 if (!seeking_return)
4509 label = XEXP (SET_SRC (body), 0);
4511 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
4512 label = XEXP (XEXP (SET_SRC (body), 1), 0);
4513 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
4515 label = XEXP (XEXP (SET_SRC (body), 2), 0);
4516 then_not_else = FALSE;
4518 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == SIMPLE_RETURN)
4519 seeking_return = 1;
4520 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == SIMPLE_RETURN)
4522 seeking_return = 1;
4523 then_not_else = FALSE;
4525 else
4526 gcc_unreachable ();
4528 /* If this is a non-annulled branch with a delay slot, there is
4529 no need to conditionalize the delay slot. */
4530 if ((GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) == SEQUENCE)
4531 && state->state == 0 && !INSN_ANNULLED_BRANCH_P (insn))
4533 this_insn = NEXT_INSN (this_insn);
4535 /* See how many insns this branch skips, and what kind of insns. If all
4536 insns are okay, and the label or unconditional branch to the same
4537 label is not too far away, succeed. */
4538 for (insns_skipped = 0, next_must_be_target_label_p = FALSE;
4539 !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED;
4540 insns_skipped++)
4542 rtx scanbody;
4544 this_insn = next_nonnote_insn (this_insn);
4545 if (!this_insn)
4546 break;
4548 if (next_must_be_target_label_p)
4550 if (GET_CODE (this_insn) == BARRIER)
4551 continue;
4552 if (GET_CODE (this_insn) == CODE_LABEL
4553 && this_insn == label)
4555 state->state = 1;
4556 succeed = TRUE;
4558 else
4559 fail = TRUE;
4560 break;
4563 switch (GET_CODE (this_insn))
4565 case CODE_LABEL:
4566 /* Succeed if it is the target label, otherwise fail since
4567 control falls in from somewhere else. */
4568 if (this_insn == label)
4570 state->state = 1;
4571 succeed = TRUE;
4573 else
4574 fail = TRUE;
4575 break;
4577 case BARRIER:
4578 /* Succeed if the following insn is the target label.
4579 Otherwise fail.
4580 If return insns are used then the last insn in a function
4581 will be a barrier. */
4582 next_must_be_target_label_p = TRUE;
4583 break;
4585 case CALL_INSN:
4586 /* Can handle a call insn if there are no insns after it.
4587 IE: The next "insn" is the target label. We don't have to
4588 worry about delay slots as such insns are SEQUENCE's inside
4589 INSN's. ??? It is possible to handle such insns though. */
4590 if (get_attr_cond (this_insn) == COND_CANUSE)
4591 next_must_be_target_label_p = TRUE;
4592 else
4593 fail = TRUE;
4594 break;
4596 case JUMP_INSN:
4597 scanbody = PATTERN (this_insn);
4599 /* If this is an unconditional branch to the same label, succeed.
4600 If it is to another label, do nothing. If it is conditional,
4601 fail. */
4602 /* ??? Probably, the test for the SET and the PC are
4603 unnecessary. */
4605 if (GET_CODE (scanbody) == SET
4606 && GET_CODE (SET_DEST (scanbody)) == PC)
4608 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
4609 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
4611 state->state = 2;
4612 succeed = TRUE;
4614 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
4615 fail = TRUE;
4616 else if (get_attr_cond (this_insn) != COND_CANUSE)
4617 fail = TRUE;
4619 else if (GET_CODE (scanbody) == SIMPLE_RETURN
4620 && seeking_return)
4622 state->state = 2;
4623 succeed = TRUE;
4625 else if (GET_CODE (scanbody) == PARALLEL)
4627 if (get_attr_cond (this_insn) != COND_CANUSE)
4628 fail = TRUE;
4630 break;
4632 case INSN:
4633 scanbody = PATTERN (this_insn);
4635 /* We can only do this with insns that can use the condition
4636 codes (and don't set them). */
4637 if (GET_CODE (scanbody) == SET
4638 || GET_CODE (scanbody) == PARALLEL)
4640 if (get_attr_cond (this_insn) != COND_CANUSE)
4641 fail = TRUE;
4643 /* We can't handle other insns like sequences. */
4644 else
4645 fail = TRUE;
4646 break;
4648 default:
4649 break;
4653 if (succeed)
4655 if ((!seeking_return) && (state->state == 1 || reverse))
4656 state->target_label = CODE_LABEL_NUMBER (label);
4657 else if (seeking_return || state->state == 2)
4659 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
4661 this_insn = next_nonnote_insn (this_insn);
4663 gcc_assert (!this_insn ||
4664 (GET_CODE (this_insn) != BARRIER
4665 && GET_CODE (this_insn) != CODE_LABEL));
4667 if (!this_insn)
4669 /* Oh dear! we ran off the end, give up. */
4670 extract_insn_cached (insn);
4671 state->state = 0;
4672 state->target_insn = NULL;
4673 return;
4675 state->target_insn = this_insn;
4677 else
4678 gcc_unreachable ();
4680 /* If REVERSE is true, the condition code in *STATE needs to be
4681 inverted from what it was. */
4682 if (!reverse)
4684 state->cond = XEXP (SET_SRC (body), 0);
4685 state->cc = get_arc_condition_code (XEXP (SET_SRC (body), 0));
4688 if (reverse || then_not_else)
4689 state->cc = ARC_INVERSE_CONDITION_CODE (state->cc);
4692 /* Restore recog_operand. Getting the attributes of other insns can
4693 destroy this array, but final.c assumes that it remains intact
4694 across this call; since the insn has been recognized already we
4695 call insn_extract direct. */
4696 extract_insn_cached (insn);
4700 /* Record that we are currently outputting label NUM with prefix PREFIX.
4701 If it's the label we're looking for, reset the ccfsm machinery.
4703 Called from ASM_OUTPUT_INTERNAL_LABEL. */
4705 static void
4706 arc_ccfsm_at_label (const char *prefix, int num, struct arc_ccfsm *state)
4708 if (state->state == 3 && state->target_label == num
4709 && !strcmp (prefix, "L"))
4711 state->state = 0;
4712 state->target_insn = NULL;
4716 /* We are considering a conditional branch with the condition COND.
4717 Check if we want to conditionalize a delay slot insn, and if so modify
4718 the ccfsm state accordingly.
4719 REVERSE says branch will branch when the condition is false. */
4720 void
4721 arc_ccfsm_record_condition (rtx cond, bool reverse, rtx_insn *jump,
4722 struct arc_ccfsm *state)
4724 rtx_insn *seq_insn = NEXT_INSN (PREV_INSN (jump));
4725 if (!state)
4726 state = &arc_ccfsm_current;
4728 gcc_assert (state->state == 0);
4729 if (seq_insn != jump)
4731 rtx insn = XVECEXP (PATTERN (seq_insn), 0, 1);
4733 if (!as_a<rtx_insn *> (insn)->deleted ()
4734 && INSN_ANNULLED_BRANCH_P (jump)
4735 && (TARGET_AT_DBR_CONDEXEC || INSN_FROM_TARGET_P (insn)))
4737 state->cond = cond;
4738 state->cc = get_arc_condition_code (cond);
4739 if (!reverse)
4740 arc_ccfsm_current.cc
4741 = ARC_INVERSE_CONDITION_CODE (state->cc);
4742 rtx pat = PATTERN (insn);
4743 if (GET_CODE (pat) == COND_EXEC)
4744 gcc_assert ((INSN_FROM_TARGET_P (insn)
4745 ? ARC_INVERSE_CONDITION_CODE (state->cc) : state->cc)
4746 == get_arc_condition_code (XEXP (pat, 0)));
4747 else
4748 state->state = 5;
4753 /* Update *STATE as we would when we emit INSN. */
4755 static void
4756 arc_ccfsm_post_advance (rtx_insn *insn, struct arc_ccfsm *state)
4758 enum attr_type type;
4760 if (LABEL_P (insn))
4761 arc_ccfsm_at_label ("L", CODE_LABEL_NUMBER (insn), state);
4762 else if (JUMP_P (insn)
4763 && GET_CODE (PATTERN (insn)) != ADDR_VEC
4764 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
4765 && ((type = get_attr_type (insn)) == TYPE_BRANCH
4766 || ((type == TYPE_UNCOND_BRANCH
4767 || type == TYPE_RETURN)
4768 && ARC_CCFSM_BRANCH_DELETED_P (state))))
4770 if (ARC_CCFSM_BRANCH_DELETED_P (state))
4771 ARC_CCFSM_RECORD_BRANCH_DELETED (state);
4772 else
4774 rtx src = SET_SRC (PATTERN (insn));
4775 arc_ccfsm_record_condition (XEXP (src, 0), XEXP (src, 1) == pc_rtx,
4776 insn, state);
4779 else if (arc_ccfsm_current.state == 5)
4780 arc_ccfsm_current.state = 0;
4783 /* Return true if the current insn, which is a conditional branch, is to be
4784 deleted. */
4786 bool
4787 arc_ccfsm_branch_deleted_p (void)
4789 return ARC_CCFSM_BRANCH_DELETED_P (&arc_ccfsm_current);
4792 /* Record a branch isn't output because subsequent insns can be
4793 conditionalized. */
4795 void
4796 arc_ccfsm_record_branch_deleted (void)
4798 ARC_CCFSM_RECORD_BRANCH_DELETED (&arc_ccfsm_current);
4801 /* During insn output, indicate if the current insn is predicated. */
4803 bool
4804 arc_ccfsm_cond_exec_p (void)
4806 return (cfun->machine->prescan_initialized
4807 && ARC_CCFSM_COND_EXEC_P (&arc_ccfsm_current));
4810 /* Like next_active_insn, but return NULL if we find an ADDR_(DIFF_)VEC,
4811 and look inside SEQUENCEs. */
4813 static rtx_insn *
4814 arc_next_active_insn (rtx_insn *insn, struct arc_ccfsm *statep)
4816 rtx pat;
4820 if (statep)
4821 arc_ccfsm_post_advance (insn, statep);
4822 insn = NEXT_INSN (insn);
4823 if (!insn || BARRIER_P (insn))
4824 return NULL;
4825 if (statep)
4826 arc_ccfsm_advance (insn, statep);
4828 while (NOTE_P (insn)
4829 || (cfun->machine->arc_reorg_started
4830 && LABEL_P (insn) && !label_to_alignment (insn))
4831 || (NONJUMP_INSN_P (insn)
4832 && (GET_CODE (PATTERN (insn)) == USE
4833 || GET_CODE (PATTERN (insn)) == CLOBBER)));
4834 if (!LABEL_P (insn))
4836 gcc_assert (INSN_P (insn));
4837 pat = PATTERN (insn);
4838 if (GET_CODE (pat) == ADDR_VEC || GET_CODE (pat) == ADDR_DIFF_VEC)
4839 return NULL;
4840 if (GET_CODE (pat) == SEQUENCE)
4841 return as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4843 return insn;
4846 /* When deciding if an insn should be output short, we want to know something
4847 about the following insns:
4848 - if another insn follows which we know we can output as a short insn
4849 before an alignment-sensitive point, we can output this insn short:
4850 the decision about the eventual alignment can be postponed.
4851 - if a to-be-aligned label comes next, we should output this insn such
4852 as to get / preserve 4-byte alignment.
4853 - if a likely branch without delay slot insn, or a call with an immediately
4854 following short insn comes next, we should output this insn such as to
4855 get / preserve 2 mod 4 unalignment.
4856 - do the same for a not completely unlikely branch with a short insn
4857 following before any other branch / label.
4858 - in order to decide if we are actually looking at a branch, we need to
4859 call arc_ccfsm_advance.
4860 - in order to decide if we are looking at a short insn, we should know
4861 if it is conditionalized. To a first order of approximation this is
4862 the case if the state from arc_ccfsm_advance from before this insn
4863 indicates the insn is conditionalized. However, a further refinement
4864 could be to not conditionalize an insn if the destination register(s)
4865 is/are dead in the non-executed case. */
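/* For instance (illustrative), when cfun->machine->unalign is 2 and a
   2-byte short insn is emitted, the following insn is 4-byte aligned
   again; output_short_suffix below keeps track of this by toggling
   unalign with "^= 2" each time a short variant is printed.  */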
4866 /* Return non-zero if INSN should be output as a short insn. UNALIGN is
4867 zero if the current insn is aligned to a 4-byte boundary, two otherwise.
4868 If CHECK_ATTR is greater than 0, check the iscompact attribute first. */
4871 arc_verify_short (rtx_insn *insn, int, int check_attr)
4873 enum attr_iscompact iscompact;
4874 struct machine_function *machine;
4876 if (check_attr > 0)
4878 iscompact = get_attr_iscompact (insn);
4879 if (iscompact == ISCOMPACT_FALSE)
4880 return 0;
4882 machine = cfun->machine;
4884 if (machine->force_short_suffix >= 0)
4885 return machine->force_short_suffix;
4887 return (get_attr_length (insn) & 2) != 0;
4890 /* When outputting an instruction (alternative) that can potentially be short,
4891 output the short suffix if the insn is in fact short, and update
4892 cfun->machine->unalign accordingly. */
4894 static void
4895 output_short_suffix (FILE *file)
4897 rtx_insn *insn = current_output_insn;
4899 if (arc_verify_short (insn, cfun->machine->unalign, 1))
4901 fprintf (file, "_s");
4902 cfun->machine->unalign ^= 2;
4904 /* Restore recog_operand. */
4905 extract_insn_cached (insn);
4908 /* Implement FINAL_PRESCAN_INSN. */
4910 void
4911 arc_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
4912 int noperands ATTRIBUTE_UNUSED)
4914 if (TARGET_DUMPISIZE)
4915 fprintf (asm_out_file, "\n; at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
4917 /* Output a nop if necessary to prevent a hazard.
4918 Don't do this for delay slots: inserting a nop would
4919 alter semantics, and the only time we would find a hazard is for a
4920 call function result - and in that case, the hazard is spurious to
4921 start with. */
4922 if (PREV_INSN (insn)
4923 && PREV_INSN (NEXT_INSN (insn)) == insn
4924 && arc_hazard (prev_real_insn (insn), insn))
4926 current_output_insn =
4927 emit_insn_before (gen_nop (), NEXT_INSN (PREV_INSN (insn)));
4928 final_scan_insn (current_output_insn, asm_out_file, optimize, 1, NULL);
4929 current_output_insn = insn;
4931 /* Restore extraction data which might have been clobbered by arc_hazard. */
4932 extract_constrain_insn_cached (insn);
4934 if (!cfun->machine->prescan_initialized)
4936 /* Clear lingering state from branch shortening. */
4937 memset (&arc_ccfsm_current, 0, sizeof arc_ccfsm_current);
4938 cfun->machine->prescan_initialized = 1;
4940 arc_ccfsm_advance (insn, &arc_ccfsm_current);
4942 cfun->machine->size_reason = 0;
4945 /* Given FROM and TO register numbers, say whether this elimination is allowed.
4946 Frame pointer elimination is automatically handled.
4948 All eliminations are permissible. If we need a frame
4949 pointer, we must eliminate ARG_POINTER_REGNUM into
4950 FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
4952 static bool
4953 arc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
4955 return ((to == FRAME_POINTER_REGNUM) || !arc_frame_pointer_needed ());
4958 /* Define the offset between two registers, one to be eliminated, and
4959 the other its replacement, at the start of a routine. */
4962 arc_initial_elimination_offset (int from, int to)
4964 if (!cfun->machine->frame_info.initialized)
4965 arc_compute_frame_size ();
4967 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
4969 return (cfun->machine->frame_info.extra_size
4970 + cfun->machine->frame_info.reg_size);
4973 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
4975 return (cfun->machine->frame_info.total_size
4976 - cfun->machine->frame_info.pretend_size);
4979 if ((from == FRAME_POINTER_REGNUM) && (to == STACK_POINTER_REGNUM))
4981 return (cfun->machine->frame_info.total_size
4982 - (cfun->machine->frame_info.pretend_size
4983 + cfun->machine->frame_info.extra_size
4984 + cfun->machine->frame_info.reg_size));
4987 gcc_unreachable ();
4990 static bool
4991 arc_frame_pointer_required (void)
4993 return cfun->calls_alloca || crtl->calls_eh_return;
4997 /* Return the destination address of a branch. */
5000 branch_dest (rtx branch)
5002 rtx pat = PATTERN (branch);
5003 rtx dest = (GET_CODE (pat) == PARALLEL
5004 ? SET_SRC (XVECEXP (pat, 0, 0)) : SET_SRC (pat));
5005 int dest_uid;
5007 if (GET_CODE (dest) == IF_THEN_ELSE)
5008 dest = XEXP (dest, XEXP (dest, 1) == pc_rtx ? 2 : 1);
5010 dest = XEXP (dest, 0);
5011 dest_uid = INSN_UID (dest);
5013 return INSN_ADDRESSES (dest_uid);
5017 /* Implement TARGET_ENCODE_SECTION_INFO hook. */
5019 static void
5020 arc_encode_section_info (tree decl, rtx rtl, int first)
5022 /* For sdata, SYMBOL_FLAG_LOCAL and SYMBOL_FLAG_FUNCTION.
5023 This clears machine specific flags, so has to come first. */
5024 default_encode_section_info (decl, rtl, first);
5026 /* Check if it is a function, and whether it has the
5027 [long/medium/short]_call attribute specified. */
5028 if (TREE_CODE (decl) == FUNCTION_DECL)
5030 rtx symbol = XEXP (rtl, 0);
5031 int flags = SYMBOL_REF_FLAGS (symbol);
5033 tree attr = (TREE_TYPE (decl) != error_mark_node
5034 ? TYPE_ATTRIBUTES (TREE_TYPE (decl)) : NULL_TREE);
5035 tree long_call_attr = lookup_attribute ("long_call", attr);
5036 tree medium_call_attr = lookup_attribute ("medium_call", attr);
5037 tree short_call_attr = lookup_attribute ("short_call", attr);
5039 if (long_call_attr != NULL_TREE)
5040 flags |= SYMBOL_FLAG_LONG_CALL;
5041 else if (medium_call_attr != NULL_TREE)
5042 flags |= SYMBOL_FLAG_MEDIUM_CALL;
5043 else if (short_call_attr != NULL_TREE)
5044 flags |= SYMBOL_FLAG_SHORT_CALL;
5046 SYMBOL_REF_FLAGS (symbol) = flags;
5048 else if (TREE_CODE (decl) == VAR_DECL)
5050 rtx symbol = XEXP (rtl, 0);
5052 tree attr = (TREE_TYPE (decl) != error_mark_node
5053 ? DECL_ATTRIBUTES (decl) : NULL_TREE);
5055 tree sec_attr = lookup_attribute ("section", attr);
5056 if (sec_attr)
5058 const char *sec_name
5059 = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (sec_attr)));
5060 if (strcmp (sec_name, ".cmem") == 0
5061 || strcmp (sec_name, ".cmem_shared") == 0
5062 || strcmp (sec_name, ".cmem_private") == 0)
5063 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_CMEM;
5068 /* This is how to output a definition of an internal numbered label where
5069 PREFIX is the class of label and NUM is the number within the class. */
5071 static void arc_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
5073 if (cfun)
5074 arc_ccfsm_at_label (prefix, labelno, &arc_ccfsm_current);
5075 default_internal_label (stream, prefix, labelno);
5078 /* Set the cpu type and print out other fancy things,
5079 at the top of the file. */
5081 static void arc_file_start (void)
5083 default_file_start ();
5084 fprintf (asm_out_file, "\t.cpu %s\n", arc_cpu_string);
5087 /* Cost functions. */
5089 /* Compute a (partial) cost for rtx X. Return true if the complete
5090 cost has been computed, and false if subexpressions should be
5091 scanned. In either case, *TOTAL contains the cost result. */
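/* Rough illustration of the constant costs computed below: a small
   immediate such as 5 fits in a u6 field, needs no long immediate and is
   reported as cost 0, whereas a constant that only fits in a LIMM falls
   through to the CONST/LABEL_REF/SYMBOL_REF case and is charged
   COSTS_N_INSNS (1) for the extra fetch.  */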
5093 static bool
5094 arc_rtx_costs (rtx x, machine_mode mode, int outer_code,
5095 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
5097 int code = GET_CODE (x);
5099 switch (code)
5101 /* Small integers are as cheap as registers. */
5102 case CONST_INT:
5104 bool nolimm = false; /* Can we do without long immediate? */
5105 bool fast = false; /* Is the result available immediately? */
5106 bool condexec = false; /* Does this allow conditional execution? */
5107 bool compact = false; /* Is a 16 bit opcode available? */
5108 /* CONDEXEC also implies that we can have an unconditional
5109 3-address operation. */
5111 nolimm = compact = condexec = false;
5112 if (UNSIGNED_INT6 (INTVAL (x)))
5113 nolimm = condexec = compact = true;
5114 else
5116 if (SMALL_INT (INTVAL (x)))
5117 nolimm = fast = true;
5118 switch (outer_code)
5120 case AND: /* bclr, bmsk, ext[bw] */
5121 if (satisfies_constraint_Ccp (x) /* bclr */
5122 || satisfies_constraint_C1p (x) /* bmsk */)
5123 nolimm = fast = condexec = compact = true;
5124 break;
5125 case IOR: /* bset */
5126 if (satisfies_constraint_C0p (x)) /* bset */
5127 nolimm = fast = condexec = compact = true;
5128 break;
5129 case XOR:
5130 if (satisfies_constraint_C0p (x)) /* bxor */
5131 nolimm = fast = condexec = true;
5132 break;
5133 case SET:
5134 if (satisfies_constraint_Crr (x)) /* ror b,u6 */
5135 nolimm = true;
5136 default:
5137 break;
5140 /* FIXME: Add target options to attach a small cost if
5141 condexec / compact is not true. */
5142 if (nolimm)
5144 *total = 0;
5145 return true;
5148 /* FALLTHRU */
5150 /* 4 byte values can be fetched as immediate constants -
5151 let's give that the cost of an extra insn. */
5152 case CONST:
5153 case LABEL_REF:
5154 case SYMBOL_REF:
5155 *total = COSTS_N_INSNS (1);
5156 return true;
5158 case CONST_DOUBLE:
5160 rtx first, second;
5162 if (TARGET_DPFP)
5164 *total = COSTS_N_INSNS (1);
5165 return true;
5167 split_double (x, &first, &second);
5168 *total = COSTS_N_INSNS (!SMALL_INT (INTVAL (first))
5169 + !SMALL_INT (INTVAL (second)));
5170 return true;
5173 /* Encourage synth_mult to find a synthetic multiply when reasonable.
5174 If we need more than 12 insns to do a multiply, then go out-of-line,
5175 since the call overhead will be < 10% of the cost of the multiply. */
5176 case ASHIFT:
5177 case ASHIFTRT:
5178 case LSHIFTRT:
5179 if (TARGET_BARREL_SHIFTER)
5181 /* If we want to shift a constant, we need a LIMM. */
5182 /* ??? when the optimizers want to know if a constant should be
5183 hoisted, they ask for the cost of the constant. OUTER_CODE is
5184 insufficient context for shifts since we don't know which operand
5185 we are looking at. */
5186 if (CONSTANT_P (XEXP (x, 0)))
5188 *total += (COSTS_N_INSNS (2)
5189 + rtx_cost (XEXP (x, 1), mode, (enum rtx_code) code,
5190 0, speed));
5191 return true;
5193 *total = COSTS_N_INSNS (1);
5195 else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5196 *total = COSTS_N_INSNS (16);
5197 else
5199 *total = COSTS_N_INSNS (INTVAL (XEXP ((x), 1)));
5200 /* ??? want_to_gcse_p can throw negative shift counts at us,
5201 and then panics when it gets a negative cost as result.
5202 Seen for gcc.c-torture/compile/20020710-1.c -Os . */
5203 if (*total < 0)
5204 *total = 0;
5206 return false;
5208 case DIV:
5209 case UDIV:
5210 if (speed)
5211 *total = COSTS_N_INSNS(30);
5212 else
5213 *total = COSTS_N_INSNS(1);
5214 return false;
5216 case MULT:
5217 if ((TARGET_DPFP && GET_MODE (x) == DFmode))
5218 *total = COSTS_N_INSNS (1);
5219 else if (speed)
5220 *total= arc_multcost;
5221 /* We do not want synth_mult sequences when optimizing
5222 for size. */
5223 else if (TARGET_MUL64_SET || TARGET_ARC700_MPY)
5224 *total = COSTS_N_INSNS (1);
5225 else
5226 *total = COSTS_N_INSNS (2);
5227 return false;
5228 case PLUS:
5229 if ((GET_CODE (XEXP (x, 0)) == ASHIFT
5230 && _1_2_3_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
5231 || (GET_CODE (XEXP (x, 0)) == MULT
5232 && _2_4_8_operand (XEXP (XEXP (x, 0), 1), VOIDmode)))
5234 *total += (rtx_cost (XEXP (x, 1), mode, PLUS, 0, speed)
5235 + rtx_cost (XEXP (XEXP (x, 0), 0), mode, PLUS, 1, speed));
5236 return true;
5238 return false;
5239 case MINUS:
5240 if ((GET_CODE (XEXP (x, 1)) == ASHIFT
5241 && _1_2_3_operand (XEXP (XEXP (x, 1), 1), VOIDmode))
5242 || (GET_CODE (XEXP (x, 1)) == MULT
5243 && _2_4_8_operand (XEXP (XEXP (x, 1), 1), VOIDmode)))
5245 *total += (rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed)
5246 + rtx_cost (XEXP (XEXP (x, 1), 0), mode, PLUS, 1, speed));
5247 return true;
5249 return false;
5250 case COMPARE:
5252 rtx op0 = XEXP (x, 0);
5253 rtx op1 = XEXP (x, 1);
5255 if (GET_CODE (op0) == ZERO_EXTRACT && op1 == const0_rtx
5256 && XEXP (op0, 1) == const1_rtx)
5258 /* btst / bbit0 / bbit1:
5259 Small integers and registers are free; everything else can
5260 be put in a register. */
5261 mode = GET_MODE (XEXP (op0, 0));
5262 *total = (rtx_cost (XEXP (op0, 0), mode, SET, 1, speed)
5263 + rtx_cost (XEXP (op0, 2), mode, SET, 1, speed));
5264 return true;
5266 if (GET_CODE (op0) == AND && op1 == const0_rtx
5267 && satisfies_constraint_C1p (XEXP (op0, 1)))
5269 /* bmsk.f */
5270 *total = rtx_cost (XEXP (op0, 0), VOIDmode, SET, 1, speed);
5271 return true;
5273 /* add.f */
5274 if (GET_CODE (op1) == NEG)
5276 /* op0 might be constant, the inside of op1 is rather
5277 unlikely to be so. So swapping the operands might lower
5278 the cost. */
5279 mode = GET_MODE (op0);
5280 *total = (rtx_cost (op0, mode, PLUS, 1, speed)
5281 + rtx_cost (XEXP (op1, 0), mode, PLUS, 0, speed));
5283 return false;
5285 case EQ: case NE:
5286 if (outer_code == IF_THEN_ELSE
5287 && GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
5288 && XEXP (x, 1) == const0_rtx
5289 && XEXP (XEXP (x, 0), 1) == const1_rtx)
5291 /* btst / bbit0 / bbit1:
5292 Small integers and registers are free; everything else can
5293 be put in a register. */
5294 rtx op0 = XEXP (x, 0);
5296 mode = GET_MODE (XEXP (op0, 0));
5297 *total = (rtx_cost (XEXP (op0, 0), mode, SET, 1, speed)
5298 + rtx_cost (XEXP (op0, 2), mode, SET, 1, speed));
5299 return true;
5301 /* Fall through. */
5302 /* scc_insn expands into two insns. */
5303 case GTU: case GEU: case LEU:
5304 if (mode == SImode)
5305 *total += COSTS_N_INSNS (1);
5306 return false;
5307 case LTU: /* might use adc. */
5308 if (mode == SImode)
5309 *total += COSTS_N_INSNS (1) - 1;
5310 return false;
5311 default:
5312 return false;
5316 /* Return true if ADDR is a valid pic address.
5317 A valid pic address on arc should look like
5318 const (unspec (SYMBOL_REF/LABEL) (ARC_UNSPEC_GOTOFF/ARC_UNSPEC_GOT)) */
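/* For instance (illustrative RTL, symbol name hypothetical), both

     (const (unspec [(symbol_ref "foo")] ARC_UNSPEC_GOT))

   and the same unspec wrapped as (const (plus (unspec ...) (const_int 4)))
   pass the checks below; any other shape is rejected.  */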
5320 bool
5321 arc_legitimate_pic_addr_p (rtx addr)
5323 if (GET_CODE (addr) != CONST)
5324 return false;
5326 addr = XEXP (addr, 0);
5329 if (GET_CODE (addr) == PLUS)
5331 if (GET_CODE (XEXP (addr, 1)) != CONST_INT)
5332 return false;
5333 addr = XEXP (addr, 0);
5336 if (GET_CODE (addr) != UNSPEC
5337 || XVECLEN (addr, 0) != 1)
5338 return false;
5340 /* Must be one of @GOT, @GOTOFF, @GOTOFFPC, @tlsgd, @tlsie. */
5341 if (XINT (addr, 1) != ARC_UNSPEC_GOT
5342 && XINT (addr, 1) != ARC_UNSPEC_GOTOFF
5343 && XINT (addr, 1) != ARC_UNSPEC_GOTOFFPC
5344 && XINT (addr, 1) != UNSPEC_TLS_GD
5345 && XINT (addr, 1) != UNSPEC_TLS_IE)
5346 return false;
5348 if (GET_CODE (XVECEXP (addr, 0, 0)) != SYMBOL_REF
5349 && GET_CODE (XVECEXP (addr, 0, 0)) != LABEL_REF)
5350 return false;
5352 return true;
5357 /* Return true if OP contains a symbol reference. */
5359 static bool
5360 symbolic_reference_mentioned_p (rtx op)
5362 register const char *fmt;
5363 register int i;
5365 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5366 return true;
5368 fmt = GET_RTX_FORMAT (GET_CODE (op));
5369 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5371 if (fmt[i] == 'E')
5373 register int j;
5375 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5376 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5377 return true;
5380 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5381 return true;
5384 return false;
5387 /* Return true if OP contains a SYMBOL_REF that is not wrapped in an unspec.
5388 If SKIP_LOCAL is true, skip symbols that bind locally.
5389 This is used further down in this file, and, without SKIP_LOCAL,
5390 in the addsi3 / subsi3 expanders when generating PIC code. */
5392 bool
5393 arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local)
5395 register const char *fmt;
5396 register int i;
5398 if (GET_CODE(op) == UNSPEC)
5399 return false;
5401 if (GET_CODE (op) == SYMBOL_REF)
5403 if (SYMBOL_REF_TLS_MODEL (op))
5404 return true;
5405 if (!flag_pic)
5406 return false;
5407 tree decl = SYMBOL_REF_DECL (op);
5408 return !skip_local || !decl || !default_binds_local_p (decl);
5411 fmt = GET_RTX_FORMAT (GET_CODE (op));
5412 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5414 if (fmt[i] == 'E')
5416 register int j;
5418 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5419 if (arc_raw_symbolic_reference_mentioned_p (XVECEXP (op, i, j),
5420 skip_local))
5421 return true;
5424 else if (fmt[i] == 'e'
5425 && arc_raw_symbolic_reference_mentioned_p (XEXP (op, i),
5426 skip_local))
5427 return true;
5430 return false;
5433 /* Get the thread pointer. */
5435 static rtx
5436 arc_get_tp (void)
5438 /* If arc_tp_regno has been set, we can use that hard register
5439 directly as a base register. */
5440 if (arc_tp_regno != -1)
5441 return gen_rtx_REG (Pmode, arc_tp_regno);
5443 /* Otherwise, call __read_tp. Copy the result to a pseudo to avoid
5444 conflicts with function arguments / results. */
5445 rtx reg = gen_reg_rtx (Pmode);
5446 emit_insn (gen_tls_load_tp_soft ());
5447 emit_move_insn (reg, gen_rtx_REG (Pmode, R0_REG));
5448 return reg;
5451 /* Helper to be used by TLS Global dynamic model. */
5453 static rtx
5454 arc_emit_call_tls_get_addr (rtx sym, int reloc, rtx eqv)
5456 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
5457 rtx call_fusage = NULL_RTX;
5459 start_sequence ();
5461 rtx x = arc_unspec_offset (sym, reloc);
5462 emit_move_insn (r0, x);
5463 use_reg (&call_fusage, r0);
5465 gcc_assert (reloc == UNSPEC_TLS_GD);
5466 rtx call_insn = emit_call_insn (gen_tls_gd_get_addr (sym));
5467 /* Should we set RTL_CONST_CALL_P? We read memory, but not in a
5468 way that the application should care about. */
5469 RTL_PURE_CALL_P (call_insn) = 1;
5470 add_function_usage_to (call_insn, call_fusage);
5472 rtx_insn *insns = get_insns ();
5473 end_sequence ();
5475 rtx dest = gen_reg_rtx (Pmode);
5476 emit_libcall_block (insns, dest, r0, eqv);
5477 return dest;
5480 #define DTPOFF_ZERO_SYM ".tdata"
5482 /* Return a legitimized address for ADDR,
5483 which is a SYMBOL_REF with tls_model MODEL. */
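/* E.g. for the local-exec model handled below, the returned address is the
   thread pointer plus an UNSPEC_TLS_OFF offset of the symbol; the other
   models go through the GOT and/or a call to the tls_gd_get_addr pattern,
   as implemented in the switch below.  */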
5485 static rtx
5486 arc_legitimize_tls_address (rtx addr, enum tls_model model)
5488 if (!flag_pic && model == TLS_MODEL_LOCAL_DYNAMIC)
5489 model = TLS_MODEL_LOCAL_EXEC;
5491 switch (model)
5493 case TLS_MODEL_LOCAL_DYNAMIC:
5494 rtx base;
5495 tree decl;
5496 const char *base_name;
5497 rtvec v;
5499 decl = SYMBOL_REF_DECL (addr);
5500 base_name = DTPOFF_ZERO_SYM;
5501 if (decl && bss_initializer_p (decl))
5502 base_name = ".tbss";
5504 base = gen_rtx_SYMBOL_REF (Pmode, base_name);
5505 if (strcmp (base_name, DTPOFF_ZERO_SYM) == 0)
5507 if (!flag_pic)
5508 goto local_exec;
5509 v = gen_rtvec (1, addr);
5511 else
5512 v = gen_rtvec (2, addr, base);
5513 addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_TLS_OFF);
5514 addr = gen_rtx_CONST (Pmode, addr);
5515 base = arc_legitimize_tls_address (base, TLS_MODEL_GLOBAL_DYNAMIC);
5516 return gen_rtx_PLUS (Pmode, force_reg (Pmode, base), addr);
5518 case TLS_MODEL_GLOBAL_DYNAMIC:
5519 return arc_emit_call_tls_get_addr (addr, UNSPEC_TLS_GD, addr);
5521 case TLS_MODEL_INITIAL_EXEC:
5522 addr = arc_unspec_offset (addr, UNSPEC_TLS_IE);
5523 addr = copy_to_mode_reg (Pmode, gen_const_mem (Pmode, addr));
5524 return gen_rtx_PLUS (Pmode, arc_get_tp (), addr);
5526 case TLS_MODEL_LOCAL_EXEC:
5527 local_exec:
5528 addr = arc_unspec_offset (addr, UNSPEC_TLS_OFF);
5529 return gen_rtx_PLUS (Pmode, arc_get_tp (), addr);
5530 default:
5531 gcc_unreachable ();
5535 /* Legitimize a pic address reference in ORIG.
5536 The return value is the legitimated address.
5537 If OLDX is non-zero, it is the target to assign the address to first. */
5539 static rtx
5540 arc_legitimize_pic_address (rtx orig, rtx oldx)
5542 rtx addr = orig;
5543 rtx pat = orig;
5544 rtx base;
5546 if (oldx == orig)
5547 oldx = NULL;
5549 if (GET_CODE (addr) == LABEL_REF)
5550 ; /* Do nothing. */
5551 else if (GET_CODE (addr) == SYMBOL_REF)
5553 enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
5554 if (model != 0)
5555 return arc_legitimize_tls_address (addr, model);
5556 else if (!flag_pic)
5557 return orig;
5558 else if (CONSTANT_POOL_ADDRESS_P (addr) || SYMBOL_REF_LOCAL_P (addr))
5559 return arc_unspec_offset (addr, ARC_UNSPEC_GOTOFFPC);
5561 /* This symbol must be referenced via a load from the Global
5562 Offset Table (@GOTPC). */
5563 pat = arc_unspec_offset (addr, ARC_UNSPEC_GOT);
5564 pat = gen_const_mem (Pmode, pat);
5566 if (oldx == NULL)
5567 oldx = gen_reg_rtx (Pmode);
5569 emit_move_insn (oldx, pat);
5570 pat = oldx;
5572 else
5574 if (GET_CODE (addr) == CONST)
5576 addr = XEXP (addr, 0);
5577 if (GET_CODE (addr) == UNSPEC)
5579 /* Check that the unspec is one of the ones we generate? */
5580 return orig;
5582 /* fwprop places constant pic unspec expressions in the
5583 REG_EQUIV notes. Then, loop may use these notes for
5584 optimizations, resulting in complex patterns that are not
5585 supported by the current implementation. The following
5586 two if-cases simplify the complex patterns into
5587 simpler ones. */
5588 else if (GET_CODE (addr) == MINUS)
5590 rtx op0 = XEXP (addr, 0);
5591 rtx op1 = XEXP (addr, 1);
5592 gcc_assert (oldx);
5593 gcc_assert (GET_CODE (op1) == UNSPEC);
5595 emit_move_insn (oldx,
5596 gen_rtx_CONST (SImode,
5597 arc_legitimize_pic_address (op1,
5598 NULL_RTX)));
5599 emit_insn (gen_rtx_SET (oldx, gen_rtx_MINUS (SImode, op0, oldx)));
5600 return oldx;
5603 else if (GET_CODE (addr) != PLUS)
5605 rtx tmp = XEXP (addr, 0);
5606 enum rtx_code code = GET_CODE (addr);
5608 /* It only works for UNARY operations. */
5609 gcc_assert (UNARY_P (addr));
5610 gcc_assert (GET_CODE (tmp) == UNSPEC);
5611 gcc_assert (oldx);
5613 emit_move_insn
5614 (oldx,
5615 gen_rtx_CONST (SImode,
5616 arc_legitimize_pic_address (tmp,
5617 NULL_RTX)));
5619 emit_insn (gen_rtx_SET (oldx,
5620 gen_rtx_fmt_ee (code, SImode,
5621 oldx, const0_rtx)));
5623 return oldx;
5625 else
5627 gcc_assert (GET_CODE (addr) == PLUS);
5628 if (GET_CODE (XEXP (addr, 0)) == UNSPEC)
5629 return orig;
5633 if (GET_CODE (addr) == PLUS)
5635 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5637 base = arc_legitimize_pic_address (op0, oldx);
5638 pat = arc_legitimize_pic_address (op1,
5639 base == oldx ? NULL_RTX : oldx);
5641 if (base == op0 && pat == op1)
5642 return orig;
5644 if (GET_CODE (pat) == CONST_INT)
5645 pat = plus_constant (Pmode, base, INTVAL (pat));
5646 else
5648 if (GET_CODE (pat) == PLUS && CONSTANT_P (XEXP (pat, 1)))
5650 base = gen_rtx_PLUS (Pmode, base, XEXP (pat, 0));
5651 pat = XEXP (pat, 1);
5653 pat = gen_rtx_PLUS (Pmode, base, pat);
5658 return pat;
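/* Illustrative sketch (editorial addition; the symbol names and the
   register are hypothetical): with -fPIC, a reference to a local or
   constant-pool symbol `lvar' is rewritten into the pc-relative
   @pcl form, while a preemptible global `gvar' is turned into a GOT
   load, roughly

       ld  rX, [pcl, gvar@gotpc]

   TLS symbols are dispatched to arc_legitimize_tls_address according
   to their TLS model before any of this applies.  */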
5661 /* Output address constant X to FILE, taking PIC into account. */
5663 static void
5664 arc_output_pic_addr_const (FILE * file, rtx x, int code)
5666 char buf[256];
5668 restart:
5669 switch (GET_CODE (x))
5671 case PC:
5672 if (flag_pic)
5673 putc ('.', file);
5674 else
5675 gcc_unreachable ();
5676 break;
5678 case SYMBOL_REF:
5679 output_addr_const (file, x);
5681 /* Local functions do not get references through the PLT. */
5682 if (code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5683 fputs ("@plt", file);
5684 break;
5686 case LABEL_REF:
5687 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
5688 assemble_name (file, buf);
5689 break;
5691 case CODE_LABEL:
5692 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5693 assemble_name (file, buf);
5694 break;
5696 case CONST_INT:
5697 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5698 break;
5700 case CONST:
5701 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5702 break;
5704 case CONST_DOUBLE:
5705 if (GET_MODE (x) == VOIDmode)
5707 /* We can use %d if the number is one word and positive. */
5708 if (CONST_DOUBLE_HIGH (x))
5709 fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
5710 CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
5711 else if (CONST_DOUBLE_LOW (x) < 0)
5712 fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
5713 else
5714 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5716 else
5717 /* We can't handle floating point constants;
5718 PRINT_OPERAND must handle them. */
5719 output_operand_lossage ("floating constant misused");
5720 break;
5722 case PLUS:
5723 /* FIXME: Not needed here. */
5724 /* Some assemblers need integer constants to appear last (eg masm). */
5725 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5727 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5728 fprintf (file, "+");
5729 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5731 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5733 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5734 if (INTVAL (XEXP (x, 1)) >= 0)
5735 fprintf (file, "+");
5736 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5738 else
5739 gcc_unreachable();
5740 break;
5742 case MINUS:
5743 /* Avoid outputting things like x-x or x+5-x,
5744 since some assemblers can't handle that. */
5745 x = simplify_subtraction (x);
5746 if (GET_CODE (x) != MINUS)
5747 goto restart;
5749 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5750 fprintf (file, "-");
5751 if (GET_CODE (XEXP (x, 1)) == CONST_INT
5752 && INTVAL (XEXP (x, 1)) < 0)
5754 fprintf (file, "(");
5755 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5756 fprintf (file, ")");
5758 else
5759 arc_output_pic_addr_const (file, XEXP (x, 1), code);
5760 break;
5762 case ZERO_EXTEND:
5763 case SIGN_EXTEND:
5764 arc_output_pic_addr_const (file, XEXP (x, 0), code);
5765 break;
5768 case UNSPEC:
5769 const char *suffix;
5770 bool pcrel; pcrel = false;
5771 rtx base; base = NULL;
5772 gcc_assert (XVECLEN (x, 0) >= 1);
5773 switch (XINT (x, 1))
5775 case ARC_UNSPEC_GOT:
5776 suffix = "@gotpc", pcrel = true;
5777 break;
5778 case ARC_UNSPEC_GOTOFF:
5779 suffix = "@gotoff";
5780 break;
5781 case ARC_UNSPEC_GOTOFFPC:
5782 suffix = "@pcl", pcrel = true;
5783 break;
5784 case ARC_UNSPEC_PLT:
5785 suffix = "@plt";
5786 break;
5787 case UNSPEC_TLS_GD:
5788 suffix = "@tlsgd", pcrel = true;
5789 break;
5790 case UNSPEC_TLS_IE:
5791 suffix = "@tlsie", pcrel = true;
5792 break;
5793 case UNSPEC_TLS_OFF:
5794 if (XVECLEN (x, 0) == 2)
5795 base = XVECEXP (x, 0, 1);
5796 if (SYMBOL_REF_TLS_MODEL (XVECEXP (x, 0, 0)) == TLS_MODEL_LOCAL_EXEC
5797 || (!flag_pic && !base))
5798 suffix = "@tpoff";
5799 else
5800 suffix = "@dtpoff";
5801 break;
5802 default:
5803 suffix = "@invalid";
5804 output_operand_lossage ("invalid UNSPEC as operand: %d", XINT (x,1));
5805 break;
5807 if (pcrel)
5808 fputs ("pcl,", file);
5809 arc_output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5810 fputs (suffix, file);
5811 if (base)
5812 arc_output_pic_addr_const (file, base, code);
5813 break;
5815 default:
5816 output_operand_lossage ("invalid expression as operand");
5820 #define SYMBOLIC_CONST(X) \
5821 (GET_CODE (X) == SYMBOL_REF \
5822 || GET_CODE (X) == LABEL_REF \
5823 || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
5825 /* Emit insns to move operands[1] into operands[0]. */
5827 static void
5828 prepare_pic_move (rtx *operands, machine_mode)
5830 if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1])
5831 && flag_pic)
5832 operands[1] = force_reg (Pmode, operands[1]);
5833 else
5835 rtx temp = (reload_in_progress ? operands[0]
5836 : flag_pic? gen_reg_rtx (Pmode) : NULL_RTX);
5837 operands[1] = arc_legitimize_pic_address (operands[1], temp);
5842 /* This function returns the number of words, at the beginning of an
5843 argument, that must be put in registers.  The returned value must be
5844 zero for arguments that are passed entirely in registers or that
5845 are entirely pushed on the stack.
5847 On some machines, certain arguments must be passed partially in
5848 registers and partially in memory. On these machines, typically
5849 the first N words of arguments are passed in registers, and the
5850 rest on the stack. If a multi-word argument (a `double' or a
5851 structure) crosses that boundary, its first few words must be
5852 passed in registers and the rest must be pushed. This function
5853 tells the compiler when this occurs, and how many of the words
5854 should go in registers.
5856 `FUNCTION_ARG' for these arguments should return the first register
5857 to be used by the caller for this argument; likewise
5858 `FUNCTION_INCOMING_ARG', for the called function.
5860 The function is used to implement macro FUNCTION_ARG_PARTIAL_NREGS. */
5862 /* If REGNO is the least arg reg available then what is the total number of arg
5863 regs available. */
5864 #define GPR_REST_ARG_REGS(REGNO) \
5865 ((REGNO) <= MAX_ARC_PARM_REGS ? MAX_ARC_PARM_REGS - (REGNO) : 0 )
5867 /* ARC parm regs are contiguous, so the next arg reg is just REGNO + 1. */
5868 #define ARC_NEXT_ARG_REG(REGNO) ( (REGNO) + 1 )
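/* Illustrative sketch (editorial addition, assuming MAX_ARC_PARM_REGS
   is 8, i.e. r0-r7 carry arguments):
     GPR_REST_ARG_REGS (0) == 8   -- no argument registers used yet
     GPR_REST_ARG_REGS (6) == 2   -- only r6/r7 still free
     GPR_REST_ARG_REGS (9) == 0   -- argument registers exhausted  */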
5870 /* Implement TARGET_ARG_PARTIAL_BYTES. */
5872 static int
5873 arc_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
5874 tree type, bool named ATTRIBUTE_UNUSED)
5876 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5877 int bytes = (mode == BLKmode
5878 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode));
5879 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5880 int arg_num = *cum;
5881 int ret;
5883 arg_num = ROUND_ADVANCE_CUM (arg_num, mode, type);
5884 ret = GPR_REST_ARG_REGS (arg_num);
5886 /* ICEd at function.c:2361, and ret is copied to data->partial */
5887 ret = (ret >= words ? 0 : ret * UNITS_PER_WORD);
5889 return ret;
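/* Worked example for the function above (editorial sketch, assuming
   MAX_ARC_PARM_REGS == 8 and UNITS_PER_WORD == 4): a DImode argument
   whose first free register is r7 has words == 2 but only one
   register left, so 4 is returned -- the low word travels in r7 and
   the high word goes on the stack.  The same argument starting at r6
   returns 0, since both words fit in registers.  */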
5892 /* This function is used to control whether a function argument is
5893 passed in a register, and if so, which register.
5895 The arguments are CUM, of type CUMULATIVE_ARGS, which summarizes
5896 (in a way defined by INIT_CUMULATIVE_ARGS and FUNCTION_ARG_ADVANCE)
5897 all of the previous arguments so far passed in registers; MODE, the
5898 machine mode of the argument; TYPE, the data type of the argument
5899 as a tree node or 0 if that is not known (which happens for C
5900 support library functions); and NAMED, which is 1 for an ordinary
5901 argument and 0 for nameless arguments that correspond to `...' in
5902 the called function's prototype.
5904 The returned value should either be a `reg' RTX for the hard
5905 register in which to pass the argument, or zero to pass the
5906 argument on the stack.
5908 For machines like the Vax and 68000, where normally all arguments
5909 are pushed, zero suffices as a definition.
5911 The usual way to make the ANSI library `stdarg.h' work on a machine
5912 where some arguments are usually passed in registers, is to cause
5913 nameless arguments to be passed on the stack instead. This is done
5914 by making the function return 0 whenever NAMED is 0.
5916 You may use the macro `MUST_PASS_IN_STACK (MODE, TYPE)' in the
5917 definition of this function to determine if this argument is of a
5918 type that must be passed in the stack. If `REG_PARM_STACK_SPACE'
5919 is not defined and the function returns non-zero for such an
5920 argument, the compiler will abort. If `REG_PARM_STACK_SPACE' is
5921 defined, the argument will be computed in the stack and then loaded
5922 into a register.
5924 The function is used to implement macro FUNCTION_ARG. */
5925 /* On the ARC the first MAX_ARC_PARM_REGS args are normally in registers
5926 and the rest are pushed. */
5928 static rtx
5929 arc_function_arg (cumulative_args_t cum_v,
5930 machine_mode mode,
5931 const_tree type ATTRIBUTE_UNUSED,
5932 bool named ATTRIBUTE_UNUSED)
5934 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5935 int arg_num = *cum;
5936 rtx ret;
5937 const char *debstr ATTRIBUTE_UNUSED;
5939 arg_num = ROUND_ADVANCE_CUM (arg_num, mode, type);
5940 /* Return a marker for use in the call instruction. */
5941 if (mode == VOIDmode)
5943 ret = const0_rtx;
5944 debstr = "<0>";
5946 else if (GPR_REST_ARG_REGS (arg_num) > 0)
5948 ret = gen_rtx_REG (mode, arg_num);
5949 debstr = reg_names [arg_num];
5951 else
5953 ret = NULL_RTX;
5954 debstr = "memory";
5956 return ret;
5959 /* The function to update the summarizer variable *CUM to advance past
5960 an argument in the argument list. The values MODE, TYPE and NAMED
5961 describe that argument. Once this is done, the variable *CUM is
5962 suitable for analyzing the *following* argument with
5963 `FUNCTION_ARG', etc.
5965 This function need not do anything if the argument in question was
5966 passed on the stack. The compiler knows how to track the amount of
5967 stack space used for arguments without any special help.
5969 The function is used to implement macro FUNCTION_ARG_ADVANCE. */
5970 /* For the ARC: the cum set here is passed on to function_arg where we
5971 look at its value and say which reg to use. Strategy: advance the
5972 regnumber here till we run out of arg regs, then set *cum to last
5973 reg. In function_arg, since *cum > last arg reg we would return 0
5974 and thus the arg will end up on the stack. For straddling args of
5975 course function_arg_partial_nregs will come into play. */
5977 static void
5978 arc_function_arg_advance (cumulative_args_t cum_v,
5979 machine_mode mode,
5980 const_tree type,
5981 bool named ATTRIBUTE_UNUSED)
5983 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5984 int bytes = (mode == BLKmode
5985 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode));
5986 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5987 int i;
5989 if (words)
5990 *cum = ROUND_ADVANCE_CUM (*cum, mode, type);
5991 for (i = 0; i < words; i++)
5992 *cum = ARC_NEXT_ARG_REG (*cum);
5996 /* Define how to find the value returned by a function.
5997 VALTYPE is the data type of the value (as a tree).
5998 If the precise function being called is known, FN_DECL_OR_TYPE is its
5999 FUNCTION_DECL; otherwise, FN_DECL_OR_TYPE is its type. */
6001 static rtx
6002 arc_function_value (const_tree valtype,
6003 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6004 bool outgoing ATTRIBUTE_UNUSED)
6006 machine_mode mode = TYPE_MODE (valtype);
6007 int unsignedp ATTRIBUTE_UNUSED;
6009 unsignedp = TYPE_UNSIGNED (valtype);
6010 if (INTEGRAL_TYPE_P (valtype) || TREE_CODE (valtype) == OFFSET_TYPE)
6011 PROMOTE_MODE (mode, unsignedp, valtype);
6012 return gen_rtx_REG (mode, 0);
6015 /* Returns the return address that is used by builtin_return_address. */
6018 arc_return_addr_rtx (int count, ATTRIBUTE_UNUSED rtx frame)
6020 if (count != 0)
6021 return const0_rtx;
6023 return get_hard_reg_initial_val (Pmode , RETURN_ADDR_REGNUM);
6026 /* Determine if a given RTX is a valid constant. We already know this
6027 satisfies CONSTANT_P. */
6029 bool
6030 arc_legitimate_constant_p (machine_mode mode, rtx x)
6032 switch (GET_CODE (x))
6034 case CONST:
6035 if (flag_pic)
6037 if (arc_legitimate_pic_addr_p (x))
6038 return true;
6040 return arc_legitimate_constant_p (mode, XEXP (x, 0));
6042 case SYMBOL_REF:
6043 if (SYMBOL_REF_TLS_MODEL (x))
6044 return false;
6045 /* Fall through. */
6046 case LABEL_REF:
6047 if (flag_pic)
6048 return false;
6049 /* Fall through. */
6050 case CONST_INT:
6051 case CONST_DOUBLE:
6052 return true;
6054 case NEG:
6055 return arc_legitimate_constant_p (mode, XEXP (x, 0));
6057 case PLUS:
6058 case MINUS:
6060 bool t1 = arc_legitimate_constant_p (mode, XEXP (x, 0));
6061 bool t2 = arc_legitimate_constant_p (mode, XEXP (x, 1));
6063 return (t1 && t2);
6066 case CONST_VECTOR:
6067 switch (mode)
6069 case E_V2HImode:
6070 return TARGET_PLUS_DMPY;
6071 case E_V2SImode:
6072 case E_V4HImode:
6073 return TARGET_PLUS_QMACW;
6074 default:
6075 return false;
6078 case UNSPEC:
6079 switch (XINT (x, 1))
6081 case UNSPEC_TLS_GD:
6082 case UNSPEC_TLS_OFF:
6083 case UNSPEC_TLS_IE:
6084 return true;
6085 default:
6086 /* Any other unspec ending up here is PIC-related, hence the
6087 constant PIC address check above has already returned false. */
6088 return false;
6090 /* Fall through. */
6092 default:
6093 fatal_insn ("unrecognized supposed constant", x);
6096 gcc_unreachable ();
6099 static bool
6100 arc_legitimate_address_p (machine_mode mode, rtx x, bool strict)
6102 if (RTX_OK_FOR_BASE_P (x, strict))
6103 return true;
6104 if (legitimate_offset_address_p (mode, x, TARGET_INDEXED_LOADS, strict))
6105 return true;
6106 if (legitimate_scaled_address_p (mode, x, strict))
6107 return true;
6108 if (LEGITIMATE_SMALL_DATA_ADDRESS_P (x))
6109 return true;
6110 if (GET_CODE (x) == CONST_INT && LARGE_INT (INTVAL (x)))
6111 return true;
6113 /* When compiling for size, avoid const (@sym + offset)
6114 addresses. */
6115 if (!flag_pic && optimize_size && !reload_completed
6116 && (GET_CODE (x) == CONST)
6117 && (GET_CODE (XEXP (x, 0)) == PLUS)
6118 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
6119 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) == 0
6120 && !SYMBOL_REF_FUNCTION_P (XEXP (XEXP (x, 0), 0)))
6122 rtx addend = XEXP (XEXP (x, 0), 1);
6123 gcc_assert (CONST_INT_P (addend));
6124 HOST_WIDE_INT offset = INTVAL (addend);
6126 /* Allow addresses with a large offset to pass; they will end up
6127 in a limm anyway. */
6128 return !(offset > -1024 && offset < 1020);
6131 if ((GET_MODE_SIZE (mode) != 16) && CONSTANT_P (x))
6133 return arc_legitimate_constant_p (mode, x);
6135 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == PRE_INC
6136 || GET_CODE (x) == POST_DEC || GET_CODE (x) == POST_INC)
6137 && RTX_OK_FOR_BASE_P (XEXP (x, 0), strict))
6138 return true;
6139 /* We're restricted here by the `st' insn. */
6140 if ((GET_CODE (x) == PRE_MODIFY || GET_CODE (x) == POST_MODIFY)
6141 && GET_CODE (XEXP ((x), 1)) == PLUS
6142 && rtx_equal_p (XEXP ((x), 0), XEXP (XEXP (x, 1), 0))
6143 && legitimate_offset_address_p (QImode, XEXP (x, 1),
6144 TARGET_AUTO_MODIFY_REG, strict))
6145 return true;
6146 return false;
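/* Illustrative sketch of addresses accepted above (editorial
   addition; register numbers are arbitrary examples): a plain base
   [r1], a base plus small offset [r1, 12], a scaled index when
   TARGET_INDEXED_LOADS permits it, a small-data access relative to
   the small-data base register, pre/post increment and modify forms
   when TARGET_AUTO_MODIFY_REG is enabled, and symbolic constants,
   the latter at the cost of a limm.  */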
6149 /* Return true iff ADDR (a legitimate address expression)
6150 has an effect that depends on the machine mode it is used for. */
6152 static bool
6153 arc_mode_dependent_address_p (const_rtx addr, addr_space_t)
6155 /* SYMBOL_REF is not mode dependent: it is either a small data reference,
6156 which is valid for loads and stores, or a limm offset, which is valid for
6157 loads. Scaled indices are scaled by the access mode. */
6158 if (GET_CODE (addr) == PLUS
6159 && GET_CODE (XEXP ((addr), 0)) == MULT)
6160 return true;
6161 return false;
6164 /* Determine if it's legal to put X into the constant pool. */
6166 static bool
6167 arc_cannot_force_const_mem (machine_mode mode, rtx x)
6169 return !arc_legitimate_constant_p (mode, x);
6172 /* IDs for all the ARC builtins. */
6174 enum arc_builtin_id
6176 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6177 ARC_BUILTIN_ ## NAME,
6178 #include "builtins.def"
6179 #undef DEF_BUILTIN
6181 ARC_BUILTIN_COUNT
6184 struct GTY(()) arc_builtin_description
6186 enum insn_code icode;
6187 int n_args;
6188 tree fndecl;
6191 static GTY(()) struct arc_builtin_description
6192 arc_bdesc[ARC_BUILTIN_COUNT] =
6194 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6195 { (enum insn_code) CODE_FOR_ ## ICODE, N_ARGS, NULL_TREE },
6196 #include "builtins.def"
6197 #undef DEF_BUILTIN
6200 /* Transform UP into lowercase and write the result to LO.
6201 You must provide enough space for LO. Return LO. */
6203 static char*
6204 arc_tolower (char *lo, const char *up)
6206 char *lo0 = lo;
6208 for (; *up; up++, lo++)
6209 *lo = TOLOWER (*up);
6211 *lo = '\0';
6213 return lo0;
6216 /* Implement `TARGET_BUILTIN_DECL'. */
6218 static tree
6219 arc_builtin_decl (unsigned id, bool initialize_p ATTRIBUTE_UNUSED)
6221 if (id < ARC_BUILTIN_COUNT)
6222 return arc_bdesc[id].fndecl;
6224 return error_mark_node;
6227 static void
6228 arc_init_builtins (void)
6230 tree V4HI_type_node;
6231 tree V2SI_type_node;
6232 tree V2HI_type_node;
6234 /* Vector types based on HS SIMD elements. */
6235 V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
6236 V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
6237 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
6239 tree pcvoid_type_node
6240 = build_pointer_type (build_qualified_type (void_type_node,
6241 TYPE_QUAL_CONST));
6242 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node,
6243 V8HImode);
6245 tree void_ftype_void
6246 = build_function_type_list (void_type_node, NULL_TREE);
6247 tree int_ftype_int
6248 = build_function_type_list (integer_type_node, integer_type_node,
6249 NULL_TREE);
6250 tree int_ftype_pcvoid_int
6251 = build_function_type_list (integer_type_node, pcvoid_type_node,
6252 integer_type_node, NULL_TREE);
6253 tree void_ftype_usint_usint
6254 = build_function_type_list (void_type_node, long_unsigned_type_node,
6255 long_unsigned_type_node, NULL_TREE);
6256 tree int_ftype_int_int
6257 = build_function_type_list (integer_type_node, integer_type_node,
6258 integer_type_node, NULL_TREE);
6259 tree usint_ftype_usint
6260 = build_function_type_list (long_unsigned_type_node,
6261 long_unsigned_type_node, NULL_TREE);
6262 tree void_ftype_usint
6263 = build_function_type_list (void_type_node, long_unsigned_type_node,
6264 NULL_TREE);
6265 tree int_ftype_void
6266 = build_function_type_list (integer_type_node, void_type_node,
6267 NULL_TREE);
6268 tree void_ftype_int
6269 = build_function_type_list (void_type_node, integer_type_node,
6270 NULL_TREE);
6271 tree int_ftype_short
6272 = build_function_type_list (integer_type_node, short_integer_type_node,
6273 NULL_TREE);
6275 /* Old ARC SIMD types. */
6276 tree v8hi_ftype_v8hi_v8hi
6277 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6278 V8HI_type_node, NULL_TREE);
6279 tree v8hi_ftype_v8hi_int
6280 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6281 integer_type_node, NULL_TREE);
6282 tree v8hi_ftype_v8hi_int_int
6283 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6284 integer_type_node, integer_type_node,
6285 NULL_TREE);
6286 tree void_ftype_v8hi_int_int
6287 = build_function_type_list (void_type_node, V8HI_type_node,
6288 integer_type_node, integer_type_node,
6289 NULL_TREE);
6290 tree void_ftype_v8hi_int_int_int
6291 = build_function_type_list (void_type_node, V8HI_type_node,
6292 integer_type_node, integer_type_node,
6293 integer_type_node, NULL_TREE);
6294 tree v8hi_ftype_int_int
6295 = build_function_type_list (V8HI_type_node, integer_type_node,
6296 integer_type_node, NULL_TREE);
6297 tree void_ftype_int_int
6298 = build_function_type_list (void_type_node, integer_type_node,
6299 integer_type_node, NULL_TREE);
6300 tree v8hi_ftype_v8hi
6301 = build_function_type_list (V8HI_type_node, V8HI_type_node,
6302 NULL_TREE);
6303 /* ARCv2 SIMD types. */
6304 tree long_ftype_v4hi_v4hi
6305 = build_function_type_list (long_long_integer_type_node,
6306 V4HI_type_node, V4HI_type_node, NULL_TREE);
6307 tree int_ftype_v2hi_v2hi
6308 = build_function_type_list (integer_type_node,
6309 V2HI_type_node, V2HI_type_node, NULL_TREE);
6310 tree v2si_ftype_v2hi_v2hi
6311 = build_function_type_list (V2SI_type_node,
6312 V2HI_type_node, V2HI_type_node, NULL_TREE);
6313 tree v2hi_ftype_v2hi_v2hi
6314 = build_function_type_list (V2HI_type_node,
6315 V2HI_type_node, V2HI_type_node, NULL_TREE);
6316 tree v2si_ftype_v2si_v2si
6317 = build_function_type_list (V2SI_type_node,
6318 V2SI_type_node, V2SI_type_node, NULL_TREE);
6319 tree v4hi_ftype_v4hi_v4hi
6320 = build_function_type_list (V4HI_type_node,
6321 V4HI_type_node, V4HI_type_node, NULL_TREE);
6322 tree long_ftype_v2si_v2hi
6323 = build_function_type_list (long_long_integer_type_node,
6324 V2SI_type_node, V2HI_type_node, NULL_TREE);
6326 /* Add the builtins. */
6327 #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, MASK) \
6329 int id = ARC_BUILTIN_ ## NAME; \
6330 const char *Name = "__builtin_arc_" #NAME; \
6331 char *name = (char*) alloca (1 + strlen (Name)); \
6333 gcc_assert (id < ARC_BUILTIN_COUNT); \
6334 if (MASK) \
6335 arc_bdesc[id].fndecl \
6336 = add_builtin_function (arc_tolower(name, Name), TYPE, id, \
6337 BUILT_IN_MD, NULL, NULL_TREE); \
6339 #include "builtins.def"
6340 #undef DEF_BUILTIN
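/* Illustrative sketch of the expansion above (editorial addition;
   the exact entry shown is hypothetical, see builtins.def for the
   real ones):

     DEF_BUILTIN (NOP, 0, void_ftype_void, nopv, 1)

   would register __builtin_arc_nop with a void->void signature and
   record its decl in arc_bdesc[ARC_BUILTIN_NOP] for later lookup by
   arc_builtin_decl and arc_expand_builtin.  */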
6343 /* Helper to expand __builtin_arc_aligned (void* val, int
6344 alignval). */
6346 static rtx
6347 arc_expand_builtin_aligned (tree exp)
6349 tree arg0 = CALL_EXPR_ARG (exp, 0);
6350 tree arg1 = CALL_EXPR_ARG (exp, 1);
6351 fold (arg1);
6352 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6353 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6355 if (!CONST_INT_P (op1))
6357 /* If we can't fold the alignment to a constant integer
6358 whilst optimizing, this is probably a user error. */
6359 if (optimize)
6360 warning (0, "__builtin_arc_aligned with non-constant alignment");
6362 else
6364 HOST_WIDE_INT alignTest = INTVAL (op1);
6365 /* Check alignTest is positive, and a power of two. */
6366 if (alignTest <= 0 || alignTest != (alignTest & -alignTest))
6368 error ("invalid alignment value for __builtin_arc_aligned");
6369 return NULL_RTX;
6372 if (CONST_INT_P (op0))
6374 HOST_WIDE_INT pnt = INTVAL (op0);
6376 if ((pnt & (alignTest - 1)) == 0)
6377 return const1_rtx;
6379 else
6381 unsigned align = get_pointer_alignment (arg0);
6382 unsigned numBits = alignTest * BITS_PER_UNIT;
6384 if (align && align >= numBits)
6385 return const1_rtx;
6386 /* Another attempt to ascertain alignment. Check the type
6387 we are pointing to. */
6388 if (POINTER_TYPE_P (TREE_TYPE (arg0))
6389 && TYPE_ALIGN (TREE_TYPE (TREE_TYPE (arg0))) >= numBits)
6390 return const1_rtx;
6394 /* Default to false. */
6395 return const0_rtx;
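/* Illustrative usage of the builtin expanded above (editorial
   sketch; fast_path and slow_path are hypothetical helpers):

     void f (int *p)
     {
       // The builtin expands to 1 when p can be proven to be 4-byte
       // aligned at compile time, and to 0 otherwise.
       if (__builtin_arc_aligned (p, 4))
         fast_path (p);
       else
         slow_path (p);
     }

   The alignment argument must fold to a positive power of two,
   otherwise an error is emitted.  */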
6398 /* Helper for arc_expand_builtin; generates a pattern for the given icode
6399 and arguments. */
6401 static rtx_insn *
6402 apply_GEN_FCN (enum insn_code icode, rtx *arg)
6404 switch (insn_data[icode].n_generator_args)
6406 case 0:
6407 return GEN_FCN (icode) ();
6408 case 1:
6409 return GEN_FCN (icode) (arg[0]);
6410 case 2:
6411 return GEN_FCN (icode) (arg[0], arg[1]);
6412 case 3:
6413 return GEN_FCN (icode) (arg[0], arg[1], arg[2]);
6414 case 4:
6415 return GEN_FCN (icode) (arg[0], arg[1], arg[2], arg[3]);
6416 case 5:
6417 return GEN_FCN (icode) (arg[0], arg[1], arg[2], arg[3], arg[4]);
6418 default:
6419 gcc_unreachable ();
6423 /* Expand an expression EXP that calls a built-in function,
6424 with result going to TARGET if that's convenient
6425 (and in mode MODE if that's convenient).
6426 SUBTARGET may be used as the target for computing one of EXP's operands.
6427 IGNORE is nonzero if the value is to be ignored. */
6429 static rtx
6430 arc_expand_builtin (tree exp,
6431 rtx target,
6432 rtx subtarget ATTRIBUTE_UNUSED,
6433 machine_mode mode ATTRIBUTE_UNUSED,
6434 int ignore ATTRIBUTE_UNUSED)
6436 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6437 unsigned int id = DECL_FUNCTION_CODE (fndecl);
6438 const struct arc_builtin_description *d = &arc_bdesc[id];
6439 int i, j, n_args = call_expr_nargs (exp);
6440 rtx pat = NULL_RTX;
6441 rtx xop[5];
6442 enum insn_code icode = d->icode;
6443 machine_mode tmode = insn_data[icode].operand[0].mode;
6444 int nonvoid;
6445 tree arg0;
6446 tree arg1;
6447 tree arg2;
6448 tree arg3;
6449 rtx op0;
6450 rtx op1;
6451 rtx op2;
6452 rtx op3;
6453 rtx op4;
6454 machine_mode mode0;
6455 machine_mode mode1;
6456 machine_mode mode2;
6457 machine_mode mode3;
6458 machine_mode mode4;
6460 if (id >= ARC_BUILTIN_COUNT)
6461 internal_error ("bad builtin fcode");
6463 /* 1st part: Expand special builtins. */
6464 switch (id)
6466 case ARC_BUILTIN_NOP:
6467 emit_insn (gen_nopv ());
6468 return NULL_RTX;
6470 case ARC_BUILTIN_RTIE:
6471 case ARC_BUILTIN_SYNC:
6472 case ARC_BUILTIN_BRK:
6473 case ARC_BUILTIN_SWI:
6474 case ARC_BUILTIN_UNIMP_S:
6475 gcc_assert (icode != 0);
6476 emit_insn (GEN_FCN (icode) (const1_rtx));
6477 return NULL_RTX;
6479 case ARC_BUILTIN_ALIGNED:
6480 return arc_expand_builtin_aligned (exp);
6482 case ARC_BUILTIN_CLRI:
6483 target = gen_reg_rtx (SImode);
6484 emit_insn (gen_clri (target, const1_rtx));
6485 return target;
6487 case ARC_BUILTIN_TRAP_S:
6488 case ARC_BUILTIN_SLEEP:
6489 arg0 = CALL_EXPR_ARG (exp, 0);
6490 fold (arg0);
6491 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6493 if (!CONST_INT_P (op0) || !satisfies_constraint_L (op0))
6495 error ("builtin operand should be an unsigned 6-bit value");
6496 return NULL_RTX;
6498 gcc_assert (icode != 0);
6499 emit_insn (GEN_FCN (icode) (op0));
6500 return NULL_RTX;
6502 case ARC_BUILTIN_VDORUN:
6503 case ARC_BUILTIN_VDIRUN:
6504 arg0 = CALL_EXPR_ARG (exp, 0);
6505 arg1 = CALL_EXPR_ARG (exp, 1);
6506 op0 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6507 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6509 target = gen_rtx_REG (SImode, (id == ARC_BUILTIN_VDIRUN) ? 131 : 139);
6511 mode0 = insn_data[icode].operand[1].mode;
6512 mode1 = insn_data[icode].operand[2].mode;
6514 if (!insn_data[icode].operand[1].predicate (op0, mode0))
6515 op0 = copy_to_mode_reg (mode0, op0);
6517 if (!insn_data[icode].operand[2].predicate (op1, mode1))
6518 op1 = copy_to_mode_reg (mode1, op1);
6520 pat = GEN_FCN (icode) (target, op0, op1);
6521 if (!pat)
6522 return NULL_RTX;
6524 emit_insn (pat);
6525 return NULL_RTX;
6527 case ARC_BUILTIN_VDIWR:
6528 case ARC_BUILTIN_VDOWR:
6529 arg0 = CALL_EXPR_ARG (exp, 0);
6530 arg1 = CALL_EXPR_ARG (exp, 1);
6531 op0 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6532 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6534 if (!CONST_INT_P (op0)
6535 || !(UNSIGNED_INT3 (INTVAL (op0))))
6536 error ("operand 1 should be an unsigned 3-bit immediate");
6538 mode1 = insn_data[icode].operand[1].mode;
6540 if (icode == CODE_FOR_vdiwr_insn)
6541 target = gen_rtx_REG (SImode,
6542 ARC_FIRST_SIMD_DMA_CONFIG_IN_REG + INTVAL (op0));
6543 else if (icode == CODE_FOR_vdowr_insn)
6544 target = gen_rtx_REG (SImode,
6545 ARC_FIRST_SIMD_DMA_CONFIG_OUT_REG + INTVAL (op0));
6546 else
6547 gcc_unreachable ();
6549 if (!insn_data[icode].operand[2].predicate (op1, mode1))
6550 op1 = copy_to_mode_reg (mode1, op1);
6552 pat = GEN_FCN (icode) (target, op1);
6553 if (!pat)
6554 return NULL_RTX;
6556 emit_insn (pat);
6557 return NULL_RTX;
6559 case ARC_BUILTIN_VASRW:
6560 case ARC_BUILTIN_VSR8:
6561 case ARC_BUILTIN_VSR8AW:
6562 arg0 = CALL_EXPR_ARG (exp, 0);
6563 arg1 = CALL_EXPR_ARG (exp, 1);
6564 op0 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6565 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6566 op2 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6568 target = gen_reg_rtx (V8HImode);
6569 mode0 = insn_data[icode].operand[1].mode;
6570 mode1 = insn_data[icode].operand[2].mode;
6572 if (!insn_data[icode].operand[1].predicate (op0, mode0))
6573 op0 = copy_to_mode_reg (mode0, op0);
6575 if ((!insn_data[icode].operand[2].predicate (op1, mode1))
6576 || !(UNSIGNED_INT3 (INTVAL (op1))))
6577 error ("operand 2 should be an unsigned 3-bit value (I0-I7)");
6579 pat = GEN_FCN (icode) (target, op0, op1, op2);
6580 if (!pat)
6581 return NULL_RTX;
6583 emit_insn (pat);
6584 return target;
6586 case ARC_BUILTIN_VLD32WH:
6587 case ARC_BUILTIN_VLD32WL:
6588 case ARC_BUILTIN_VLD64:
6589 case ARC_BUILTIN_VLD32:
6590 rtx src_vreg;
6591 icode = d->icode;
6592 arg0 = CALL_EXPR_ARG (exp, 0); /* source vreg. */
6593 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6594 arg2 = CALL_EXPR_ARG (exp, 2); /* u8. */
6596 src_vreg = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6597 op0 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6598 op1 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6599 op2 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6601 /* target <- src vreg. */
6602 emit_insn (gen_move_insn (target, src_vreg));
6604 /* target <- vec_concat: target, mem (Ib, u8). */
6605 mode0 = insn_data[icode].operand[3].mode;
6606 mode1 = insn_data[icode].operand[1].mode;
6608 if ((!insn_data[icode].operand[3].predicate (op0, mode0))
6609 || !(UNSIGNED_INT3 (INTVAL (op0))))
6610 error ("operand 1 should be an unsigned 3-bit value (I0-I7)");
6612 if ((!insn_data[icode].operand[1].predicate (op1, mode1))
6613 || !(UNSIGNED_INT8 (INTVAL (op1))))
6614 error ("operand 2 should be an unsigned 8-bit value");
6616 pat = GEN_FCN (icode) (target, op1, op2, op0);
6617 if (!pat)
6618 return NULL_RTX;
6620 emit_insn (pat);
6621 return target;
6623 case ARC_BUILTIN_VLD64W:
6624 case ARC_BUILTIN_VLD128:
6625 arg0 = CALL_EXPR_ARG (exp, 0); /* dest vreg. */
6626 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6628 op0 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6629 op1 = expand_expr (arg0, NULL_RTX, SImode, EXPAND_NORMAL);
6630 op2 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6632 /* target <- src vreg. */
6633 target = gen_reg_rtx (V8HImode);
6635 /* target <- vec_concat: target, mem (Ib, u8). */
6636 mode0 = insn_data[icode].operand[1].mode;
6637 mode1 = insn_data[icode].operand[2].mode;
6638 mode2 = insn_data[icode].operand[3].mode;
6640 if ((!insn_data[icode].operand[2].predicate (op1, mode1))
6641 || !(UNSIGNED_INT3 (INTVAL (op1))))
6642 error ("operand 1 should be an unsigned 3-bit value (I0-I7)");
6644 if ((!insn_data[icode].operand[3].predicate (op2, mode2))
6645 || !(UNSIGNED_INT8 (INTVAL (op2))))
6646 error ("operand 2 should be an unsigned 8-bit value");
6648 pat = GEN_FCN (icode) (target, op0, op1, op2);
6650 if (!pat)
6651 return NULL_RTX;
6653 emit_insn (pat);
6654 return target;
6656 case ARC_BUILTIN_VST128:
6657 case ARC_BUILTIN_VST64:
6658 arg0 = CALL_EXPR_ARG (exp, 0); /* src vreg. */
6659 arg1 = CALL_EXPR_ARG (exp, 1); /* [I]0-7. */
6660 arg2 = CALL_EXPR_ARG (exp, 2); /* u8. */
6662 op0 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6663 op1 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6664 op2 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6665 op3 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6667 mode0 = insn_data[icode].operand[0].mode;
6668 mode1 = insn_data[icode].operand[1].mode;
6669 mode2 = insn_data[icode].operand[2].mode;
6670 mode3 = insn_data[icode].operand[3].mode;
6672 if ((!insn_data[icode].operand[1].predicate (op1, mode1))
6673 || !(UNSIGNED_INT3 (INTVAL (op1))))
6674 error ("operand 2 should be an unsigned 3-bit value (I0-I7)");
6676 if ((!insn_data[icode].operand[2].predicate (op2, mode2))
6677 || !(UNSIGNED_INT8 (INTVAL (op2))))
6678 error ("operand 3 should be an unsigned 8-bit value");
6680 if (!insn_data[icode].operand[3].predicate (op3, mode3))
6681 op3 = copy_to_mode_reg (mode3, op3);
6683 pat = GEN_FCN (icode) (op0, op1, op2, op3);
6684 if (!pat)
6685 return NULL_RTX;
6687 emit_insn (pat);
6688 return NULL_RTX;
6690 case ARC_BUILTIN_VST16_N:
6691 case ARC_BUILTIN_VST32_N:
6692 arg0 = CALL_EXPR_ARG (exp, 0); /* source vreg. */
6693 arg1 = CALL_EXPR_ARG (exp, 1); /* u3. */
6694 arg2 = CALL_EXPR_ARG (exp, 2); /* [I]0-7. */
6695 arg3 = CALL_EXPR_ARG (exp, 3); /* u8. */
6697 op0 = expand_expr (arg3, NULL_RTX, SImode, EXPAND_NORMAL);
6698 op1 = gen_rtx_REG (V8HImode, ARC_FIRST_SIMD_VR_REG);
6699 op2 = expand_expr (arg2, NULL_RTX, SImode, EXPAND_NORMAL);
6700 op3 = expand_expr (arg0, NULL_RTX, V8HImode, EXPAND_NORMAL);
6701 op4 = expand_expr (arg1, NULL_RTX, SImode, EXPAND_NORMAL);
6703 mode0 = insn_data[icode].operand[0].mode;
6704 mode2 = insn_data[icode].operand[2].mode;
6705 mode3 = insn_data[icode].operand[3].mode;
6706 mode4 = insn_data[icode].operand[4].mode;
6708 /* Do some correctness checks for the operands. */
6709 if ((!insn_data[icode].operand[0].predicate (op0, mode0))
6710 || !(UNSIGNED_INT8 (INTVAL (op0))))
6711 error ("operand 4 should be an unsigned 8-bit value (0-255)");
6713 if ((!insn_data[icode].operand[2].predicate (op2, mode2))
6714 || !(UNSIGNED_INT3 (INTVAL (op2))))
6715 error ("operand 3 should be an unsigned 3-bit value (I0-I7)");
6717 if (!insn_data[icode].operand[3].predicate (op3, mode3))
6718 op3 = copy_to_mode_reg (mode3, op3);
6720 if ((!insn_data[icode].operand[4].predicate (op4, mode4))
6721 || !(UNSIGNED_INT3 (INTVAL (op4))))
6722 error ("operand 2 should be an unsigned 3-bit value (subreg 0-7)");
6723 else if (icode == CODE_FOR_vst32_n_insn
6724 && ((INTVAL (op4) % 2) != 0))
6725 error ("operand 2 should be an even 3-bit value (subreg 0,2,4,6)");
6727 pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
6728 if (!pat)
6729 return NULL_RTX;
6731 emit_insn (pat);
6732 return NULL_RTX;
6734 default:
6735 break;
6738 /* 2nd part: Expand regular builtins. */
6739 if (icode == 0)
6740 internal_error ("bad builtin fcode");
6742 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6743 j = 0;
6745 if (nonvoid)
6747 if (target == NULL_RTX
6748 || GET_MODE (target) != tmode
6749 || !insn_data[icode].operand[0].predicate (target, tmode))
6751 target = gen_reg_rtx (tmode);
6753 xop[j++] = target;
6756 gcc_assert (n_args <= 4);
6757 for (i = 0; i < n_args; i++, j++)
6759 tree arg = CALL_EXPR_ARG (exp, i);
6760 machine_mode mode = insn_data[icode].operand[j].mode;
6761 rtx op = expand_expr (arg, NULL_RTX, mode, EXPAND_NORMAL);
6762 machine_mode opmode = GET_MODE (op);
6763 char c = insn_data[icode].operand[j].constraint[0];
6765 /* SIMD extension requires exact immediate operand match. */
6766 if ((id > ARC_BUILTIN_SIMD_BEGIN)
6767 && (id < ARC_BUILTIN_SIMD_END)
6768 && (c != 'v')
6769 && (c != 'r'))
6771 if (!CONST_INT_P (op))
6772 error ("builtin requires an immediate for operand %d", j);
6773 switch (c)
6775 case 'L':
6776 if (!satisfies_constraint_L (op))
6777 error ("operand %d should be a 6 bit unsigned immediate", j);
6778 break;
6779 case 'P':
6780 if (!satisfies_constraint_P (op))
6781 error ("operand %d should be a 8 bit unsigned immediate", j);
6782 break;
6783 case 'K':
6784 if (!satisfies_constraint_K (op))
6785 error ("operand %d should be a 3 bit unsigned immediate", j);
6786 break;
6787 default:
6788 error ("unknown builtin immediate operand type for operand %d",
6793 if (CONST_INT_P (op))
6794 opmode = mode;
6796 if ((opmode == SImode) && (mode == HImode))
6798 opmode = HImode;
6799 op = gen_lowpart (HImode, op);
6802 /* In case the insn wants input operands in modes different from
6803 the result, abort. */
6804 gcc_assert (opmode == mode || opmode == VOIDmode);
6806 if (!insn_data[icode].operand[i + nonvoid].predicate (op, mode))
6807 op = copy_to_mode_reg (mode, op);
6809 xop[j] = op;
6812 pat = apply_GEN_FCN (icode, xop);
6813 if (pat == NULL_RTX)
6814 return NULL_RTX;
6816 emit_insn (pat);
6818 if (nonvoid)
6819 return target;
6820 else
6821 return const0_rtx;
6824 /* Returns true if operands[opno] is a valid compile-time constant to be
6825 used as a register number in the code for builtins.  Otherwise it flags
6826 an error and returns false. */
6828 bool
6829 check_if_valid_regno_const (rtx *operands, int opno)
6832 switch (GET_CODE (operands[opno]))
6834 case SYMBOL_REF :
6835 case CONST :
6836 case CONST_INT :
6837 return true;
6838 default:
6839 error ("register number must be a compile-time constant. Try giving higher optimization levels");
6840 break;
6842 return false;
6845 /* Check whether, after all the constant folding, the operand to
6846 __builtin_arc_sleep is an unsigned 6-bit integer.  If not, flag an error. */
6848 bool
6849 check_if_valid_sleep_operand (rtx *operands, int opno)
6851 switch (GET_CODE (operands[opno]))
6853 case CONST :
6854 case CONST_INT :
6855 if( UNSIGNED_INT6 (INTVAL (operands[opno])))
6856 return true;
6857 /* FALLTHRU */
6858 default:
6859 fatal_error (input_location,
6860 "operand for sleep instruction must be an unsigned 6 bit compile-time constant");
6861 break;
6863 return false;
6866 /* Return true if it is ok to make a tail-call to DECL. */
6868 static bool
6869 arc_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
6870 tree exp ATTRIBUTE_UNUSED)
6872 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
6873 if (ARC_INTERRUPT_P (arc_compute_function_type (cfun)))
6874 return false;
6876 /* Everything else is ok. */
6877 return true;
6880 /* Output code to add DELTA to the first argument, and then jump
6881 to FUNCTION. Used for C++ multiple inheritance. */
6883 static void
6884 arc_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
6885 HOST_WIDE_INT delta,
6886 HOST_WIDE_INT vcall_offset,
6887 tree function)
6889 int mi_delta = delta;
6890 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
6891 int shift = 0;
6892 int this_regno
6893 = aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ? 1 : 0;
6894 rtx fnaddr;
6896 if (mi_delta < 0)
6897 mi_delta = - mi_delta;
6899 /* Add DELTA. When possible use a plain add, otherwise load it into
6900 a register first. */
6902 while (mi_delta != 0)
6904 if ((mi_delta & (3 << shift)) == 0)
6905 shift += 2;
6906 else
6908 asm_fprintf (file, "\t%s\t%s, %s, %d\n",
6909 mi_op, reg_names[this_regno], reg_names[this_regno],
6910 mi_delta & (0xff << shift));
6911 mi_delta &= ~(0xff << shift);
6912 shift += 8;
6916 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6917 if (vcall_offset != 0)
6919 /* ld r12,[this] --> temp = *this
6920 add r12,r12,vcall_offset --> temp = *(*this + vcall_offset)
6921 ld r12,[r12]
6922 add this,this,r12 --> this += *(*this + vcall_offset) */
6923 asm_fprintf (file, "\tld\t%s, [%s]\n",
6924 ARC_TEMP_SCRATCH_REG, reg_names[this_regno]);
6925 asm_fprintf (file, "\tadd\t%s, %s, " HOST_WIDE_INT_PRINT_DEC "\n",
6926 ARC_TEMP_SCRATCH_REG, ARC_TEMP_SCRATCH_REG, vcall_offset);
6927 asm_fprintf (file, "\tld\t%s, [%s]\n",
6928 ARC_TEMP_SCRATCH_REG, ARC_TEMP_SCRATCH_REG);
6929 asm_fprintf (file, "\tadd\t%s, %s, %s\n", reg_names[this_regno],
6930 reg_names[this_regno], ARC_TEMP_SCRATCH_REG);
6933 fnaddr = XEXP (DECL_RTL (function), 0);
6935 if (arc_is_longcall_p (fnaddr))
6937 if (flag_pic)
6939 asm_fprintf (file, "\tld\t%s, [pcl, @",
6940 ARC_TEMP_SCRATCH_REG);
6941 assemble_name (file, XSTR (fnaddr, 0));
6942 fputs ("@gotpc]\n", file);
6943 asm_fprintf (file, "\tj\t[%s]", ARC_TEMP_SCRATCH_REG);
6945 else
6947 fputs ("\tj\t@", file);
6948 assemble_name (file, XSTR (fnaddr, 0));
6951 else
6953 fputs ("\tb\t@", file);
6954 assemble_name (file, XSTR (fnaddr, 0));
6955 if (flag_pic)
6956 fputs ("@plt\n", file);
6958 fputc ('\n', file);
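/* Illustrative sketch of the thunk emitted above (editorial
   addition; operand values are assumed, not taken from a real
   build): for DELTA == 4, VCALL_OFFSET == 0 and a non-PIC short call
   to `bar' the output is roughly

       add  r0, r0, 4    ; adjust the `this' pointer
       b    @bar         ; tail-jump to the real method

   A non-zero VCALL_OFFSET additionally emits the ld/add/ld/add
   sequence shown in the comment inside the function, and long or PIC
   calls go through ARC_TEMP_SCRATCH_REG instead of a plain branch.  */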
6961 /* Return true if a 32 bit "long_call" should be generated for
6962 this calling SYM_REF.  We generate a long_call if the function:
6964 a. has an __attribute__((long_call))
6965 or b. the -mlong-calls command line switch has been specified.
6967 However, we do not generate a long call if the function has an
6968 __attribute__ ((short_call)) or __attribute__ ((medium_call))
6970 attribute.  This function will be called by C fragments contained
6971 in the machine description file. */
6973 bool
6974 arc_is_longcall_p (rtx sym_ref)
6976 if (GET_CODE (sym_ref) != SYMBOL_REF)
6977 return false;
6979 return (SYMBOL_REF_LONG_CALL_P (sym_ref)
6980 || (TARGET_LONG_CALLS_SET
6981 && !SYMBOL_REF_SHORT_CALL_P (sym_ref)
6982 && !SYMBOL_REF_MEDIUM_CALL_P (sym_ref)));
6986 /* Likewise for short calls. */
6988 bool
6989 arc_is_shortcall_p (rtx sym_ref)
6991 if (GET_CODE (sym_ref) != SYMBOL_REF)
6992 return false;
6994 return (SYMBOL_REF_SHORT_CALL_P (sym_ref)
6995 || (!TARGET_LONG_CALLS_SET && !TARGET_MEDIUM_CALLS
6996 && !SYMBOL_REF_LONG_CALL_P (sym_ref)
6997 && !SYMBOL_REF_MEDIUM_CALL_P (sym_ref)));
7001 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7003 static bool
7004 arc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7006 if (AGGREGATE_TYPE_P (type) || TREE_ADDRESSABLE (type))
7007 return true;
7008 else
7010 HOST_WIDE_INT size = int_size_in_bytes (type);
7011 return (size == -1 || size > (TARGET_V2 ? 16 : 8));
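/* Illustrative consequence of the rule above (editorial sketch):
   aggregates and addressable types are always returned in memory,
   while a 16-byte scalar such as a complex double is returned in
   memory on ARC600/ARC700 (16 > 8) but in registers on ARCv2
   (16 <= 16).  */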
7016 /* This was in rtlanal.c, and can go in there when we decide we want
7017 to submit the change for inclusion in the GCC tree. */
7018 /* Like note_stores, but allow the callback to have side effects on the rtl
7019 (like the note_stores of yore):
7020 Call FUN on each register or MEM that is stored into or clobbered by X.
7021 (X would be the pattern of an insn). DATA is an arbitrary pointer,
7022 ignored by note_stores, but passed to FUN.
7023 FUN may alter parts of the RTL.
7025 FUN receives three arguments:
7026 1. the REG, MEM, CC0 or PC being stored in or clobbered,
7027 2. the SET or CLOBBER rtx that does the store,
7028 3. the pointer DATA provided to note_stores.
7030 If the item being stored in or clobbered is a SUBREG of a hard register,
7031 the SUBREG will be passed. */
7033 /* For now. */ static
7034 void
7035 walk_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
7037 int i;
7039 if (GET_CODE (x) == COND_EXEC)
7040 x = COND_EXEC_CODE (x);
7042 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
7044 rtx dest = SET_DEST (x);
7046 while ((GET_CODE (dest) == SUBREG
7047 && (!REG_P (SUBREG_REG (dest))
7048 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
7049 || GET_CODE (dest) == ZERO_EXTRACT
7050 || GET_CODE (dest) == STRICT_LOW_PART)
7051 dest = XEXP (dest, 0);
7053 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
7054 each of whose first operand is a register. */
7055 if (GET_CODE (dest) == PARALLEL)
7057 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
7058 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
7059 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
7061 else
7062 (*fun) (dest, x, data);
7065 else if (GET_CODE (x) == PARALLEL)
7066 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7067 walk_stores (XVECEXP (x, 0, i), fun, data);
7070 static bool
7071 arc_pass_by_reference (cumulative_args_t ca_v ATTRIBUTE_UNUSED,
7072 machine_mode mode ATTRIBUTE_UNUSED,
7073 const_tree type,
7074 bool named ATTRIBUTE_UNUSED)
7076 return (type != 0
7077 && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
7078 || TREE_ADDRESSABLE (type)));
7081 /* Implement TARGET_CAN_USE_DOLOOP_P. */
7083 static bool
7084 arc_can_use_doloop_p (const widest_int &,
7085 const widest_int &iterations_max,
7086 unsigned int loop_depth, bool entered_at_top)
7088 /* Considering limitations in the hardware, only use doloop
7089 for innermost loops which must be entered from the top. */
7090 if (loop_depth > 1 || !entered_at_top)
7091 return false;
7093 /* Check for lp_count width boundary. */
7094 if (arc_lpcwidth != 32
7095 && (wi::gtu_p (iterations_max, ((1 << arc_lpcwidth) - 1))
7096 || wi::eq_p (iterations_max, 0)))
7097 return false;
7098 return true;
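/* Illustrative sketch (editorial addition): with a 16-bit lp_count,
   i.e. arc_lpcwidth == 16, a candidate loop whose maximum iteration
   count exceeds 65535, or is known to be zero, is rejected here and
   falls back to an ordinary decrement-and-branch loop; nested loops
   and loops not entered at the top are rejected regardless of the
   counter width.  */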
7101 /* Return NULL if INSN is valid within a low-overhead loop.  Otherwise
7102 return a string describing why doloop cannot be applied. */
7104 static const char *
7105 arc_invalid_within_doloop (const rtx_insn *insn)
7107 if (CALL_P (insn))
7108 return "Function call in the loop.";
7110 /* FIXME! add here all the ZOL exceptions. */
7111 return NULL;
7114 /* Return true if a load instruction (CONSUMER) uses the same address as a
7115 store instruction (PRODUCER). This function is used to avoid st/ld
7116 address hazard in ARC700 cores. */
7117 bool
7118 arc_store_addr_hazard_p (rtx_insn* producer, rtx_insn* consumer)
7120 rtx in_set, out_set;
7121 rtx out_addr, in_addr;
7123 if (!producer)
7124 return false;
7126 if (!consumer)
7127 return false;
7129 /* Extract the addresses from the producer and the consumer. */
7130 out_set = single_set (producer);
7131 if (out_set)
7133 out_addr = SET_DEST (out_set);
7134 if (!out_addr)
7135 return false;
7136 if (GET_CODE (out_addr) == ZERO_EXTEND
7137 || GET_CODE (out_addr) == SIGN_EXTEND)
7138 out_addr = XEXP (out_addr, 0);
7140 if (!MEM_P (out_addr))
7141 return false;
7143 in_set = single_set (consumer);
7144 if (in_set)
7146 in_addr = SET_SRC (in_set);
7147 if (!in_addr)
7148 return false;
7149 if (GET_CODE (in_addr) == ZERO_EXTEND
7150 || GET_CODE (in_addr) == SIGN_EXTEND)
7151 in_addr = XEXP (in_addr, 0);
7153 if (!MEM_P (in_addr))
7154 return false;
7155 /* Get rid of the MEM and check if the addresses are
7156 equivalent. */
7157 in_addr = XEXP (in_addr, 0);
7158 out_addr = XEXP (out_addr, 0);
7160 return exp_equiv_p (in_addr, out_addr, 0, true);
7163 return false;
7166 /* Provides the same functionality as arc_hazard.  It is called in machine
7167 reorg before any other optimization; hence, the NOP size is taken
7168 into account when doing branch shortening. */
7170 static void
7171 workaround_arc_anomaly (void)
7173 rtx_insn *insn, *succ0;
7175 /* For any architecture: call arc_hazard here. */
7176 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7178 succ0 = next_real_insn (insn);
7179 if (arc_hazard (insn, succ0))
7181 emit_insn_before (gen_nopv (), succ0);
7185 if (TARGET_ARC700)
7187 rtx_insn *succ1;
7189 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7191 succ0 = next_real_insn (insn);
7192 if (arc_store_addr_hazard_p (insn, succ0))
7194 emit_insn_after (gen_nopv (), insn);
7195 emit_insn_after (gen_nopv (), insn);
7196 continue;
7199 /* Avoid adding nops if the instruction between the ST and LD is
7200 a call or jump. */
7201 succ1 = next_real_insn (succ0);
7202 if (succ0 && !JUMP_P (succ0) && !CALL_P (succ0)
7203 && arc_store_addr_hazard_p (insn, succ1))
7204 emit_insn_after (gen_nopv (), insn);
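/* Illustrative sketch of the ARC700 st/ld workaround above
   (editorial addition; registers are arbitrary examples): for a
   sequence such as

       st  r2, [r3]
       ld  r4, [r3]

   two nops are inserted after the store so that the load no longer
   issues right behind a store to the same address.  */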
7209 /* A callback for the hw-doloop pass. Called when a loop we have discovered
7210 turns out not to be optimizable; we have to split the loop_end pattern into
7211 a subtract and a test. */
7213 static void
7214 hwloop_fail (hwloop_info loop)
7216 rtx test;
7217 rtx insn = loop->loop_end;
7219 if (TARGET_V2
7220 && (loop->length && (loop->length <= ARC_MAX_LOOP_LENGTH))
7221 && REG_P (loop->iter_reg))
7223 /* TARGET_V2 has dbnz instructions. */
7224 test = gen_dbnz (loop->iter_reg, loop->start_label);
7225 insn = emit_jump_insn_before (test, loop->loop_end);
7227 else if (REG_P (loop->iter_reg) && (REGNO (loop->iter_reg) == LP_COUNT))
7229 /* We have the lp_count as loop iterator, try to use it. */
7230 emit_insn_before (gen_loop_fail (), loop->loop_end);
7231 test = gen_rtx_NE (VOIDmode, gen_rtx_REG (CC_ZNmode, CC_REG),
7232 const0_rtx);
7233 test = gen_rtx_IF_THEN_ELSE (VOIDmode, test,
7234 gen_rtx_LABEL_REF (Pmode, loop->start_label),
7235 pc_rtx);
7236 insn = emit_jump_insn_before (gen_rtx_SET (pc_rtx, test),
7237 loop->loop_end);
7239 else
7241 emit_insn_before (gen_addsi3 (loop->iter_reg,
7242 loop->iter_reg,
7243 constm1_rtx),
7244 loop->loop_end);
7245 test = gen_rtx_NE (VOIDmode, loop->iter_reg, const0_rtx);
7246 insn = emit_jump_insn_before (gen_cbranchsi4 (test,
7247 loop->iter_reg,
7248 const0_rtx,
7249 loop->start_label),
7250 loop->loop_end);
7252 JUMP_LABEL (insn) = loop->start_label;
7253 LABEL_NUSES (loop->start_label)++;
7254 delete_insn (loop->loop_end);
7257 /* Optimize LOOP. */
7259 static bool
7260 hwloop_optimize (hwloop_info loop)
7262 int i;
7263 edge entry_edge;
7264 basic_block entry_bb, bb;
7265 rtx iter_reg, end_label;
7266 rtx_insn *insn, *seq, *entry_after, *last_insn;
7267 unsigned int length;
7268 bool need_fix = false;
7269 rtx lp_reg = gen_rtx_REG (SImode, LP_COUNT);
7271 if (loop->depth > 1)
7273 if (dump_file)
7274 fprintf (dump_file, ";; loop %d is not innermost\n",
7275 loop->loop_no);
7276 return false;
7279 if (!loop->incoming_dest)
7281 if (dump_file)
7282 fprintf (dump_file, ";; loop %d has more than one entry\n",
7283 loop->loop_no);
7284 return false;
7287 if (loop->incoming_dest != loop->head)
7289 if (dump_file)
7290 fprintf (dump_file, ";; loop %d is not entered from head\n",
7291 loop->loop_no);
7292 return false;
7295 if (loop->has_call || loop->has_asm)
7297 if (dump_file)
7298 fprintf (dump_file, ";; loop %d has invalid insn\n",
7299 loop->loop_no);
7300 return false;
7303 /* Scan all the blocks to make sure they don't use iter_reg. */
7304 if (loop->iter_reg_used || loop->iter_reg_used_outside)
7306 if (dump_file)
7307 fprintf (dump_file, ";; loop %d uses iterator\n",
7308 loop->loop_no);
7309 return false;
7312 /* Check if start_label appears before doloop_end. */
7313 length = 0;
7314 for (insn = loop->start_label;
7315 insn && insn != loop->loop_end;
7316 insn = NEXT_INSN (insn))
7317 length += NONDEBUG_INSN_P (insn) ? get_attr_length (insn) : 0;
7319 if (!insn)
7321 if (dump_file)
7322 fprintf (dump_file, ";; loop %d start_label not before loop_end\n",
7323 loop->loop_no);
7324 return false;
7327 loop->length = length;
7328 if (loop->length > ARC_MAX_LOOP_LENGTH)
7330 if (dump_file)
7331 fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
7332 return false;
7334 else if (!loop->length)
7336 if (dump_file)
7337 fprintf (dump_file, ";; loop %d is empty\n", loop->loop_no);
7338 return false;
7341 /* Check if we use a register or not. */
7342 if (!REG_P (loop->iter_reg))
7344 if (dump_file)
7345 fprintf (dump_file, ";; loop %d iterator is MEM\n",
7346 loop->loop_no);
7347 return false;
7350 /* Check if loop register is lpcount. */
7351 if (REG_P (loop->iter_reg) && (REGNO (loop->iter_reg)) != LP_COUNT)
7353 if (dump_file)
7354 fprintf (dump_file, ";; loop %d doesn't use lp_count as loop"
7355 " iterator\n",
7356 loop->loop_no);
7357 /* This loop doesn't use lp_count; check, though, whether we
7358 can fix that. */
7359 if (TEST_HARD_REG_BIT (loop->regs_set_in_loop, LP_COUNT)
7360 /* In rare cases we may have LP_COUNT live here. */
7361 || (loop->incoming_src
7362 && REGNO_REG_SET_P (df_get_live_out (loop->incoming_src),
7363 LP_COUNT)))
7364 return false;
7365 else
7366 need_fix = true;
7369 /* Check for a control-like instruction as the last instruction of a
7370 ZOL. */
7371 bb = loop->tail;
7372 last_insn = PREV_INSN (loop->loop_end);
7374 while (1)
7376 for (; last_insn != BB_HEAD (bb);
7377 last_insn = PREV_INSN (last_insn))
7378 if (NONDEBUG_INSN_P (last_insn))
7379 break;
7381 if (last_insn != BB_HEAD (bb))
7382 break;
7384 if (single_pred_p (bb)
7385 && single_pred_edge (bb)->flags & EDGE_FALLTHRU
7386 && single_pred (bb) != ENTRY_BLOCK_PTR_FOR_FN (cfun))
7388 bb = single_pred (bb);
7389 last_insn = BB_END (bb);
7390 continue;
7392 else
7394 last_insn = NULL;
7395 break;
7399 if (!last_insn)
7401 if (dump_file)
7402 fprintf (dump_file, ";; loop %d has no last instruction\n",
7403 loop->loop_no);
7404 return false;
7407 if ((TARGET_ARC600_FAMILY || TARGET_HS)
7408 && INSN_P (last_insn)
7409 && (JUMP_P (last_insn) || CALL_P (last_insn)
7410 || GET_CODE (PATTERN (last_insn)) == SEQUENCE
7411 /* At this stage we can have (insn (clobber (mem:BLK
7412 (reg)))) instructions, ignore them. */
7413 || (GET_CODE (PATTERN (last_insn)) != CLOBBER
7414 && (get_attr_type (last_insn) == TYPE_BRCC
7415 || get_attr_type (last_insn) == TYPE_BRCC_NO_DELAY_SLOT))))
7417 if (loop->length + 2 > ARC_MAX_LOOP_LENGTH)
7419 if (dump_file)
7420 fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
7421 return false;
7423 if (dump_file)
7424 fprintf (dump_file, ";; loop %d has a control-like last insn;"
7425 " add a nop\n",
7426 loop->loop_no);
7428 last_insn = emit_insn_after (gen_nopv (), last_insn);
7431 if (LABEL_P (last_insn))
7433 if (dump_file)
7434 fprintf (dump_file, ";; loop %d has a label as last insn;"
7435 "add a nop\n",
7436 loop->loop_no);
7437 last_insn = emit_insn_after (gen_nopv (), last_insn);
7440 /* SAVE_NOTE is used by the haifa scheduler.  However, we run after
7441 it, so we can reuse it to indicate that the last ZOL instruction
7442 cannot be part of a delay slot. */
7443 add_reg_note (last_insn, REG_SAVE_NOTE, GEN_INT (2));
7445 loop->last_insn = last_insn;
7447 /* Get the loop iteration register. */
7448 iter_reg = loop->iter_reg;
7450 gcc_assert (REG_P (iter_reg));
7452 entry_edge = NULL;
7454 FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
7455 if (entry_edge->flags & EDGE_FALLTHRU)
7456 break;
7458 if (entry_edge == NULL)
7460 if (dump_file)
7461 fprintf (dump_file, ";; loop %d has no fallthru edge jumping"
7462 "into the loop\n",
7463 loop->loop_no);
7464 return false;
7466 /* The loop is good. */
7467 end_label = gen_label_rtx ();
7468 loop->end_label = end_label;
7470 /* Place the zero_cost_loop_start instruction before the loop. */
7471 entry_bb = entry_edge->src;
7473 start_sequence ();
7475 if (need_fix)
7477 /* The loop uses an R-register, but lp_count is free, thus
7478 use lp_count. */
7479 emit_insn (gen_movsi (lp_reg, iter_reg));
7480 SET_HARD_REG_BIT (loop->regs_set_in_loop, LP_COUNT);
7481 iter_reg = lp_reg;
7482 if (dump_file)
7484 fprintf (dump_file, ";; fix loop %d to use lp_count\n",
7485 loop->loop_no);
7489 insn = emit_insn (gen_arc_lp (iter_reg,
7490 loop->start_label,
7491 loop->end_label));
7493 seq = get_insns ();
7494 end_sequence ();
7496 entry_after = BB_END (entry_bb);
7497 if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1
7498 || !entry_after)
7500 basic_block new_bb;
7501 edge e;
7502 edge_iterator ei;
7504 emit_insn_before (seq, BB_HEAD (loop->head));
7505 seq = emit_label_before (gen_label_rtx (), seq);
7506 new_bb = create_basic_block (seq, insn, entry_bb);
7507 FOR_EACH_EDGE (e, ei, loop->incoming)
7509 if (!(e->flags & EDGE_FALLTHRU))
7510 redirect_edge_and_branch_force (e, new_bb);
7511 else
7512 redirect_edge_succ (e, new_bb);
7515 make_edge (new_bb, loop->head, 0);
7517 else
7519 #if 0
7520 while (DEBUG_INSN_P (entry_after)
7521 || (NOTE_P (entry_after)
7522 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK
7523 /* Make sure we don't split a call and its corresponding
7524 CALL_ARG_LOCATION note. */
7525 && NOTE_KIND (entry_after) != NOTE_INSN_CALL_ARG_LOCATION))
7526 entry_after = NEXT_INSN (entry_after);
7527 #endif
7528 entry_after = next_nonnote_nondebug_insn_bb (entry_after);
7530 gcc_assert (entry_after);
7531 emit_insn_before (seq, entry_after);
7534 delete_insn (loop->loop_end);
7535 /* Insert the loop end label right after the last instruction of the
7536 loop. */
7537 emit_label_after (end_label, loop->last_insn);
7538 /* Make sure we mark the beginning and end labels as used. */
7539 LABEL_NUSES (loop->end_label)++;
7540 LABEL_NUSES (loop->start_label)++;
7542 return true;
7545 /* A callback for the hw-doloop pass. This function examines INSN; if
7546 it is a loop_end pattern we recognize, return the reg rtx for the
7547 loop counter. Otherwise, return NULL_RTX. */
7549 static rtx
7550 hwloop_pattern_reg (rtx_insn *insn)
7552 rtx reg;
7554 if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
7555 return NULL_RTX;
7557 reg = SET_DEST (XVECEXP (PATTERN (insn), 0, 1));
7558 if (!REG_P (reg))
7559 return NULL_RTX;
7560 return reg;
7563 static struct hw_doloop_hooks arc_doloop_hooks =
7565 hwloop_pattern_reg,
7566 hwloop_optimize,
7567 hwloop_fail
7570 /* Run from machine_dependent_reorg, this pass looks for doloop_end insns
7571 and tries to rewrite the RTL of these loops so that proper ARC
7572 hardware loops are generated. */
7574 static void
7575 arc_reorg_loops (void)
7577 reorg_loops (true, &arc_doloop_hooks);
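/* Illustrative sketch of the transformation performed via
   hwloop_optimize above (editorial addition; labels and the count
   are hypothetical): a counted loop becomes a zero-overhead loop,
   conceptually

       mov  lp_count, 100     ; iteration count
       lp   @end              ; hardware loop up to @end
     @start:
       ...                    ; loop body, no calls or asm
     @end:

   while loops that cannot be converted are rewritten by hwloop_fail
   into an explicit decrement-and-branch (dbnz on ARCv2).  */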
7580 static int arc_reorg_in_progress = 0;
7582 /* ARC's machine-specific reorg function. */
7584 static void
7585 arc_reorg (void)
7587 rtx_insn *insn;
7588 rtx pattern;
7589 rtx pc_target;
7590 long offset;
7591 int changed;
7593 cfun->machine->arc_reorg_started = 1;
7594 arc_reorg_in_progress = 1;
7596 compute_bb_for_insn ();
7598 df_analyze ();
7600 /* Doloop optimization. */
7601 arc_reorg_loops ();
7603 workaround_arc_anomaly ();
7605 /* FIXME: should anticipate ccfsm action, generate special patterns for
7606 to-be-deleted branches that have no delay slot and have at least the
7607 length of the size increase forced on other insns that are conditionalized.
7608 This can also have an insn_list inside that enumerates insns which are
7609 not actually conditionalized because the destinations are dead in the
7610 not-execute case.
7611 Could also tag branches that we want to be unaligned if they get no delay
7612 slot, or even ones that we don't want to do delay slot scheduling for
7613 because we can unalign them.
7615 However, there are cases when conditional execution is only possible after
7616 delay slot scheduling:
7618 - If a delay slot is filled with a nocond/set insn from above, the previous
7619 basic block can become eligible for conditional execution.
7620 - If a delay slot is filled with a nocond insn from the fall-through path,
7621 the branch with that delay slot can become eligible for conditional
7622 execution (however, with the same sort of data flow analysis that dbr
7623 does, we could have figured out before that we don't need to
7624 conditionalize this insn.)
7625 - If a delay slot insn is filled with an insn from the target, the
7626 target label gets its uses decremented (even deleted if falling to zero),
7627 thus possibly creating more condexec opportunities there.
7628 Therefore, we should still be prepared to apply condexec optimization on
7629 non-prepared branches if the size increase of conditionalized insns is no
7630 more than the size saved from eliminating the branch. An invocation option
7631 could also be used to reserve a bit of extra size for condbranches so that
7632 this'll work more often (could also test in arc_reorg if the block is
7633 'close enough' to be eligible for condexec to make this likely, and
7634 estimate required size increase). */
7635 /* Generate BRcc insns, by combining cmp and Bcc insns wherever possible. */
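/* Illustrative sketch (made-up registers and label): a compare whose only
   use is the following conditional branch, e.g.
     cmp   r0,r1
     bne   .L2
   is replaced by a single compare-and-branch insn
     brne  r0,r1,.L2
   when the target is within the BRcc displacement range.  */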
7636 if (TARGET_NO_BRCC_SET)
7637 return;
7641 init_insn_lengths();
7642 changed = 0;
7644 if (optimize > 1 && !TARGET_NO_COND_EXEC)
7646 arc_ifcvt ();
7647 unsigned int flags = pass_data_arc_ifcvt.todo_flags_finish;
7648 df_finish_pass ((flags & TODO_df_verify) != 0);
7650 if (dump_file)
7652 fprintf (dump_file, ";; After if conversion:\n\n");
7653 print_rtl (dump_file, get_insns ());
7657 /* Call shorten_branches to calculate the insn lengths. */
7658 shorten_branches (get_insns());
7659 cfun->machine->ccfsm_current_insn = NULL_RTX;
7661 if (!INSN_ADDRESSES_SET_P())
7662 fatal_error (input_location, "Insn addresses not set after shorten_branches");
7664 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7666 rtx label;
7667 enum attr_type insn_type;
7669 /* If this is not a jump insn (or it is a casesi jump table), continue. */
7670 if (GET_CODE (insn) != JUMP_INSN
7671 || GET_CODE (PATTERN (insn)) == ADDR_VEC
7672 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
7673 continue;
7675 /* If we already have a brcc, note if it is suitable for brcc_s.
7676 Be a bit generous with the brcc_s range so that we can take
7677 advantage of any code shortening from delay slot scheduling. */
7678 if (recog_memoized (insn) == CODE_FOR_cbranchsi4_scratch)
7680 rtx pat = PATTERN (insn);
7681 rtx op = XEXP (SET_SRC (XVECEXP (pat, 0, 0)), 0);
7682 rtx *ccp = &XEXP (XVECEXP (pat, 0, 1), 0);
7684 offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
7685 if ((offset >= -140 && offset < 140)
7686 && rtx_equal_p (XEXP (op, 1), const0_rtx)
7687 && compact_register_operand (XEXP (op, 0), VOIDmode)
7688 && equality_comparison_operator (op, VOIDmode))
7689 PUT_MODE (*ccp, CC_Zmode);
7690 else if (GET_MODE (*ccp) == CC_Zmode)
7691 PUT_MODE (*ccp, CC_ZNmode);
7692 continue;
7694 if ((insn_type = get_attr_type (insn)) == TYPE_BRCC
7695 || insn_type == TYPE_BRCC_NO_DELAY_SLOT)
7696 continue;
7698 /* OK, so we have a jump insn. */
7699 /* We need to check that it is a bcc. */
7700 /* Bcc => set (pc) (if_then_else ) */
7701 pattern = PATTERN (insn);
7702 if (GET_CODE (pattern) != SET
7703 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
7704 || ANY_RETURN_P (XEXP (SET_SRC (pattern), 1)))
7705 continue;
7707 /* Now check if the jump is beyond the s9 range. */
7708 if (CROSSING_JUMP_P (insn))
7709 continue;
7710 offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
7712 if (offset > 253 || offset < -254)
7713 continue;
7715 pc_target = SET_SRC (pattern);
7717 /* Avoid FPU instructions. */
7718 if ((GET_MODE (XEXP (XEXP (pc_target, 0), 0)) == CC_FPUmode)
7719 || (GET_MODE (XEXP (XEXP (pc_target, 0), 0)) == CC_FPU_UNEQmode))
7720 continue;
7722 /* Now go back and search for the set cc insn. */
7724 label = XEXP (pc_target, 1);
7727 rtx pat;
7728 rtx_insn *scan, *link_insn = NULL;
7730 for (scan = PREV_INSN (insn);
7731 scan && GET_CODE (scan) != CODE_LABEL;
7732 scan = PREV_INSN (scan))
7734 if (! INSN_P (scan))
7735 continue;
7736 pat = PATTERN (scan);
7737 if (GET_CODE (pat) == SET
7738 && cc_register (SET_DEST (pat), VOIDmode))
7740 link_insn = scan;
7741 break;
7744 if (!link_insn)
7745 continue;
7746 else
7747 /* Check if this is a data dependency. */
7749 rtx op, cc_clob_rtx, op0, op1, brcc_insn, note;
7750 rtx cmp0, cmp1;
7752 /* OK, this is the set cc; copy args here. */
7753 op = XEXP (pc_target, 0);
7755 op0 = cmp0 = XEXP (SET_SRC (pat), 0);
7756 op1 = cmp1 = XEXP (SET_SRC (pat), 1);
7757 if (GET_CODE (op0) == ZERO_EXTRACT
7758 && XEXP (op0, 1) == const1_rtx
7759 && (GET_CODE (op) == EQ
7760 || GET_CODE (op) == NE))
7762 /* btst / b{eq,ne} -> bbit{0,1} */
7763 op0 = XEXP (cmp0, 0);
7764 op1 = XEXP (cmp0, 2);
7766 else if (!register_operand (op0, VOIDmode)
7767 || !general_operand (op1, VOIDmode))
7768 continue;
7769 /* Be careful not to break what cmpsfpx_raw is
7770 trying to create for checking equality of
7771 single-precision floats. */
7772 else if (TARGET_SPFP
7773 && GET_MODE (op0) == SFmode
7774 && GET_MODE (op1) == SFmode)
7775 continue;
7777 /* Neither of the two cmp operands should be set between the
7778 cmp and the branch. */
7779 if (reg_set_between_p (op0, link_insn, insn))
7780 continue;
7782 if (reg_set_between_p (op1, link_insn, insn))
7783 continue;
7785 /* Since the MODE check does not work, check that this is
7786 CC reg's last set location before insn, and also no
7787 instruction between the cmp and branch uses the
7788 condition codes. */
7789 if ((reg_set_between_p (SET_DEST (pat), link_insn, insn))
7790 || (reg_used_between_p (SET_DEST (pat), link_insn, insn)))
7791 continue;
7793 /* CC reg should be dead after insn. */
7794 if (!find_regno_note (insn, REG_DEAD, CC_REG))
7795 continue;
7797 op = gen_rtx_fmt_ee (GET_CODE (op),
7798 GET_MODE (op), cmp0, cmp1);
7799 /* If we create a LIMM where there was none before,
7800 we only benefit if we can avoid a scheduling bubble
7801 for the ARC600. Otherwise, we'd only forgo chances
7802 at short insn generation, and risk out-of-range
7803 branches. */
7804 if (!brcc_nolimm_operator (op, VOIDmode)
7805 && !long_immediate_operand (op1, VOIDmode)
7806 && (TARGET_ARC700
7807 || next_active_insn (link_insn) != insn))
7808 continue;
7810 /* Emit bbit / brcc (or brcc_s if possible).
7811 CC_Zmode indicates that brcc_s is possible. */
7813 if (op0 != cmp0)
7814 cc_clob_rtx = gen_rtx_REG (CC_ZNmode, CC_REG);
7815 else if ((offset >= -140 && offset < 140)
7816 && rtx_equal_p (op1, const0_rtx)
7817 && compact_register_operand (op0, VOIDmode)
7818 && (GET_CODE (op) == EQ
7819 || GET_CODE (op) == NE))
7820 cc_clob_rtx = gen_rtx_REG (CC_Zmode, CC_REG);
7821 else
7822 cc_clob_rtx = gen_rtx_REG (CCmode, CC_REG);
7824 brcc_insn
7825 = gen_rtx_IF_THEN_ELSE (VOIDmode, op, label, pc_rtx);
7826 brcc_insn = gen_rtx_SET (pc_rtx, brcc_insn);
7827 cc_clob_rtx = gen_rtx_CLOBBER (VOIDmode, cc_clob_rtx);
7828 brcc_insn
7829 = gen_rtx_PARALLEL
7830 (VOIDmode, gen_rtvec (2, brcc_insn, cc_clob_rtx));
7831 brcc_insn = emit_jump_insn_before (brcc_insn, insn);
7833 JUMP_LABEL (brcc_insn) = JUMP_LABEL (insn);
7834 note = find_reg_note (insn, REG_BR_PROB, 0);
7835 if (note)
7837 XEXP (note, 1) = REG_NOTES (brcc_insn);
7838 REG_NOTES (brcc_insn) = note;
7840 note = find_reg_note (link_insn, REG_DEAD, op0);
7841 if (note)
7843 remove_note (link_insn, note);
7844 XEXP (note, 1) = REG_NOTES (brcc_insn);
7845 REG_NOTES (brcc_insn) = note;
7847 note = find_reg_note (link_insn, REG_DEAD, op1);
7848 if (note)
7850 XEXP (note, 1) = REG_NOTES (brcc_insn);
7851 REG_NOTES (brcc_insn) = note;
7854 changed = 1;
7856 /* Delete the bcc insn. */
7857 set_insn_deleted (insn);
7859 /* Delete the cmp insn. */
7860 set_insn_deleted (link_insn);
7865 /* Clear out insn_addresses. */
7866 INSN_ADDRESSES_FREE ();
7868 } while (changed);
7870 if (INSN_ADDRESSES_SET_P())
7871 fatal_error (input_location, "insn addresses not freed");
7873 arc_reorg_in_progress = 0;
7876 /* Check if the operands are valid for BRcc.d generation.
7877 Valid BRcc.d patterns are
7878 BRcc.d b, c, s9
7879 BRcc.d b, u6, s9
7881 For cc={GT, LE, GTU, LEU}, u6=63 cannot be allowed,
7882 since it is encoded by the assembler as {GE, LT, HS, LS} 64, which
7883 does not have a delay slot.
7885 Assumed precondition: the second operand is either a register or a u6 value. */
7887 bool
7888 valid_brcc_with_delay_p (rtx *operands)
7890 if (optimize_size && GET_MODE (operands[4]) == CC_Zmode)
7891 return false;
7892 return brcc_nolimm_operator (operands[0], VOIDmode);
7895 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
7896 access DECL using gp-relative addressing. */
7898 static bool
7899 arc_in_small_data_p (const_tree decl)
7901 HOST_WIDE_INT size;
7903 /* Only variables go into the small data area. */
7904 if (TREE_CODE (decl) != VAR_DECL)
7905 return false;
7907 if (TARGET_NO_SDATA_SET)
7908 return false;
7910 /* Disable sdata references to weak variables. */
7911 if (DECL_WEAK (decl))
7912 return false;
7914 /* Don't put constants into the small data section: we want them to
7915 be in ROM rather than RAM. */
7916 if (TREE_READONLY (decl))
7917 return false;
7919 /* ld.di does not have a gp-relative variant, so keep volatile
7920 variables out of small data to ensure -mvolatile-cache works. */
7921 if (!TARGET_VOLATILE_CACHE_SET
7922 && TREE_THIS_VOLATILE (decl))
7923 return false;
7925 if (DECL_SECTION_NAME (decl) != 0)
7927 const char *name = DECL_SECTION_NAME (decl);
7928 if (strcmp (name, ".sdata") == 0
7929 || strcmp (name, ".sbss") == 0)
7930 return true;
7932 /* If it's not public, there's no need to put it in the small data
7933 section. */
7934 else if (TREE_PUBLIC (decl))
7936 size = int_size_in_bytes (TREE_TYPE (decl));
7937 return (size > 0 && size <= g_switch_value);
7939 return false;
7942 /* Return true if X is a small data address that can be rewritten
7943 as a gp+symref. */
7945 static bool
7946 arc_rewrite_small_data_p (const_rtx x)
7948 if (GET_CODE (x) == CONST)
7949 x = XEXP (x, 0);
7951 if (GET_CODE (x) == PLUS)
7953 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7954 x = XEXP (x, 0);
7957 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
7959 gcc_assert (SYMBOL_REF_TLS_MODEL (x) == 0);
7960 return true;
7962 return false;
7965 /* If possible, rewrite OP so that it refers to small data using
7966 explicit relocations. */
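/* Illustrative example (simplified RTL, not an exact dump): a reference to
   a small-data symbol such as
     (mem:SI (symbol_ref:SI ("foo")))
   with SYMBOL_REF_SMALL_P set is rewritten to
     (mem:SI (plus:SI (reg:SI 26 gp) (symbol_ref:SI ("foo"))))
   so that the access can be emitted gp-relative, i.e. as [r26, foo@sda].  */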
7968 static rtx
7969 arc_rewrite_small_data_1 (rtx op)
7971 rtx rgp = gen_rtx_REG (Pmode, SDATA_BASE_REGNUM);
7972 op = copy_insn (op);
7973 subrtx_ptr_iterator::array_type array;
7974 FOR_EACH_SUBRTX_PTR (iter, array, &op, ALL)
7976 rtx *loc = *iter;
7977 if (arc_rewrite_small_data_p (*loc))
7979 *loc = gen_rtx_PLUS (Pmode, rgp, *loc);
7980 iter.skip_subrtxes ();
7982 else if (GET_CODE (*loc) == PLUS
7983 && rtx_equal_p (XEXP (*loc, 0), rgp))
7984 iter.skip_subrtxes ();
7986 return op;
7990 arc_rewrite_small_data (rtx op)
7992 op = arc_rewrite_small_data_1 (op);
7994 /* Check if we fit small data constraints. */
7995 if (MEM_P (op)
7996 && !LEGITIMATE_SMALL_DATA_ADDRESS_P (XEXP (op, 0)))
7998 rtx addr = XEXP (op, 0);
7999 rtx tmp = gen_reg_rtx (Pmode);
8000 emit_move_insn (tmp, addr);
8001 op = replace_equiv_address_nv (op, tmp);
8003 return op;
8006 /* Return true if OP refers to small data symbols directly, not through
8007 a PLUS. */
8009 bool
8010 small_data_pattern (rtx op, machine_mode)
8012 if (GET_CODE (op) == SEQUENCE)
8013 return false;
8015 rtx rgp = gen_rtx_REG (Pmode, SDATA_BASE_REGNUM);
8016 subrtx_iterator::array_type array;
8017 FOR_EACH_SUBRTX (iter, array, op, ALL)
8019 const_rtx x = *iter;
8020 if (GET_CODE (x) == PLUS
8021 && rtx_equal_p (XEXP (x, 0), rgp))
8022 iter.skip_subrtxes ();
8023 else if (arc_rewrite_small_data_p (x))
8024 return true;
8026 return false;
8029 /* Return true if OP is an acceptable memory operand for ARCompact
8030 16-bit gp-relative load instructions.
8031 OP should look like: [r26, symref@sda]
8032 i.e. (mem (plus (reg 26) (symref with smalldata flag set))). */
8034 /* volatile cache option still to be handled. */
8036 bool
8037 compact_sda_memory_operand (rtx op, machine_mode mode, bool short_p)
8039 rtx addr;
8040 int size;
8041 tree decl = NULL_TREE;
8042 int align = 0;
8043 int mask = 0;
8045 /* Eliminate non-memory operations. */
8046 if (GET_CODE (op) != MEM)
8047 return false;
8049 if (mode == VOIDmode)
8050 mode = GET_MODE (op);
8052 size = GET_MODE_SIZE (mode);
8054 /* dword operations really put out 2 instructions, so eliminate them. */
8055 if (size > UNITS_PER_WORD)
8056 return false;
8058 /* Decode the address now. */
8059 addr = XEXP (op, 0);
8061 if (!LEGITIMATE_SMALL_DATA_ADDRESS_P (addr))
8062 return false;
8064 if (!short_p || size == 1)
8065 return true;
8067 /* Now check the alignment; the short loads using gp require the
8068 addresses to be aligned. */
8069 if (GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
8070 decl = SYMBOL_REF_DECL (XEXP (addr, 1));
8071 else if (GET_CODE (XEXP (XEXP (XEXP (addr, 1), 0), 0)) == SYMBOL_REF)
8072 decl = SYMBOL_REF_DECL (XEXP (XEXP (XEXP (addr, 1), 0), 0));
8073 if (decl)
8074 align = DECL_ALIGN (decl);
8075 align = align / BITS_PER_UNIT;
8077 switch (mode)
8079 case E_HImode:
8080 mask = 1;
8081 break;
8082 default:
8083 mask = 3;
8084 break;
8087 if (align && ((align & mask) == 0))
8088 return true;
8089 return false;
8092 /* Implement ASM_OUTPUT_ALIGNED_DECL_LOCAL. */
8094 void
8095 arc_asm_output_aligned_decl_local (FILE * stream, tree decl, const char * name,
8096 unsigned HOST_WIDE_INT size,
8097 unsigned HOST_WIDE_INT align,
8098 unsigned HOST_WIDE_INT globalize_p)
8100 int in_small_data = arc_in_small_data_p (decl);
8102 if (in_small_data)
8103 switch_to_section (get_named_section (NULL, ".sbss", 0));
8104 /* named_section (0,".sbss",0); */
8105 else
8106 switch_to_section (bss_section);
8108 if (globalize_p)
8109 (*targetm.asm_out.globalize_label) (stream, name);
8111 ASM_OUTPUT_ALIGN (stream, floor_log2 ((align) / BITS_PER_UNIT));
8112 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8113 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8114 ASM_OUTPUT_LABEL (stream, name);
8116 if (size != 0)
8117 ASM_OUTPUT_SKIP (stream, size);
8120 static bool
8121 arc_preserve_reload_p (rtx in)
8123 return (GET_CODE (in) == PLUS
8124 && RTX_OK_FOR_BASE_P (XEXP (in, 0), true)
8125 && CONST_INT_P (XEXP (in, 1))
8126 && !((INTVAL (XEXP (in, 1)) & 511)));
8130 arc_register_move_cost (machine_mode,
8131 enum reg_class from_class, enum reg_class to_class)
8133 /* The ARC600 has no bypass for extension registers, hence a nop might
8134 need to be inserted after a write so that reads are safe. */
8135 if (TARGET_ARC600)
8137 if (to_class == MPY_WRITABLE_CORE_REGS)
8138 return 3;
8139 /* Instructions modifying LP_COUNT need 4 additional cycles before
8140 the register will actually contain the value. */
8141 else if (to_class == LPCOUNT_REG)
8142 return 6;
8143 else if (to_class == WRITABLE_CORE_REGS)
8144 return 6;
8147 /* Using lp_count as scratch reg is a VERY bad idea. */
8148 if (from_class == LPCOUNT_REG)
8149 return 1000;
8150 if (to_class == LPCOUNT_REG)
8151 return 6;
8153 /* Force an attempt to 'mov Dy,Dx' to spill. */
8154 if ((TARGET_ARC700 || TARGET_EM) && TARGET_DPFP
8155 && from_class == DOUBLE_REGS && to_class == DOUBLE_REGS)
8156 return 100;
8158 return 2;
8161 /* Emit code for an addsi3 instruction with OPERANDS.
8162 COND_P indicates if this will use conditional execution.
8163 Return the length of the instruction.
8164 If OUTPUT_P is false, don't actually output the instruction, just return
8165 its length. */
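/* Rough length summary of the cases below (see the individual tests for the
   exact conditions): 2 for a 16-bit add_s/sub_s form, 4 for a plain 32-bit
   add/sub or add[123]/sub[123], 6 for a 16-bit opcode with a long immediate,
   and 8 for a 32-bit opcode with a long immediate.  */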
8167 arc_output_addsi (rtx *operands, bool cond_p, bool output_p)
8169 char format[35];
8171 int match = operands_match_p (operands[0], operands[1]);
8172 int match2 = operands_match_p (operands[0], operands[2]);
8173 int intval = (REG_P (operands[2]) ? 1
8174 : CONST_INT_P (operands[2]) ? INTVAL (operands[2]) : 0xbadc057);
8175 int neg_intval = -intval;
8176 int short_0 = satisfies_constraint_Rcq (operands[0]);
8177 int short_p = (!cond_p && short_0 && satisfies_constraint_Rcq (operands[1]));
8178 int ret = 0;
8180 #define REG_H_P(OP) (REG_P (OP) && ((TARGET_V2 && REGNO (OP) <= 31 \
8181 && REGNO (OP) != 30) \
8182 || !TARGET_V2))
8184 #define ADDSI_OUTPUT1(FORMAT) do {\
8185 if (output_p) \
8186 output_asm_insn (FORMAT, operands);\
8187 return ret; \
8188 } while (0)
8189 #define ADDSI_OUTPUT(LIST) do {\
8190 if (output_p) \
8191 sprintf LIST;\
8192 ADDSI_OUTPUT1 (format);\
8193 return ret; \
8194 } while (0)
8196 /* First try to emit a 16 bit insn. */
8197 ret = 2;
8198 if (!cond_p
8199 /* If we are actually about to output this insn, don't try a 16 bit
8200 variant if we already decided that we don't want that
8201 (I.e. we upsized this insn to align some following insn.)
8202 E.g. add_s r0,sp,70 is 16 bit, but add r0,sp,70 requires a LIMM -
8203 but add1 r0,sp,35 doesn't. */
8204 && (!output_p || (get_attr_length (current_output_insn) & 2)))
8206 /* Generate add_s a,b,c; add_s b,b,u7; add_s c,b,u3; add_s b,b,h
8207 patterns. */
8208 if (short_p
8209 && ((REG_H_P (operands[2])
8210 && (match || satisfies_constraint_Rcq (operands[2])))
8211 || (CONST_INT_P (operands[2])
8212 && ((unsigned) intval <= (match ? 127 : 7)))))
8213 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;1");
8215 /* Generate add_s b,b,h patterns. */
8216 if (short_0 && match2 && REG_H_P (operands[1]))
8217 ADDSI_OUTPUT1 ("add%? %0,%2,%1 ;2");
8219 /* Generate add_s b,sp,u7; add_s sp,sp,u7 patterns. */
8220 if ((short_0 || REGNO (operands[0]) == STACK_POINTER_REGNUM)
8221 && REGNO (operands[1]) == STACK_POINTER_REGNUM && !(intval & ~124))
8222 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;3");
8224 if ((short_p && (unsigned) neg_intval <= (match ? 31 : 7))
8225 || (REGNO (operands[0]) == STACK_POINTER_REGNUM
8226 && match && !(neg_intval & ~124)))
8227 ADDSI_OUTPUT1 ("sub%? %0,%1,%n2 ;4");
8229 /* Generate add_s h,h,s3 patterns. */
8230 if (REG_H_P (operands[0]) && match && TARGET_V2
8231 && CONST_INT_P (operands[2]) && ((intval >= -1) && (intval <= 6)))
8232 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;5");
8234 /* Generate add_s r0,b,u6; add_s r1,b,u6 patterns. */
8235 if (TARGET_CODE_DENSITY && REG_P (operands[0]) && REG_P (operands[1])
8236 && ((REGNO (operands[0]) == 0) || (REGNO (operands[0]) == 1))
8237 && satisfies_constraint_Rcq (operands[1])
8238 && satisfies_constraint_L (operands[2]))
8239 ADDSI_OUTPUT1 ("add%? %0,%1,%2 ;6");
8242 /* Now try to emit a 32 bit insn without long immediate. */
8243 ret = 4;
8244 if (!match && match2 && REG_P (operands[1]))
8245 ADDSI_OUTPUT1 ("add%? %0,%2,%1");
8246 if (match || !cond_p)
8248 int limit = (match && !cond_p) ? 0x7ff : 0x3f;
8249 int range_factor = neg_intval & intval;
8250 int shift;
8252 if (intval == (HOST_WIDE_INT) (HOST_WIDE_INT_M1U << 31))
8253 ADDSI_OUTPUT1 ("bxor%? %0,%1,31");
8255 /* If we can use a straight add / sub instead of an {add,sub}[123] of the
8256 same size, do so - the insn latency is lower. */
8257 /* -0x800 is a 12-bit constant for add / add3 / sub / sub3, but
8258 0x800 is not. */
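/* Worked example (illustrative, assuming addN/subN scale their last operand
   by 1 << N): for intval == 4096 with limit == 0x7ff, neither a plain add
   nor a plain sub fits, range_factor == 4096 gives shift == 3, and we emit
   "add3%? %0,%1,512", i.e. %1 + (512 << 3) == %1 + 4096.  */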
8259 if ((intval >= 0 && intval <= limit)
8260 || (intval == -0x800 && limit == 0x7ff))
8261 ADDSI_OUTPUT1 ("add%? %0,%1,%2");
8262 else if ((intval < 0 && neg_intval <= limit)
8263 || (intval == 0x800 && limit == 0x7ff))
8264 ADDSI_OUTPUT1 ("sub%? %0,%1,%n2");
8265 shift = range_factor >= 8 ? 3 : (range_factor >> 1);
8266 gcc_assert (shift == 0 || shift == 1 || shift == 2 || shift == 3);
8267 gcc_assert ((((1 << shift) - 1) & intval) == 0);
8268 if (((intval < 0 && intval != -0x4000)
8269 /* sub[123] is slower than add_s / sub, only use it if it
8270 avoids a long immediate. */
8271 && neg_intval <= limit << shift)
8272 || (intval == 0x4000 && limit == 0x7ff))
8273 ADDSI_OUTPUT ((format, "sub%d%%? %%0,%%1,%d",
8274 shift, neg_intval >> shift));
8275 else if ((intval >= 0 && intval <= limit << shift)
8276 || (intval == -0x4000 && limit == 0x7ff))
8277 ADDSI_OUTPUT ((format, "add%d%%? %%0,%%1,%d", shift, intval >> shift));
8279 /* Try to emit a 16 bit opcode with long immediate. */
8280 ret = 6;
8281 if (short_p && match)
8282 ADDSI_OUTPUT1 ("add%? %0,%1,%S2");
8285 /* We have to use a 32 bit opcode with a long immediate. */
8285 ret = 8;
8286 ADDSI_OUTPUT1 (intval < 0 ? "sub%? %0,%1,%n2" : "add%? %0,%1,%S2");
8289 /* Emit code for a commutative_cond_exec instruction with OPERANDS.
8290 Return the length of the instruction.
8291 If OUTPUT_P is false, don't actually output the instruction, just return
8292 its length. */
8294 arc_output_commutative_cond_exec (rtx *operands, bool output_p)
8296 enum rtx_code commutative_op = GET_CODE (operands[3]);
8297 const char *pat = NULL;
8299 /* Canonical rtl should not have a constant in the first operand position. */
8300 gcc_assert (!CONSTANT_P (operands[1]));
8302 switch (commutative_op)
8304 case AND:
8305 if (satisfies_constraint_C1p (operands[2]))
8306 pat = "bmsk%? %0,%1,%Z2";
8307 else if (satisfies_constraint_C2p (operands[2]))
8309 operands[2] = GEN_INT ((~INTVAL (operands[2])));
8310 pat = "bmskn%? %0,%1,%Z2";
8312 else if (satisfies_constraint_Ccp (operands[2]))
8313 pat = "bclr%? %0,%1,%M2";
8314 else if (satisfies_constraint_CnL (operands[2]))
8315 pat = "bic%? %0,%1,%n2-1";
8316 break;
8317 case IOR:
8318 if (satisfies_constraint_C0p (operands[2]))
8319 pat = "bset%? %0,%1,%z2";
8320 break;
8321 case XOR:
8322 if (satisfies_constraint_C0p (operands[2]))
8323 pat = "bxor%? %0,%1,%z2";
8324 break;
8325 case PLUS:
8326 return arc_output_addsi (operands, true, output_p);
8327 default: break;
8329 if (output_p)
8330 output_asm_insn (pat ? pat : "%O3.%d5 %0,%1,%2", operands);
8331 if (pat || REG_P (operands[2]) || satisfies_constraint_L (operands[2]))
8332 return 4;
8333 return 8;
8336 /* Helper function of arc_expand_movmem. ADDR points to a chunk of memory.
8337 Emit code and return a potentially modified address such that offsets
8338 up to SIZE can be added to yield a legitimate address.
8339 If REUSE is set, ADDR is a register that may be modified. */
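/* Illustrative example (assuming SMALL_INT corresponds to the s9 load/store
   offset range): for ADDR == (plus (reg) (const_int 250)) and SIZE == 16,
   250 + 16 is no longer a small offset, so the address is copied into a
   fresh register (the !REUSE case) and that register is returned.  */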
8341 static rtx
8342 force_offsettable (rtx addr, HOST_WIDE_INT size, bool reuse)
8344 rtx base = addr;
8345 rtx offs = const0_rtx;
8347 if (GET_CODE (base) == PLUS)
8349 offs = XEXP (base, 1);
8350 base = XEXP (base, 0);
8352 if (!REG_P (base)
8353 || (REGNO (base) != STACK_POINTER_REGNUM
8354 && REGNO_PTR_FRAME_P (REGNO (base)))
8355 || !CONST_INT_P (offs) || !SMALL_INT (INTVAL (offs))
8356 || !SMALL_INT (INTVAL (offs) + size))
8358 if (reuse)
8359 emit_insn (gen_add2_insn (addr, offs));
8360 else
8361 addr = copy_to_mode_reg (Pmode, addr);
8363 return addr;
8366 /* Like move_by_pieces, but take account of load latency and actual
8367 offset ranges. Return true on success. */
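/* Worked example for the piece-count estimate below: copying 14 bytes with
   align >= 4 gives n_pieces == (14 + 4) / 8 + ((14 >> 1) & 1) + (14 & 1) == 3
   with TARGET_LL64 (an 8, a 4 and a 2 byte piece), or
   (14 + 2) / 4 + (14 & 1) == 4 without it (4 + 4 + 4 + 2 byte pieces).  */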
8369 bool
8370 arc_expand_movmem (rtx *operands)
8372 rtx dst = operands[0];
8373 rtx src = operands[1];
8374 rtx dst_addr, src_addr;
8375 HOST_WIDE_INT size;
8376 int align = INTVAL (operands[3]);
8377 unsigned n_pieces;
8378 int piece = align;
8379 rtx store[2];
8380 rtx tmpx[2];
8381 int i;
8383 if (!CONST_INT_P (operands[2]))
8384 return false;
8385 size = INTVAL (operands[2]);
8386 /* move_by_pieces_ninsns is static, so we can't use it. */
8387 if (align >= 4)
8389 if (TARGET_LL64)
8390 n_pieces = (size + 4) / 8U + ((size >> 1) & 1) + (size & 1);
8391 else
8392 n_pieces = (size + 2) / 4U + (size & 1);
8394 else if (align == 2)
8395 n_pieces = (size + 1) / 2U;
8396 else
8397 n_pieces = size;
8398 if (n_pieces >= (unsigned int) (optimize_size ? 3 : 15))
8399 return false;
8400 /* Force 32 bit aligned and larger data to use 64 bit transfers, if
8401 possible. */
8402 if (TARGET_LL64 && (piece >= 4) && (size >= 8))
8403 piece = 8;
8404 else if (piece > 4)
8405 piece = 4;
8406 dst_addr = force_offsettable (XEXP (operands[0], 0), size, 0);
8407 src_addr = force_offsettable (XEXP (operands[1], 0), size, 0);
8408 store[0] = store[1] = NULL_RTX;
8409 tmpx[0] = tmpx[1] = NULL_RTX;
8410 for (i = 0; size > 0; i ^= 1, size -= piece)
8412 rtx tmp;
8413 machine_mode mode;
8415 while (piece > size)
8416 piece >>= 1;
8417 mode = smallest_int_mode_for_size (piece * BITS_PER_UNIT);
8418 /* If we don't re-use temporaries, the scheduler gets carried away,
8419 and the register pressure gets unnecessarily high. */
8420 if (0 && tmpx[i] && GET_MODE (tmpx[i]) == mode)
8421 tmp = tmpx[i];
8422 else
8423 tmpx[i] = tmp = gen_reg_rtx (mode);
8424 dst_addr = force_offsettable (dst_addr, piece, 1);
8425 src_addr = force_offsettable (src_addr, piece, 1);
8426 if (store[i])
8427 emit_insn (store[i]);
8428 emit_move_insn (tmp, change_address (src, mode, src_addr));
8429 store[i] = gen_move_insn (change_address (dst, mode, dst_addr), tmp);
8430 dst_addr = plus_constant (Pmode, dst_addr, piece);
8431 src_addr = plus_constant (Pmode, src_addr, piece);
8433 if (store[i])
8434 emit_insn (store[i]);
8435 if (store[i^1])
8436 emit_insn (store[i^1]);
8437 return true;
8440 /* Prepare operands for move in MODE. Return true iff the move has
8441 been emitted. */
8443 bool
8444 prepare_move_operands (rtx *operands, machine_mode mode)
8446 /* We used to do this only for MODE_INT Modes, but addresses to floating
8447 point variables may well be in the small data section. */
8448 if (!TARGET_NO_SDATA_SET && small_data_pattern (operands[0], Pmode))
8449 operands[0] = arc_rewrite_small_data (operands[0]);
8451 if (mode == SImode && SYMBOLIC_CONST (operands[1]))
8453 prepare_pic_move (operands, SImode);
8455 /* Disable any REG_EQUALs associated with the symref
8456 otherwise the optimization pass undoes the work done
8457 here and references the variable directly. */
8460 if (GET_CODE (operands[0]) != MEM
8461 && !TARGET_NO_SDATA_SET
8462 && small_data_pattern (operands[1], Pmode))
8464 /* This is to take care of address calculations involving sdata
8465 variables. */
8466 operands[1] = arc_rewrite_small_data (operands[1]);
8468 emit_insn (gen_rtx_SET (operands[0],operands[1]));
8469 /* ??? This note is useless, since it only restates the set itself.
8470 We should rather use the original SYMBOL_REF. However, there is
8471 the problem that we are lying to the compiler about these
8472 SYMBOL_REFs to start with. symbol@sda should be encoded specially
8473 so that we can tell it apart from an actual symbol. */
8474 set_unique_reg_note (get_last_insn (), REG_EQUAL, operands[1]);
8476 /* Take care of the REG_EQUAL note that will be attached to mark the
8477 output reg equal to the initial symbol_ref after this code is
8478 executed. */
8479 emit_move_insn (operands[0], operands[0]);
8480 return true;
8483 if (MEM_P (operands[0])
8484 && !(reload_in_progress || reload_completed))
8486 operands[1] = force_reg (mode, operands[1]);
8487 if (!move_dest_operand (operands[0], mode))
8489 rtx addr = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
8490 /* This is like change_address_1 (operands[0], mode, 0, 1),
8491 except that we can't use that function because it is static. */
8492 rtx pat = change_address (operands[0], mode, addr);
8493 MEM_COPY_ATTRIBUTES (pat, operands[0]);
8494 operands[0] = pat;
8496 if (!cse_not_expected)
8498 rtx pat = XEXP (operands[0], 0);
8500 pat = arc_legitimize_address_0 (pat, pat, mode);
8501 if (pat)
8503 pat = change_address (operands[0], mode, pat);
8504 MEM_COPY_ATTRIBUTES (pat, operands[0]);
8505 operands[0] = pat;
8510 if (MEM_P (operands[1]) && !cse_not_expected)
8512 rtx pat = XEXP (operands[1], 0);
8514 pat = arc_legitimize_address_0 (pat, pat, mode);
8515 if (pat)
8517 pat = change_address (operands[1], mode, pat);
8518 MEM_COPY_ATTRIBUTES (pat, operands[1]);
8519 operands[1] = pat;
8523 return false;
8526 /* Prepare OPERANDS for an extension using CODE to OMODE.
8527 Return true iff the move has been emitted. */
8529 bool
8530 prepare_extend_operands (rtx *operands, enum rtx_code code,
8531 machine_mode omode)
8533 if (!TARGET_NO_SDATA_SET && small_data_pattern (operands[1], Pmode))
8535 /* This is to take care of address calculations involving sdata
8536 variables. */
8537 operands[1]
8538 = gen_rtx_fmt_e (code, omode, arc_rewrite_small_data (operands[1]));
8539 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8540 set_unique_reg_note (get_last_insn (), REG_EQUAL, operands[1]);
8542 /* Take care of the REG_EQUAL note that will be attached to mark the
8543 output reg equal to the initial extension after this code is
8544 executed. */
8545 emit_move_insn (operands[0], operands[0]);
8546 return true;
8548 return false;
8551 /* Output a library call to a function called FNAME that has been arranged
8552 to be local to any dso. */
8554 const char *
8555 arc_output_libcall (const char *fname)
8557 unsigned len = strlen (fname);
8558 static char buf[64];
8560 gcc_assert (len < sizeof buf - 35);
8561 if (TARGET_LONG_CALLS_SET
8562 || (TARGET_MEDIUM_CALLS && arc_ccfsm_cond_exec_p ()))
8564 if (flag_pic)
8565 sprintf (buf, "add r12,pcl,@%s@pcl\n\tjl%%!%%* [r12]", fname);
8566 else
8567 sprintf (buf, "jl%%! @%s", fname);
8569 else
8570 sprintf (buf, "bl%%!%%* @%s", fname);
8571 return buf;
8574 /* Return the SImode highpart of the DImode value IN. */
8577 disi_highpart (rtx in)
8579 return simplify_gen_subreg (SImode, in, DImode, TARGET_BIG_ENDIAN ? 0 : 4);
8582 /* Return length adjustment for INSN.
8583 For ARC600:
8584 A write to a core reg greater than or equal to 32 must not be immediately
8585 followed by a use. Anticipate the length requirement to insert a nop
8586 between PRED and SUCC to prevent a hazard. */
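/* Illustrative hazard (made-up extension register number): on ARC600 a
   sequence such as
     add  r32,r1,r2
     mov  r3,r32
   needs a nop between the write and the read of r32; the length adjustment
   of 4 returned below reserves room for it.  */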
8588 static int
8589 arc600_corereg_hazard (rtx_insn *pred, rtx_insn *succ)
8591 if (!TARGET_ARC600)
8592 return 0;
8593 if (GET_CODE (PATTERN (pred)) == SEQUENCE)
8594 pred = as_a <rtx_sequence *> (PATTERN (pred))->insn (1);
8595 if (GET_CODE (PATTERN (succ)) == SEQUENCE)
8596 succ = as_a <rtx_sequence *> (PATTERN (succ))->insn (0);
8597 if (recog_memoized (pred) == CODE_FOR_mulsi_600
8598 || recog_memoized (pred) == CODE_FOR_umul_600
8599 || recog_memoized (pred) == CODE_FOR_mac_600
8600 || recog_memoized (pred) == CODE_FOR_mul64_600
8601 || recog_memoized (pred) == CODE_FOR_mac64_600
8602 || recog_memoized (pred) == CODE_FOR_umul64_600
8603 || recog_memoized (pred) == CODE_FOR_umac64_600)
8604 return 0;
8605 subrtx_iterator::array_type array;
8606 FOR_EACH_SUBRTX (iter, array, PATTERN (pred), NONCONST)
8608 const_rtx x = *iter;
8609 switch (GET_CODE (x))
8611 case SET: case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
8612 break;
8613 default:
8614 /* This is also fine for PRE/POST_MODIFY, because they
8615 contain a SET. */
8616 continue;
8618 rtx dest = XEXP (x, 0);
8619 /* Check if this sets an extension register. N.B. we use 61 for the
8620 condition codes, which is definitely not an extension register. */
8621 if (REG_P (dest) && REGNO (dest) >= 32 && REGNO (dest) < 61
8622 /* Check if the same register is used by the PAT. */
8623 && (refers_to_regno_p
8624 (REGNO (dest),
8625 REGNO (dest) + (GET_MODE_SIZE (GET_MODE (dest)) + 3) / 4U,
8626 PATTERN (succ), 0)))
8627 return 4;
8629 return 0;
8632 /* Given a rtx, check if it is an assembly instruction or not. */
8634 static int
8635 arc_asm_insn_p (rtx x)
8637 int i, j;
8639 if (x == 0)
8640 return 0;
8642 switch (GET_CODE (x))
8644 case ASM_OPERANDS:
8645 case ASM_INPUT:
8646 return 1;
8648 case SET:
8649 return arc_asm_insn_p (SET_SRC (x));
8651 case PARALLEL:
8652 j = 0;
8653 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8654 j += arc_asm_insn_p (XVECEXP (x, 0, i));
8655 if (j > 0)
8656 return 1;
8657 break;
8659 default:
8660 break;
8663 return 0;
8666 /* For ARC600:
8667 A write to a core reg greater than or equal to 32 must not be immediately
8668 followed by a use. Anticipate the length requirement to insert a nop
8669 between PRED and SUCC to prevent a hazard. */
8672 arc_hazard (rtx_insn *pred, rtx_insn *succ)
8674 if (!pred || !INSN_P (pred) || !succ || !INSN_P (succ))
8675 return 0;
8677 if (TARGET_ARC600)
8678 return arc600_corereg_hazard (pred, succ);
8680 return 0;
8683 /* Return length adjustment for INSN. */
8686 arc_adjust_insn_length (rtx_insn *insn, int len, bool)
8688 if (!INSN_P (insn))
8689 return len;
8690 /* We already handle sequences by ignoring the delay sequence flag. */
8691 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8692 return len;
8694 /* Check for return with but one preceding insn since function
8695 start / call. */
8696 if (TARGET_PAD_RETURN
8697 && JUMP_P (insn)
8698 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8699 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8700 && get_attr_type (insn) == TYPE_RETURN)
8702 rtx_insn *prev = prev_active_insn (insn);
8704 if (!prev || !(prev = prev_active_insn (prev))
8705 || ((NONJUMP_INSN_P (prev)
8706 && GET_CODE (PATTERN (prev)) == SEQUENCE)
8707 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
8708 NON_SIBCALL)
8709 : CALL_ATTR (prev, NON_SIBCALL)))
8710 return len + 4;
8712 if (TARGET_ARC600)
8714 rtx_insn *succ = next_real_insn (insn);
8716 /* On the ARC600, a write to an extension register must be separated
8717 from a read. */
8718 if (succ && INSN_P (succ))
8719 len += arc600_corereg_hazard (insn, succ);
8722 /* Restore extracted operands - otherwise splitters like the addsi3_mixed one
8723 can go awry. */
8724 extract_constrain_insn_cached (insn);
8726 return len;
8729 /* Values for length_sensitive. */
8730 enum
8732 ARC_LS_NONE,// Jcc
8733 ARC_LS_25, // 25 bit offset, B
8734 ARC_LS_21, // 21 bit offset, Bcc
8735 ARC_LS_U13,// 13 bit unsigned offset, LP
8736 ARC_LS_10, // 10 bit offset, B_s, Beq_s, Bne_s
8737 ARC_LS_9, // 9 bit offset, BRcc
8738 ARC_LS_8, // 8 bit offset, BRcc_s
8739 ARC_LS_U7, // 7 bit unsigned offset, LPcc
8740 ARC_LS_7 // 7 bit offset, Bcc_s
8743 /* While the infrastructure patch is waiting for review, duplicate the
8744 struct definitions, to allow this file to compile. */
8745 #if 1
8746 typedef struct
8748 unsigned align_set;
8749 /* Cost as a branch / call target or call return address. */
8750 int target_cost;
8751 int fallthrough_cost;
8752 int branch_cost;
8753 int length;
8754 /* 0 for not length sensitive, 1 for largest offset range,
8755 * 2 for next smaller etc. */
8756 unsigned length_sensitive : 8;
8757 bool enabled;
8758 } insn_length_variant_t;
8760 typedef struct insn_length_parameters_s
8762 int align_unit_log;
8763 int align_base_log;
8764 int max_variants;
8765 int (*get_variants) (rtx_insn *, int, bool, bool, insn_length_variant_t *);
8766 } insn_length_parameters_t;
8768 static void
8769 arc_insn_length_parameters (insn_length_parameters_t *ilp) ATTRIBUTE_UNUSED;
8770 #endif
8772 static int
8773 arc_get_insn_variants (rtx_insn *insn, int len, bool, bool target_p,
8774 insn_length_variant_t *ilv)
8776 if (!NONDEBUG_INSN_P (insn))
8777 return 0;
8778 enum attr_type type;
8779 /* shorten_branches doesn't take optimize_size into account yet for the
8780 get_variants mechanism, so turn this off for now. */
8781 if (optimize_size)
8782 return 0;
8783 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
8785 /* The interaction of a short delay slot insn with a short branch is
8786 too weird for shorten_branches to piece together, so describe the
8787 entire SEQUENCE. */
8788 rtx_insn *inner;
8789 if (TARGET_UPSIZE_DBR
8790 && get_attr_length (pat->insn (1)) <= 2
8791 && (((type = get_attr_type (inner = pat->insn (0)))
8792 == TYPE_UNCOND_BRANCH)
8793 || type == TYPE_BRANCH)
8794 && get_attr_delay_slot_filled (inner) == DELAY_SLOT_FILLED_YES)
8796 int n_variants
8797 = arc_get_insn_variants (inner, get_attr_length (inner), true,
8798 target_p, ilv+1);
8799 /* The short variant gets split into a higher-cost aligned
8800 and a lower cost unaligned variant. */
8801 gcc_assert (n_variants);
8802 gcc_assert (ilv[1].length_sensitive == ARC_LS_7
8803 || ilv[1].length_sensitive == ARC_LS_10);
8804 gcc_assert (ilv[1].align_set == 3);
8805 ilv[0] = ilv[1];
8806 ilv[0].align_set = 1;
8807 ilv[0].branch_cost += 1;
8808 ilv[1].align_set = 2;
8809 n_variants++;
8810 for (int i = 0; i < n_variants; i++)
8811 ilv[i].length += 2;
8812 /* In case an instruction with aligned size is wanted, and
8813 the short variants are unavailable / too expensive, add
8814 versions of long branch + long delay slot. */
8815 for (int i = 2, end = n_variants; i < end; i++, n_variants++)
8817 ilv[n_variants] = ilv[i];
8818 ilv[n_variants].length += 2;
8820 return n_variants;
8822 return 0;
8824 insn_length_variant_t *first_ilv = ilv;
8825 type = get_attr_type (insn);
8826 bool delay_filled
8827 = (get_attr_delay_slot_filled (insn) == DELAY_SLOT_FILLED_YES);
8828 int branch_align_cost = delay_filled ? 0 : 1;
8829 int branch_unalign_cost = delay_filled ? 0 : TARGET_UNALIGN_BRANCH ? 0 : 1;
8830 /* If the previous instruction is an sfunc call, this insn is always
8831 a target, even though the middle-end is unaware of this. */
8832 bool force_target = false;
8833 rtx_insn *prev = prev_active_insn (insn);
8834 if (prev && arc_next_active_insn (prev, 0) == insn
8835 && ((NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
8836 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
8837 NON_SIBCALL)
8838 : (CALL_ATTR (prev, NON_SIBCALL)
8839 && NEXT_INSN (PREV_INSN (prev)) == prev)))
8840 force_target = true;
8842 switch (type)
8844 case TYPE_BRCC:
8845 /* Short BRCC only comes in a no-delay-slot version, and without a limm. */
8846 if (!delay_filled)
8848 ilv->align_set = 3;
8849 ilv->length = 2;
8850 ilv->branch_cost = 1;
8851 ilv->enabled = (len == 2);
8852 ilv->length_sensitive = ARC_LS_8;
8853 ilv++;
8855 /* Fall through. */
8856 case TYPE_BRCC_NO_DELAY_SLOT:
8857 /* doloop_fallback* patterns are TYPE_BRCC_NO_DELAY_SLOT for
8858 (delay slot) scheduling purposes, but they are longer. */
8859 if (GET_CODE (PATTERN (insn)) == PARALLEL
8860 && GET_CODE (XVECEXP (PATTERN (insn), 0, 1)) == SET)
8861 return 0;
8862 /* Standard BRCC: 4 bytes, or 8 bytes with limm. */
8863 ilv->length = ((type == TYPE_BRCC) ? 4 : 8);
8864 ilv->align_set = 3;
8865 ilv->branch_cost = branch_align_cost;
8866 ilv->enabled = (len <= ilv->length);
8867 ilv->length_sensitive = ARC_LS_9;
8868 if ((target_p || force_target)
8869 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8871 ilv[1] = *ilv;
8872 ilv->align_set = 1;
8873 ilv++;
8874 ilv->align_set = 2;
8875 ilv->target_cost = 1;
8876 ilv->branch_cost = branch_unalign_cost;
8878 ilv++;
8880 rtx op, op0;
8881 op = XEXP (SET_SRC (XVECEXP (PATTERN (insn), 0, 0)), 0);
8882 op0 = XEXP (op, 0);
8884 if (GET_CODE (op0) == ZERO_EXTRACT
8885 && satisfies_constraint_L (XEXP (op0, 2)))
8886 op0 = XEXP (op0, 0);
8887 if (satisfies_constraint_Rcq (op0))
8889 ilv->length = ((type == TYPE_BRCC) ? 6 : 10);
8890 ilv->align_set = 3;
8891 ilv->branch_cost = 1 + branch_align_cost;
8892 ilv->fallthrough_cost = 1;
8893 ilv->enabled = true;
8894 ilv->length_sensitive = ARC_LS_21;
8895 if (!delay_filled && TARGET_UNALIGN_BRANCH)
8897 ilv[1] = *ilv;
8898 ilv->align_set = 1;
8899 ilv++;
8900 ilv->align_set = 2;
8901 ilv->branch_cost = 1 + branch_unalign_cost;
8903 ilv++;
8905 ilv->length = ((type == TYPE_BRCC) ? 8 : 12);
8906 ilv->align_set = 3;
8907 ilv->branch_cost = 1 + branch_align_cost;
8908 ilv->fallthrough_cost = 1;
8909 ilv->enabled = true;
8910 ilv->length_sensitive = ARC_LS_21;
8911 if ((target_p || force_target)
8912 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8914 ilv[1] = *ilv;
8915 ilv->align_set = 1;
8916 ilv++;
8917 ilv->align_set = 2;
8918 ilv->target_cost = 1;
8919 ilv->branch_cost = 1 + branch_unalign_cost;
8921 ilv++;
8922 break;
8924 case TYPE_SFUNC:
8925 ilv->length = 12;
8926 goto do_call;
8927 case TYPE_CALL_NO_DELAY_SLOT:
8928 ilv->length = 8;
8929 goto do_call;
8930 case TYPE_CALL:
8931 ilv->length = 4;
8932 ilv->length_sensitive
8933 = GET_CODE (PATTERN (insn)) == COND_EXEC ? ARC_LS_21 : ARC_LS_25;
8934 do_call:
8935 ilv->align_set = 3;
8936 ilv->fallthrough_cost = branch_align_cost;
8937 ilv->enabled = true;
8938 if ((target_p || force_target)
8939 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8941 ilv[1] = *ilv;
8942 ilv->align_set = 1;
8943 ilv++;
8944 ilv->align_set = 2;
8945 ilv->target_cost = 1;
8946 ilv->fallthrough_cost = branch_unalign_cost;
8948 ilv++;
8949 break;
8950 case TYPE_UNCOND_BRANCH:
8951 /* Strictly speaking, this should be ARC_LS_10 for equality comparisons,
8952 but that makes no difference at the moment. */
8953 ilv->length_sensitive = ARC_LS_7;
8954 ilv[1].length_sensitive = ARC_LS_25;
8955 goto do_branch;
8956 case TYPE_BRANCH:
8957 ilv->length_sensitive = ARC_LS_10;
8958 ilv[1].length_sensitive = ARC_LS_21;
8959 do_branch:
8960 ilv->align_set = 3;
8961 ilv->length = 2;
8962 ilv->branch_cost = branch_align_cost;
8963 ilv->enabled = (len == ilv->length);
8964 ilv++;
8965 ilv->length = 4;
8966 ilv->align_set = 3;
8967 ilv->branch_cost = branch_align_cost;
8968 ilv->enabled = true;
8969 if ((target_p || force_target)
8970 || (!delay_filled && TARGET_UNALIGN_BRANCH))
8972 ilv[1] = *ilv;
8973 ilv->align_set = 1;
8974 ilv++;
8975 ilv->align_set = 2;
8976 ilv->target_cost = 1;
8977 ilv->branch_cost = branch_unalign_cost;
8979 ilv++;
8980 break;
8981 case TYPE_JUMP:
8982 return 0;
8983 default:
8984 /* For every short insn, there is generally also a long insn.
8985 trap_s is an exception. */
8986 if ((len & 2) == 0 || recog_memoized (insn) == CODE_FOR_trap_s)
8987 return 0;
8988 ilv->align_set = 3;
8989 ilv->length = len;
8990 ilv->enabled = 1;
8991 ilv++;
8992 ilv->align_set = 3;
8993 ilv->length = len + 2;
8994 ilv->enabled = 1;
8995 if (target_p || force_target)
8997 ilv[1] = *ilv;
8998 ilv->align_set = 1;
8999 ilv++;
9000 ilv->align_set = 2;
9001 ilv->target_cost = 1;
9003 ilv++;
9005 /* If the previous instruction is an sfunc call, this insn is always
9006 a target, even though the middle-end is unaware of this.
9007 Therefore, if we have a call predecessor, transfer the target cost
9008 to the fallthrough and branch costs. */
9009 if (force_target)
9011 for (insn_length_variant_t *p = first_ilv; p < ilv; p++)
9013 p->fallthrough_cost += p->target_cost;
9014 p->branch_cost += p->target_cost;
9015 p->target_cost = 0;
9019 return ilv - first_ilv;
9022 static void
9023 arc_insn_length_parameters (insn_length_parameters_t *ilp)
9025 ilp->align_unit_log = 1;
9026 ilp->align_base_log = 1;
9027 ilp->max_variants = 7;
9028 ilp->get_variants = arc_get_insn_variants;
9031 /* Return a copy of COND from *STATEP, inverted if that is indicated by the
9032 CC field of *STATEP. */
9034 static rtx
9035 arc_get_ccfsm_cond (struct arc_ccfsm *statep, bool reverse)
9037 rtx cond = statep->cond;
9038 int raw_cc = get_arc_condition_code (cond);
9039 if (reverse)
9040 raw_cc = ARC_INVERSE_CONDITION_CODE (raw_cc);
9042 if (statep->cc == raw_cc)
9043 return copy_rtx (cond);
9045 gcc_assert (ARC_INVERSE_CONDITION_CODE (raw_cc) == statep->cc);
9047 machine_mode ccm = GET_MODE (XEXP (cond, 0));
9048 enum rtx_code code = reverse_condition (GET_CODE (cond));
9049 if (code == UNKNOWN || ccm == CC_FP_GTmode || ccm == CC_FP_GEmode)
9050 code = reverse_condition_maybe_unordered (GET_CODE (cond));
9052 return gen_rtx_fmt_ee (code, GET_MODE (cond),
9053 copy_rtx (XEXP (cond, 0)), copy_rtx (XEXP (cond, 1)));
9056 /* Return version of PAT conditionalized with COND, which is part of INSN.
9057 ANNULLED indicates if INSN is an annulled delay-slot insn.
9058 Register further changes if necessary. */
9059 static rtx
9060 conditionalize_nonjump (rtx pat, rtx cond, rtx insn, bool annulled)
9062 /* For commutative operators, we generally prefer to have
9063 the first source match the destination. */
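/* E.g. (set (reg r0) (plus (reg r1) (reg r0))) is rewritten as
   (set (reg r0) (plus (reg r0) (reg r1))) so that the first source matches
   the destination (illustrative registers).  */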
9064 if (GET_CODE (pat) == SET)
9066 rtx src = SET_SRC (pat);
9068 if (COMMUTATIVE_P (src))
9070 rtx src0 = XEXP (src, 0);
9071 rtx src1 = XEXP (src, 1);
9072 rtx dst = SET_DEST (pat);
9074 if (rtx_equal_p (src1, dst) && !rtx_equal_p (src0, dst)
9075 /* Leave add_n alone - the canonical form is to
9076 have the complex summand first. */
9077 && REG_P (src0))
9078 pat = gen_rtx_SET (dst,
9079 gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src),
9080 src1, src0));
9084 /* dwarf2out.c:dwarf2out_frame_debug_expr doesn't know
9085 what to do with COND_EXEC. */
9086 if (RTX_FRAME_RELATED_P (insn))
9088 /* If this is the delay slot insn of an annulled branch,
9089 dwarf2out.c:scan_trace understands the annulling semantics
9090 without the COND_EXEC. */
9091 gcc_assert (annulled);
9092 rtx note = alloc_reg_note (REG_FRAME_RELATED_EXPR, pat,
9093 REG_NOTES (insn));
9094 validate_change (insn, &REG_NOTES (insn), note, 1);
9096 pat = gen_rtx_COND_EXEC (VOIDmode, cond, pat);
9097 return pat;
9100 /* Use the ccfsm machinery to do if conversion. */
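/* Illustrative sketch (made-up registers and condition): a short forward
   branch around a single insn, e.g.
     beq   .L1
     mov   r0,r1
   .L1:
   is turned into a predicated insn under the inverse condition,
     mov.ne  r0,r1
   and the branch (and, if no longer referenced, the label) is deleted.  */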
9102 static unsigned
9103 arc_ifcvt (void)
9105 struct arc_ccfsm *statep = &cfun->machine->ccfsm_current;
9107 memset (statep, 0, sizeof *statep);
9108 for (rtx_insn *insn = get_insns (); insn; insn = next_insn (insn))
9110 arc_ccfsm_advance (insn, statep);
9112 switch (statep->state)
9114 case 0:
9115 break;
9116 case 1: case 2:
9118 /* Deleted branch. */
9119 arc_ccfsm_post_advance (insn, statep);
9120 gcc_assert (!IN_RANGE (statep->state, 1, 2));
9121 rtx_insn *seq = NEXT_INSN (PREV_INSN (insn));
9122 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
9124 rtx slot = XVECEXP (PATTERN (seq), 0, 1);
9125 rtx pat = PATTERN (slot);
9126 if (INSN_ANNULLED_BRANCH_P (insn))
9128 rtx cond
9129 = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (slot));
9130 pat = gen_rtx_COND_EXEC (VOIDmode, cond, pat);
9132 if (!validate_change (seq, &PATTERN (seq), pat, 0))
9133 gcc_unreachable ();
9134 PUT_CODE (slot, NOTE);
9135 NOTE_KIND (slot) = NOTE_INSN_DELETED;
9137 else
9139 set_insn_deleted (insn);
9141 continue;
9143 case 3:
9144 if (LABEL_P (insn)
9145 && statep->target_label == CODE_LABEL_NUMBER (insn))
9147 arc_ccfsm_post_advance (insn, statep);
9148 if (--LABEL_NUSES (insn) == 0)
9149 delete_insn (insn);
9150 continue;
9152 /* Fall through. */
9153 case 4: case 5:
9154 if (!NONDEBUG_INSN_P (insn))
9155 break;
9157 /* Conditionalized insn. */
9159 rtx_insn *prev, *pprev;
9160 rtx *patp, pat, cond;
9161 bool annulled; annulled = false;
9163 /* If this is a delay slot insn in a non-annulled branch,
9164 don't conditionalize it. N.B., this should be fine for
9165 conditional return too. However, don't do this for
9166 unconditional branches, as these would be encountered when
9167 processing an 'else' part. */
9168 prev = PREV_INSN (insn);
9169 pprev = PREV_INSN (prev);
9170 if (pprev && NEXT_INSN (NEXT_INSN (pprev)) == NEXT_INSN (insn)
9171 && JUMP_P (prev) && get_attr_cond (prev) == COND_USE)
9173 if (!INSN_ANNULLED_BRANCH_P (prev))
9174 break;
9175 annulled = true;
9178 patp = &PATTERN (insn);
9179 pat = *patp;
9180 cond = arc_get_ccfsm_cond (statep, INSN_FROM_TARGET_P (insn));
9181 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9183 /* ??? don't conditionalize if all side effects are dead
9184 in the not-execute case. */
9186 pat = conditionalize_nonjump (pat, cond, insn, annulled);
9188 else if (simplejump_p (insn))
9190 patp = &SET_SRC (pat);
9191 pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, *patp, pc_rtx);
9193 else if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
9195 pat = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, pat, pc_rtx);
9196 pat = gen_rtx_SET (pc_rtx, pat);
9198 else
9199 gcc_unreachable ();
9200 validate_change (insn, patp, pat, 1);
9201 if (!apply_change_group ())
9202 gcc_unreachable ();
9203 if (JUMP_P (insn))
9205 rtx_insn *next = next_nonnote_insn (insn);
9206 if (GET_CODE (next) == BARRIER)
9207 delete_insn (next);
9208 if (statep->state == 3)
9209 continue;
9211 break;
9212 default:
9213 gcc_unreachable ();
9215 arc_ccfsm_post_advance (insn, statep);
9217 return 0;
9220 /* Find annulled delay insns and convert them to use the appropriate predicate.
9221 This allows branch shortening to size up these insns properly. */
9223 static unsigned
9224 arc_predicate_delay_insns (void)
9226 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9228 rtx pat, jump, dlay, src, cond, *patp;
9229 int reverse;
9231 if (!NONJUMP_INSN_P (insn)
9232 || GET_CODE (pat = PATTERN (insn)) != SEQUENCE)
9233 continue;
9234 jump = XVECEXP (pat, 0, 0);
9235 dlay = XVECEXP (pat, 0, 1);
9236 if (!JUMP_P (jump) || !INSN_ANNULLED_BRANCH_P (jump))
9237 continue;
9238 /* If the branch insn does the annulling, leave the delay insn alone. */
9239 if (!TARGET_AT_DBR_CONDEXEC && !INSN_FROM_TARGET_P (dlay))
9240 continue;
9241 /* ??? Could also leave DLAY un-conditionalized if its target is dead
9242 on the other path. */
9243 gcc_assert (GET_CODE (PATTERN (jump)) == SET);
9244 gcc_assert (SET_DEST (PATTERN (jump)) == pc_rtx);
9245 src = SET_SRC (PATTERN (jump));
9246 gcc_assert (GET_CODE (src) == IF_THEN_ELSE);
9247 cond = XEXP (src, 0);
9248 if (XEXP (src, 2) == pc_rtx)
9249 reverse = 0;
9250 else if (XEXP (src, 1) == pc_rtx)
9251 reverse = 1;
9252 else
9253 gcc_unreachable ();
9254 if (reverse != !INSN_FROM_TARGET_P (dlay))
9256 machine_mode ccm = GET_MODE (XEXP (cond, 0));
9257 enum rtx_code code = reverse_condition (GET_CODE (cond));
9258 if (code == UNKNOWN || ccm == CC_FP_GTmode || ccm == CC_FP_GEmode)
9259 code = reverse_condition_maybe_unordered (GET_CODE (cond));
9261 cond = gen_rtx_fmt_ee (code, GET_MODE (cond),
9262 copy_rtx (XEXP (cond, 0)),
9263 copy_rtx (XEXP (cond, 1)));
9265 else
9266 cond = copy_rtx (cond);
9267 patp = &PATTERN (dlay);
9268 pat = *patp;
9269 pat = conditionalize_nonjump (pat, cond, dlay, true);
9270 validate_change (dlay, patp, pat, 1);
9271 if (!apply_change_group ())
9272 gcc_unreachable ();
9274 return 0;
9277 /* For ARC600: If a write to a core reg >=32 appears in a delay slot
9278 (other than of a forward brcc), it creates a hazard when there is a read
9279 of the same register at the branch target. We can't know what is at the
9280 branch target of calls, and for branches, we don't really know before the
9281 end of delay slot scheduling, either. Not only can an individual instruction
9282 be hoisted out into a delay slot, a basic block can also be emptied this
9283 way, and branch and/or fall through targets be redirected. Hence we don't
9284 want such writes in a delay slot. */
9286 /* Return nonzero iff INSN writes to an extension core register. */
9289 arc_write_ext_corereg (rtx insn)
9291 subrtx_iterator::array_type array;
9292 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
9294 const_rtx x = *iter;
9295 switch (GET_CODE (x))
9297 case SET: case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
9298 break;
9299 default:
9300 /* This is also fine for PRE/POST_MODIFY, because they
9301 contain a SET. */
9302 continue;
9304 const_rtx dest = XEXP (x, 0);
9305 if (REG_P (dest) && REGNO (dest) >= 32 && REGNO (dest) < 61)
9306 return 1;
9308 return 0;
9311 /* This is like the hook, but returns NULL when it can't / won't generate
9312 a legitimate address. */
9314 static rtx
9315 arc_legitimize_address_0 (rtx x, rtx oldx ATTRIBUTE_UNUSED,
9316 machine_mode mode)
9318 rtx addr, inner;
9320 if (flag_pic && SYMBOLIC_CONST (x))
9321 (x) = arc_legitimize_pic_address (x, 0);
9322 addr = x;
9323 if (GET_CODE (addr) == CONST)
9324 addr = XEXP (addr, 0);
9325 if (GET_CODE (addr) == PLUS
9326 && CONST_INT_P (XEXP (addr, 1))
9327 && ((GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
9328 && !SYMBOL_REF_FUNCTION_P (XEXP (addr, 0)))
9329 || (REG_P (XEXP (addr, 0))
9330 && (INTVAL (XEXP (addr, 1)) & 252))))
9332 HOST_WIDE_INT offs, upper;
9333 int size = GET_MODE_SIZE (mode);
9335 offs = INTVAL (XEXP (addr, 1));
9336 upper = (offs + 256 * size) & ~511 * size;
9337 inner = plus_constant (Pmode, XEXP (addr, 0), upper);
9338 #if 0 /* ??? this produces worse code for EEMBC idctrn01 */
9339 if (GET_CODE (x) == CONST)
9340 inner = gen_rtx_CONST (Pmode, inner);
9341 #endif
9342 addr = plus_constant (Pmode, force_reg (Pmode, inner), offs - upper);
9343 x = addr;
9345 else if (GET_CODE (addr) == SYMBOL_REF && !SYMBOL_REF_FUNCTION_P (addr))
9346 x = force_reg (Pmode, x);
9347 if (memory_address_p ((machine_mode) mode, x))
9348 return x;
9349 return NULL_RTX;
9352 static rtx
9353 arc_legitimize_address (rtx orig_x, rtx oldx, machine_mode mode)
9355 if (GET_CODE (orig_x) == SYMBOL_REF)
9357 enum tls_model model = SYMBOL_REF_TLS_MODEL (orig_x);
9358 if (model != 0)
9359 return arc_legitimize_tls_address (orig_x, model);
9362 rtx new_x = arc_legitimize_address_0 (orig_x, oldx, mode);
9364 if (new_x)
9365 return new_x;
9366 return orig_x;
9369 static rtx
9370 arc_delegitimize_address_0 (rtx x)
9372 rtx u, gp, p;
9374 if (GET_CODE (x) == CONST && GET_CODE (u = XEXP (x, 0)) == UNSPEC)
9376 if (XINT (u, 1) == ARC_UNSPEC_GOT
9377 || XINT (u, 1) == ARC_UNSPEC_GOTOFFPC)
9378 return XVECEXP (u, 0, 0);
9380 else if (GET_CODE (x) == CONST && GET_CODE (p = XEXP (x, 0)) == PLUS
9381 && GET_CODE (u = XEXP (p, 0)) == UNSPEC
9382 && (XINT (u, 1) == ARC_UNSPEC_GOT
9383 || XINT (u, 1) == ARC_UNSPEC_GOTOFFPC))
9384 return gen_rtx_CONST
9385 (GET_MODE (x),
9386 gen_rtx_PLUS (GET_MODE (p), XVECEXP (u, 0, 0), XEXP (p, 1)));
9387 else if (GET_CODE (x) == PLUS
9388 && ((REG_P (gp = XEXP (x, 0))
9389 && REGNO (gp) == PIC_OFFSET_TABLE_REGNUM)
9390 || (GET_CODE (gp) == CONST
9391 && GET_CODE (u = XEXP (gp, 0)) == UNSPEC
9392 && XINT (u, 1) == ARC_UNSPEC_GOT
9393 && GET_CODE (XVECEXP (u, 0, 0)) == SYMBOL_REF
9394 && !strcmp (XSTR (XVECEXP (u, 0, 0), 0), "_DYNAMIC")))
9395 && GET_CODE (XEXP (x, 1)) == CONST
9396 && GET_CODE (u = XEXP (XEXP (x, 1), 0)) == UNSPEC
9397 && XINT (u, 1) == ARC_UNSPEC_GOTOFF)
9398 return XVECEXP (u, 0, 0);
9399 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
9400 && ((REG_P (gp = XEXP (XEXP (x, 0), 1))
9401 && REGNO (gp) == PIC_OFFSET_TABLE_REGNUM)
9402 || (GET_CODE (gp) == CONST
9403 && GET_CODE (u = XEXP (gp, 0)) == UNSPEC
9404 && XINT (u, 1) == ARC_UNSPEC_GOT
9405 && GET_CODE (XVECEXP (u, 0, 0)) == SYMBOL_REF
9406 && !strcmp (XSTR (XVECEXP (u, 0, 0), 0), "_DYNAMIC")))
9407 && GET_CODE (XEXP (x, 1)) == CONST
9408 && GET_CODE (u = XEXP (XEXP (x, 1), 0)) == UNSPEC
9409 && XINT (u, 1) == ARC_UNSPEC_GOTOFF)
9410 return gen_rtx_PLUS (GET_MODE (x), XEXP (XEXP (x, 0), 0),
9411 XVECEXP (u, 0, 0));
9412 else if (GET_CODE (x) == PLUS
9413 && (u = arc_delegitimize_address_0 (XEXP (x, 1))))
9414 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), u);
9415 return NULL_RTX;
9418 static rtx
9419 arc_delegitimize_address (rtx x)
9421 rtx orig_x = x = delegitimize_mem_from_attrs (x);
9422 if (GET_CODE (x) == MEM)
9423 x = XEXP (x, 0);
9424 x = arc_delegitimize_address_0 (x);
9425 if (x)
9427 if (MEM_P (orig_x))
9428 x = replace_equiv_address_nv (orig_x, x);
9429 return x;
9431 return orig_x;
9434 /* Return a REG rtx for acc1. N.B. the gcc-internal representation may
9435 differ from the hardware register number in order to allow the generic
9436 code to correctly split the concatenation of acc1 and acc2. */
9439 gen_acc1 (void)
9441 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 56: 57);
9444 /* Return a REG rtx for acc2. N.B. the gcc-internal representation may
9445 differ from the hardware register number in order to allow the generic
9446 code to correctly split the concatenation of acc1 and acc2. */
9449 gen_acc2 (void)
9451 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 57: 56);
9454 /* Return a REG rtx for mlo. N.B. the gcc-internal representation may
9455 differ from the hardware register number in order to allow the generic
9456 code to correctly split the concatenation of mhi and mlo. */
9459 gen_mlo (void)
9461 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 59: 58);
9464 /* Return a REG rtx for mhi. N.B. the gcc-internal representation may
9465 differ from the hardware register number in order to allow the generic
9466 code to correctly split the concatenation of mhi and mlo. */
9469 gen_mhi (void)
9471 return gen_rtx_REG (SImode, TARGET_BIG_ENDIAN ? 58: 59);
9474 /* FIXME: a parameter should be added, and code added to final.c,
9475 to reproduce this functionality in shorten_branches. */
9476 #if 0
9477 /* Return nonzero iff BRANCH should be unaligned if possible by upsizing
9478 a previous instruction. */
9480 arc_unalign_branch_p (rtx branch)
9482 rtx note;
9484 if (!TARGET_UNALIGN_BRANCH)
9485 return 0;
9486 /* Do not do this if we have a filled delay slot. */
9487 if (get_attr_delay_slot_filled (branch) == DELAY_SLOT_FILLED_YES
9488 && !NEXT_INSN (branch)->deleted ())
9489 return 0;
9490 note = find_reg_note (branch, REG_BR_PROB, 0);
9491 return (!note
9492 || (arc_unalign_prob_threshold && !br_prob_note_reliable_p (note))
9493 || INTVAL (XEXP (note, 0)) < arc_unalign_prob_threshold);
9495 #endif
9497 /* When estimating sizes during arc_reorg, when optimizing for speed, there
9498 are three reasons why we need to consider branches to be length 6:
9499 - annul-false delay slot insns are implemented using conditional execution,
9500 thus preventing short insn formation where used.
9501 - for ARC600: annul-true delay slot insns are implemented where possible
9502 using conditional execution, preventing short insn formation where used.
9503 - for ARC700: likely or somewhat likely taken branches are made long and
9504 unaligned if possible to avoid branch penalty. */
9506 bool
9507 arc_branch_size_unknown_p (void)
9509 return !optimize_size && arc_reorg_in_progress;
9512 /* We are about to output a return insn. Add padding if necessary to avoid
9513 a mispredict. A return could happen immediately after the function
9514 start, but after a call we know that there will be at least a blink
9515 restore. */
9517 void
9518 arc_pad_return (void)
9520 rtx_insn *insn = current_output_insn;
9521 rtx_insn *prev = prev_active_insn (insn);
9522 int want_long;
9524 if (!prev)
9526 fputs ("\tnop_s\n", asm_out_file);
9527 cfun->machine->unalign ^= 2;
9528 want_long = 1;
9530 /* If PREV is a sequence, we know it must be a branch / jump or a tailcall,
9531 because after a call, we'd have to restore blink first. */
9532 else if (GET_CODE (PATTERN (prev)) == SEQUENCE)
9533 return;
9534 else
9536 want_long = (get_attr_length (prev) == 2);
9537 prev = prev_active_insn (prev);
9539 if (!prev
9540 || ((NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
9541 ? CALL_ATTR (as_a <rtx_sequence *> (PATTERN (prev))->insn (0),
9542 NON_SIBCALL)
9543 : CALL_ATTR (prev, NON_SIBCALL)))
9545 if (want_long)
9546 cfun->machine->size_reason
9547 = "call/return and return/return must be 6 bytes apart to avoid mispredict";
9548 else if (TARGET_UNALIGN_BRANCH && cfun->machine->unalign)
9550 cfun->machine->size_reason
9551 = "Long unaligned jump avoids non-delay slot penalty";
9552 want_long = 1;
9554 /* Disgorge delay insn, if there is any, and it may be moved. */
9555 if (final_sequence
9556 /* ??? Annulled would be OK if we can and do conditionalize
9557 the delay slot insn accordingly. */
9558 && !INSN_ANNULLED_BRANCH_P (insn)
9559 && (get_attr_cond (insn) != COND_USE
9560 || !reg_set_p (gen_rtx_REG (CCmode, CC_REG),
9561 XVECEXP (final_sequence, 0, 1))))
9563 prev = as_a <rtx_insn *> (XVECEXP (final_sequence, 0, 1));
9564 gcc_assert (!prev_real_insn (insn)
9565 || !arc_hazard (prev_real_insn (insn), prev));
9566 cfun->machine->force_short_suffix = !want_long;
9567 rtx save_pred = current_insn_predicate;
9568 final_scan_insn (prev, asm_out_file, optimize, 1, NULL);
9569 cfun->machine->force_short_suffix = -1;
9570 prev->set_deleted ();
9571 current_output_insn = insn;
9572 current_insn_predicate = save_pred;
9574 else if (want_long)
9575 fputs ("\tnop\n", asm_out_file);
9576 else
9578 fputs ("\tnop_s\n", asm_out_file);
9579 cfun->machine->unalign ^= 2;
9582 return;
9585 /* The usual; we set up our machine_function data. */
9587 static struct machine_function *
9588 arc_init_machine_status (void)
9590 struct machine_function *machine;
9591 machine = ggc_cleared_alloc<machine_function> ();
9592 machine->fn_type = ARC_FUNCTION_UNKNOWN;
9593 machine->force_short_suffix = -1;
9595 return machine;
9598 /* Implements INIT_EXPANDERS. We just set up to call the above
9599 function. */
9601 void
9602 arc_init_expanders (void)
9604 init_machine_status = arc_init_machine_status;
9607 /* Check if OP is a proper parallel of a millicode call pattern. OFFSET
9608 indicates a number of elements to ignore - that allows having a
9609 sibcall pattern that starts with (return). LOAD_P is zero for a store
9610 multiple (for prologues), one for a load multiple (for epilogues),
9611 and two for a load multiple where no final clobber of blink is required.
9612 We also skip the first load / store element since this is supposed to
9613 be checked in the instruction pattern. */
9616 arc_check_millicode (rtx op, int offset, int load_p)
9618 int len = XVECLEN (op, 0) - offset;
9619 int i;
9621 if (load_p == 2)
9623 if (len < 2 || len > 13)
9624 return 0;
9625 load_p = 1;
9627 else
9629 rtx elt = XVECEXP (op, 0, --len);
9631 if (GET_CODE (elt) != CLOBBER
9632 || !REG_P (XEXP (elt, 0))
9633 || REGNO (XEXP (elt, 0)) != RETURN_ADDR_REGNUM
9634 || len < 3 || len > 13)
9635 return 0;
9637 for (i = 1; i < len; i++)
9639 rtx elt = XVECEXP (op, 0, i + offset);
9640 rtx reg, mem, addr;
9642 if (GET_CODE (elt) != SET)
9643 return 0;
9644 mem = XEXP (elt, load_p);
9645 reg = XEXP (elt, 1-load_p);
9646 if (!REG_P (reg) || REGNO (reg) != 13U+i || !MEM_P (mem))
9647 return 0;
9648 addr = XEXP (mem, 0);
9649 if (GET_CODE (addr) != PLUS
9650 || !rtx_equal_p (stack_pointer_rtx, XEXP (addr, 0))
9651 || !CONST_INT_P (XEXP (addr, 1)) || INTVAL (XEXP (addr, 1)) != i*4)
9652 return 0;
9654 return 1;
9657 /* Accessor functions for cfun->machine->unalign. */
9660 arc_get_unalign (void)
9662 return cfun->machine->unalign;
9665 void
9666 arc_clear_unalign (void)
9668 if (cfun)
9669 cfun->machine->unalign = 0;
9672 void
9673 arc_toggle_unalign (void)
9675 cfun->machine->unalign ^= 2;
9678 /* Operands 0..2 are the operands of an addsi which uses a 12-bit
9679 constant in operand 2, but which would require a LIMM because of
9680 operand mismatch.
9681 Operands 3 and 4 are new SET_SRCs for operand 0. */
9683 void
9684 split_addsi (rtx *operands)
9686 int val = INTVAL (operands[2]);
9688 /* Try for two short insns first. Lengths being equal, we prefer
9689 expansions with shorter register lifetimes. */
9690 if (val > 127 && val <= 255
9691 && satisfies_constraint_Rcq (operands[0]))
9693 operands[3] = operands[2];
9694 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[1]);
9696 else
9698 operands[3] = operands[1];
9699 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[2]);
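/* Illustrative sketch (not part of the port): both split orders chosen
   above compute the same sum; they differ only in which value is moved
   into operand 0 first.  When operand 0 is a short-insn (Rcq) register
   and the constant is in the 128..255 range handled above, the constant
   is moved first so both halves can stay short; otherwise operand 1 is
   copied first.  The helper name and values below are hypothetical.  */
#if 0
static int
split_addsi_sketch (int op1, int op2, int op0_is_short_reg)
{
  int op0;
  if (op0_is_short_reg && op2 > 127 && op2 <= 255)
    {
      op0 = op2;		/* mov_s op0,op2 */
      op0 += op1;		/* add_s op0,op0,op1 */
    }
  else
    {
      op0 = op1;		/* mov   op0,op1 */
      op0 += op2;		/* add   op0,op0,op2 */
    }
  return op0;
}
#endif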
9703 /* Operands 0..2 are the operands of a subsi which uses a 12-bit
9704 constant in operand 1, but which would require a LIMM because of
9705 operand mismatch.
9706 Operands 3 and 4 are new SET_SRCs for operand 0. */
9708 void
9709 split_subsi (rtx *operands)
9711 int val = INTVAL (operands[1]);
9713 /* Try for two short insns first. Lengths being equal, we prefer
9714 expansions with shorter register lifetimes. */
9715 if (satisfies_constraint_Rcq (operands[0])
9716 && satisfies_constraint_Rcq (operands[2]))
9718 if (val >= -31 && val <= 127)
9720 operands[3] = gen_rtx_NEG (SImode, operands[2]);
9721 operands[4] = gen_rtx_PLUS (SImode, operands[0], operands[1]);
9722 return;
9724 else if (val >= 0 && val < 255)
9726 operands[3] = operands[1];
9727 operands[4] = gen_rtx_MINUS (SImode, operands[0], operands[2]);
9728 return;
9731 /* If the destination is not an ARCompact16 register, we might
9732 still have a chance to make a short insn if the source is;
9733 we need to start with a reg-reg move for this. */
9734 operands[3] = operands[2];
9735 operands[4] = gen_rtx_MINUS (SImode, operands[1], operands[0]);
9738 /* Handle DOUBLE_REGS uses.
9739 Operand 0: destination register
9740 Operand 1: source register */
9742 static bool
9743 arc_process_double_reg_moves (rtx *operands)
9745 rtx dest = operands[0];
9746 rtx src = operands[1];
9748 enum usesDxState { none, srcDx, destDx, maxDx };
9749 enum usesDxState state = none;
9751 if (refers_to_regno_p (40, 44, src, 0))
9752 state = srcDx;
9753 if (refers_to_regno_p (40, 44, dest, 0))
9755 /* Via arc_register_move_cost, we should never see D,D moves. */
9756 gcc_assert (state == none);
9757 state = destDx;
9760 if (state == none)
9761 return false;
9763 if (state == srcDx)
9765 /* Without the LR insn, we need to split this into a
9766 sequence of insns which will use the DEXCLx and DADDHxy
9767 insns to be able to read the Dx register in question. */
9768 if (TARGET_DPFP_DISABLE_LRSR)
9770 /* gen *movdf_insn_nolrsr */
9771 rtx set = gen_rtx_SET (dest, src);
9772 rtx use1 = gen_rtx_USE (VOIDmode, const1_rtx);
9773 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, use1)));
9775 else
9777 /* When we have 'mov D, r' or 'mov D, D' then get the target
9778 register pair for use with LR insn. */
9779 rtx destHigh = simplify_gen_subreg (SImode, dest, DFmode,
9780 TARGET_BIG_ENDIAN ? 0 : 4);
9781 rtx destLow = simplify_gen_subreg (SImode, dest, DFmode,
9782 TARGET_BIG_ENDIAN ? 4 : 0);
9784 /* Produce the two LR insns to get the high and low parts. */
9785 emit_insn (gen_rtx_SET (destHigh,
9786 gen_rtx_UNSPEC_VOLATILE (Pmode,
9787 gen_rtvec (1, src),
9788 VUNSPEC_ARC_LR_HIGH)));
9789 emit_insn (gen_rtx_SET (destLow,
9790 gen_rtx_UNSPEC_VOLATILE (Pmode,
9791 gen_rtvec (1, src),
9792 VUNSPEC_ARC_LR)));
9795 else if (state == destDx)
9797 /* When we have 'mov r, D' or 'mov D, D' and we have access to the
9798 LR insn, get the target register pair. */
9799 rtx srcHigh = simplify_gen_subreg (SImode, src, DFmode,
9800 TARGET_BIG_ENDIAN ? 0 : 4);
9801 rtx srcLow = simplify_gen_subreg (SImode, src, DFmode,
9802 TARGET_BIG_ENDIAN ? 4 : 0);
9804 emit_insn (gen_dexcl_2op (dest, srcHigh, srcLow));
9806 else
9807 gcc_unreachable ();
9809 return true;
9812 /* Operands 0..1 are the operands of a 64-bit move instruction.
9813 Split it into two moves with operands 2/3 and 4/5. */
9815 void
9816 arc_split_move (rtx *operands)
9818 machine_mode mode = GET_MODE (operands[0]);
9819 int i;
9820 int swap = 0;
9821 rtx xop[4];
9823 if (TARGET_DPFP)
9825 if (arc_process_double_reg_moves (operands))
9826 return;
9829 if (TARGET_LL64
9830 && ((memory_operand (operands[0], mode)
9831 && even_register_operand (operands[1], mode))
9832 || (memory_operand (operands[1], mode)
9833 && even_register_operand (operands[0], mode))))
9835 emit_move_insn (operands[0], operands[1]);
9836 return;
9839 if (TARGET_PLUS_QMACW
9840 && GET_CODE (operands[1]) == CONST_VECTOR)
9842 HOST_WIDE_INT intval0, intval1;
9843 if (GET_MODE (operands[1]) == V2SImode)
9845 intval0 = INTVAL (XVECEXP (operands[1], 0, 0));
9846 intval1 = INTVAL (XVECEXP (operands[1], 0, 1));
9848 else
9850 intval1 = INTVAL (XVECEXP (operands[1], 0, 3)) << 16;
9851 intval1 |= INTVAL (XVECEXP (operands[1], 0, 2)) & 0xFFFF;
9852 intval0 = INTVAL (XVECEXP (operands[1], 0, 1)) << 16;
9853 intval0 |= INTVAL (XVECEXP (operands[1], 0, 0)) & 0xFFFF;
9855 xop[0] = gen_rtx_REG (SImode, REGNO (operands[0]));
9856 xop[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
9857 xop[2] = GEN_INT (trunc_int_for_mode (intval0, SImode));
9858 xop[1] = GEN_INT (trunc_int_for_mode (intval1, SImode));
9859 emit_move_insn (xop[0], xop[2]);
9860 emit_move_insn (xop[3], xop[1]);
9861 return;
9864 for (i = 0; i < 2; i++)
9866 if (MEM_P (operands[i]) && auto_inc_p (XEXP (operands[i], 0)))
9868 rtx addr = XEXP (operands[i], 0);
9869 rtx r, o;
9870 enum rtx_code code;
9872 gcc_assert (!reg_overlap_mentioned_p (operands[0], addr));
9873 switch (GET_CODE (addr))
9875 case PRE_DEC: o = GEN_INT (-8); goto pre_modify;
9876 case PRE_INC: o = GEN_INT (8); goto pre_modify;
9877 case PRE_MODIFY: o = XEXP (XEXP (addr, 1), 1);
9878 pre_modify:
9879 code = PRE_MODIFY;
9880 break;
9881 case POST_DEC: o = GEN_INT (-8); goto post_modify;
9882 case POST_INC: o = GEN_INT (8); goto post_modify;
9883 case POST_MODIFY: o = XEXP (XEXP (addr, 1), 1);
9884 post_modify:
9885 code = POST_MODIFY;
9886 swap = 2;
9887 break;
9888 default:
9889 gcc_unreachable ();
9891 r = XEXP (addr, 0);
9892 xop[0+i] = adjust_automodify_address_nv
9893 (operands[i], SImode,
9894 gen_rtx_fmt_ee (code, Pmode, r,
9895 gen_rtx_PLUS (Pmode, r, o)),
9897 xop[2+i] = adjust_automodify_address_nv
9898 (operands[i], SImode, plus_constant (Pmode, r, 4), 4);
9900 else
9902 xop[0+i] = operand_subword (operands[i], 0, 0, mode);
9903 xop[2+i] = operand_subword (operands[i], 1, 0, mode);
9906 if (reg_overlap_mentioned_p (xop[0], xop[3]))
9908 swap = 2;
9909 gcc_assert (!reg_overlap_mentioned_p (xop[2], xop[1]));
9912 emit_move_insn (xop[0 + swap], xop[1 + swap]);
9913 emit_move_insn (xop[2 - swap], xop[3 - swap]);
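/* Illustration only (not port code) of why the two word moves above are
   sometimes swapped: if the low destination word overlaps a source word
   still needed by the second move, writing the low word first would
   clobber it.  The sketch shows one specific overlap case with a
   hypothetical register file indexed by plain integers.  */
#if 0
#include <stdint.h>

static void
split_move_sketch (uint32_t reg[], int dst, int src)
{
  if (dst == src + 1)
    {
      /* Destination low word overlaps the source high word:
	 move the high half first, as the swap logic above does.  */
      reg[dst + 1] = reg[src + 1];
      reg[dst] = reg[src];
    }
  else
    {
      reg[dst] = reg[src];
      reg[dst + 1] = reg[src + 1];
    }
}
#endif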
9917 /* Select between the instruction output templates s_tmpl (for short INSNs)
9918 and l_tmpl (for long INSNs). */
9920 const char *
9921 arc_short_long (rtx_insn *insn, const char *s_tmpl, const char *l_tmpl)
9923 int is_short = arc_verify_short (insn, cfun->machine->unalign, -1);
9925 extract_constrain_insn_cached (insn);
9926 return is_short ? s_tmpl : l_tmpl;
9929 /* Searches X for any reference to REGNO, returning the rtx of the
9930 reference found if any. Otherwise, returns NULL_RTX. */
9933 arc_regno_use_in (unsigned int regno, rtx x)
9935 const char *fmt;
9936 int i, j;
9937 rtx tem;
9939 if (REG_P (x) && refers_to_regno_p (regno, x))
9940 return x;
9942 fmt = GET_RTX_FORMAT (GET_CODE (x));
9943 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9945 if (fmt[i] == 'e')
9947 if ((tem = regno_use_in (regno, XEXP (x, i))))
9948 return tem;
9950 else if (fmt[i] == 'E')
9951 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9952 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
9953 return tem;
9956 return NULL_RTX;
9959 /* Return the integer value of the "type" attribute for INSN, or -1 if
9960 INSN can't have attributes. */
9963 arc_attr_type (rtx_insn *insn)
9965 if (NONJUMP_INSN_P (insn)
9966 ? (GET_CODE (PATTERN (insn)) == USE
9967 || GET_CODE (PATTERN (insn)) == CLOBBER)
9968 : JUMP_P (insn)
9969 ? (GET_CODE (PATTERN (insn)) == ADDR_VEC
9970 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
9971 : !CALL_P (insn))
9972 return -1;
9973 return get_attr_type (insn);
9976 /* Return true if insn sets the condition codes. */
9978 bool
9979 arc_sets_cc_p (rtx_insn *insn)
9981 if (NONJUMP_INSN_P (insn))
9982 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
9983 insn = seq->insn (seq->len () - 1);
9984 return arc_attr_type (insn) == TYPE_COMPARE;
9987 /* Return true if INSN is an instruction with a delay slot we may want
9988 to fill. */
9990 bool
9991 arc_need_delay (rtx_insn *insn)
9993 rtx_insn *next;
9995 if (!flag_delayed_branch)
9996 return false;
9997 /* The return at the end of a function needs a delay slot. */
9998 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
9999 && (!(next = next_active_insn (insn))
10000 || ((!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) != SEQUENCE)
10001 && arc_attr_type (next) == TYPE_RETURN))
10002 && (!TARGET_PAD_RETURN
10003 || (prev_active_insn (insn)
10004 && prev_active_insn (prev_active_insn (insn))
10005 && prev_active_insn (prev_active_insn (prev_active_insn (insn))))))
10006 return true;
10007 if (NONJUMP_INSN_P (insn)
10008 ? (GET_CODE (PATTERN (insn)) == USE
10009 || GET_CODE (PATTERN (insn)) == CLOBBER
10010 || GET_CODE (PATTERN (insn)) == SEQUENCE)
10011 : JUMP_P (insn)
10012 ? (GET_CODE (PATTERN (insn)) == ADDR_VEC
10013 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
10014 : !CALL_P (insn))
10015 return false;
10016 return num_delay_slots (insn) != 0;
10019 /* Return true if the scheduling pass(es) has/have already run,
10020 i.e. where possible, we should try to mitigate high latencies
10021 by different instruction selection. */
10023 bool
10024 arc_scheduling_not_expected (void)
10026 return cfun->machine->arc_reorg_started;
10030 arc_label_align (rtx_insn *label)
10032 /* Code has a minimum p2 alignment of 1, which we must restore after an
10033 ADDR_DIFF_VEC. */
10034 if (align_labels_log < 1)
10036 rtx_insn *next = next_nonnote_nondebug_insn (label);
10037 if (INSN_P (next) && recog_memoized (next) >= 0)
10038 return 1;
10040 return align_labels_log;
10043 /* Return true if LABEL is in executable code. */
10045 bool
10046 arc_text_label (rtx_insn *label)
10048 rtx_insn *next;
10050 /* ??? We use deleted labels as if they were still there, see
10051 gcc.c-torture/compile/20000326-2.c. */
10052 gcc_assert (GET_CODE (label) == CODE_LABEL
10053 || (GET_CODE (label) == NOTE
10054 && NOTE_KIND (label) == NOTE_INSN_DELETED_LABEL));
10055 next = next_nonnote_insn (label);
10056 if (next)
10057 return (!JUMP_TABLE_DATA_P (next)
10058 || GET_CODE (PATTERN (next)) != ADDR_VEC);
10059 else if (!PREV_INSN (label))
10060 /* ??? sometimes text labels get inserted very late, see
10061 gcc.dg/torture/stackalign/comp-goto-1.c */
10062 return true;
10063 return false;
10066 /* Without this, gcc.dg/tree-prof/bb-reorg.c fails to assemble
10067 when compiling with -O2 -freorder-blocks-and-partition -fprofile-use
10068 -D_PROFILE_USE; delay branch scheduling then follows a crossing jump
10069 to redirect two breqs. */
10071 static bool
10072 arc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
10074 /* ??? get_attr_type is declared to take an rtx. */
10075 union { const rtx_insn *c; rtx_insn *r; } u;
10077 u.c = follower;
10078 if (CROSSING_JUMP_P (followee))
10079 switch (get_attr_type (u.r))
10081 case TYPE_BRANCH:
10082 if (get_attr_length (u.r) != 2)
10083 break;
10084 /* Fall through. */
10085 case TYPE_BRCC:
10086 case TYPE_BRCC_NO_DELAY_SLOT:
10087 return false;
10088 default:
10089 return true;
10091 return true;
10094 /* Return the register number of the register holding the return address
10095 for a function of type TYPE. */
10098 arc_return_address_register (unsigned int fn_type)
10100 int regno = 0;
10102 if (ARC_INTERRUPT_P (fn_type))
10104 if ((fn_type & (ARC_FUNCTION_ILINK1 | ARC_FUNCTION_FIRQ)) != 0)
10105 regno = ILINK1_REGNUM;
10106 else if ((fn_type & ARC_FUNCTION_ILINK2) != 0)
10107 regno = ILINK2_REGNUM;
10108 else
10109 gcc_unreachable ();
10111 else if (ARC_NORMAL_P (fn_type) || ARC_NAKED_P (fn_type))
10112 regno = RETURN_ADDR_REGNUM;
10114 gcc_assert (regno != 0);
10115 return regno;
10118 /* Implement EPILOGUE_USES.
10119 Return true if REGNO should be added to the deemed uses of the epilogue.
10121 We have to make sure all the register restore instructions are
10122 known to be live in interrupt functions, plus the blink register if
10123 it is clobbered by the isr. */
10125 bool
10126 arc_epilogue_uses (int regno)
10128 unsigned int fn_type;
10130 if (regno == arc_tp_regno)
10131 return true;
10133 fn_type = arc_compute_function_type (cfun);
10134 if (reload_completed)
10136 if (ARC_INTERRUPT_P (cfun->machine->fn_type))
10138 if (!fixed_regs[regno])
10139 return true;
10140 return ((regno == arc_return_address_register (fn_type))
10141 || (regno == RETURN_ADDR_REGNUM));
10143 else
10144 return regno == RETURN_ADDR_REGNUM;
10146 else
10147 return regno == arc_return_address_register (fn_type);
10150 /* Helper for EH_USES macro. */
10152 bool
10153 arc_eh_uses (int regno)
10155 if (regno == arc_tp_regno)
10156 return true;
10157 return false;
10160 #ifndef TARGET_NO_LRA
10161 #define TARGET_NO_LRA !TARGET_LRA
10162 #endif
10164 static bool
10165 arc_lra_p (void)
10167 return !TARGET_NO_LRA;
10170 /* ??? Should we define TARGET_REGISTER_PRIORITY? We might prefer to use
10171 Rcq registers, because some insns are shorter with them. OTOH we already
10172 have separate alternatives for this purpose, and other insns don't
10173 mind, so maybe we should rather prefer the other registers?
10174 We need more data, and we can only get that if we allow people to
10175 try all options. */
10176 static int
10177 arc_register_priority (int r)
10179 switch (arc_lra_priority_tag)
10181 case ARC_LRA_PRIORITY_NONE:
10182 return 0;
10183 case ARC_LRA_PRIORITY_NONCOMPACT:
10184 return ((((r & 7) ^ 4) - 4) & 15) != r;
10185 case ARC_LRA_PRIORITY_COMPACT:
10186 return ((((r & 7) ^ 4) - 4) & 15) == r;
10187 default:
10188 gcc_unreachable ();
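/* Standalone sketch (not part of the port) illustrating the bit trick
   above: the expression (((r & 7) ^ 4) - 4) & 15 reproduces R exactly
   for the ARCompact short-insn registers r0-r3 and r12-r15 and differs
   for every other register number, so the comparisons above select (or
   deselect) the compact register class.  */
#if 0
#include <assert.h>

static void
check_compact_reg_test (void)
{
  for (int r = 0; r < 64; r++)
    {
      int v = (((r & 7) ^ 4) - 4) & 15;
      int is_compact = (r <= 3 || (r >= 12 && r <= 15));
      assert ((v == r) == is_compact);
    }
}
#endif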
10192 static reg_class_t
10193 arc_spill_class (reg_class_t /* orig_class */, machine_mode)
10195 return GENERAL_REGS;
10198 bool
10199 arc_legitimize_reload_address (rtx *p, machine_mode mode, int opnum,
10200 int itype)
10202 rtx x = *p;
10203 enum reload_type type = (enum reload_type) itype;
10205 if (GET_CODE (x) == PLUS
10206 && CONST_INT_P (XEXP (x, 1))
10207 && (RTX_OK_FOR_BASE_P (XEXP (x, 0), true)
10208 || (REG_P (XEXP (x, 0))
10209 && reg_equiv_constant (REGNO (XEXP (x, 0))))))
10211 int scale = GET_MODE_SIZE (mode);
10212 int shift;
10213 rtx index_rtx = XEXP (x, 1);
10214 HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
10215 rtx reg, sum, sum2;
10217 if (scale > 4)
10218 scale = 4;
10219 if ((scale-1) & offset)
10220 scale = 1;
10221 shift = scale >> 1;
10222 offset_base
10223 = ((offset + (256 << shift))
10224 & ((HOST_WIDE_INT)((unsigned HOST_WIDE_INT) -512 << shift)));
10225 /* Sometimes the normal form does not suit DImode. We
10226 could avoid that by using smaller ranges, but that
10227 would give less optimized code when SImode is
10228 prevalent. */
10229 if (GET_MODE_SIZE (mode) + offset - offset_base <= (256 << shift))
10231 int regno;
10233 reg = XEXP (x, 0);
10234 regno = REGNO (reg);
10235 sum2 = sum = plus_constant (Pmode, reg, offset_base);
10237 if (reg_equiv_constant (regno))
10239 sum2 = plus_constant (Pmode, reg_equiv_constant (regno),
10240 offset_base);
10241 if (GET_CODE (sum2) == PLUS)
10242 sum2 = gen_rtx_CONST (Pmode, sum2);
10244 *p = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
10245 push_reload (sum2, NULL_RTX, &XEXP (*p, 0), NULL,
10246 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum,
10247 type);
10248 return true;
10251 /* We must re-recognize what we created before. */
10252 else if (GET_CODE (x) == PLUS
10253 && GET_CODE (XEXP (x, 0)) == PLUS
10254 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10255 && REG_P (XEXP (XEXP (x, 0), 0))
10256 && CONST_INT_P (XEXP (x, 1)))
10258 /* Because this address is so complex, we know it must have
10259 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
10260 it is already unshared, and needs no further unsharing. */
10261 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10262 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
10263 return true;
10265 return false;
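/* Standalone illustration (not port code) of the offset splitting done
   above: the large constant is rounded to a multiple of (512 << shift)
   so that the remaining displacement stays within the (scaled) short
   offset range checked above, and the rounded base becomes the reloaded
   part.  The example values are arbitrary.  */
#if 0
#include <assert.h>

static void
offset_base_sketch (void)
{
  long offset = 2500;			/* hypothetical out-of-range offset */
  int shift = 2;			/* word-sized access, scale 4 */
  long mask = (long) ((unsigned long) -512L << shift);	/* -2048 */
  long offset_base = (offset + (256L << shift)) & mask;
  long rest = offset - offset_base;

  assert (offset_base == 2048 && rest == 452);
  assert (rest >= -(256L << shift) && rest < (256L << shift));
}
#endif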
10268 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
10270 static bool
10271 arc_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
10272 unsigned int align,
10273 enum by_pieces_operation op,
10274 bool speed_p)
10276 /* Let the movmem expander handle small block moves. */
10277 if (op == MOVE_BY_PIECES)
10278 return false;
10280 return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
10283 /* Emit a (pre) memory barrier around an atomic sequence according to
10284 MODEL. */
10286 static void
10287 arc_pre_atomic_barrier (enum memmodel model)
10289 if (need_atomic_barrier_p (model, true))
10290 emit_insn (gen_memory_barrier ());
10293 /* Emit a (post) memory barrier around an atomic sequence according to
10294 MODEL. */
10296 static void
10297 arc_post_atomic_barrier (enum memmodel model)
10299 if (need_atomic_barrier_p (model, false))
10300 emit_insn (gen_memory_barrier ());
10303 /* Emit jump INSN and mark it as very unlikely to be taken. */
10305 static void
10306 emit_unlikely_jump (rtx insn)
10308 rtx_insn *jump = emit_jump_insn (insn);
10309 add_reg_br_prob_note (jump, profile_probability::very_unlikely ());
10312 /* Expand code to perform an 8 or 16-bit compare and swap by doing
10313 a 32-bit compare and swap on the word containing the byte or
10314 half-word. The difference between a weak and a strong CAS is that
10315 the weak version may simply fail. The strong version relies on two
10316 loops: one checks whether the SCOND op succeeded or not, the other
10317 checks that the 32-bit accessed location containing the 8 or 16-bit
10318 datum was not changed by another thread. The first loop is
10319 implemented by the atomic_compare_and_swapsi_1 pattern. The second
10320 loop is implemented by this routine. */
10322 static void
10323 arc_expand_compare_and_swap_qh (rtx bool_result, rtx result, rtx mem,
10324 rtx oldval, rtx newval, rtx weak,
10325 rtx mod_s, rtx mod_f)
10327 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10328 rtx addr = gen_reg_rtx (Pmode);
10329 rtx off = gen_reg_rtx (SImode);
10330 rtx oldv = gen_reg_rtx (SImode);
10331 rtx newv = gen_reg_rtx (SImode);
10332 rtx oldvalue = gen_reg_rtx (SImode);
10333 rtx newvalue = gen_reg_rtx (SImode);
10334 rtx res = gen_reg_rtx (SImode);
10335 rtx resv = gen_reg_rtx (SImode);
10336 rtx memsi, val, mask, end_label, loop_label, cc, x;
10337 machine_mode mode;
10338 bool is_weak = (weak != const0_rtx);
10340 /* Truncate the address. */
10341 emit_insn (gen_rtx_SET (addr,
10342 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10344 /* Compute the datum offset. */
10345 emit_insn (gen_rtx_SET (off,
10346 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10347 if (TARGET_BIG_ENDIAN)
10348 emit_insn (gen_rtx_SET (off,
10349 gen_rtx_MINUS (SImode,
10350 (GET_MODE (mem) == QImode) ?
10351 GEN_INT (3) : GEN_INT (2), off)));
10353 /* Normal read from truncated address. */
10354 memsi = gen_rtx_MEM (SImode, addr);
10355 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10356 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10358 val = copy_to_reg (memsi);
10360 /* Convert the offset to bits. */
10361 emit_insn (gen_rtx_SET (off,
10362 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10364 /* Get the proper mask. */
10365 if (GET_MODE (mem) == QImode)
10366 mask = force_reg (SImode, GEN_INT (0xff));
10367 else
10368 mask = force_reg (SImode, GEN_INT (0xffff));
10370 emit_insn (gen_rtx_SET (mask,
10371 gen_rtx_ASHIFT (SImode, mask, off)));
10373 /* Prepare the old and new values. */
10374 emit_insn (gen_rtx_SET (val,
10375 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10376 val)));
10378 oldval = gen_lowpart (SImode, oldval);
10379 emit_insn (gen_rtx_SET (oldv,
10380 gen_rtx_ASHIFT (SImode, oldval, off)));
10382 newval = gen_lowpart_common (SImode, newval);
10383 emit_insn (gen_rtx_SET (newv,
10384 gen_rtx_ASHIFT (SImode, newval, off)));
10386 emit_insn (gen_rtx_SET (oldv,
10387 gen_rtx_AND (SImode, oldv, mask)));
10389 emit_insn (gen_rtx_SET (newv,
10390 gen_rtx_AND (SImode, newv, mask)));
10392 if (!is_weak)
10394 end_label = gen_label_rtx ();
10395 loop_label = gen_label_rtx ();
10396 emit_label (loop_label);
10399 /* Make the old and new values. */
10400 emit_insn (gen_rtx_SET (oldvalue,
10401 gen_rtx_IOR (SImode, oldv, val)));
10403 emit_insn (gen_rtx_SET (newvalue,
10404 gen_rtx_IOR (SImode, newv, val)));
10406 /* Try a 32-bit atomic compare and swap. It clobbers the CC
10407 register. */
10408 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue,
10409 weak, mod_s, mod_f));
10411 /* Regardless of the weakness of the operation, a proper boolean
10412 result needs to be provided. */
10413 x = gen_rtx_REG (CC_Zmode, CC_REG);
10414 x = gen_rtx_EQ (SImode, x, const0_rtx);
10415 emit_insn (gen_rtx_SET (bool_result, x));
10417 if (!is_weak)
10419 /* Check the result: if the atomic op succeeded, go to the end
10420 label. */
10421 x = gen_rtx_REG (CC_Zmode, CC_REG);
10422 x = gen_rtx_EQ (VOIDmode, x, const0_rtx);
10423 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10424 gen_rtx_LABEL_REF (Pmode, end_label), pc_rtx);
10425 emit_jump_insn (gen_rtx_SET (pc_rtx, x));
10427 /* Wait for the right moment when the accessed 32-bit location
10428 is stable. */
10429 emit_insn (gen_rtx_SET (resv,
10430 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10431 res)));
10432 mode = SELECT_CC_MODE (NE, resv, val);
10433 cc = gen_rtx_REG (mode, CC_REG);
10434 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, resv, val)));
10436 /* Set the new value of the 32-bit location, properly masked. */
10437 emit_insn (gen_rtx_SET (val, resv));
10439 /* Try again if the location is unstable. Fall through if only the
10440 scond op failed. */
10441 x = gen_rtx_NE (VOIDmode, cc, const0_rtx);
10442 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10443 gen_rtx_LABEL_REF (Pmode, loop_label), pc_rtx);
10444 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10446 emit_label (end_label);
10449 /* Finally, properly return the result for the given mode. */
10450 emit_insn (gen_rtx_SET (res,
10451 gen_rtx_AND (SImode, res, mask)));
10453 emit_insn (gen_rtx_SET (res,
10454 gen_rtx_LSHIFTRT (SImode, res, off)));
10456 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
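/* Standalone C sketch of the masking scheme expanded above, for a
   little-endian target, using GCC's __sync_val_compare_and_swap builtin
   as a stand-in for the 32-bit LLOCK/SCOND loop.  This only illustrates
   the algorithm; the real expansion emits RTL and handles big-endian
   offsets as shown earlier in this routine.  */
#if 0
#include <stdint.h>

static int
cas_byte_sketch (volatile uint8_t *p, uint8_t oldval, uint8_t newval)
{
  volatile uint32_t *word
    = (volatile uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
  unsigned off = ((uintptr_t) p & 3) * 8;	/* offset in bits */
  uint32_t mask = (uint32_t) 0xff << off;

  for (;;)
    {
      uint32_t val = *word & ~mask;		/* surrounding bytes */
      uint32_t expect = val | ((uint32_t) oldval << off);
      uint32_t desire = val | ((uint32_t) newval << off);
      uint32_t seen = __sync_val_compare_and_swap (word, expect, desire);

      if (seen == expect)
	return 1;				/* swap succeeded */
      if ((seen & mask) != ((uint32_t) oldval << off))
	return 0;				/* datum itself differs */
      /* Only the surrounding bytes changed; retry (strong semantics).  */
    }
}
#endif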
10459 /* Helper function used by "atomic_compare_and_swap" expand
10460 pattern. */
10462 void
10463 arc_expand_compare_and_swap (rtx operands[])
10465 rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
10466 machine_mode mode;
10468 bval = operands[0];
10469 rval = operands[1];
10470 mem = operands[2];
10471 oldval = operands[3];
10472 newval = operands[4];
10473 is_weak = operands[5];
10474 mod_s = operands[6];
10475 mod_f = operands[7];
10476 mode = GET_MODE (mem);
10478 if (reg_overlap_mentioned_p (rval, oldval))
10479 oldval = copy_to_reg (oldval);
10481 if (mode == SImode)
10483 emit_insn (gen_atomic_compare_and_swapsi_1 (rval, mem, oldval, newval,
10484 is_weak, mod_s, mod_f));
10485 x = gen_rtx_REG (CC_Zmode, CC_REG);
10486 x = gen_rtx_EQ (SImode, x, const0_rtx);
10487 emit_insn (gen_rtx_SET (bval, x));
10489 else
10491 arc_expand_compare_and_swap_qh (bval, rval, mem, oldval, newval,
10492 is_weak, mod_s, mod_f);
10496 /* Helper function used by the "atomic_compare_and_swapsi_1"
10497 pattern. */
10499 void
10500 arc_split_compare_and_swap (rtx operands[])
10502 rtx rval, mem, oldval, newval;
10503 machine_mode mode;
10504 enum memmodel mod_s, mod_f;
10505 bool is_weak;
10506 rtx label1, label2, x, cond;
10508 rval = operands[0];
10509 mem = operands[1];
10510 oldval = operands[2];
10511 newval = operands[3];
10512 is_weak = (operands[4] != const0_rtx);
10513 mod_s = (enum memmodel) INTVAL (operands[5]);
10514 mod_f = (enum memmodel) INTVAL (operands[6]);
10515 mode = GET_MODE (mem);
10517 /* ARC atomic ops work only with 32-bit aligned memories. */
10518 gcc_assert (mode == SImode);
10520 arc_pre_atomic_barrier (mod_s);
10522 label1 = NULL_RTX;
10523 if (!is_weak)
10525 label1 = gen_label_rtx ();
10526 emit_label (label1);
10528 label2 = gen_label_rtx ();
10530 /* Load exclusive. */
10531 emit_insn (gen_arc_load_exclusivesi (rval, mem));
10533 /* Check if it is oldval. */
10534 mode = SELECT_CC_MODE (NE, rval, oldval);
10535 cond = gen_rtx_REG (mode, CC_REG);
10536 emit_insn (gen_rtx_SET (cond, gen_rtx_COMPARE (mode, rval, oldval)));
10538 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10539 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10540 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
10541 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10543 /* Exclusively store new item. Store clobbers CC reg. */
10544 emit_insn (gen_arc_store_exclusivesi (mem, newval));
10546 if (!is_weak)
10548 /* Check the result of the store. */
10549 cond = gen_rtx_REG (CC_Zmode, CC_REG);
10550 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10551 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10552 gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
10553 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10556 if (mod_f != MEMMODEL_RELAXED)
10557 emit_label (label2);
10559 arc_post_atomic_barrier (mod_s);
10561 if (mod_f == MEMMODEL_RELAXED)
10562 emit_label (label2);
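/* Source-level view (illustration only) of the weak/strong distinction
   handled above, using GCC's __atomic_compare_exchange_n builtin.  A
   weak CAS may fail spuriously and is normally used inside a loop; the
   strong form is what the extra label1 retry loop above implements.  */
#if 0
static int
cas_usage_sketch (int *p, int expected, int desired)
{
  int e = expected;
  /* Strong CAS: fails only if *p really differed from EXPECTED.  */
  return __atomic_compare_exchange_n (p, &e, desired, 0 /* weak */,
				      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif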
10565 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
10566 to perform. MEM is the memory on which to operate. VAL is the second
10567 operand of the binary operator. BEFORE and AFTER are optional locations to
10568 return the value of MEM either before of after the operation. MODEL_RTX
10569 is a CONST_INT containing the memory model to use. */
10571 void
10572 arc_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
10573 rtx orig_before, rtx orig_after, rtx model_rtx)
10575 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
10576 machine_mode mode = GET_MODE (mem);
10577 rtx label, x, cond;
10578 rtx before = orig_before, after = orig_after;
10580 /* ARC atomic ops work only with 32-bit aligned memories. */
10581 gcc_assert (mode == SImode);
10583 arc_pre_atomic_barrier (model);
10585 label = gen_label_rtx ();
10586 emit_label (label);
10587 label = gen_rtx_LABEL_REF (VOIDmode, label);
10589 if (before == NULL_RTX)
10590 before = gen_reg_rtx (mode);
10592 if (after == NULL_RTX)
10593 after = gen_reg_rtx (mode);
10595 /* Load exclusive. */
10596 emit_insn (gen_arc_load_exclusivesi (before, mem));
10598 switch (code)
10600 case NOT:
10601 x = gen_rtx_AND (mode, before, val);
10602 emit_insn (gen_rtx_SET (after, x));
10603 x = gen_rtx_NOT (mode, after);
10604 emit_insn (gen_rtx_SET (after, x));
10605 break;
10607 case MINUS:
10608 if (CONST_INT_P (val))
10610 val = GEN_INT (-INTVAL (val));
10611 code = PLUS;
10614 /* FALLTHRU. */
10615 default:
10616 x = gen_rtx_fmt_ee (code, mode, before, val);
10617 emit_insn (gen_rtx_SET (after, x));
10618 break;
10621 /* Exclusively store new item. Store clobbers CC reg. */
10622 emit_insn (gen_arc_store_exclusivesi (mem, after));
10624 /* Check the result of the store. */
10625 cond = gen_rtx_REG (CC_Zmode, CC_REG);
10626 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
10627 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
10628 label, pc_rtx);
10629 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
10631 arc_post_atomic_barrier (model);
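/* Source-level counterpart (illustration only) of the BEFORE/AFTER
   outputs above: __atomic_fetch_add returns the memory value before the
   operation, __atomic_add_fetch the value after.  Each builtin below
   performs its own atomic add; they are shown together only to contrast
   the two return conventions.  */
#if 0
static void
fetch_op_sketch (int *p, int v, int *before, int *after)
{
  *before = __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);	/* old value */
  *after = __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST);	/* new value */
}
#endif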
10634 /* Implement TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P. */
10636 static bool
10637 arc_no_speculation_in_delay_slots_p ()
10639 return true;
10642 /* Return a parallel of registers to represent where to find the
10643 register pieces if required, otherwise NULL_RTX. */
10645 static rtx
10646 arc_dwarf_register_span (rtx rtl)
10648 machine_mode mode = GET_MODE (rtl);
10649 unsigned regno;
10650 rtx p;
10652 if (GET_MODE_SIZE (mode) != 8)
10653 return NULL_RTX;
10655 p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
10656 regno = REGNO (rtl);
10657 XVECEXP (p, 0, 0) = gen_rtx_REG (SImode, regno);
10658 XVECEXP (p, 0, 1) = gen_rtx_REG (SImode, regno + 1);
10660 return p;
10663 /* Return true if OP is an acceptable memory operand for ARCompact
10664 16-bit load instructions of MODE.
10666 AV2SHORT: TRUE if the address needs to fit into the new ARCv2 short,
10667 non-scaled instructions.
10669 SCALED: TRUE if address can be scaled. */
10671 bool
10672 compact_memory_operand_p (rtx op, machine_mode mode,
10673 bool av2short, bool scaled)
10675 rtx addr, plus0, plus1;
10676 int size, off;
10678 /* Eliminate non-memory operations. */
10679 if (GET_CODE (op) != MEM)
10680 return 0;
10682 /* .di instructions have no 16-bit form. */
10683 if (MEM_VOLATILE_P (op) && !TARGET_VOLATILE_CACHE_SET)
10684 return false;
10686 if (mode == VOIDmode)
10687 mode = GET_MODE (op);
10689 size = GET_MODE_SIZE (mode);
10691 /* dword operations really put out 2 instructions, so eliminate
10692 them. */
10693 if (size > UNITS_PER_WORD)
10694 return false;
10696 /* Decode the address now. */
10697 addr = XEXP (op, 0);
10698 switch (GET_CODE (addr))
10700 case REG:
10701 return (REGNO (addr) >= FIRST_PSEUDO_REGISTER
10702 || COMPACT_GP_REG_P (REGNO (addr))
10703 || (SP_REG_P (REGNO (addr)) && (size != 2)));
10704 case PLUS:
10705 plus0 = XEXP (addr, 0);
10706 plus1 = XEXP (addr, 1);
10708 if ((GET_CODE (plus0) == REG)
10709 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10710 || COMPACT_GP_REG_P (REGNO (plus0)))
10711 && ((GET_CODE (plus1) == REG)
10712 && ((REGNO (plus1) >= FIRST_PSEUDO_REGISTER)
10713 || COMPACT_GP_REG_P (REGNO (plus1)))))
10715 return !av2short;
10718 if ((GET_CODE (plus0) == REG)
10719 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10720 || (COMPACT_GP_REG_P (REGNO (plus0)) && !av2short)
10721 || (IN_RANGE (REGNO (plus0), 0, 31) && av2short))
10722 && (GET_CODE (plus1) == CONST_INT))
10724 bool valid = false;
10726 off = INTVAL (plus1);
10728 /* Negative offset is not supported in 16-bit load/store insns. */
10729 if (off < 0)
10730 return 0;
10732 /* Only u5 immediates allowed in code density instructions. */
10733 if (av2short)
10735 switch (size)
10737 case 1:
10738 return false;
10739 case 2:
10740 /* This is an ldh_s.x instruction, check the u6
10741 immediate. */
10742 if (COMPACT_GP_REG_P (REGNO (plus0)))
10743 valid = true;
10744 break;
10745 case 4:
10746 /* Only u5 immediates allowed in 32-bit access code
10747 density instructions. */
10748 if (REGNO (plus0) <= 31)
10749 return ((off < 32) && (off % 4 == 0));
10750 break;
10751 default:
10752 return false;
10755 else
10756 if (COMPACT_GP_REG_P (REGNO (plus0)))
10757 valid = true;
10759 if (valid)
10762 switch (size)
10764 case 1:
10765 return (off < 32);
10766 case 2:
10767 /* The 6-bit constant gets shifted to fit the real
10768 5-bit field. Also check the alignment. */
10769 return ((off < 64) && (off % 2 == 0));
10770 case 4:
10771 return ((off < 128) && (off % 4 == 0));
10772 default:
10773 return false;
10778 if (REG_P (plus0) && CONST_INT_P (plus1)
10779 && ((REGNO (plus0) >= FIRST_PSEUDO_REGISTER)
10780 || SP_REG_P (REGNO (plus0)))
10781 && !av2short)
10783 off = INTVAL (plus1);
10784 return ((size != 2) && (off >= 0 && off < 128) && (off % 4 == 0));
10787 if ((GET_CODE (plus0) == MULT)
10788 && (GET_CODE (XEXP (plus0, 0)) == REG)
10789 && ((REGNO (XEXP (plus0, 0)) >= FIRST_PSEUDO_REGISTER)
10790 || COMPACT_GP_REG_P (REGNO (XEXP (plus0, 0))))
10791 && (GET_CODE (plus1) == REG)
10792 && ((REGNO (plus1) >= FIRST_PSEUDO_REGISTER)
10793 || COMPACT_GP_REG_P (REGNO (plus1))))
10794 return scaled;
10795 default:
10796 break ;
10797 /* TODO: 'gp' and 'pcl' are to be supported as base address operands
10798 for 16-bit load instructions. */
10800 return false;
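/* Standalone restatement (illustration only) of the reg+const offset
   ranges accepted above for the classic, non-AV2SHORT 16-bit loads from
   a compact base register: byte offsets below 32, even halfword offsets
   below 64, and word-aligned offsets below 128, each ultimately encoded
   in a scaled 5-bit field.  The helper name is hypothetical.  */
#if 0
static int
compact_offset_ok_sketch (int size, int off)
{
  if (off < 0)
    return 0;
  switch (size)
    {
    case 1:
      return off < 32;
    case 2:
      return off < 64 && (off % 2) == 0;
    case 4:
      return off < 128 && (off % 4) == 0;
    default:
      return 0;
    }
}
#endif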
10803 /* Return the frame pointer value to be backed up in the setjmp buffer. */
10805 static rtx
10806 arc_builtin_setjmp_frame_value (void)
10808 /* We always want to preserve whatever value is currently in the frame
10809 pointer register. For frames that are using the frame pointer the new
10810 value of the frame pointer register will have already been computed
10811 (as part of the prologue). For frames that are not using the frame
10812 pointer it is important that we backup whatever value is in the frame
10813 pointer register, as earlier (more outer) frames may have placed a
10814 value into the frame pointer register. It might be tempting to try
10815 and use `frame_pointer_rtx` here, however, this is not what we want.
10816 For frames that are using the frame pointer this will give the
10817 correct value. However, for frames that are not using the frame
10818 pointer this will still give the value that _would_ have been the
10819 frame pointer value for this frame (if the use of the frame pointer
10820 had not been removed). We really do want the raw frame pointer
10821 register value. */
10822 return gen_raw_REG (Pmode, FRAME_POINTER_REGNUM);
10825 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
10826 anchors for small data: the GP register acts as an anchor in that
10827 case. We also don't want to use them for PC-relative accesses,
10828 where the PC acts as an anchor. Prohibit also TLS symbols to use
10829 anchors. */
10831 static bool
10832 arc_use_anchors_for_symbol_p (const_rtx symbol)
10834 if (SYMBOL_REF_TLS_MODEL (symbol))
10835 return false;
10837 if (flag_pic)
10838 return false;
10840 if (SYMBOL_REF_SMALL_P (symbol))
10841 return false;
10843 return default_use_anchors_for_symbol_p (symbol);
10846 /* Return true if SUBST can't safely replace its equivalent during RA. */
10847 static bool
10848 arc_cannot_substitute_mem_equiv_p (rtx)
10850 /* If SUBST is mem[base+index], the address may not fit the ISA,
10851 thus return true. */
10852 return true;
10855 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
10856 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P arc_use_anchors_for_symbol_p
10858 #undef TARGET_CONSTANT_ALIGNMENT
10859 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
10861 #undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P
10862 #define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P arc_cannot_substitute_mem_equiv_p
10864 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10865 #define TARGET_ASM_TRAMPOLINE_TEMPLATE arc_asm_trampoline_template
10867 struct gcc_target targetm = TARGET_INITIALIZER;
10869 #include "gt-arc.h"