1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
13 any later version.
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "libfuncs.h"
45 #include "diagnostic-core.h"
46 #include "toplev.h"
47 #include "sched-int.h"
48 #include "timevar.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "tm_p.h"
52 #include "hashtab.h"
53 #include "langhooks.h"
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "intl.h"
57 #include "df.h"
58 #include "debug.h"
59 #include "params.h"
60 #include "dbgcnt.h"
61 #include "tm-constrs.h"
62 #include "sel-sched.h"
63 #include "reload.h"
65 /* This is used for communication between ASM_OUTPUT_LABEL and
66 ASM_OUTPUT_LABELREF. */
67 int ia64_asm_output_label = 0;
69 /* Register names for ia64_expand_prologue. */
70 static const char * const ia64_reg_numbers[96] =
71 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
72 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
73 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
74 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
75 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
76 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
77 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
78 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
79 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
80 "r104","r105","r106","r107","r108","r109","r110","r111",
81 "r112","r113","r114","r115","r116","r117","r118","r119",
82 "r120","r121","r122","r123","r124","r125","r126","r127"};
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_input_reg_names[8] =
86 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88 /* ??? These strings could be shared with REGISTER_NAMES. */
89 static const char * const ia64_local_reg_names[80] =
90 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
91 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
92 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
93 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
94 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
95 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
96 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
97 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
98 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
99 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101 /* ??? These strings could be shared with REGISTER_NAMES. */
102 static const char * const ia64_output_reg_names[8] =
103 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
 105 /* Which CPU we are scheduling for.  */
106 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
108 /* Determines whether we run our final scheduling pass or not. We always
109 avoid the normal second scheduling pass. */
110 static int ia64_flag_schedule_insns2;
112 /* Determines whether we run variable tracking in machine dependent
113 reorganization. */
114 static int ia64_flag_var_tracking;
116 /* Variables which are this size or smaller are put in the sdata/sbss
117 sections. */
119 unsigned int ia64_section_threshold;
121 /* The following variable is used by the DFA insn scheduler. The value is
122 TRUE if we do insn bundling instead of insn scheduling. */
123 int bundling_p = 0;
125 enum ia64_frame_regs
127 reg_fp,
128 reg_save_b0,
129 reg_save_pr,
130 reg_save_ar_pfs,
131 reg_save_ar_unat,
132 reg_save_ar_lc,
133 reg_save_gp,
134 number_of_ia64_frame_regs
137 /* Structure to be filled in by ia64_compute_frame_size with register
138 save masks and offsets for the current function. */
140 struct ia64_frame_info
142 HOST_WIDE_INT total_size; /* size of the stack frame, not including
143 the caller's scratch area. */
144 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
145 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
146 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
147 HARD_REG_SET mask; /* mask of saved registers. */
148 unsigned int gr_used_mask; /* mask of registers in use as gr spill
149 registers or long-term scratches. */
150 int n_spilled; /* number of spilled registers. */
151 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
152 int n_input_regs; /* number of input registers used. */
153 int n_local_regs; /* number of local registers used. */
154 int n_output_regs; /* number of output registers used. */
155 int n_rotate_regs; /* number of rotating registers used. */
157 char need_regstk; /* true if a .regstk directive needed. */
158 char initialized; /* true if the data is finalized. */
161 /* Current frame information calculated by ia64_compute_frame_size. */
162 static struct ia64_frame_info current_frame_info;
163 /* The actual registers that are emitted. */
164 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
166 static int ia64_first_cycle_multipass_dfa_lookahead (void);
167 static void ia64_dependencies_evaluation_hook (rtx, rtx);
168 static void ia64_init_dfa_pre_cycle_insn (void);
169 static rtx ia64_dfa_pre_cycle_insn (void);
170 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
171 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static void ia64_h_i_d_extended (void);
174 static void * ia64_alloc_sched_context (void);
175 static void ia64_init_sched_context (void *, bool);
176 static void ia64_set_sched_context (void *);
177 static void ia64_clear_sched_context (void *);
178 static void ia64_free_sched_context (void *);
179 static int ia64_mode_to_int (enum machine_mode);
180 static void ia64_set_sched_flags (spec_info_t);
181 static ds_t ia64_get_insn_spec_ds (rtx);
182 static ds_t ia64_get_insn_checked_ds (rtx);
183 static bool ia64_skip_rtx_p (const_rtx);
184 static int ia64_speculate_insn (rtx, ds_t, rtx *);
185 static bool ia64_needs_block_p (int);
186 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
187 static int ia64_spec_check_p (rtx);
188 static int ia64_spec_check_src_p (rtx);
189 static rtx gen_tls_get_addr (void);
190 static rtx gen_thread_pointer (void);
191 static int find_gr_spill (enum ia64_frame_regs, int);
192 static int next_scratch_gr_reg (void);
193 static void mark_reg_gr_used_mask (rtx, void *);
194 static void ia64_compute_frame_size (HOST_WIDE_INT);
195 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
196 static void finish_spill_pointers (void);
197 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
198 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
199 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
200 static rtx gen_movdi_x (rtx, rtx, rtx);
201 static rtx gen_fr_spill_x (rtx, rtx, rtx);
202 static rtx gen_fr_restore_x (rtx, rtx, rtx);
204 static void ia64_option_override (void);
205 static void ia64_option_default_params (void);
206 static bool ia64_can_eliminate (const int, const int);
207 static enum machine_mode hfa_element_mode (const_tree, bool);
208 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
209 tree, int *, int);
210 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
211 tree, bool);
212 static rtx ia64_function_arg_1 (const CUMULATIVE_ARGS *, enum machine_mode,
213 const_tree, bool, bool);
214 static rtx ia64_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
215 const_tree, bool);
216 static rtx ia64_function_incoming_arg (CUMULATIVE_ARGS *,
217 enum machine_mode, const_tree, bool);
218 static void ia64_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
219 const_tree, bool);
220 static unsigned int ia64_function_arg_boundary (enum machine_mode,
221 const_tree);
222 static bool ia64_function_ok_for_sibcall (tree, tree);
223 static bool ia64_return_in_memory (const_tree, const_tree);
224 static rtx ia64_function_value (const_tree, const_tree, bool);
225 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
226 static bool ia64_function_value_regno_p (const unsigned int);
227 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
228 reg_class_t);
229 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
230 bool);
231 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
232 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
233 static void fix_range (const char *);
234 static bool ia64_handle_option (size_t, const char *, int);
235 static struct machine_function * ia64_init_machine_status (void);
236 static void emit_insn_group_barriers (FILE *);
237 static void emit_all_insn_group_barriers (FILE *);
238 static void final_emit_insn_group_barriers (FILE *);
239 static void emit_predicate_relation_info (void);
240 static void ia64_reorg (void);
241 static bool ia64_in_small_data_p (const_tree);
242 static void process_epilogue (FILE *, rtx, bool, bool);
244 static bool ia64_assemble_integer (rtx, unsigned int, int);
245 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
246 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
247 static void ia64_output_function_end_prologue (FILE *);
249 static int ia64_issue_rate (void);
250 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
251 static void ia64_sched_init (FILE *, int, int);
252 static void ia64_sched_init_global (FILE *, int, int);
253 static void ia64_sched_finish_global (FILE *, int);
254 static void ia64_sched_finish (FILE *, int);
255 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
256 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
257 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
258 static int ia64_variable_issue (FILE *, int, rtx, int);
260 static void ia64_asm_unwind_emit (FILE *, rtx);
261 static void ia64_asm_emit_except_personality (rtx);
262 static void ia64_asm_init_sections (void);
264 static enum unwind_info_type ia64_debug_unwind_info (void);
265 static enum unwind_info_type ia64_except_unwind_info (struct gcc_options *);
267 static struct bundle_state *get_free_bundle_state (void);
268 static void free_bundle_state (struct bundle_state *);
269 static void initiate_bundle_states (void);
270 static void finish_bundle_states (void);
271 static unsigned bundle_state_hash (const void *);
272 static int bundle_state_eq_p (const void *, const void *);
273 static int insert_bundle_state (struct bundle_state *);
274 static void initiate_bundle_state_table (void);
275 static void finish_bundle_state_table (void);
276 static int try_issue_nops (struct bundle_state *, int);
277 static int try_issue_insn (struct bundle_state *, rtx);
278 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
279 static int get_max_pos (state_t);
280 static int get_template (state_t, int);
282 static rtx get_next_important_insn (rtx, rtx);
283 static bool important_for_bundling_p (rtx);
284 static void bundling (FILE *, int, rtx, rtx);
286 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
287 HOST_WIDE_INT, tree);
288 static void ia64_file_start (void);
289 static void ia64_globalize_decl_name (FILE *, tree);
291 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
292 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
293 static section *ia64_select_rtx_section (enum machine_mode, rtx,
294 unsigned HOST_WIDE_INT);
295 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
296 ATTRIBUTE_UNUSED;
297 static unsigned int ia64_section_type_flags (tree, const char *, int);
298 static void ia64_init_libfuncs (void)
299 ATTRIBUTE_UNUSED;
300 static void ia64_hpux_init_libfuncs (void)
301 ATTRIBUTE_UNUSED;
302 static void ia64_sysv4_init_libfuncs (void)
303 ATTRIBUTE_UNUSED;
304 static void ia64_vms_init_libfuncs (void)
305 ATTRIBUTE_UNUSED;
306 static void ia64_soft_fp_init_libfuncs (void)
307 ATTRIBUTE_UNUSED;
308 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
309 ATTRIBUTE_UNUSED;
310 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
311 ATTRIBUTE_UNUSED;
313 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
314 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
315 static void ia64_encode_section_info (tree, rtx, int);
316 static rtx ia64_struct_value_rtx (tree, int);
317 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
318 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
319 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
320 static bool ia64_cannot_force_const_mem (rtx);
321 static const char *ia64_mangle_type (const_tree);
322 static const char *ia64_invalid_conversion (const_tree, const_tree);
323 static const char *ia64_invalid_unary_op (int, const_tree);
324 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
325 static enum machine_mode ia64_c_mode_for_suffix (char);
326 static enum machine_mode ia64_promote_function_mode (const_tree,
327 enum machine_mode,
328 int *,
329 const_tree,
330 int);
331 static void ia64_trampoline_init (rtx, tree, rtx);
332 static void ia64_override_options_after_change (void);
334 static void ia64_dwarf_handle_frame_unspec (const char *, rtx, int);
335 static tree ia64_builtin_decl (unsigned, bool);
337 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
338 static enum machine_mode ia64_get_reg_raw_mode (int regno);
339 static section * ia64_hpux_function_section (tree, enum node_frequency,
340 bool, bool);
342 /* Table of valid machine attributes. */
343 static const struct attribute_spec ia64_attribute_table[] =
345 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
346 { "syscall_linkage", 0, 0, false, true, true, NULL },
347 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
348 #if TARGET_ABI_OPEN_VMS
349 { "common_object", 1, 1, true, false, false, ia64_vms_common_object_attribute},
350 #endif
351 { "version_id", 1, 1, true, false, false,
352 ia64_handle_version_id_attribute },
353 { NULL, 0, 0, false, false, false, NULL }
356 /* Implement overriding of the optimization options. */
357 static const struct default_options ia64_option_optimization_table[] =
359 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
360 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
361 SUBTARGET_OPTIMIZATION_OPTIONS,
362 #endif
363 { OPT_LEVELS_NONE, 0, NULL, 0 }
366 /* Initialize the GCC target structure. */
367 #undef TARGET_ATTRIBUTE_TABLE
368 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
370 #undef TARGET_INIT_BUILTINS
371 #define TARGET_INIT_BUILTINS ia64_init_builtins
373 #undef TARGET_EXPAND_BUILTIN
374 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
376 #undef TARGET_BUILTIN_DECL
377 #define TARGET_BUILTIN_DECL ia64_builtin_decl
379 #undef TARGET_ASM_BYTE_OP
380 #define TARGET_ASM_BYTE_OP "\tdata1\t"
381 #undef TARGET_ASM_ALIGNED_HI_OP
382 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
383 #undef TARGET_ASM_ALIGNED_SI_OP
384 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
385 #undef TARGET_ASM_ALIGNED_DI_OP
386 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
387 #undef TARGET_ASM_UNALIGNED_HI_OP
388 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
389 #undef TARGET_ASM_UNALIGNED_SI_OP
390 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
391 #undef TARGET_ASM_UNALIGNED_DI_OP
392 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
393 #undef TARGET_ASM_INTEGER
394 #define TARGET_ASM_INTEGER ia64_assemble_integer
396 #undef TARGET_OPTION_OVERRIDE
397 #define TARGET_OPTION_OVERRIDE ia64_option_override
398 #undef TARGET_OPTION_OPTIMIZATION_TABLE
399 #define TARGET_OPTION_OPTIMIZATION_TABLE ia64_option_optimization_table
400 #undef TARGET_OPTION_DEFAULT_PARAMS
401 #define TARGET_OPTION_DEFAULT_PARAMS ia64_option_default_params
403 #undef TARGET_ASM_FUNCTION_PROLOGUE
404 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
405 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
406 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
407 #undef TARGET_ASM_FUNCTION_EPILOGUE
408 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
410 #undef TARGET_IN_SMALL_DATA_P
411 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
413 #undef TARGET_SCHED_ADJUST_COST_2
414 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
415 #undef TARGET_SCHED_ISSUE_RATE
416 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
417 #undef TARGET_SCHED_VARIABLE_ISSUE
418 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
419 #undef TARGET_SCHED_INIT
420 #define TARGET_SCHED_INIT ia64_sched_init
421 #undef TARGET_SCHED_FINISH
422 #define TARGET_SCHED_FINISH ia64_sched_finish
423 #undef TARGET_SCHED_INIT_GLOBAL
424 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
425 #undef TARGET_SCHED_FINISH_GLOBAL
426 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
427 #undef TARGET_SCHED_REORDER
428 #define TARGET_SCHED_REORDER ia64_sched_reorder
429 #undef TARGET_SCHED_REORDER2
430 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
432 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
433 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
435 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
436 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
438 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
439 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
440 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
441 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
443 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
444 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
445 ia64_first_cycle_multipass_dfa_lookahead_guard
447 #undef TARGET_SCHED_DFA_NEW_CYCLE
448 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
450 #undef TARGET_SCHED_H_I_D_EXTENDED
451 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
453 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
454 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
456 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
457 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
459 #undef TARGET_SCHED_SET_SCHED_CONTEXT
460 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
462 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
463 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
465 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
466 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
468 #undef TARGET_SCHED_SET_SCHED_FLAGS
469 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
471 #undef TARGET_SCHED_GET_INSN_SPEC_DS
472 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
474 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
475 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
477 #undef TARGET_SCHED_SPECULATE_INSN
478 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
480 #undef TARGET_SCHED_NEEDS_BLOCK_P
481 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
483 #undef TARGET_SCHED_GEN_SPEC_CHECK
484 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
486 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
487 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
488 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
490 #undef TARGET_SCHED_SKIP_RTX_P
491 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
493 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
494 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
495 #undef TARGET_ARG_PARTIAL_BYTES
496 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
497 #undef TARGET_FUNCTION_ARG
498 #define TARGET_FUNCTION_ARG ia64_function_arg
499 #undef TARGET_FUNCTION_INCOMING_ARG
500 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
501 #undef TARGET_FUNCTION_ARG_ADVANCE
502 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
503 #undef TARGET_FUNCTION_ARG_BOUNDARY
504 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
506 #undef TARGET_ASM_OUTPUT_MI_THUNK
507 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
508 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
509 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
511 #undef TARGET_ASM_FILE_START
512 #define TARGET_ASM_FILE_START ia64_file_start
514 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
515 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
517 #undef TARGET_REGISTER_MOVE_COST
518 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
519 #undef TARGET_MEMORY_MOVE_COST
520 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
521 #undef TARGET_RTX_COSTS
522 #define TARGET_RTX_COSTS ia64_rtx_costs
523 #undef TARGET_ADDRESS_COST
524 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
526 #undef TARGET_UNSPEC_MAY_TRAP_P
527 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
529 #undef TARGET_MACHINE_DEPENDENT_REORG
530 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
532 #undef TARGET_ENCODE_SECTION_INFO
533 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
535 #undef TARGET_SECTION_TYPE_FLAGS
536 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
538 #ifdef HAVE_AS_TLS
539 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
540 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
541 #endif
543 #undef TARGET_PROMOTE_FUNCTION_MODE
544 #define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
546 /* ??? Investigate. */
547 #if 0
548 #undef TARGET_PROMOTE_PROTOTYPES
549 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
550 #endif
552 #undef TARGET_FUNCTION_VALUE
553 #define TARGET_FUNCTION_VALUE ia64_function_value
554 #undef TARGET_LIBCALL_VALUE
555 #define TARGET_LIBCALL_VALUE ia64_libcall_value
556 #undef TARGET_FUNCTION_VALUE_REGNO_P
557 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
559 #undef TARGET_STRUCT_VALUE_RTX
560 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
561 #undef TARGET_RETURN_IN_MEMORY
562 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
563 #undef TARGET_SETUP_INCOMING_VARARGS
564 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
565 #undef TARGET_STRICT_ARGUMENT_NAMING
566 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
567 #undef TARGET_MUST_PASS_IN_STACK
568 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
569 #undef TARGET_GET_RAW_RESULT_MODE
570 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
571 #undef TARGET_GET_RAW_ARG_MODE
572 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
574 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
575 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
577 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
578 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ia64_dwarf_handle_frame_unspec
579 #undef TARGET_ASM_UNWIND_EMIT
580 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
581 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
582 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
583 #undef TARGET_ASM_INIT_SECTIONS
584 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
586 #undef TARGET_DEBUG_UNWIND_INFO
587 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
588 #undef TARGET_EXCEPT_UNWIND_INFO
589 #define TARGET_EXCEPT_UNWIND_INFO ia64_except_unwind_info
591 #undef TARGET_SCALAR_MODE_SUPPORTED_P
592 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
593 #undef TARGET_VECTOR_MODE_SUPPORTED_P
594 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
596 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
597 in an order different from the specified program order. */
598 #undef TARGET_RELAXED_ORDERING
599 #define TARGET_RELAXED_ORDERING true
601 #undef TARGET_DEFAULT_TARGET_FLAGS
602 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
603 #undef TARGET_HANDLE_OPTION
604 #define TARGET_HANDLE_OPTION ia64_handle_option
606 #undef TARGET_CANNOT_FORCE_CONST_MEM
607 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
609 #undef TARGET_MANGLE_TYPE
610 #define TARGET_MANGLE_TYPE ia64_mangle_type
612 #undef TARGET_INVALID_CONVERSION
613 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
614 #undef TARGET_INVALID_UNARY_OP
615 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
616 #undef TARGET_INVALID_BINARY_OP
617 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
619 #undef TARGET_C_MODE_FOR_SUFFIX
620 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
622 #undef TARGET_CAN_ELIMINATE
623 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
625 #undef TARGET_TRAMPOLINE_INIT
626 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
628 #undef TARGET_INVALID_WITHIN_DOLOOP
629 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
631 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
632 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
634 #undef TARGET_PREFERRED_RELOAD_CLASS
635 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
637 struct gcc_target targetm = TARGET_INITIALIZER;
639 typedef enum
641 ADDR_AREA_NORMAL, /* normal address area */
642 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
644 ia64_addr_area;
646 static GTY(()) tree small_ident1;
647 static GTY(()) tree small_ident2;
649 static void
650 init_idents (void)
652 if (small_ident1 == 0)
654 small_ident1 = get_identifier ("small");
655 small_ident2 = get_identifier ("__small__");
659 /* Retrieve the address area that has been chosen for the given decl. */
661 static ia64_addr_area
662 ia64_get_addr_area (tree decl)
664 tree model_attr;
666 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
667 if (model_attr)
669 tree id;
671 init_idents ();
672 id = TREE_VALUE (TREE_VALUE (model_attr));
673 if (id == small_ident1 || id == small_ident2)
674 return ADDR_AREA_SMALL;
676 return ADDR_AREA_NORMAL;
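/* Illustrative sketch, not part of the original source: how the "model"
   attribute reaches this code from user source.  Based on the identifier
   comparison in the handler below, a declaration along the lines of

       static int counter __attribute__ ((model (small)));

   attaches a "model" attribute whose argument is the identifier "small"
   (or "__small__"), so ia64_get_addr_area returns ADDR_AREA_SMALL and the
   symbol is placed in the region addressable by "addl" (-2MB < addr < 2MB).  */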
679 static tree
680 ia64_handle_model_attribute (tree *node, tree name, tree args,
681 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
683 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
684 ia64_addr_area area;
685 tree arg, decl = *node;
687 init_idents ();
688 arg = TREE_VALUE (args);
689 if (arg == small_ident1 || arg == small_ident2)
691 addr_area = ADDR_AREA_SMALL;
693 else
695 warning (OPT_Wattributes, "invalid argument of %qE attribute",
696 name);
697 *no_add_attrs = true;
700 switch (TREE_CODE (decl))
702 case VAR_DECL:
703 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
704 == FUNCTION_DECL)
705 && !TREE_STATIC (decl))
707 error_at (DECL_SOURCE_LOCATION (decl),
708 "an address area attribute cannot be specified for "
709 "local variables");
710 *no_add_attrs = true;
712 area = ia64_get_addr_area (decl);
713 if (area != ADDR_AREA_NORMAL && addr_area != area)
715 error ("address area of %q+D conflicts with previous "
716 "declaration", decl);
717 *no_add_attrs = true;
719 break;
721 case FUNCTION_DECL:
722 error_at (DECL_SOURCE_LOCATION (decl),
723 "address area attribute cannot be specified for "
724 "functions");
725 *no_add_attrs = true;
726 break;
728 default:
729 warning (OPT_Wattributes, "%qE attribute ignored",
730 name);
731 *no_add_attrs = true;
732 break;
735 return NULL_TREE;
738 /* The section must have global and overlaid attributes. */
739 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
741 /* Part of the low level implementation of DEC Ada pragma Common_Object which
742 enables the shared use of variables stored in overlaid linker areas
743 corresponding to the use of Fortran COMMON. */
745 static tree
746 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
747 int flags ATTRIBUTE_UNUSED,
748 bool *no_add_attrs)
750 tree decl = *node;
751 tree id, val;
752 if (! DECL_P (decl))
753 abort ();
755 DECL_COMMON (decl) = 1;
756 id = TREE_VALUE (args);
757 if (TREE_CODE (id) == IDENTIFIER_NODE)
758 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
759 else if (TREE_CODE (id) == STRING_CST)
760 val = id;
761 else
763 warning (OPT_Wattributes,
764 "%qE attribute requires a string constant argument", name);
765 *no_add_attrs = true;
766 return NULL_TREE;
768 DECL_SECTION_NAME (decl) = val;
769 return NULL_TREE;
772 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
774 void
775 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
776 unsigned HOST_WIDE_INT size,
777 unsigned int align)
779 tree attr = DECL_ATTRIBUTES (decl);
 781 /* Since the common_object attribute handler sets DECL_SECTION_NAME, check
 782 that before looking up the attribute. */
783 if (DECL_SECTION_NAME (decl) && attr)
784 attr = lookup_attribute ("common_object", attr);
785 else
786 attr = NULL_TREE;
788 if (!attr)
790 /* Code from elfos.h. */
791 fprintf (file, "%s", COMMON_ASM_OP);
792 assemble_name (file, name);
793 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
794 size, align / BITS_PER_UNIT);
796 else
798 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
799 ASM_OUTPUT_LABEL (file, name);
800 ASM_OUTPUT_SKIP (file, size ? size : 1);
804 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
806 void
807 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
808 tree decl)
810 if (!(flags & SECTION_VMS_OVERLAY))
812 default_elf_asm_named_section (name, flags, decl);
813 return;
815 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
816 abort ();
818 if (flags & SECTION_DECLARED)
820 fprintf (asm_out_file, "\t.section\t%s\n", name);
821 return;
824 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
827 static void
828 ia64_encode_addr_area (tree decl, rtx symbol)
830 int flags;
832 flags = SYMBOL_REF_FLAGS (symbol);
833 switch (ia64_get_addr_area (decl))
835 case ADDR_AREA_NORMAL: break;
836 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
837 default: gcc_unreachable ();
839 SYMBOL_REF_FLAGS (symbol) = flags;
842 static void
843 ia64_encode_section_info (tree decl, rtx rtl, int first)
845 default_encode_section_info (decl, rtl, first);
847 /* Careful not to prod global register variables. */
848 if (TREE_CODE (decl) == VAR_DECL
849 && GET_CODE (DECL_RTL (decl)) == MEM
850 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
851 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
852 ia64_encode_addr_area (decl, XEXP (rtl, 0));
855 /* Return 1 if the operands of a move are ok. */
858 ia64_move_ok (rtx dst, rtx src)
860 /* If we're under init_recog_no_volatile, we'll not be able to use
861 memory_operand. So check the code directly and don't worry about
862 the validity of the underlying address, which should have been
863 checked elsewhere anyway. */
864 if (GET_CODE (dst) != MEM)
865 return 1;
866 if (GET_CODE (src) == MEM)
867 return 0;
868 if (register_operand (src, VOIDmode))
869 return 1;
 871 /* Otherwise, this must be a constant, and it must be either 0, 0.0 or 1.0. */
872 if (INTEGRAL_MODE_P (GET_MODE (dst)))
873 return src == const0_rtx;
874 else
875 return satisfies_constraint_G (src);
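/* Illustrative examples for the checks above (a sketch, not part of the
   original source):
     (set (reg) (anything))        -> ok as far as this check goes
     (set (mem) (reg))             -> ok
     (set (mem) (mem))             -> rejected, no memory-to-memory moves
     (set (mem:DI) (const_int 0))  -> ok, zero can come straight from r0
     (set (mem:DI) (const_int 5))  -> rejected, must be forced to a register
   The constraint-G case covers the floating-point constants 0.0 and 1.0
   mentioned in the comment above.  */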
878 /* Return 1 if the operands are ok for a floating point load pair. */
881 ia64_load_pair_ok (rtx dst, rtx src)
883 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
884 return 0;
885 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
886 return 0;
887 switch (GET_CODE (XEXP (src, 0)))
889 case REG:
890 case POST_INC:
891 break;
892 case POST_DEC:
893 return 0;
894 case POST_MODIFY:
896 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
898 if (GET_CODE (adjust) != CONST_INT
899 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
900 return 0;
902 break;
903 default:
904 abort ();
906 return 1;
910 addp4_optimize_ok (rtx op1, rtx op2)
912 return (basereg_operand (op1, GET_MODE(op1)) !=
913 basereg_operand (op2, GET_MODE(op2)));
916 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
917 Return the length of the field, or <= 0 on failure. */
920 ia64_depz_field_mask (rtx rop, rtx rshift)
922 unsigned HOST_WIDE_INT op = INTVAL (rop);
923 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
925 /* Get rid of the zero bits we're shifting in. */
926 op >>= shift;
928 /* We must now have a solid block of 1's at bit 0. */
929 return exact_log2 (op + 1);
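/* Worked example, illustrative and not part of the original source:
   ROP = 0x3f0, RSHIFT = 4.  Shifting out the low zeros gives
   op = 0x3f0 >> 4 = 0x3f, a solid block of six 1s at bit 0, and
   exact_log2 (0x3f + 1) = exact_log2 (0x40) = 6, the field length.
   For ROP = 0x5, RSHIFT = 0, op + 1 = 6 is not a power of two, so
   exact_log2 returns -1 and the mask is rejected.  */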
932 /* Return the TLS model to use for ADDR. */
934 static enum tls_model
935 tls_symbolic_operand_type (rtx addr)
937 enum tls_model tls_kind = TLS_MODEL_NONE;
939 if (GET_CODE (addr) == CONST)
941 if (GET_CODE (XEXP (addr, 0)) == PLUS
942 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
943 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
945 else if (GET_CODE (addr) == SYMBOL_REF)
946 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
948 return tls_kind;
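/* Illustrative examples, not part of the original source, of the two shapes
   handled above:
     (symbol_ref "x")                                -> TLS model of "x"
     (const (plus (symbol_ref "x") (const_int 8)))   -> TLS model of "x"
   Anything else, e.g. a plain (reg ...), yields TLS_MODEL_NONE.  */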
951 /* Return true if X is a constant that is valid for some immediate
952 field in an instruction. */
954 bool
955 ia64_legitimate_constant_p (rtx x)
957 switch (GET_CODE (x))
959 case CONST_INT:
960 case LABEL_REF:
961 return true;
963 case CONST_DOUBLE:
964 if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
965 || GET_MODE (x) == DFmode)
966 return true;
967 return satisfies_constraint_G (x);
969 case CONST:
970 case SYMBOL_REF:
971 /* ??? Short term workaround for PR 28490. We must make the code here
972 match the code in ia64_expand_move and move_operand, even though they
973 are both technically wrong. */
974 if (tls_symbolic_operand_type (x) == 0)
976 HOST_WIDE_INT addend = 0;
977 rtx op = x;
979 if (GET_CODE (op) == CONST
980 && GET_CODE (XEXP (op, 0)) == PLUS
981 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
983 addend = INTVAL (XEXP (XEXP (op, 0), 1));
984 op = XEXP (XEXP (op, 0), 0);
987 if (any_offset_symbol_operand (op, GET_MODE (op))
988 || function_operand (op, GET_MODE (op)))
989 return true;
990 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
991 return (addend & 0x3fff) == 0;
992 return false;
994 return false;
996 case CONST_VECTOR:
998 enum machine_mode mode = GET_MODE (x);
1000 if (mode == V2SFmode)
1001 return satisfies_constraint_Y (x);
1003 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1004 && GET_MODE_SIZE (mode) <= 8);
1007 default:
1008 return false;
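/* Illustrative note, not part of the original source: for the aligned-offset
   case above, assuming X is classified by aligned_offset_symbol_operand,
   (const (plus (symbol_ref X) (const_int 0x4000))) is accepted because
   0x4000 & 0x3fff == 0, while an addend of 0x4001 is rejected.  Only
   addends that are multiples of 0x4000 survive, matching the 14-bit
   low/high split performed in ia64_expand_move and
   ia64_expand_load_address below.  */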
1012 /* Don't allow TLS addresses to get spilled to memory. */
1014 static bool
1015 ia64_cannot_force_const_mem (rtx x)
1017 if (GET_MODE (x) == RFmode)
1018 return true;
1019 return tls_symbolic_operand_type (x) != 0;
1022 /* Expand a symbolic constant load. */
1024 bool
1025 ia64_expand_load_address (rtx dest, rtx src)
1027 gcc_assert (GET_CODE (dest) == REG);
 1029 /* ILP32 mode still loads 64 bits of data from the GOT. This avoids
1030 having to pointer-extend the value afterward. Other forms of address
1031 computation below are also more natural to compute as 64-bit quantities.
1032 If we've been given an SImode destination register, change it. */
1033 if (GET_MODE (dest) != Pmode)
1034 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1035 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1037 if (TARGET_NO_PIC)
1038 return false;
1039 if (small_addr_symbolic_operand (src, VOIDmode))
1040 return false;
1042 if (TARGET_AUTO_PIC)
1043 emit_insn (gen_load_gprel64 (dest, src));
1044 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1045 emit_insn (gen_load_fptr (dest, src));
1046 else if (sdata_symbolic_operand (src, VOIDmode))
1047 emit_insn (gen_load_gprel (dest, src));
1048 else
1050 HOST_WIDE_INT addend = 0;
1051 rtx tmp;
1053 /* We did split constant offsets in ia64_expand_move, and we did try
1054 to keep them split in move_operand, but we also allowed reload to
1055 rematerialize arbitrary constants rather than spill the value to
1056 the stack and reload it. So we have to be prepared here to split
1057 them apart again. */
1058 if (GET_CODE (src) == CONST)
1060 HOST_WIDE_INT hi, lo;
1062 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1063 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1064 hi = hi - lo;
1066 if (lo != 0)
1068 addend = lo;
1069 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1073 tmp = gen_rtx_HIGH (Pmode, src);
1074 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1075 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1077 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
1078 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1080 if (addend)
1082 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1083 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1087 return true;
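/* Worked example of the 14-bit split above, illustrative and not part of the
   original source: for a constant offset of 0x12345,
     lo = ((0x12345 & 0x3fff) ^ 0x2000) - 0x2000
        = (0x2345 ^ 0x2000) - 0x2000
        = 0x345 - 0x2000 = -0x1cbb
     hi = 0x12345 - (-0x1cbb) = 0x14000
   i.e. the low 14 bits are sign-extended into LO (range -0x2000..0x1fff)
   and HI is left a multiple of 0x4000, so the remaining addend can be
   re-applied with a single add after the symbol itself is loaded.  */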
1090 static GTY(()) rtx gen_tls_tga;
1091 static rtx
1092 gen_tls_get_addr (void)
1094 if (!gen_tls_tga)
1095 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1096 return gen_tls_tga;
1099 static GTY(()) rtx thread_pointer_rtx;
1100 static rtx
1101 gen_thread_pointer (void)
1103 if (!thread_pointer_rtx)
1104 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1105 return thread_pointer_rtx;
1108 static rtx
1109 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1110 rtx orig_op1, HOST_WIDE_INT addend)
1112 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1113 rtx orig_op0 = op0;
1114 HOST_WIDE_INT addend_lo, addend_hi;
1116 switch (tls_kind)
1118 case TLS_MODEL_GLOBAL_DYNAMIC:
1119 start_sequence ();
1121 tga_op1 = gen_reg_rtx (Pmode);
1122 emit_insn (gen_load_dtpmod (tga_op1, op1));
1124 tga_op2 = gen_reg_rtx (Pmode);
1125 emit_insn (gen_load_dtprel (tga_op2, op1));
1127 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1128 LCT_CONST, Pmode, 2, tga_op1,
1129 Pmode, tga_op2, Pmode);
1131 insns = get_insns ();
1132 end_sequence ();
1134 if (GET_MODE (op0) != Pmode)
1135 op0 = tga_ret;
1136 emit_libcall_block (insns, op0, tga_ret, op1);
1137 break;
1139 case TLS_MODEL_LOCAL_DYNAMIC:
 1140 /* ??? This isn't the completely proper way to do local-dynamic.
1141 If the call to __tls_get_addr is used only by a single symbol,
1142 then we should (somehow) move the dtprel to the second arg
1143 to avoid the extra add. */
1144 start_sequence ();
1146 tga_op1 = gen_reg_rtx (Pmode);
1147 emit_insn (gen_load_dtpmod (tga_op1, op1));
1149 tga_op2 = const0_rtx;
1151 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1152 LCT_CONST, Pmode, 2, tga_op1,
1153 Pmode, tga_op2, Pmode);
1155 insns = get_insns ();
1156 end_sequence ();
1158 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1159 UNSPEC_LD_BASE);
1160 tmp = gen_reg_rtx (Pmode);
1161 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1163 if (!register_operand (op0, Pmode))
1164 op0 = gen_reg_rtx (Pmode);
1165 if (TARGET_TLS64)
1167 emit_insn (gen_load_dtprel (op0, op1));
1168 emit_insn (gen_adddi3 (op0, tmp, op0));
1170 else
1171 emit_insn (gen_add_dtprel (op0, op1, tmp));
1172 break;
1174 case TLS_MODEL_INITIAL_EXEC:
1175 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1176 addend_hi = addend - addend_lo;
1178 op1 = plus_constant (op1, addend_hi);
1179 addend = addend_lo;
1181 tmp = gen_reg_rtx (Pmode);
1182 emit_insn (gen_load_tprel (tmp, op1));
1184 if (!register_operand (op0, Pmode))
1185 op0 = gen_reg_rtx (Pmode);
1186 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1187 break;
1189 case TLS_MODEL_LOCAL_EXEC:
1190 if (!register_operand (op0, Pmode))
1191 op0 = gen_reg_rtx (Pmode);
1193 op1 = orig_op1;
1194 addend = 0;
1195 if (TARGET_TLS64)
1197 emit_insn (gen_load_tprel (op0, op1));
1198 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1200 else
1201 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1202 break;
1204 default:
1205 gcc_unreachable ();
1208 if (addend)
1209 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1210 orig_op0, 1, OPTAB_DIRECT);
1211 if (orig_op0 == op0)
1212 return NULL_RTX;
1213 if (GET_MODE (orig_op0) == Pmode)
1214 return op0;
1215 return gen_lowpart (GET_MODE (orig_op0), op0);
1219 ia64_expand_move (rtx op0, rtx op1)
1221 enum machine_mode mode = GET_MODE (op0);
1223 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1224 op1 = force_reg (mode, op1);
1226 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1228 HOST_WIDE_INT addend = 0;
1229 enum tls_model tls_kind;
1230 rtx sym = op1;
1232 if (GET_CODE (op1) == CONST
1233 && GET_CODE (XEXP (op1, 0)) == PLUS
1234 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1236 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1237 sym = XEXP (XEXP (op1, 0), 0);
1240 tls_kind = tls_symbolic_operand_type (sym);
1241 if (tls_kind)
1242 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1244 if (any_offset_symbol_operand (sym, mode))
1245 addend = 0;
1246 else if (aligned_offset_symbol_operand (sym, mode))
1248 HOST_WIDE_INT addend_lo, addend_hi;
1250 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1251 addend_hi = addend - addend_lo;
1253 if (addend_lo != 0)
1255 op1 = plus_constant (sym, addend_hi);
1256 addend = addend_lo;
1258 else
1259 addend = 0;
1261 else
1262 op1 = sym;
1264 if (reload_completed)
1266 /* We really should have taken care of this offset earlier. */
1267 gcc_assert (addend == 0);
1268 if (ia64_expand_load_address (op0, op1))
1269 return NULL_RTX;
1272 if (addend)
1274 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1276 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1278 op1 = expand_simple_binop (mode, PLUS, subtarget,
1279 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1280 if (op0 == op1)
1281 return NULL_RTX;
1285 return op1;
1288 /* Split a move from OP1 to OP0 conditional on COND. */
1290 void
1291 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1293 rtx insn, first = get_last_insn ();
1295 emit_move_insn (op0, op1);
1297 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1298 if (INSN_P (insn))
1299 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1300 PATTERN (insn));
1303 /* Split a post-reload TImode or TFmode reference into two DImode
1304 components. This is made extra difficult by the fact that we do
1305 not get any scratch registers to work with, because reload cannot
1306 be prevented from giving us a scratch that overlaps the register
1307 pair involved. So instead, when addressing memory, we tweak the
1308 pointer register up and back down with POST_INCs. Or up and not
1309 back down when we can get away with it.
1311 REVERSED is true when the loads must be done in reversed order
1312 (high word first) for correctness. DEAD is true when the pointer
1313 dies with the second insn we generate and therefore the second
1314 address must not carry a postmodify.
1316 May return an insn which is to be emitted after the moves. */
1318 static rtx
1319 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1321 rtx fixup = 0;
1323 switch (GET_CODE (in))
1325 case REG:
1326 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1327 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1328 break;
1330 case CONST_INT:
1331 case CONST_DOUBLE:
1332 /* Cannot occur reversed. */
1333 gcc_assert (!reversed);
1335 if (GET_MODE (in) != TFmode)
1336 split_double (in, &out[0], &out[1]);
1337 else
1338 /* split_double does not understand how to split a TFmode
1339 quantity into a pair of DImode constants. */
1341 REAL_VALUE_TYPE r;
1342 unsigned HOST_WIDE_INT p[2];
1343 long l[4]; /* TFmode is 128 bits */
1345 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1346 real_to_target (l, &r, TFmode);
1348 if (FLOAT_WORDS_BIG_ENDIAN)
1350 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1351 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1353 else
1355 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1356 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1358 out[0] = GEN_INT (p[0]);
1359 out[1] = GEN_INT (p[1]);
1361 break;
1363 case MEM:
1365 rtx base = XEXP (in, 0);
1366 rtx offset;
1368 switch (GET_CODE (base))
1370 case REG:
1371 if (!reversed)
1373 out[0] = adjust_automodify_address
1374 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1375 out[1] = adjust_automodify_address
1376 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1378 else
1380 /* Reversal requires a pre-increment, which can only
1381 be done as a separate insn. */
1382 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1383 out[0] = adjust_automodify_address
1384 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1385 out[1] = adjust_address (in, DImode, 0);
1387 break;
1389 case POST_INC:
1390 gcc_assert (!reversed && !dead);
1392 /* Just do the increment in two steps. */
1393 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1394 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1395 break;
1397 case POST_DEC:
1398 gcc_assert (!reversed && !dead);
1400 /* Add 8, subtract 24. */
1401 base = XEXP (base, 0);
1402 out[0] = adjust_automodify_address
1403 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1404 out[1] = adjust_automodify_address
1405 (in, DImode,
1406 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1408 break;
1410 case POST_MODIFY:
1411 gcc_assert (!reversed && !dead);
1413 /* Extract and adjust the modification. This case is
1414 trickier than the others, because we might have an
1415 index register, or we might have a combined offset that
1416 doesn't fit a signed 9-bit displacement field. We can
1417 assume the incoming expression is already legitimate. */
1418 offset = XEXP (base, 1);
1419 base = XEXP (base, 0);
1421 out[0] = adjust_automodify_address
1422 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1424 if (GET_CODE (XEXP (offset, 1)) == REG)
1426 /* Can't adjust the postmodify to match. Emit the
1427 original, then a separate addition insn. */
1428 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1429 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1431 else
1433 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1434 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1436 /* Again the postmodify cannot be made to match,
1437 but in this case it's more efficient to get rid
1438 of the postmodify entirely and fix up with an
1439 add insn. */
1440 out[1] = adjust_automodify_address (in, DImode, base, 8);
1441 fixup = gen_adddi3
1442 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1444 else
1446 /* Combined offset still fits in the displacement field.
1447 (We cannot overflow it at the high end.) */
1448 out[1] = adjust_automodify_address
1449 (in, DImode, gen_rtx_POST_MODIFY
1450 (Pmode, base, gen_rtx_PLUS
1451 (Pmode, base,
1452 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1456 break;
1458 default:
1459 gcc_unreachable ();
1461 break;
1464 default:
1465 gcc_unreachable ();
1468 return fixup;
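/* Note on the POST_MODIFY case above, illustrative and not part of the
   original source: the postmodify displacement is a signed 9-bit field,
   -256..255.  Folding the extra -8 into an existing displacement D gives
   D - 8, which stays representable only when D >= -248; that is the
   "INTVAL (...) < -256 + 8" test.  Overflow at the high end is impossible
   because D <= 255 implies D - 8 <= 247.  */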
1471 /* Split a TImode or TFmode move instruction after reload.
1472 This is used by *movtf_internal and *movti_internal. */
1473 void
1474 ia64_split_tmode_move (rtx operands[])
1476 rtx in[2], out[2], insn;
1477 rtx fixup[2];
1478 bool dead = false;
1479 bool reversed = false;
1481 /* It is possible for reload to decide to overwrite a pointer with
1482 the value it points to. In that case we have to do the loads in
1483 the appropriate order so that the pointer is not destroyed too
1484 early. Also we must not generate a postmodify for that second
1485 load, or rws_access_regno will die. */
1486 if (GET_CODE (operands[1]) == MEM
1487 && reg_overlap_mentioned_p (operands[0], operands[1]))
1489 rtx base = XEXP (operands[1], 0);
1490 while (GET_CODE (base) != REG)
1491 base = XEXP (base, 0);
1493 if (REGNO (base) == REGNO (operands[0]))
1494 reversed = true;
1495 dead = true;
1497 /* Another reason to do the moves in reversed order is if the first
1498 element of the target register pair is also the second element of
1499 the source register pair. */
1500 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1501 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1502 reversed = true;
1504 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1505 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1507 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1508 if (GET_CODE (EXP) == MEM \
1509 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1510 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1511 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1512 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1514 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1515 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1516 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1518 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1519 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1520 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1522 if (fixup[0])
1523 emit_insn (fixup[0]);
1524 if (fixup[1])
1525 emit_insn (fixup[1]);
1527 #undef MAYBE_ADD_REG_INC_NOTE
1530 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1531 through memory plus an extra GR scratch register. Except that you can
1532 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1533 SECONDARY_RELOAD_CLASS, but not both.
1535 We got into problems in the first place by allowing a construct like
1536 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1537 This solution attempts to prevent this situation from occurring. When
1538 we see something like the above, we spill the inner register to memory. */
1540 static rtx
1541 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1543 if (GET_CODE (in) == SUBREG
1544 && GET_MODE (SUBREG_REG (in)) == TImode
1545 && GET_CODE (SUBREG_REG (in)) == REG)
1547 rtx memt = assign_stack_temp (TImode, 16, 0);
1548 emit_move_insn (memt, SUBREG_REG (in));
1549 return adjust_address (memt, mode, 0);
1551 else if (force && GET_CODE (in) == REG)
1553 rtx memx = assign_stack_temp (mode, 16, 0);
1554 emit_move_insn (memx, in);
1555 return memx;
1557 else
1558 return in;
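/* Illustrative example, a sketch and not part of the original source: given
   RTL of the form (subreg:XF (reg:TI 300) 0), which the comment above notes
   can arise from a union containing a long double, the code stores the
   TImode register to a 16-byte stack temporary and returns that slot
   re-addressed in XFmode (or RFmode), so later passes see a plain memory
   operand instead of the problematic subreg.  */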
1561 /* Expand the movxf or movrf pattern (MODE says which) with the given
1562 OPERANDS, returning true if the pattern should then invoke
1563 DONE. */
1565 bool
1566 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1568 rtx op0 = operands[0];
1570 if (GET_CODE (op0) == SUBREG)
1571 op0 = SUBREG_REG (op0);
1573 /* We must support XFmode loads into general registers for stdarg/vararg,
1574 unprototyped calls, and a rare case where a long double is passed as
1575 an argument after a float HFA fills the FP registers. We split them into
1576 DImode loads for convenience. We also need to support XFmode stores
1577 for the last case. This case does not happen for stdarg/vararg routines,
1578 because we do a block store to memory of unnamed arguments. */
1580 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1582 rtx out[2];
1584 /* We're hoping to transform everything that deals with XFmode
1585 quantities and GR registers early in the compiler. */
1586 gcc_assert (can_create_pseudo_p ());
1588 /* Struct to register can just use TImode instead. */
1589 if ((GET_CODE (operands[1]) == SUBREG
1590 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1591 || (GET_CODE (operands[1]) == REG
1592 && GR_REGNO_P (REGNO (operands[1]))))
1594 rtx op1 = operands[1];
1596 if (GET_CODE (op1) == SUBREG)
1597 op1 = SUBREG_REG (op1);
1598 else
1599 op1 = gen_rtx_REG (TImode, REGNO (op1));
1601 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1602 return true;
1605 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1607 /* Don't word-swap when reading in the constant. */
1608 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1609 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1610 0, mode));
1611 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1612 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1613 0, mode));
1614 return true;
1617 /* If the quantity is in a register not known to be GR, spill it. */
1618 if (register_operand (operands[1], mode))
1619 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1621 gcc_assert (GET_CODE (operands[1]) == MEM);
1623 /* Don't word-swap when reading in the value. */
1624 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1625 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1627 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1628 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1629 return true;
1632 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1634 /* We're hoping to transform everything that deals with XFmode
1635 quantities and GR registers early in the compiler. */
1636 gcc_assert (can_create_pseudo_p ());
1638 /* Op0 can't be a GR_REG here, as that case is handled above.
1639 If op0 is a register, then we spill op1, so that we now have a
1640 MEM operand. This requires creating an XFmode subreg of a TImode reg
1641 to force the spill. */
1642 if (register_operand (operands[0], mode))
1644 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1645 op1 = gen_rtx_SUBREG (mode, op1, 0);
1646 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1649 else
1651 rtx in[2];
1653 gcc_assert (GET_CODE (operands[0]) == MEM);
1655 /* Don't word-swap when writing out the value. */
1656 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1657 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1659 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1660 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1661 return true;
1665 if (!reload_in_progress && !reload_completed)
1667 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1669 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1671 rtx memt, memx, in = operands[1];
1672 if (CONSTANT_P (in))
1673 in = validize_mem (force_const_mem (mode, in));
1674 if (GET_CODE (in) == MEM)
1675 memt = adjust_address (in, TImode, 0);
1676 else
1678 memt = assign_stack_temp (TImode, 16, 0);
1679 memx = adjust_address (memt, mode, 0);
1680 emit_move_insn (memx, in);
1682 emit_move_insn (op0, memt);
1683 return true;
1686 if (!ia64_move_ok (operands[0], operands[1]))
1687 operands[1] = force_reg (mode, operands[1]);
1690 return false;
1693 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1694 with the expression that holds the compare result (in VOIDmode). */
1696 static GTY(()) rtx cmptf_libfunc;
1698 void
1699 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1701 enum rtx_code code = GET_CODE (*expr);
1702 rtx cmp;
1704 /* If we have a BImode input, then we already have a compare result, and
1705 do not need to emit another comparison. */
1706 if (GET_MODE (*op0) == BImode)
1708 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1709 cmp = *op0;
1711 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
 1712 magic number as its third argument indicating what to do.
1713 The return value is an integer to be compared against zero. */
1714 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1716 enum qfcmp_magic {
1717 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1718 QCMP_UNORD = 2,
1719 QCMP_EQ = 4,
1720 QCMP_LT = 8,
1721 QCMP_GT = 16
1723 int magic;
1724 enum rtx_code ncode;
1725 rtx ret, insns;
1727 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1728 switch (code)
1730 /* 1 = equal, 0 = not equal. Equality operators do
1731 not raise FP_INVALID when given an SNaN operand. */
1732 case EQ: magic = QCMP_EQ; ncode = NE; break;
1733 case NE: magic = QCMP_EQ; ncode = EQ; break;
1734 /* isunordered() from C99. */
1735 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1736 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1737 /* Relational operators raise FP_INVALID when given
1738 an SNaN operand. */
1739 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1740 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1741 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1742 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1743 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1744 Expanders for buneq etc. would have to be added to ia64.md
1745 for this to be useful. */
1746 default: gcc_unreachable ();
1749 start_sequence ();
1751 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1752 *op0, TFmode, *op1, TFmode,
1753 GEN_INT (magic), DImode);
1754 cmp = gen_reg_rtx (BImode);
1755 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1756 gen_rtx_fmt_ee (ncode, BImode,
1757 ret, const0_rtx)));
1759 insns = get_insns ();
1760 end_sequence ();
1762 emit_libcall_block (insns, cmp, cmp,
1763 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1764 code = NE;
1766 else
1768 cmp = gen_reg_rtx (BImode);
1769 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1770 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1771 code = NE;
1774 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1775 *op0 = cmp;
1776 *op1 = const0_rtx;
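/* For illustration, a worked instance of the HP-UX TFmode path above,
   assuming the _U_Qfcmp interface described in the comment before it: for
   "a <= b" the magic argument is QCMP_LT | QCMP_EQ | QCMP_INV
   (8 + 4 + 1 = 13) and the libcall result is tested with NE, roughly

       ret = _U_Qfcmp (a, b, 13);
       cmp = (ret != 0);

   so every comparison collapses to a BImode predicate tested against zero,
   which is what the caller receives back in *EXPR.  */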
1779 /* Generate an integral vector comparison. Return true if the condition has
1780 been reversed, and so the sense of the comparison should be inverted. */
1782 static bool
1783 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1784 rtx dest, rtx op0, rtx op1)
1786 bool negate = false;
1787 rtx x;
1789 /* Canonicalize the comparison to EQ, GT, GTU. */
1790 switch (code)
1792 case EQ:
1793 case GT:
1794 case GTU:
1795 break;
1797 case NE:
1798 case LE:
1799 case LEU:
1800 code = reverse_condition (code);
1801 negate = true;
1802 break;
1804 case GE:
1805 case GEU:
1806 code = reverse_condition (code);
1807 negate = true;
1808 /* FALLTHRU */
1810 case LT:
1811 case LTU:
1812 code = swap_condition (code);
1813 x = op0, op0 = op1, op1 = x;
1814 break;
1816 default:
1817 gcc_unreachable ();
1820 /* Unsigned parallel compare is not supported by the hardware. Play some
1821 tricks to turn this into a signed comparison against 0. */
1822 if (code == GTU)
1824 switch (mode)
1826 case V2SImode:
1828 rtx t1, t2, mask;
1830 /* Subtract (-(INT MAX) - 1) from both operands to make
1831 them signed. */
1832 mask = GEN_INT (0x80000000);
1833 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1834 mask = force_reg (mode, mask);
1835 t1 = gen_reg_rtx (mode);
1836 emit_insn (gen_subv2si3 (t1, op0, mask));
1837 t2 = gen_reg_rtx (mode);
1838 emit_insn (gen_subv2si3 (t2, op1, mask));
1839 op0 = t1;
1840 op1 = t2;
1841 code = GT;
1843 break;
1845 case V8QImode:
1846 case V4HImode:
1847 /* Perform a parallel unsigned saturating subtraction. */
1848 x = gen_reg_rtx (mode);
1849 emit_insn (gen_rtx_SET (VOIDmode, x,
1850 gen_rtx_US_MINUS (mode, op0, op1)));
1852 code = EQ;
1853 op0 = x;
1854 op1 = CONST0_RTX (mode);
1855 negate = !negate;
1856 break;
1858 default:
1859 gcc_unreachable ();
1863 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1864 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1866 return negate;
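/* A sketch of the unsigned-compare trick used above (illustration only):
   for V2SImode, a >u b is rewritten as (a - 0x80000000) >s (b - 0x80000000),
   since subtracting 0x80000000 merely flips the sign bit of each element;
   for V8QImode and V4HImode, a >u b is rewritten as (a -us b) != 0, where
   -us is the unsigned saturating subtraction, and the != is obtained by
   emitting EQ and flipping NEGATE.  */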
1869 /* Emit an integral vector conditional move. */
1871 void
1872 ia64_expand_vecint_cmov (rtx operands[])
1874 enum machine_mode mode = GET_MODE (operands[0]);
1875 enum rtx_code code = GET_CODE (operands[3]);
1876 bool negate;
1877 rtx cmp, x, ot, of;
1879 cmp = gen_reg_rtx (mode);
1880 negate = ia64_expand_vecint_compare (code, mode, cmp,
1881 operands[4], operands[5]);
1883 ot = operands[1+negate];
1884 of = operands[2-negate];
1886 if (ot == CONST0_RTX (mode))
1888 if (of == CONST0_RTX (mode))
1890 emit_move_insn (operands[0], ot);
1891 return;
1894 x = gen_rtx_NOT (mode, cmp);
1895 x = gen_rtx_AND (mode, x, of);
1896 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1898 else if (of == CONST0_RTX (mode))
1900 x = gen_rtx_AND (mode, cmp, ot);
1901 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1903 else
1905 rtx t, f;
1907 t = gen_reg_rtx (mode);
1908 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1909 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1911 f = gen_reg_rtx (mode);
1912 x = gen_rtx_NOT (mode, cmp);
1913 x = gen_rtx_AND (mode, x, operands[2-negate]);
1914 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1916 x = gen_rtx_IOR (mode, t, f);
1917 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
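/* A minimal sketch of the masking done above, assuming CMP holds the
   all-ones/all-zeros result of the element-wise comparison:

       dest = (cmp & op_true) | (~cmp & op_false);

   the two special cases simply drop the AND or the IOR when one of the
   arms is the zero vector.  */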
1921 /* Emit an integral vector min or max operation. Return true if all done. */
1923 bool
1924 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1925 rtx operands[])
1927 rtx xops[6];
1929 /* These four combinations are supported directly. */
1930 if (mode == V8QImode && (code == UMIN || code == UMAX))
1931 return false;
1932 if (mode == V4HImode && (code == SMIN || code == SMAX))
1933 return false;
1935 /* This combination can be implemented with only saturating subtraction. */
1936 if (mode == V4HImode && code == UMAX)
1938 rtx x, tmp = gen_reg_rtx (mode);
1940 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1941 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1943 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1944 return true;
1947 /* Everything else implemented via vector comparisons. */
1948 xops[0] = operands[0];
1949 xops[4] = xops[1] = operands[1];
1950 xops[5] = xops[2] = operands[2];
1952 switch (code)
1954 case UMIN:
1955 code = LTU;
1956 break;
1957 case UMAX:
1958 code = GTU;
1959 break;
1960 case SMIN:
1961 code = LT;
1962 break;
1963 case SMAX:
1964 code = GT;
1965 break;
1966 default:
1967 gcc_unreachable ();
1969 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1971 ia64_expand_vecint_cmov (xops);
1972 return true;
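/* For illustration, the saturating-subtraction form of V4HImode UMAX
   emitted above: umax (a, b) = (a -us b) + b, since the saturating
   difference is a - b when a >= b and zero otherwise.  */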
1975 /* Emit an integral vector unpack operation. */
1977 void
1978 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
1980 enum machine_mode mode = GET_MODE (operands[1]);
1981 rtx (*gen) (rtx, rtx, rtx);
1982 rtx x;
1984 switch (mode)
1986 case V8QImode:
1987 gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
1988 break;
1989 case V4HImode:
1990 gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
1991 break;
1992 default:
1993 gcc_unreachable ();
1996 /* Fill in x with the sign extension of each element in op1. */
1997 if (unsignedp)
1998 x = CONST0_RTX (mode);
1999 else
2001 bool neg;
2003 x = gen_reg_rtx (mode);
2005 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2006 CONST0_RTX (mode));
2007 gcc_assert (!neg);
2010 emit_insn (gen (gen_lowpart (mode, operands[0]), operands[1], x));
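/* A sketch of the widening scheme above (illustration only): each narrow
   element of op1 is interleaved with an extension element from X, which is
   the zero vector for unsigned unpacks and the element-wise (op1 < 0) mask
   (all ones for negative elements) for signed unpacks, so interleaving the
   chosen half yields the zero- or sign-extended double-width elements.  */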
2013 /* Emit an integral vector widening sum operation. */
2015 void
2016 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2018 rtx l, h, x, s;
2019 enum machine_mode wmode, mode;
2020 rtx (*unpack_l) (rtx, rtx, rtx);
2021 rtx (*unpack_h) (rtx, rtx, rtx);
2022 rtx (*plus) (rtx, rtx, rtx);
2024 wmode = GET_MODE (operands[0]);
2025 mode = GET_MODE (operands[1]);
2027 switch (mode)
2029 case V8QImode:
2030 unpack_l = gen_vec_interleave_lowv8qi;
2031 unpack_h = gen_vec_interleave_highv8qi;
2032 plus = gen_addv4hi3;
2033 break;
2034 case V4HImode:
2035 unpack_l = gen_vec_interleave_lowv4hi;
2036 unpack_h = gen_vec_interleave_highv4hi;
2037 plus = gen_addv2si3;
2038 break;
2039 default:
2040 gcc_unreachable ();
2043 /* Fill in x with the sign extension of each element in op1. */
2044 if (unsignedp)
2045 x = CONST0_RTX (mode);
2046 else
2048 bool neg;
2050 x = gen_reg_rtx (mode);
2052 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2053 CONST0_RTX (mode));
2054 gcc_assert (!neg);
2057 l = gen_reg_rtx (wmode);
2058 h = gen_reg_rtx (wmode);
2059 s = gen_reg_rtx (wmode);
2061 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
2062 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
2063 emit_insn (plus (s, l, operands[2]));
2064 emit_insn (plus (operands[0], h, s));
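/* For illustration, the dataflow of the widening sum built above:

       l = widen (low half of op1)      h = widen (high half of op1)
       s = l + op2                      dest = h + s

   i.e. DEST accumulates both widened halves of op1 on top of op2.  */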
2067 void
2068 ia64_expand_widen_mul_v4hi (rtx operands[3], bool unsignedp, bool highp)
2070 rtx l = gen_reg_rtx (V4HImode);
2071 rtx h = gen_reg_rtx (V4HImode);
2072 rtx (*mulhigh)(rtx, rtx, rtx, rtx);
2073 rtx (*interl)(rtx, rtx, rtx);
2075 emit_insn (gen_mulv4hi3 (l, operands[1], operands[2]));
2077 /* For signed, pmpy2.r would appear to match this operation more closely.
2078 However, the vectorizer is more likely to use the LO and HI patterns
2079 in pairs, at which point, with this formulation, the first two insns
2080 of each can be CSEd. */
2081 mulhigh = unsignedp ? gen_pmpyshr2_u : gen_pmpyshr2;
2082 emit_insn (mulhigh (h, operands[1], operands[2], GEN_INT (16)));
2084 interl = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
2085 emit_insn (interl (gen_lowpart (V4HImode, operands[0]), l, h));
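/* A sketch of the widening multiply above (illustration only): L holds the
   low 16 bits of each 16x16 product (mulv4hi3), H holds the high 16 bits
   (pmpyshr2 or pmpyshr2.u with a shift count of 16), and the final
   interleave of the requested half reassembles the 32-bit products.  */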
2088 /* Emit a signed or unsigned V8QI dot product operation. */
2090 void
2091 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2093 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
2095 /* Fill in x1 and x2 with the sign extension of each element. */
2096 if (unsignedp)
2097 x1 = x2 = CONST0_RTX (V8QImode);
2098 else
2100 bool neg;
2102 x1 = gen_reg_rtx (V8QImode);
2103 x2 = gen_reg_rtx (V8QImode);
2105 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
2106 CONST0_RTX (V8QImode));
2107 gcc_assert (!neg);
2108 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
2109 CONST0_RTX (V8QImode));
2110 gcc_assert (!neg);
2113 l1 = gen_reg_rtx (V4HImode);
2114 l2 = gen_reg_rtx (V4HImode);
2115 h1 = gen_reg_rtx (V4HImode);
2116 h2 = gen_reg_rtx (V4HImode);
2118 emit_insn (gen_vec_interleave_lowv8qi
2119 (gen_lowpart (V8QImode, l1), operands[1], x1));
2120 emit_insn (gen_vec_interleave_lowv8qi
2121 (gen_lowpart (V8QImode, l2), operands[2], x2));
2122 emit_insn (gen_vec_interleave_highv8qi
2123 (gen_lowpart (V8QImode, h1), operands[1], x1));
2124 emit_insn (gen_vec_interleave_highv8qi
2125 (gen_lowpart (V8QImode, h2), operands[2], x2));
2127 p1 = gen_reg_rtx (V2SImode);
2128 p2 = gen_reg_rtx (V2SImode);
2129 p3 = gen_reg_rtx (V2SImode);
2130 p4 = gen_reg_rtx (V2SImode);
2131 emit_insn (gen_pmpy2_r (p1, l1, l2));
2132 emit_insn (gen_pmpy2_l (p2, l1, l2));
2133 emit_insn (gen_pmpy2_r (p3, h1, h2));
2134 emit_insn (gen_pmpy2_l (p4, h1, h2));
2136 s1 = gen_reg_rtx (V2SImode);
2137 s2 = gen_reg_rtx (V2SImode);
2138 s3 = gen_reg_rtx (V2SImode);
2139 emit_insn (gen_addv2si3 (s1, p1, p2));
2140 emit_insn (gen_addv2si3 (s2, p3, p4));
2141 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2142 emit_insn (gen_addv2si3 (operands[0], s2, s3));
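/* For illustration, the reduction tree built above: the two V8QI inputs are
   widened into V4HI halves (l1,l2 and h1,h2), pmpy2.r/pmpy2.l form the four
   V2SI partial products p1..p4, and the sums

       s1 = p1 + p2    s2 = p3 + p4    s3 = s1 + op3    dest = s2 + s3

   fold everything onto the accumulator op3.  */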
2145 /* Emit the appropriate sequence for a call. */
2147 void
2148 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2149 int sibcall_p)
2151 rtx insn, b0;
2153 addr = XEXP (addr, 0);
2154 addr = convert_memory_address (DImode, addr);
2155 b0 = gen_rtx_REG (DImode, R_BR (0));
2157 /* ??? Should do this for functions known to bind local too. */
2158 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2160 if (sibcall_p)
2161 insn = gen_sibcall_nogp (addr);
2162 else if (! retval)
2163 insn = gen_call_nogp (addr, b0);
2164 else
2165 insn = gen_call_value_nogp (retval, addr, b0);
2166 insn = emit_call_insn (insn);
2168 else
2170 if (sibcall_p)
2171 insn = gen_sibcall_gp (addr);
2172 else if (! retval)
2173 insn = gen_call_gp (addr, b0);
2174 else
2175 insn = gen_call_value_gp (retval, addr, b0);
2176 insn = emit_call_insn (insn);
2178 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2181 if (sibcall_p)
2182 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2184 if (TARGET_ABI_OPEN_VMS)
2185 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2186 gen_rtx_REG (DImode, GR_REG (25)));
2189 static void
2190 reg_emitted (enum ia64_frame_regs r)
2192 if (emitted_frame_related_regs[r] == 0)
2193 emitted_frame_related_regs[r] = current_frame_info.r[r];
2194 else
2195 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2198 static int
2199 get_reg (enum ia64_frame_regs r)
2201 reg_emitted (r);
2202 return current_frame_info.r[r];
2205 static bool
2206 is_emitted (int regno)
2208 unsigned int r;
2210 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2211 if (emitted_frame_related_regs[r] == regno)
2212 return true;
2213 return false;
2216 void
2217 ia64_reload_gp (void)
2219 rtx tmp;
2221 if (current_frame_info.r[reg_save_gp])
2223 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2225 else
2227 HOST_WIDE_INT offset;
2228 rtx offset_r;
2230 offset = (current_frame_info.spill_cfa_off
2231 + current_frame_info.spill_size);
2232 if (frame_pointer_needed)
2234 tmp = hard_frame_pointer_rtx;
2235 offset = -offset;
2237 else
2239 tmp = stack_pointer_rtx;
2240 offset = current_frame_info.total_size - offset;
2243 offset_r = GEN_INT (offset);
2244 if (satisfies_constraint_I (offset_r))
2245 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2246 else
2248 emit_move_insn (pic_offset_table_rtx, offset_r);
2249 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2250 pic_offset_table_rtx, tmp));
2253 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2256 emit_move_insn (pic_offset_table_rtx, tmp);
2259 void
2260 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2261 rtx scratch_b, int noreturn_p, int sibcall_p)
2263 rtx insn;
2264 bool is_desc = false;
2266 /* If we find we're calling through a register, then we're actually
2267 calling through a descriptor, so load up the values. */
2268 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2270 rtx tmp;
2271 bool addr_dead_p;
2273 /* ??? We are currently constrained to *not* use peep2, because
2274 we can legitimately change the global lifetime of the GP
2275 (in the form of killing where previously live). This is
2276 because a call through a descriptor doesn't use the previous
2277 value of the GP, while a direct call does, and we do not
2278 commit to either form until the split here.
2280 That said, this means that we lack precise life info for
2281 whether ADDR is dead after this call. This is not terribly
2282 important, since we can fix things up essentially for free
2283 with the POST_DEC below, but it's nice to not use it when we
2284 can immediately tell it's not necessary. */
2285 addr_dead_p = ((noreturn_p || sibcall_p
2286 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2287 REGNO (addr)))
2288 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2290 /* Load the code address into scratch_b. */
2291 tmp = gen_rtx_POST_INC (Pmode, addr);
2292 tmp = gen_rtx_MEM (Pmode, tmp);
2293 emit_move_insn (scratch_r, tmp);
2294 emit_move_insn (scratch_b, scratch_r);
2296 /* Load the GP address. If ADDR is not dead here, then we must
2297 revert the change made above via the POST_INCREMENT. */
2298 if (!addr_dead_p)
2299 tmp = gen_rtx_POST_DEC (Pmode, addr);
2300 else
2301 tmp = addr;
2302 tmp = gen_rtx_MEM (Pmode, tmp);
2303 emit_move_insn (pic_offset_table_rtx, tmp);
2305 is_desc = true;
2306 addr = scratch_b;
2309 if (sibcall_p)
2310 insn = gen_sibcall_nogp (addr);
2311 else if (retval)
2312 insn = gen_call_value_nogp (retval, addr, retaddr);
2313 else
2314 insn = gen_call_nogp (addr, retaddr);
2315 emit_call_insn (insn);
2317 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2318 ia64_reload_gp ();
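/* A rough sketch of the descriptor path above (illustration only):

       scratch_r = [addr], 8        ;; code address, post-increment
       scratch_b = scratch_r
       gp        = [addr]           ;; gp field; the post-decrement form
                                    ;; restores ADDR only if it is live
       indirect call through scratch_b

   with ia64_reload_gp emitted afterwards when the GP may have changed.  */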
2321 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2323 This differs from the generic code in that we know about the zero-extending
2324 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2325 also know that ld.acq+cmpxchg.rel equals a full barrier.
2327 The loop we want to generate looks like
2329 cmp_reg = mem;
2330 label:
2331 old_reg = cmp_reg;
2332 new_reg = cmp_reg op val;
2333 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2334 if (cmp_reg != old_reg)
2335 goto label;
2337 Note that we only do the plain load from memory once. Subsequent
2338 iterations use the value loaded by the compare-and-swap pattern. */
2340 void
2341 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2342 rtx old_dst, rtx new_dst)
2344 enum machine_mode mode = GET_MODE (mem);
2345 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2346 enum insn_code icode;
2348 /* Special case for using fetchadd. */
2349 if ((mode == SImode || mode == DImode)
2350 && (code == PLUS || code == MINUS)
2351 && fetchadd_operand (val, mode))
2353 if (code == MINUS)
2354 val = GEN_INT (-INTVAL (val));
2356 if (!old_dst)
2357 old_dst = gen_reg_rtx (mode);
2359 emit_insn (gen_memory_barrier ());
2361 if (mode == SImode)
2362 icode = CODE_FOR_fetchadd_acq_si;
2363 else
2364 icode = CODE_FOR_fetchadd_acq_di;
2365 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2367 if (new_dst)
2369 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2370 true, OPTAB_WIDEN);
2371 if (new_reg != new_dst)
2372 emit_move_insn (new_dst, new_reg);
2374 return;
2377 /* Because of the volatile mem read, we get an ld.acq, which is the
2378 front half of the full barrier. The end half is the cmpxchg.rel. */
2379 gcc_assert (MEM_VOLATILE_P (mem));
2381 old_reg = gen_reg_rtx (DImode);
2382 cmp_reg = gen_reg_rtx (DImode);
2383 label = gen_label_rtx ();
2385 if (mode != DImode)
2387 val = simplify_gen_subreg (DImode, val, mode, 0);
2388 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2390 else
2391 emit_move_insn (cmp_reg, mem);
2393 emit_label (label);
2395 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2396 emit_move_insn (old_reg, cmp_reg);
2397 emit_move_insn (ar_ccv, cmp_reg);
2399 if (old_dst)
2400 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2402 new_reg = cmp_reg;
2403 if (code == NOT)
2405 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2406 true, OPTAB_DIRECT);
2407 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2409 else
2410 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2411 true, OPTAB_DIRECT);
2413 if (mode != DImode)
2414 new_reg = gen_lowpart (mode, new_reg);
2415 if (new_dst)
2416 emit_move_insn (new_dst, new_reg);
2418 switch (mode)
2420 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2421 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2422 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2423 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2424 default:
2425 gcc_unreachable ();
2428 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2430 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
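/* For illustration, the fetchadd fast path taken above: an SImode or DImode
   PLUS/MINUS whose constant is accepted by fetchadd_operand (the small set
   of immediates the fetchadd instruction supports) becomes a memory barrier
   followed by a single fetchadd.acq, with MINUS handled by negating the
   constant first; everything else falls through to the ld.acq/cmpxchg.rel
   loop shown in the comment before this function.  */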
2433 /* Begin the assembly file. */
2435 static void
2436 ia64_file_start (void)
2438 /* Variable tracking should be run after all optimizations which change order
2439 of insns. It also needs a valid CFG. This can't be done in
2440 ia64_option_override, because flag_var_tracking is finalized after
2441 that. */
2442 ia64_flag_var_tracking = flag_var_tracking;
2443 flag_var_tracking = 0;
2445 default_file_start ();
2446 emit_safe_across_calls ();
2449 void
2450 emit_safe_across_calls (void)
2452 unsigned int rs, re;
2453 int out_state;
2455 rs = 1;
2456 out_state = 0;
2457 while (1)
2459 while (rs < 64 && call_used_regs[PR_REG (rs)])
2460 rs++;
2461 if (rs >= 64)
2462 break;
2463 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2464 continue;
2465 if (out_state == 0)
2467 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2468 out_state = 1;
2470 else
2471 fputc (',', asm_out_file);
2472 if (re == rs + 1)
2473 fprintf (asm_out_file, "p%u", rs);
2474 else
2475 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2476 rs = re + 1;
2478 if (out_state)
2479 fputc ('\n', asm_out_file);
2482 /* Globalize a declaration. */
2484 static void
2485 ia64_globalize_decl_name (FILE * stream, tree decl)
2487 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2488 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2489 if (version_attr)
2491 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2492 const char *p = TREE_STRING_POINTER (v);
2493 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2495 targetm.asm_out.globalize_label (stream, name);
2496 if (TREE_CODE (decl) == FUNCTION_DECL)
2497 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2500 /* Helper function for ia64_compute_frame_size: find an appropriate general
2501 register to spill some special register to. SPECIAL_SPILL_MASK contains
2502 bits in GR0 to GR31 that have already been allocated by this routine.
2503 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2505 static int
2506 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2508 int regno;
2510 if (emitted_frame_related_regs[r] != 0)
2512 regno = emitted_frame_related_regs[r];
2513 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2514 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2515 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2516 else if (current_function_is_leaf
2517 && regno >= GR_REG (1) && regno <= GR_REG (31))
2518 current_frame_info.gr_used_mask |= 1 << regno;
2520 return regno;
2523 /* If this is a leaf function, first try an otherwise unused
2524 call-clobbered register. */
2525 if (current_function_is_leaf)
2527 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2528 if (! df_regs_ever_live_p (regno)
2529 && call_used_regs[regno]
2530 && ! fixed_regs[regno]
2531 && ! global_regs[regno]
2532 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2533 && ! is_emitted (regno))
2535 current_frame_info.gr_used_mask |= 1 << regno;
2536 return regno;
2540 if (try_locals)
2542 regno = current_frame_info.n_local_regs;
2543 /* If there is a frame pointer, then we can't use loc79, because
2544 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2545 reg_name switching code in ia64_expand_prologue. */
2546 while (regno < (80 - frame_pointer_needed))
2547 if (! is_emitted (LOC_REG (regno++)))
2549 current_frame_info.n_local_regs = regno;
2550 return LOC_REG (regno - 1);
2554 /* Failed to find a general register to spill to. Must use stack. */
2555 return 0;
2558 /* In order to make for nice schedules, we try to allocate every temporary
2559 to a different register. We must of course stay away from call-saved,
2560 fixed, and global registers. We must also stay away from registers
2561 allocated in current_frame_info.gr_used_mask, since those include regs
2562 used all through the prologue.
2564 Any register allocated here must be used immediately. The idea is to
2565 aid scheduling, not to solve data flow problems. */
2567 static int last_scratch_gr_reg;
2569 static int
2570 next_scratch_gr_reg (void)
2572 int i, regno;
2574 for (i = 0; i < 32; ++i)
2576 regno = (last_scratch_gr_reg + i + 1) & 31;
2577 if (call_used_regs[regno]
2578 && ! fixed_regs[regno]
2579 && ! global_regs[regno]
2580 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2582 last_scratch_gr_reg = regno;
2583 return regno;
2587 /* There must be _something_ available. */
2588 gcc_unreachable ();
2591 /* Helper function for ia64_compute_frame_size, called through
2592 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2594 static void
2595 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2597 unsigned int regno = REGNO (reg);
2598 if (regno < 32)
2600 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2601 for (i = 0; i < n; ++i)
2602 current_frame_info.gr_used_mask |= 1 << (regno + i);
2607 /* Compute the size of the current function's frame and record it in
2608 current_frame_info; nothing is returned. SIZE is the number of bytes of
2609 space needed for local variables. */
2611 static void
2612 ia64_compute_frame_size (HOST_WIDE_INT size)
2614 HOST_WIDE_INT total_size;
2615 HOST_WIDE_INT spill_size = 0;
2616 HOST_WIDE_INT extra_spill_size = 0;
2617 HOST_WIDE_INT pretend_args_size;
2618 HARD_REG_SET mask;
2619 int n_spilled = 0;
2620 int spilled_gr_p = 0;
2621 int spilled_fr_p = 0;
2622 unsigned int regno;
2623 int min_regno;
2624 int max_regno;
2625 int i;
2627 if (current_frame_info.initialized)
2628 return;
2630 memset (&current_frame_info, 0, sizeof current_frame_info);
2631 CLEAR_HARD_REG_SET (mask);
2633 /* Don't allocate scratches to the return register. */
2634 diddle_return_value (mark_reg_gr_used_mask, NULL);
2636 /* Don't allocate scratches to the EH scratch registers. */
2637 if (cfun->machine->ia64_eh_epilogue_sp)
2638 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2639 if (cfun->machine->ia64_eh_epilogue_bsp)
2640 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2642 /* Find the size of the register stack frame. We have only 80 local
2643 registers, because we reserve 8 for the inputs and 8 for the
2644 outputs. */
2646 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2647 since we'll be adjusting that down later. */
2648 regno = LOC_REG (78) + ! frame_pointer_needed;
2649 for (; regno >= LOC_REG (0); regno--)
2650 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2651 break;
2652 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2654 /* For functions marked with the syscall_linkage attribute, we must mark
2655 all eight input registers as in use, so that locals aren't visible to
2656 the caller. */
2658 if (cfun->machine->n_varargs > 0
2659 || lookup_attribute ("syscall_linkage",
2660 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2661 current_frame_info.n_input_regs = 8;
2662 else
2664 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2665 if (df_regs_ever_live_p (regno))
2666 break;
2667 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2670 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2671 if (df_regs_ever_live_p (regno))
2672 break;
2673 i = regno - OUT_REG (0) + 1;
2675 #ifndef PROFILE_HOOK
2676 /* When -p profiling, we need one output register for the mcount argument.
2677 Likewise for -a profiling for the bb_init_func argument. For -ax
2678 profiling, we need two output registers for the two bb_init_trace_func
2679 arguments. */
2680 if (crtl->profile)
2681 i = MAX (i, 1);
2682 #endif
2683 current_frame_info.n_output_regs = i;
2685 /* ??? No rotating register support yet. */
2686 current_frame_info.n_rotate_regs = 0;
2688 /* Discover which registers need spilling, and how much room that
2689 will take. Begin with floating point and general registers,
2690 which will always wind up on the stack. */
2692 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2693 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2695 SET_HARD_REG_BIT (mask, regno);
2696 spill_size += 16;
2697 n_spilled += 1;
2698 spilled_fr_p = 1;
2701 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2702 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2704 SET_HARD_REG_BIT (mask, regno);
2705 spill_size += 8;
2706 n_spilled += 1;
2707 spilled_gr_p = 1;
2710 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2711 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2713 SET_HARD_REG_BIT (mask, regno);
2714 spill_size += 8;
2715 n_spilled += 1;
2718 /* Now come all special registers that might get saved in other
2719 general registers. */
2721 if (frame_pointer_needed)
2723 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2724 /* If we did not get a register, then we take LOC79. This is guaranteed
2725 to be free, even if regs_ever_live is already set, because this is
2726 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2727 as we don't count loc79 above. */
2728 if (current_frame_info.r[reg_fp] == 0)
2730 current_frame_info.r[reg_fp] = LOC_REG (79);
2731 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2735 if (! current_function_is_leaf)
2737 /* Emit a save of BR0 if we call other functions. Do this even
2738 if this function doesn't return, as EH depends on this to be
2739 able to unwind the stack. */
2740 SET_HARD_REG_BIT (mask, BR_REG (0));
2742 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2743 if (current_frame_info.r[reg_save_b0] == 0)
2745 extra_spill_size += 8;
2746 n_spilled += 1;
2749 /* Similarly for ar.pfs. */
2750 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2751 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2752 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2754 extra_spill_size += 8;
2755 n_spilled += 1;
2758 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2759 registers are clobbered, so we fall back to the stack. */
2760 current_frame_info.r[reg_save_gp]
2761 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2762 if (current_frame_info.r[reg_save_gp] == 0)
2764 SET_HARD_REG_BIT (mask, GR_REG (1));
2765 spill_size += 8;
2766 n_spilled += 1;
2769 else
2771 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2773 SET_HARD_REG_BIT (mask, BR_REG (0));
2774 extra_spill_size += 8;
2775 n_spilled += 1;
2778 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2780 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2781 current_frame_info.r[reg_save_ar_pfs]
2782 = find_gr_spill (reg_save_ar_pfs, 1);
2783 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2785 extra_spill_size += 8;
2786 n_spilled += 1;
2791 /* Unwind descriptor hackery: things are most efficient if we allocate
2792 consecutive GR save registers for RP, PFS, FP in that order. However,
2793 it is absolutely critical that FP get the only hard register that's
2794 guaranteed to be free, so we allocated it first. If all three did
2795 happen to be allocated hard regs, and are consecutive, rearrange them
2796 into the preferred order now.
2798 If we have already emitted code for any of those registers,
2799 then it's already too late to change. */
2800 min_regno = MIN (current_frame_info.r[reg_fp],
2801 MIN (current_frame_info.r[reg_save_b0],
2802 current_frame_info.r[reg_save_ar_pfs]));
2803 max_regno = MAX (current_frame_info.r[reg_fp],
2804 MAX (current_frame_info.r[reg_save_b0],
2805 current_frame_info.r[reg_save_ar_pfs]));
2806 if (min_regno > 0
2807 && min_regno + 2 == max_regno
2808 && (current_frame_info.r[reg_fp] == min_regno + 1
2809 || current_frame_info.r[reg_save_b0] == min_regno + 1
2810 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2811 && (emitted_frame_related_regs[reg_save_b0] == 0
2812 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2813 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2814 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2815 && (emitted_frame_related_regs[reg_fp] == 0
2816 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2818 current_frame_info.r[reg_save_b0] = min_regno;
2819 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2820 current_frame_info.r[reg_fp] = min_regno + 2;
2823 /* See if we need to store the predicate register block. */
2824 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2825 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2826 break;
2827 if (regno <= PR_REG (63))
2829 SET_HARD_REG_BIT (mask, PR_REG (0));
2830 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2831 if (current_frame_info.r[reg_save_pr] == 0)
2833 extra_spill_size += 8;
2834 n_spilled += 1;
2837 /* ??? Mark them all as used so that register renaming and such
2838 are free to use them. */
2839 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2840 df_set_regs_ever_live (regno, true);
2843 /* If we're forced to use st8.spill, we're forced to save and restore
2844 ar.unat as well. The check for existing liveness allows inline asm
2845 to touch ar.unat. */
2846 if (spilled_gr_p || cfun->machine->n_varargs
2847 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2849 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2850 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2851 current_frame_info.r[reg_save_ar_unat]
2852 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2853 if (current_frame_info.r[reg_save_ar_unat] == 0)
2855 extra_spill_size += 8;
2856 n_spilled += 1;
2860 if (df_regs_ever_live_p (AR_LC_REGNUM))
2862 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2863 current_frame_info.r[reg_save_ar_lc]
2864 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2865 if (current_frame_info.r[reg_save_ar_lc] == 0)
2867 extra_spill_size += 8;
2868 n_spilled += 1;
2872 /* If we have an odd number of words of pretend arguments written to
2873 the stack, then the FR save area will be unaligned. We round the
2874 size of this area up to keep things 16 byte aligned. */
2875 if (spilled_fr_p)
2876 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2877 else
2878 pretend_args_size = crtl->args.pretend_args_size;
2880 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2881 + crtl->outgoing_args_size);
2882 total_size = IA64_STACK_ALIGN (total_size);
2884 /* We always use the 16-byte scratch area provided by the caller, but
2885 if we are a leaf function, there's no one to which we need to provide
2886 a scratch area. */
2887 if (current_function_is_leaf)
2888 total_size = MAX (0, total_size - 16);
2890 current_frame_info.total_size = total_size;
2891 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2892 current_frame_info.spill_size = spill_size;
2893 current_frame_info.extra_spill_size = extra_spill_size;
2894 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2895 current_frame_info.n_spilled = n_spilled;
2896 current_frame_info.initialized = reload_completed;
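/* A hypothetical worked example of the size computation above, assuming
   IA64_STACK_ALIGN rounds up to a 16-byte multiple: with spill_size = 32,
   extra_spill_size = 8, size = 40, no pretend args and 16 bytes of outgoing
   args, total_size = ALIGN (32 + 8 + 40 + 0 + 16) = 96; a leaf function
   then drops the caller-provided 16-byte scratch area, leaving 80.  */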
2899 /* Worker function for TARGET_CAN_ELIMINATE. */
2901 bool
2902 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2904 return (to == BR_REG (0) ? current_function_is_leaf : true);
2907 /* Compute the initial difference between the specified pair of registers. */
2909 HOST_WIDE_INT
2910 ia64_initial_elimination_offset (int from, int to)
2912 HOST_WIDE_INT offset;
2914 ia64_compute_frame_size (get_frame_size ());
2915 switch (from)
2917 case FRAME_POINTER_REGNUM:
2918 switch (to)
2920 case HARD_FRAME_POINTER_REGNUM:
2921 if (current_function_is_leaf)
2922 offset = -current_frame_info.total_size;
2923 else
2924 offset = -(current_frame_info.total_size
2925 - crtl->outgoing_args_size - 16);
2926 break;
2928 case STACK_POINTER_REGNUM:
2929 if (current_function_is_leaf)
2930 offset = 0;
2931 else
2932 offset = 16 + crtl->outgoing_args_size;
2933 break;
2935 default:
2936 gcc_unreachable ();
2938 break;
2940 case ARG_POINTER_REGNUM:
2941 /* Arguments start above the 16 byte save area, unless stdarg,
2942 in which case we store through the 16 byte save area. */
2943 switch (to)
2945 case HARD_FRAME_POINTER_REGNUM:
2946 offset = 16 - crtl->args.pretend_args_size;
2947 break;
2949 case STACK_POINTER_REGNUM:
2950 offset = (current_frame_info.total_size
2951 + 16 - crtl->args.pretend_args_size);
2952 break;
2954 default:
2955 gcc_unreachable ();
2957 break;
2959 default:
2960 gcc_unreachable ();
2963 return offset;
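/* A hypothetical example of the elimination arithmetic above: with
   total_size = 96 and no pretend arguments, the ARG_POINTER to
   STACK_POINTER offset is 96 + 16 = 112, i.e. incoming arguments sit
   112 bytes above the adjusted stack pointer.  */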
2966 /* If there are more than a trivial number of register spills, we use
2967 two interleaved iterators so that we can get two memory references
2968 per insn group.
2970 In order to simplify things in the prologue and epilogue expanders,
2971 we use helper functions to fix up the memory references after the
2972 fact with the appropriate offsets to a POST_MODIFY memory mode.
2973 The following data structure tracks the state of the two iterators
2974 while insns are being emitted. */
2976 struct spill_fill_data
2978 rtx init_after; /* point at which to emit initializations */
2979 rtx init_reg[2]; /* initial base register */
2980 rtx iter_reg[2]; /* the iterator registers */
2981 rtx *prev_addr[2]; /* address of last memory use */
2982 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2983 HOST_WIDE_INT prev_off[2]; /* last offset */
2984 int n_iter; /* number of iterators in use */
2985 int next_iter; /* next iterator to use */
2986 unsigned int save_gr_used_mask;
2989 static struct spill_fill_data spill_fill_data;
2991 static void
2992 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2994 int i;
2996 spill_fill_data.init_after = get_last_insn ();
2997 spill_fill_data.init_reg[0] = init_reg;
2998 spill_fill_data.init_reg[1] = init_reg;
2999 spill_fill_data.prev_addr[0] = NULL;
3000 spill_fill_data.prev_addr[1] = NULL;
3001 spill_fill_data.prev_insn[0] = NULL;
3002 spill_fill_data.prev_insn[1] = NULL;
3003 spill_fill_data.prev_off[0] = cfa_off;
3004 spill_fill_data.prev_off[1] = cfa_off;
3005 spill_fill_data.next_iter = 0;
3006 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3008 spill_fill_data.n_iter = 1 + (n_spills > 2);
3009 for (i = 0; i < spill_fill_data.n_iter; ++i)
3011 int regno = next_scratch_gr_reg ();
3012 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3013 current_frame_info.gr_used_mask |= 1 << regno;
3017 static void
3018 finish_spill_pointers (void)
3020 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3023 static rtx
3024 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3026 int iter = spill_fill_data.next_iter;
3027 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3028 rtx disp_rtx = GEN_INT (disp);
3029 rtx mem;
3031 if (spill_fill_data.prev_addr[iter])
3033 if (satisfies_constraint_N (disp_rtx))
3035 *spill_fill_data.prev_addr[iter]
3036 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3037 gen_rtx_PLUS (DImode,
3038 spill_fill_data.iter_reg[iter],
3039 disp_rtx));
3040 add_reg_note (spill_fill_data.prev_insn[iter],
3041 REG_INC, spill_fill_data.iter_reg[iter]);
3043 else
3045 /* ??? Could use register post_modify for loads. */
3046 if (!satisfies_constraint_I (disp_rtx))
3048 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3049 emit_move_insn (tmp, disp_rtx);
3050 disp_rtx = tmp;
3052 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3053 spill_fill_data.iter_reg[iter], disp_rtx));
3056 /* Micro-optimization: if we've created a frame pointer, it's at
3057 CFA 0, which may allow the real iterator to be initialized lower,
3058 slightly increasing parallelism. Also, if there are few saves
3059 it may eliminate the iterator entirely. */
3060 else if (disp == 0
3061 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3062 && frame_pointer_needed)
3064 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3065 set_mem_alias_set (mem, get_varargs_alias_set ());
3066 return mem;
3068 else
3070 rtx seq, insn;
3072 if (disp == 0)
3073 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3074 spill_fill_data.init_reg[iter]);
3075 else
3077 start_sequence ();
3079 if (!satisfies_constraint_I (disp_rtx))
3081 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3082 emit_move_insn (tmp, disp_rtx);
3083 disp_rtx = tmp;
3086 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3087 spill_fill_data.init_reg[iter],
3088 disp_rtx));
3090 seq = get_insns ();
3091 end_sequence ();
3094 /* Careful for being the first insn in a sequence. */
3095 if (spill_fill_data.init_after)
3096 insn = emit_insn_after (seq, spill_fill_data.init_after);
3097 else
3099 rtx first = get_insns ();
3100 if (first)
3101 insn = emit_insn_before (seq, first);
3102 else
3103 insn = emit_insn (seq);
3105 spill_fill_data.init_after = insn;
3108 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3110 /* ??? Not all of the spills are for varargs, but some of them are.
3111 The rest of the spills belong in an alias set of their own. But
3112 it doesn't actually hurt to include them here. */
3113 set_mem_alias_set (mem, get_varargs_alias_set ());
3115 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3116 spill_fill_data.prev_off[iter] = cfa_off;
3118 if (++iter >= spill_fill_data.n_iter)
3119 iter = 0;
3120 spill_fill_data.next_iter = iter;
3122 return mem;
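/* A sketch of the iterator rewriting above (illustration only): the first
   reference through an iterator materializes iter = base + disp (or a
   plain copy when disp is 0), and each later reference patches the previous
   MEM into a POST_MODIFY that advances the iterator by the distance to the
   new slot, so with two iterators interleaved the spill sequence gets two
   post-incremented memory references per insn group.  */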
3125 static void
3126 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3127 rtx frame_reg)
3129 int iter = spill_fill_data.next_iter;
3130 rtx mem, insn;
3132 mem = spill_restore_mem (reg, cfa_off);
3133 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3134 spill_fill_data.prev_insn[iter] = insn;
3136 if (frame_reg)
3138 rtx base;
3139 HOST_WIDE_INT off;
3141 RTX_FRAME_RELATED_P (insn) = 1;
3143 /* Don't even pretend that the unwind code can intuit its way
3144 through a pair of interleaved post_modify iterators. Just
3145 provide the correct answer. */
3147 if (frame_pointer_needed)
3149 base = hard_frame_pointer_rtx;
3150 off = - cfa_off;
3152 else
3154 base = stack_pointer_rtx;
3155 off = current_frame_info.total_size - cfa_off;
3158 add_reg_note (insn, REG_CFA_OFFSET,
3159 gen_rtx_SET (VOIDmode,
3160 gen_rtx_MEM (GET_MODE (reg),
3161 plus_constant (base, off)),
3162 frame_reg));
3166 static void
3167 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3169 int iter = spill_fill_data.next_iter;
3170 rtx insn;
3172 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3173 GEN_INT (cfa_off)));
3174 spill_fill_data.prev_insn[iter] = insn;
3177 /* Wrapper functions that discards the CONST_INT spill offset. These
3178 exist so that we can give gr_spill/gr_fill the offset they need and
3179 use a consistent function interface. */
3181 static rtx
3182 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3184 return gen_movdi (dest, src);
3187 static rtx
3188 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3190 return gen_fr_spill (dest, src);
3193 static rtx
3194 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3196 return gen_fr_restore (dest, src);
3199 /* Called after register allocation to add any instructions needed for the
3200 prologue. Using a prologue insn is favored compared to putting all of the
3201 instructions in output_function_prologue(), since it allows the scheduler
3202 to intermix instructions with the saves of the caller saved registers. In
3203 some cases, it might be necessary to emit a barrier instruction as the last
3204 insn to prevent such scheduling.
3206 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3207 so that the debug info generation code can handle them properly.
3209 The register save area is laid out like so:
3210 cfa+16
3211 [ varargs spill area ]
3212 [ fr register spill area ]
3213 [ br register spill area ]
3214 [ ar register spill area ]
3215 [ pr register spill area ]
3216 [ gr register spill area ] */
3218 /* ??? Get inefficient code when the frame size is larger than can fit in an
3219 adds instruction. */
3221 void
3222 ia64_expand_prologue (void)
3224 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3225 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3226 rtx reg, alt_reg;
3228 ia64_compute_frame_size (get_frame_size ());
3229 last_scratch_gr_reg = 15;
3231 if (flag_stack_usage)
3232 current_function_static_stack_size = current_frame_info.total_size;
3234 if (dump_file)
3236 fprintf (dump_file, "ia64 frame related registers "
3237 "recorded in current_frame_info.r[]:\n");
3238 #define PRINTREG(a) if (current_frame_info.r[a]) \
3239 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3240 PRINTREG(reg_fp);
3241 PRINTREG(reg_save_b0);
3242 PRINTREG(reg_save_pr);
3243 PRINTREG(reg_save_ar_pfs);
3244 PRINTREG(reg_save_ar_unat);
3245 PRINTREG(reg_save_ar_lc);
3246 PRINTREG(reg_save_gp);
3247 #undef PRINTREG
3250 /* If there is no epilogue, then we don't need some prologue insns.
3251 We need to avoid emitting the dead prologue insns, because flow
3252 will complain about them. */
3253 if (optimize)
3255 edge e;
3256 edge_iterator ei;
3258 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3259 if ((e->flags & EDGE_FAKE) == 0
3260 && (e->flags & EDGE_FALLTHRU) != 0)
3261 break;
3262 epilogue_p = (e != NULL);
3264 else
3265 epilogue_p = 1;
3267 /* Set the local, input, and output register names. We need to do this
3268 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3269 half. If we use in/loc/out register names, then we get assembler errors
3270 in crtn.S because there is no alloc insn or regstk directive in there. */
3271 if (! TARGET_REG_NAMES)
3273 int inputs = current_frame_info.n_input_regs;
3274 int locals = current_frame_info.n_local_regs;
3275 int outputs = current_frame_info.n_output_regs;
3277 for (i = 0; i < inputs; i++)
3278 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3279 for (i = 0; i < locals; i++)
3280 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3281 for (i = 0; i < outputs; i++)
3282 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3285 /* Set the frame pointer register name. The regnum is logically loc79,
3286 but of course we'll not have allocated that many locals. Rather than
3287 worrying about renumbering the existing rtxs, we adjust the name. */
3288 /* ??? This code means that we can never use one local register when
3289 there is a frame pointer. loc79 gets wasted in this case, as it is
3290 renamed to a register that will never be used. See also the try_locals
3291 code in find_gr_spill. */
3292 if (current_frame_info.r[reg_fp])
3294 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3295 reg_names[HARD_FRAME_POINTER_REGNUM]
3296 = reg_names[current_frame_info.r[reg_fp]];
3297 reg_names[current_frame_info.r[reg_fp]] = tmp;
3300 /* We don't need an alloc instruction if we've used no outputs or locals. */
3301 if (current_frame_info.n_local_regs == 0
3302 && current_frame_info.n_output_regs == 0
3303 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3304 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3306 /* If there is no alloc, but there are input registers used, then we
3307 need a .regstk directive. */
3308 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3309 ar_pfs_save_reg = NULL_RTX;
3311 else
3313 current_frame_info.need_regstk = 0;
3315 if (current_frame_info.r[reg_save_ar_pfs])
3317 regno = current_frame_info.r[reg_save_ar_pfs];
3318 reg_emitted (reg_save_ar_pfs);
3320 else
3321 regno = next_scratch_gr_reg ();
3322 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3324 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3325 GEN_INT (current_frame_info.n_input_regs),
3326 GEN_INT (current_frame_info.n_local_regs),
3327 GEN_INT (current_frame_info.n_output_regs),
3328 GEN_INT (current_frame_info.n_rotate_regs)));
3329 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3332 /* Set up frame pointer, stack pointer, and spill iterators. */
3334 n_varargs = cfun->machine->n_varargs;
3335 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3336 stack_pointer_rtx, 0);
3338 if (frame_pointer_needed)
3340 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3341 RTX_FRAME_RELATED_P (insn) = 1;
3343 /* Force the unwind info to recognize this as defining a new CFA,
3344 rather than some temp register setup. */
3345 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3348 if (current_frame_info.total_size != 0)
3350 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3351 rtx offset;
3353 if (satisfies_constraint_I (frame_size_rtx))
3354 offset = frame_size_rtx;
3355 else
3357 regno = next_scratch_gr_reg ();
3358 offset = gen_rtx_REG (DImode, regno);
3359 emit_move_insn (offset, frame_size_rtx);
3362 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3363 stack_pointer_rtx, offset));
3365 if (! frame_pointer_needed)
3367 RTX_FRAME_RELATED_P (insn) = 1;
3368 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3369 gen_rtx_SET (VOIDmode,
3370 stack_pointer_rtx,
3371 gen_rtx_PLUS (DImode,
3372 stack_pointer_rtx,
3373 frame_size_rtx)));
3376 /* ??? At this point we must generate a magic insn that appears to
3377 modify the stack pointer, the frame pointer, and all spill
3378 iterators. This would allow the most scheduling freedom. For
3379 now, just hard stop. */
3380 emit_insn (gen_blockage ());
3383 /* Must copy out ar.unat before doing any integer spills. */
3384 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3386 if (current_frame_info.r[reg_save_ar_unat])
3388 ar_unat_save_reg
3389 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3390 reg_emitted (reg_save_ar_unat);
3392 else
3394 alt_regno = next_scratch_gr_reg ();
3395 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3396 current_frame_info.gr_used_mask |= 1 << alt_regno;
3399 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3400 insn = emit_move_insn (ar_unat_save_reg, reg);
3401 if (current_frame_info.r[reg_save_ar_unat])
3403 RTX_FRAME_RELATED_P (insn) = 1;
3404 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3407 /* Even if we're not going to generate an epilogue, we still
3408 need to save the register so that EH works. */
3409 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3410 emit_insn (gen_prologue_use (ar_unat_save_reg));
3412 else
3413 ar_unat_save_reg = NULL_RTX;
3415 /* Spill all varargs registers. Do this before spilling any GR registers,
3416 since we want the UNAT bits for the GR registers to override the UNAT
3417 bits from varargs, which we don't care about. */
3419 cfa_off = -16;
3420 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3422 reg = gen_rtx_REG (DImode, regno);
3423 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3426 /* Locate the bottom of the register save area. */
3427 cfa_off = (current_frame_info.spill_cfa_off
3428 + current_frame_info.spill_size
3429 + current_frame_info.extra_spill_size);
3431 /* Save the predicate register block either in a register or in memory. */
3432 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3434 reg = gen_rtx_REG (DImode, PR_REG (0));
3435 if (current_frame_info.r[reg_save_pr] != 0)
3437 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3438 reg_emitted (reg_save_pr);
3439 insn = emit_move_insn (alt_reg, reg);
3441 /* ??? Denote pr spill/fill by a DImode move that modifies all
3442 64 hard registers. */
3443 RTX_FRAME_RELATED_P (insn) = 1;
3444 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3446 /* Even if we're not going to generate an epilogue, we still
3447 need to save the register so that EH works. */
3448 if (! epilogue_p)
3449 emit_insn (gen_prologue_use (alt_reg));
3451 else
3453 alt_regno = next_scratch_gr_reg ();
3454 alt_reg = gen_rtx_REG (DImode, alt_regno);
3455 insn = emit_move_insn (alt_reg, reg);
3456 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3457 cfa_off -= 8;
3461 /* Handle AR regs in numerical order. All of them get special handling. */
3462 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3463 && current_frame_info.r[reg_save_ar_unat] == 0)
3465 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3466 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3467 cfa_off -= 8;
3470 /* The alloc insn already copied ar.pfs into a general register. The
3471 only thing we have to do now is copy that register to a stack slot
3472 if we'd not allocated a local register for the job. */
3473 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3474 && current_frame_info.r[reg_save_ar_pfs] == 0)
3476 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3477 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3478 cfa_off -= 8;
3481 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3483 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3484 if (current_frame_info.r[reg_save_ar_lc] != 0)
3486 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3487 reg_emitted (reg_save_ar_lc);
3488 insn = emit_move_insn (alt_reg, reg);
3489 RTX_FRAME_RELATED_P (insn) = 1;
3490 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3492 /* Even if we're not going to generate an epilogue, we still
3493 need to save the register so that EH works. */
3494 if (! epilogue_p)
3495 emit_insn (gen_prologue_use (alt_reg));
3497 else
3499 alt_regno = next_scratch_gr_reg ();
3500 alt_reg = gen_rtx_REG (DImode, alt_regno);
3501 emit_move_insn (alt_reg, reg);
3502 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3503 cfa_off -= 8;
3507 /* Save the return pointer. */
3508 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3510 reg = gen_rtx_REG (DImode, BR_REG (0));
3511 if (current_frame_info.r[reg_save_b0] != 0)
3513 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3514 reg_emitted (reg_save_b0);
3515 insn = emit_move_insn (alt_reg, reg);
3516 RTX_FRAME_RELATED_P (insn) = 1;
3517 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3519 /* Even if we're not going to generate an epilogue, we still
3520 need to save the register so that EH works. */
3521 if (! epilogue_p)
3522 emit_insn (gen_prologue_use (alt_reg));
3524 else
3526 alt_regno = next_scratch_gr_reg ();
3527 alt_reg = gen_rtx_REG (DImode, alt_regno);
3528 emit_move_insn (alt_reg, reg);
3529 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3530 cfa_off -= 8;
3534 if (current_frame_info.r[reg_save_gp])
3536 reg_emitted (reg_save_gp);
3537 insn = emit_move_insn (gen_rtx_REG (DImode,
3538 current_frame_info.r[reg_save_gp]),
3539 pic_offset_table_rtx);
3542 /* We should now be at the base of the gr/br/fr spill area. */
3543 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3544 + current_frame_info.spill_size));
3546 /* Spill all general registers. */
3547 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3548 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3550 reg = gen_rtx_REG (DImode, regno);
3551 do_spill (gen_gr_spill, reg, cfa_off, reg);
3552 cfa_off -= 8;
3555 /* Spill the rest of the BR registers. */
3556 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3557 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3559 alt_regno = next_scratch_gr_reg ();
3560 alt_reg = gen_rtx_REG (DImode, alt_regno);
3561 reg = gen_rtx_REG (DImode, regno);
3562 emit_move_insn (alt_reg, reg);
3563 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3564 cfa_off -= 8;
3567 /* Align the frame and spill all FR registers. */
3568 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3569 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3571 gcc_assert (!(cfa_off & 15));
3572 reg = gen_rtx_REG (XFmode, regno);
3573 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3574 cfa_off -= 16;
3577 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3579 finish_spill_pointers ();
3582 /* Output the textual info surrounding the prologue. */
3584 void
3585 ia64_start_function (FILE *file, const char *fnname,
3586 tree decl ATTRIBUTE_UNUSED)
3588 #if VMS_DEBUGGING_INFO
3589 if (vms_debug_main
3590 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3592 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3593 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3594 dwarf2out_vms_debug_main_pointer ();
3595 vms_debug_main = 0;
3597 #endif
3599 fputs ("\t.proc ", file);
3600 assemble_name (file, fnname);
3601 fputc ('\n', file);
3602 ASM_OUTPUT_LABEL (file, fnname);
3605 /* Called after register allocation to add any instructions needed for the
3606 epilogue. Using an epilogue insn is favored compared to putting all of the
3607 instructions in output_function_epilogue(), since it allows the scheduler
3608 to intermix instructions with the saves of the caller saved registers. In
3609 some cases, it might be necessary to emit a barrier instruction as the last
3610 insn to prevent such scheduling. */
3612 void
3613 ia64_expand_epilogue (int sibcall_p)
3615 rtx insn, reg, alt_reg, ar_unat_save_reg;
3616 int regno, alt_regno, cfa_off;
3618 ia64_compute_frame_size (get_frame_size ());
3620 /* If there is a frame pointer, then we use it instead of the stack
3621 pointer, so that the stack pointer does not need to be valid when
3622 the epilogue starts. See EXIT_IGNORE_STACK. */
3623 if (frame_pointer_needed)
3624 setup_spill_pointers (current_frame_info.n_spilled,
3625 hard_frame_pointer_rtx, 0);
3626 else
3627 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3628 current_frame_info.total_size);
3630 if (current_frame_info.total_size != 0)
3632 /* ??? At this point we must generate a magic insn that appears to
3633 modify the spill iterators and the frame pointer. This would
3634 allow the most scheduling freedom. For now, just hard stop. */
3635 emit_insn (gen_blockage ());
3638 /* Locate the bottom of the register save area. */
3639 cfa_off = (current_frame_info.spill_cfa_off
3640 + current_frame_info.spill_size
3641 + current_frame_info.extra_spill_size);
3643 /* Restore the predicate registers. */
3644 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3646 if (current_frame_info.r[reg_save_pr] != 0)
3648 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3649 reg_emitted (reg_save_pr);
3651 else
3653 alt_regno = next_scratch_gr_reg ();
3654 alt_reg = gen_rtx_REG (DImode, alt_regno);
3655 do_restore (gen_movdi_x, alt_reg, cfa_off);
3656 cfa_off -= 8;
3658 reg = gen_rtx_REG (DImode, PR_REG (0));
3659 emit_move_insn (reg, alt_reg);
3662 /* Restore the application registers. */
3664 /* Load the saved unat from the stack, but do not restore it until
3665 after the GRs have been restored. */
3666 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3668 if (current_frame_info.r[reg_save_ar_unat] != 0)
3670 ar_unat_save_reg
3671 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3672 reg_emitted (reg_save_ar_unat);
3674 else
3676 alt_regno = next_scratch_gr_reg ();
3677 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3678 current_frame_info.gr_used_mask |= 1 << alt_regno;
3679 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3680 cfa_off -= 8;
3683 else
3684 ar_unat_save_reg = NULL_RTX;
3686 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3688 reg_emitted (reg_save_ar_pfs);
3689 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3690 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3691 emit_move_insn (reg, alt_reg);
3693 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3695 alt_regno = next_scratch_gr_reg ();
3696 alt_reg = gen_rtx_REG (DImode, alt_regno);
3697 do_restore (gen_movdi_x, alt_reg, cfa_off);
3698 cfa_off -= 8;
3699 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3700 emit_move_insn (reg, alt_reg);
3703 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3705 if (current_frame_info.r[reg_save_ar_lc] != 0)
3707 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3708 reg_emitted (reg_save_ar_lc);
3710 else
3712 alt_regno = next_scratch_gr_reg ();
3713 alt_reg = gen_rtx_REG (DImode, alt_regno);
3714 do_restore (gen_movdi_x, alt_reg, cfa_off);
3715 cfa_off -= 8;
3717 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3718 emit_move_insn (reg, alt_reg);
3721 /* Restore the return pointer. */
3722 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3724 if (current_frame_info.r[reg_save_b0] != 0)
3726 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3727 reg_emitted (reg_save_b0);
3729 else
3731 alt_regno = next_scratch_gr_reg ();
3732 alt_reg = gen_rtx_REG (DImode, alt_regno);
3733 do_restore (gen_movdi_x, alt_reg, cfa_off);
3734 cfa_off -= 8;
3736 reg = gen_rtx_REG (DImode, BR_REG (0));
3737 emit_move_insn (reg, alt_reg);
3740 /* We should now be at the base of the gr/br/fr spill area. */
3741 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3742 + current_frame_info.spill_size));
3744 /* The GP may be stored on the stack in the prologue, but it's
3745 never restored in the epilogue. Skip the stack slot. */
3746 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3747 cfa_off -= 8;
3749 /* Restore all general registers. */
3750 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3751 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3753 reg = gen_rtx_REG (DImode, regno);
3754 do_restore (gen_gr_restore, reg, cfa_off);
3755 cfa_off -= 8;
3758 /* Restore the branch registers. */
3759 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3760 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3762 alt_regno = next_scratch_gr_reg ();
3763 alt_reg = gen_rtx_REG (DImode, alt_regno);
3764 do_restore (gen_movdi_x, alt_reg, cfa_off);
3765 cfa_off -= 8;
3766 reg = gen_rtx_REG (DImode, regno);
3767 emit_move_insn (reg, alt_reg);
3770 /* Restore floating point registers. */
3771 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3772 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3774 gcc_assert (!(cfa_off & 15));
3775 reg = gen_rtx_REG (XFmode, regno);
3776 do_restore (gen_fr_restore_x, reg, cfa_off);
3777 cfa_off -= 16;
3780 /* Restore ar.unat for real. */
3781 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3783 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3784 emit_move_insn (reg, ar_unat_save_reg);
3787 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3789 finish_spill_pointers ();
3791 if (current_frame_info.total_size
3792 || cfun->machine->ia64_eh_epilogue_sp
3793 || frame_pointer_needed)
3795 /* ??? At this point we must generate a magic insn that appears to
3796 modify the spill iterators, the stack pointer, and the frame
3797 pointer. This would allow the most scheduling freedom. For now,
3798 just hard stop. */
3799 emit_insn (gen_blockage ());
3802 if (cfun->machine->ia64_eh_epilogue_sp)
3803 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3804 else if (frame_pointer_needed)
3806 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3807 RTX_FRAME_RELATED_P (insn) = 1;
3808 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3810 else if (current_frame_info.total_size)
3812 rtx offset, frame_size_rtx;
3814 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3815 if (satisfies_constraint_I (frame_size_rtx))
3816 offset = frame_size_rtx;
3817 else
3819 regno = next_scratch_gr_reg ();
3820 offset = gen_rtx_REG (DImode, regno);
3821 emit_move_insn (offset, frame_size_rtx);
3824 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3825 offset));
3827 RTX_FRAME_RELATED_P (insn) = 1;
3828 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3829 gen_rtx_SET (VOIDmode,
3830 stack_pointer_rtx,
3831 gen_rtx_PLUS (DImode,
3832 stack_pointer_rtx,
3833 frame_size_rtx)));
3836 if (cfun->machine->ia64_eh_epilogue_bsp)
3837 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3839 if (! sibcall_p)
3840 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3841 else
3843 int fp = GR_REG (2);
3844 /* We need a throw away register here, r0 and r1 are reserved,
3845 so r2 is the first available call clobbered register. If
3846 there was a frame_pointer register, we may have swapped the
3847 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3848 sure we're using the string "r2" when emitting the register
3849 name for the assembler. */
3850 if (current_frame_info.r[reg_fp]
3851 && current_frame_info.r[reg_fp] == GR_REG (2))
3852 fp = HARD_FRAME_POINTER_REGNUM;
3854 /* We must emit an alloc to force the input registers to become output
3855 registers. Otherwise, if the callee tries to pass its parameters
3856 through to another call without an intervening alloc, then these
3857 values get lost. */
3858 /* ??? We don't need to preserve all input registers. We only need to
3859 preserve those input registers used as arguments to the sibling call.
3860 It is unclear how to compute that number here. */
3861 if (current_frame_info.n_input_regs != 0)
3863 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3864 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3865 const0_rtx, const0_rtx,
3866 n_inputs, const0_rtx));
3867 RTX_FRAME_RELATED_P (insn) = 1;
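/* The alloc emitted above corresponds roughly to
   "alloc r2 = ar.pfs, 0, 0, N, 0" (modulo the r2 renaming discussed
   above): zero inputs and locals, N outputs, no rotating registers, so
   the registers that were inputs here become outputs for the callee.  */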
3872 /* Return 1 if br.ret can do all the work required to return from a
3873 function. */
3875 int
3876 ia64_direct_return (void)
3878 if (reload_completed && ! frame_pointer_needed)
3880 ia64_compute_frame_size (get_frame_size ());
3882 return (current_frame_info.total_size == 0
3883 && current_frame_info.n_spilled == 0
3884 && current_frame_info.r[reg_save_b0] == 0
3885 && current_frame_info.r[reg_save_pr] == 0
3886 && current_frame_info.r[reg_save_ar_pfs] == 0
3887 && current_frame_info.r[reg_save_ar_unat] == 0
3888 && current_frame_info.r[reg_save_ar_lc] == 0);
3890 return 0;
3893 /* Return the magic cookie that we use to hold the return address
3894 during early compilation. */
3896 rtx
3897 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3899 if (count != 0)
3900 return NULL;
3901 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3904 /* Split this value after reload, now that we know where the return
3905 address is saved. */
3907 void
3908 ia64_split_return_addr_rtx (rtx dest)
3910 rtx src;
3912 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3914 if (current_frame_info.r[reg_save_b0] != 0)
3916 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3917 reg_emitted (reg_save_b0);
3919 else
3921 HOST_WIDE_INT off;
3922 unsigned int regno;
3923 rtx off_r;
3925 /* Compute offset from CFA for BR0. */
3926 /* ??? Must be kept in sync with ia64_expand_prologue. */
3927 off = (current_frame_info.spill_cfa_off
3928 + current_frame_info.spill_size);
3929 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3930 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3931 off -= 8;
3933 /* Convert CFA offset to a register based offset. */
3934 if (frame_pointer_needed)
3935 src = hard_frame_pointer_rtx;
3936 else
3938 src = stack_pointer_rtx;
3939 off += current_frame_info.total_size;
3942 /* Load address into scratch register. */
3943 off_r = GEN_INT (off);
3944 if (satisfies_constraint_I (off_r))
3945 emit_insn (gen_adddi3 (dest, src, off_r));
3946 else
3948 emit_move_insn (dest, off_r);
3949 emit_insn (gen_adddi3 (dest, src, dest));
3952 src = gen_rtx_MEM (Pmode, dest);
3955 else
3956 src = gen_rtx_REG (DImode, BR_REG (0));
3958 emit_move_insn (dest, src);
3961 int
3962 ia64_hard_regno_rename_ok (int from, int to)
3964 /* Don't clobber any of the registers we reserved for the prologue. */
3965 unsigned int r;
3967 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3968 if (to == current_frame_info.r[r]
3969 || from == current_frame_info.r[r]
3970 || to == emitted_frame_related_regs[r]
3971 || from == emitted_frame_related_regs[r])
3972 return 0;
3974 /* Don't use output registers outside the register frame. */
3975 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3976 return 0;
3978 /* Retain even/oddness on predicate register pairs. */
3979 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3980 return (from & 1) == (to & 1);
3982 return 1;
3985 /* Target hook for assembling integer objects. Handle word-sized
3986 aligned objects and detect the cases when @fptr is needed. */
3988 static bool
3989 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3991 if (size == POINTER_SIZE / BITS_PER_UNIT
3992 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3993 && GET_CODE (x) == SYMBOL_REF
3994 && SYMBOL_REF_FUNCTION_P (x))
3996 static const char * const directive[2][2] = {
3997 /* 64-bit pointer */ /* 32-bit pointer */
3998 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3999 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4001 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4002 output_addr_const (asm_out_file, x);
4003 fputs (")\n", asm_out_file);
4004 return true;
4006 return default_assemble_integer (x, size, aligned_p);
4009 /* Emit the function prologue. */
4011 static void
4012 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4014 int mask, grsave, grsave_prev;
4016 if (current_frame_info.need_regstk)
4017 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4018 current_frame_info.n_input_regs,
4019 current_frame_info.n_local_regs,
4020 current_frame_info.n_output_regs,
4021 current_frame_info.n_rotate_regs);
4023 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4024 return;
4026 /* Emit the .prologue directive. */
4028 mask = 0;
4029 grsave = grsave_prev = 0;
4030 if (current_frame_info.r[reg_save_b0] != 0)
4032 mask |= 8;
4033 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4035 if (current_frame_info.r[reg_save_ar_pfs] != 0
4036 && (grsave_prev == 0
4037 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4039 mask |= 4;
4040 if (grsave_prev == 0)
4041 grsave = current_frame_info.r[reg_save_ar_pfs];
4042 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4044 if (current_frame_info.r[reg_fp] != 0
4045 && (grsave_prev == 0
4046 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4048 mask |= 2;
4049 if (grsave_prev == 0)
4050 grsave = HARD_FRAME_POINTER_REGNUM;
4051 grsave_prev = current_frame_info.r[reg_fp];
4053 if (current_frame_info.r[reg_save_pr] != 0
4054 && (grsave_prev == 0
4055 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4057 mask |= 1;
4058 if (grsave_prev == 0)
4059 grsave = current_frame_info.r[reg_save_pr];
4062 if (mask && TARGET_GNU_AS)
4063 fprintf (file, "\t.prologue %d, %d\n", mask,
4064 ia64_dbx_register_number (grsave));
4065 else
4066 fputs ("\t.prologue\n", file);
4068 /* Emit a .spill directive, if necessary, to relocate the base of
4069 the register spill area. */
4070 if (current_frame_info.spill_cfa_off != -16)
4071 fprintf (file, "\t.spill %ld\n",
4072 (long) (current_frame_info.spill_cfa_off
4073 + current_frame_info.spill_size));
4076 /* Emit the .body directive at the scheduled end of the prologue. */
4078 static void
4079 ia64_output_function_end_prologue (FILE *file)
4081 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4082 return;
4084 fputs ("\t.body\n", file);
4087 /* Emit the function epilogue. */
4089 static void
4090 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4091 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4093 int i;
4095 if (current_frame_info.r[reg_fp])
4097 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4098 reg_names[HARD_FRAME_POINTER_REGNUM]
4099 = reg_names[current_frame_info.r[reg_fp]];
4100 reg_names[current_frame_info.r[reg_fp]] = tmp;
4101 reg_emitted (reg_fp);
4103 if (! TARGET_REG_NAMES)
4105 for (i = 0; i < current_frame_info.n_input_regs; i++)
4106 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4107 for (i = 0; i < current_frame_info.n_local_regs; i++)
4108 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4109 for (i = 0; i < current_frame_info.n_output_regs; i++)
4110 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4113 current_frame_info.initialized = 0;
4116 int
4117 ia64_dbx_register_number (int regno)
4119 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4120 from its home at loc79 to something inside the register frame. We
4121 must perform the same renumbering here for the debug info. */
4122 if (current_frame_info.r[reg_fp])
4124 if (regno == HARD_FRAME_POINTER_REGNUM)
4125 regno = current_frame_info.r[reg_fp];
4126 else if (regno == current_frame_info.r[reg_fp])
4127 regno = HARD_FRAME_POINTER_REGNUM;
4130 if (IN_REGNO_P (regno))
4131 return 32 + regno - IN_REG (0);
4132 else if (LOC_REGNO_P (regno))
4133 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4134 else if (OUT_REGNO_P (regno))
4135 return (32 + current_frame_info.n_input_regs
4136 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4137 else
4138 return regno;
4141 /* Implement TARGET_TRAMPOLINE_INIT.
4143 The trampoline should set the static chain pointer to value placed
4144 into the trampoline and should branch to the specified routine.
4145 To make the normal indirect-subroutine calling convention work,
4146 the trampoline must look like a function descriptor; the first
4147 word being the target address and the second being the target's
4148 global pointer.
4150 We abuse the concept of a global pointer by arranging for it
4151 to point to the data we need to load. The complete trampoline
4152 has the following form:
4154 +-------------------+ \
4155 TRAMP: | __ia64_trampoline | |
4156 +-------------------+ > fake function descriptor
4157 | TRAMP+16 | |
4158 +-------------------+ /
4159 | target descriptor |
4160 +-------------------+
4161 | static link |
4162 +-------------------+
4163 */
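/* An indirect call through TRAMP therefore picks up __ia64_trampoline as
   the entry point and TRAMP+16 as its gp.  __ia64_trampoline (in libgcc)
   then uses that gp to find the target descriptor and static link words
   laid out above, loads the static chain register, and branches to the
   real target with the target's own gp; this is a rough sketch of the
   runtime behavior, not a description of the libgcc code itself.  */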
4165 static void
4166 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4168 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4169 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4171 /* The Intel assembler requires that the global __ia64_trampoline symbol
4172 be declared explicitly */
4173 if (!TARGET_GNU_AS)
4175 static bool declared_ia64_trampoline = false;
4177 if (!declared_ia64_trampoline)
4179 declared_ia64_trampoline = true;
4180 (*targetm.asm_out.globalize_label) (asm_out_file,
4181 "__ia64_trampoline");
4185 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4186 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4187 fnaddr = convert_memory_address (Pmode, fnaddr);
4188 static_chain = convert_memory_address (Pmode, static_chain);
4190 /* Load up our iterator. */
4191 addr_reg = copy_to_reg (addr);
4192 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4194 /* The first two words are the fake descriptor:
4195 __ia64_trampoline, ADDR+16. */
4196 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4197 if (TARGET_ABI_OPEN_VMS)
4199 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4200 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4201 relocation against function symbols to make it identical to the
4202 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4203 strict ELF and dereference to get the bare code address. */
4204 rtx reg = gen_reg_rtx (Pmode);
4205 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4206 emit_move_insn (reg, tramp);
4207 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4208 tramp = reg;
4210 emit_move_insn (m_tramp, tramp);
4211 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4212 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4214 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4215 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4216 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4218 /* The third word is the target descriptor. */
4219 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4220 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4221 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4223 /* The fourth word is the static chain. */
4224 emit_move_insn (m_tramp, static_chain);
4227 /* Do any needed setup for a variadic function. CUM has not been updated
4228 for the last named argument which has type TYPE and mode MODE.
4230 We generate the actual spill instructions during prologue generation. */
4232 static void
4233 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4234 tree type, int * pretend_size,
4235 int second_time ATTRIBUTE_UNUSED)
4237 CUMULATIVE_ARGS next_cum = *cum;
4239 /* Skip the current argument. */
4240 ia64_function_arg_advance (&next_cum, mode, type, 1);
4242 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4244 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4245 *pretend_size = n * UNITS_PER_WORD;
4246 cfun->machine->n_varargs = n;
4250 /* Check whether TYPE is a homogeneous floating point aggregate. If
4251 it is, return the mode of the floating point type that appears
4252 in all leaves. If it is not, return VOIDmode.
4254 An aggregate is a homogeneous floating point aggregate if all
4255 fields/elements in it have the same floating point type (e.g.,
4256 SFmode). 128-bit quad-precision floats are excluded.
4258 Variable sized aggregates should never arrive here, since we should
4259 have already decided to pass them by reference. Top-level zero-sized
4260 aggregates are excluded because our parallels crash the middle-end. */
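/* For illustration: struct { float x, y, z; } yields SFmode and
   struct { double d[4]; } yields DFmode, while mixing element types,
   as in struct { float f; double d; }, yields VOIDmode.  */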
4262 static enum machine_mode
4263 hfa_element_mode (const_tree type, bool nested)
4265 enum machine_mode element_mode = VOIDmode;
4266 enum machine_mode mode;
4267 enum tree_code code = TREE_CODE (type);
4268 int know_element_mode = 0;
4269 tree t;
4271 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4272 return VOIDmode;
4274 switch (code)
4276 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4277 case BOOLEAN_TYPE: case POINTER_TYPE:
4278 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4279 case LANG_TYPE: case FUNCTION_TYPE:
4280 return VOIDmode;
4282 /* Fortran complex types are supposed to be HFAs, so we need to handle
4283 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4284 types though. */
4285 case COMPLEX_TYPE:
4286 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4287 && TYPE_MODE (type) != TCmode)
4288 return GET_MODE_INNER (TYPE_MODE (type));
4289 else
4290 return VOIDmode;
4292 case REAL_TYPE:
4293 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4294 mode if this is contained within an aggregate. */
4295 if (nested && TYPE_MODE (type) != TFmode)
4296 return TYPE_MODE (type);
4297 else
4298 return VOIDmode;
4300 case ARRAY_TYPE:
4301 return hfa_element_mode (TREE_TYPE (type), 1);
4303 case RECORD_TYPE:
4304 case UNION_TYPE:
4305 case QUAL_UNION_TYPE:
4306 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4308 if (TREE_CODE (t) != FIELD_DECL)
4309 continue;
4311 mode = hfa_element_mode (TREE_TYPE (t), 1);
4312 if (know_element_mode)
4314 if (mode != element_mode)
4315 return VOIDmode;
4317 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4318 return VOIDmode;
4319 else
4321 know_element_mode = 1;
4322 element_mode = mode;
4325 return element_mode;
4327 default:
4328 /* If we reach here, we probably have some front-end specific type
4329 that the backend doesn't know about. This can happen via the
4330 aggregate_value_p call in init_function_start. All we can do is
4331 ignore unknown tree types. */
4332 return VOIDmode;
4335 return VOIDmode;
4338 /* Return the number of words required to hold a quantity of TYPE and MODE
4339 when passed as an argument. */
4340 static int
4341 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4343 int words;
4345 if (mode == BLKmode)
4346 words = int_size_in_bytes (type);
4347 else
4348 words = GET_MODE_SIZE (mode);
4350 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4353 /* Return the number of registers that should be skipped so the current
4354 argument (described by TYPE and WORDS) will be properly aligned.
4356 Integer and float arguments larger than 8 bytes start at the next
4357 even boundary. Aggregates larger than 8 bytes start at the next
4358 even boundary if the aggregate has 16 byte alignment. Note that
4359 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4360 but are still to be aligned in registers.
4362 ??? The ABI does not specify how to handle aggregates with
4363 alignment from 9 to 15 bytes, or greater than 16. We handle them
4364 all as if they had 16 byte alignment. Such aggregates can occur
4365 only if gcc extensions are used. */
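/* For illustration: outside of VMS, when cum->words is odd, a
   16-byte-aligned aggregate or a TFmode scalar returns 1 here so that it
   starts in the next even argument slot; when cum->words is already even
   the result is 0.  */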
4366 static int
4367 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4368 const_tree type, int words)
4370 /* No registers are skipped on VMS. */
4371 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4372 return 0;
4374 if (type
4375 && TREE_CODE (type) != INTEGER_TYPE
4376 && TREE_CODE (type) != REAL_TYPE)
4377 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4378 else
4379 return words > 1;
4382 /* Return rtx for register where argument is passed, or zero if it is passed
4383 on the stack. */
4384 /* ??? 128-bit quad-precision floats are always passed in general
4385 registers. */
4387 static rtx
4388 ia64_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4389 const_tree type, bool named, bool incoming)
4391 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4392 int words = ia64_function_arg_words (type, mode);
4393 int offset = ia64_function_arg_offset (cum, type, words);
4394 enum machine_mode hfa_mode = VOIDmode;
4396 /* For OPEN VMS, emit the instruction setting up the argument register here,
4397 when we know this will be together with the other argument setup related
4398 insns. This is not conceptually the best place to do this, but it is
4399 the easiest as we have convenient access to cumulative args info. */
4401 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4402 && named == 1)
4404 unsigned HOST_WIDE_INT regval = cum->words;
4405 int i;
4407 for (i = 0; i < 8; i++)
4408 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4410 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4411 GEN_INT (regval));
4414 /* If all argument slots are used, then it must go on the stack. */
4415 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4416 return 0;
4418 /* Check for and handle homogeneous FP aggregates. */
4419 if (type)
4420 hfa_mode = hfa_element_mode (type, 0);
4422 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4423 and unprototyped hfas are passed specially. */
4424 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4426 rtx loc[16];
4427 int i = 0;
4428 int fp_regs = cum->fp_regs;
4429 int int_regs = cum->words + offset;
4430 int hfa_size = GET_MODE_SIZE (hfa_mode);
4431 int byte_size;
4432 int args_byte_size;
4434 /* If prototyped, pass it in FR regs then GR regs.
4435 If not prototyped, pass it in both FR and GR regs.
4437 If this is an SFmode aggregate, then it is possible to run out of
4438 FR regs while GR regs are still left. In that case, we pass the
4439 remaining part in the GR regs. */
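/* For illustration, assuming a prototyped, named SFmode HFA of sixteen
   floats passed as the first argument: the FP loop below places the first
   eight elements in f8-f15, and the remaining 32 bytes are then picked up
   by the GR code further down.  */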
4441 /* Fill the FP regs. We do this always. We stop if we reach the end
4442 of the argument, the last FP register, or the last argument slot. */
4444 byte_size = ((mode == BLKmode)
4445 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4446 args_byte_size = int_regs * UNITS_PER_WORD;
4447 offset = 0;
4448 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4449 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4451 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4452 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4453 + fp_regs)),
4454 GEN_INT (offset));
4455 offset += hfa_size;
4456 args_byte_size += hfa_size;
4457 fp_regs++;
4460 /* If no prototype, then the whole thing must go in GR regs. */
4461 if (! cum->prototype)
4462 offset = 0;
4463 /* If this is an SFmode aggregate, then we might have some left over
4464 that needs to go in GR regs. */
4465 else if (byte_size != offset)
4466 int_regs += offset / UNITS_PER_WORD;
4468 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4470 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4472 enum machine_mode gr_mode = DImode;
4473 unsigned int gr_size;
4475 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4476 then this goes in a GR reg left adjusted/little endian, right
4477 adjusted/big endian. */
4478 /* ??? Currently this is handled wrong, because 4-byte hunks are
4479 always right adjusted/little endian. */
4480 if (offset & 0x4)
4481 gr_mode = SImode;
4482 /* If we have an even 4 byte hunk because the aggregate is a
4483 multiple of 4 bytes in size, then this goes in a GR reg right
4484 adjusted/little endian. */
4485 else if (byte_size - offset == 4)
4486 gr_mode = SImode;
4488 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4489 gen_rtx_REG (gr_mode, (basereg
4490 + int_regs)),
4491 GEN_INT (offset));
4493 gr_size = GET_MODE_SIZE (gr_mode);
4494 offset += gr_size;
4495 if (gr_size == UNITS_PER_WORD
4496 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4497 int_regs++;
4498 else if (gr_size > UNITS_PER_WORD)
4499 int_regs += gr_size / UNITS_PER_WORD;
4501 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4504 /* On OpenVMS a variable argument is passed either in Rn or in Fn. */
4505 else if (TARGET_ABI_OPEN_VMS && named == 0)
4507 if (FLOAT_MODE_P (mode))
4508 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4509 else
4510 return gen_rtx_REG (mode, basereg + cum->words);
4513 /* Integral and aggregates go in general registers. If we have run out of
4514 FR registers, then FP values must also go in general registers. This can
4515 happen when we have a SFmode HFA. */
4516 else if (mode == TFmode || mode == TCmode
4517 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4519 int byte_size = ((mode == BLKmode)
4520 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4521 if (BYTES_BIG_ENDIAN
4522 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4523 && byte_size < UNITS_PER_WORD
4524 && byte_size > 0)
4526 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4527 gen_rtx_REG (DImode,
4528 (basereg + cum->words
4529 + offset)),
4530 const0_rtx);
4531 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4533 else
4534 return gen_rtx_REG (mode, basereg + cum->words + offset);
4538 /* If there is a prototype, then FP values go in a FR register when
4539 named, and in a GR register when unnamed. */
4540 else if (cum->prototype)
4542 if (named)
4543 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4544 /* In big-endian mode, an anonymous SFmode value must be represented
4545 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4546 the value into the high half of the general register. */
4547 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4548 return gen_rtx_PARALLEL (mode,
4549 gen_rtvec (1,
4550 gen_rtx_EXPR_LIST (VOIDmode,
4551 gen_rtx_REG (DImode, basereg + cum->words + offset),
4552 const0_rtx)));
4553 else
4554 return gen_rtx_REG (mode, basereg + cum->words + offset);
4556 /* If there is no prototype, then FP values go in both FR and GR
4557 registers. */
4558 else
4560 /* See comment above. */
4561 enum machine_mode inner_mode =
4562 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4564 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4565 gen_rtx_REG (mode, (FR_ARG_FIRST
4566 + cum->fp_regs)),
4567 const0_rtx);
4568 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4569 gen_rtx_REG (inner_mode,
4570 (basereg + cum->words
4571 + offset)),
4572 const0_rtx);
4574 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4578 /* Implement TARGET_FUNCTION_ARG target hook. */
4580 static rtx
4581 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4582 const_tree type, bool named)
4584 return ia64_function_arg_1 (cum, mode, type, named, false);
4587 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4589 static rtx
4590 ia64_function_incoming_arg (CUMULATIVE_ARGS *cum,
4591 enum machine_mode mode,
4592 const_tree type, bool named)
4594 return ia64_function_arg_1 (cum, mode, type, named, true);
4597 /* Return number of bytes, at the beginning of the argument, that must be
4598 put in registers. 0 if the argument is entirely in registers or entirely
4599 in memory. */
4601 static int
4602 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4603 tree type, bool named ATTRIBUTE_UNUSED)
4605 int words = ia64_function_arg_words (type, mode);
4606 int offset = ia64_function_arg_offset (cum, type, words);
4608 /* If all argument slots are used, then it must go on the stack. */
4609 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4610 return 0;
4612 /* It doesn't matter whether the argument goes in FR or GR regs. If
4613 it fits within the 8 argument slots, then it goes entirely in
4614 registers. If it extends past the last argument slot, then the rest
4615 goes on the stack. */
4617 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4618 return 0;
4620 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
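/* For illustration: with cum->words == 6 and a four-word aggregate, two
   words fit in the last two argument slots, so 16 bytes are passed in
   registers and the remaining 16 bytes go on the stack.  */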
4623 /* Return ivms_arg_type based on machine_mode. */
4625 static enum ivms_arg_type
4626 ia64_arg_type (enum machine_mode mode)
4628 switch (mode)
4630 case SFmode:
4631 return FS;
4632 case DFmode:
4633 return FT;
4634 default:
4635 return I64;
4639 /* Update CUM to point after this argument. This is patterned after
4640 ia64_function_arg. */
4642 static void
4643 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4644 const_tree type, bool named)
4646 int words = ia64_function_arg_words (type, mode);
4647 int offset = ia64_function_arg_offset (cum, type, words);
4648 enum machine_mode hfa_mode = VOIDmode;
4650 /* If all arg slots are already full, then there is nothing to do. */
4651 if (cum->words >= MAX_ARGUMENT_SLOTS)
4653 cum->words += words + offset;
4654 return;
4657 cum->atypes[cum->words] = ia64_arg_type (mode);
4658 cum->words += words + offset;
4660 /* Check for and handle homogeneous FP aggregates. */
4661 if (type)
4662 hfa_mode = hfa_element_mode (type, 0);
4664 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4665 and unprototyped hfas are passed specially. */
4666 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4668 int fp_regs = cum->fp_regs;
4669 /* This is the original value of cum->words + offset. */
4670 int int_regs = cum->words - words;
4671 int hfa_size = GET_MODE_SIZE (hfa_mode);
4672 int byte_size;
4673 int args_byte_size;
4675 /* If prototyped, pass it in FR regs then GR regs.
4676 If not prototyped, pass it in both FR and GR regs.
4678 If this is an SFmode aggregate, then it is possible to run out of
4679 FR regs while GR regs are still left. In that case, we pass the
4680 remaining part in the GR regs. */
4682 /* Fill the FP regs. We do this always. We stop if we reach the end
4683 of the argument, the last FP register, or the last argument slot. */
4685 byte_size = ((mode == BLKmode)
4686 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4687 args_byte_size = int_regs * UNITS_PER_WORD;
4688 offset = 0;
4689 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4690 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4692 offset += hfa_size;
4693 args_byte_size += hfa_size;
4694 fp_regs++;
4697 cum->fp_regs = fp_regs;
4700 /* On OpenVMS a variable argument is passed either in Rn or in Fn. */
4701 else if (TARGET_ABI_OPEN_VMS && named == 0)
4703 cum->int_regs = cum->words;
4704 cum->fp_regs = cum->words;
4707 /* Integral and aggregates go in general registers. So do TFmode FP values.
4708 If we have run out of FR registers, then other FP values must also go in
4709 general registers. This can happen when we have a SFmode HFA. */
4710 else if (mode == TFmode || mode == TCmode
4711 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4712 cum->int_regs = cum->words;
4714 /* If there is a prototype, then FP values go in a FR register when
4715 named, and in a GR register when unnamed. */
4716 else if (cum->prototype)
4718 if (! named)
4719 cum->int_regs = cum->words;
4720 else
4721 /* ??? Complex types should not reach here. */
4722 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4724 /* If there is no prototype, then FP values go in both FR and GR
4725 registers. */
4726 else
4728 /* ??? Complex types should not reach here. */
4729 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4730 cum->int_regs = cum->words;
4734 /* Arguments with alignment larger than 8 bytes start at the next even
4735 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4736 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4738 static unsigned int
4739 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4741 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4742 return PARM_BOUNDARY * 2;
4744 if (type)
4746 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4747 return PARM_BOUNDARY * 2;
4748 else
4749 return PARM_BOUNDARY;
4752 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4753 return PARM_BOUNDARY * 2;
4754 else
4755 return PARM_BOUNDARY;
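/* PARM_BOUNDARY is 64 on ia64, so the doubled value above requests a
   128-bit boundary, i.e. an even argument slot, for over-aligned or
   16-byte arguments.  */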
4758 /* True if it is OK to do sibling call optimization for the specified
4759 call expression EXP. DECL will be the called function, or NULL if
4760 this is an indirect call. */
4761 static bool
4762 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4764 /* We can't perform a sibcall if the current function has the syscall_linkage
4765 attribute. */
4766 if (lookup_attribute ("syscall_linkage",
4767 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4768 return false;
4770 /* We must always return with our current GP. This means we can
4771 only sibcall to functions defined in the current module unless
4772 TARGET_CONST_GP is set to true. */
4773 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4777 /* Implement va_arg. */
4779 static tree
4780 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4781 gimple_seq *post_p)
4783 /* Variable sized types are passed by reference. */
4784 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4786 tree ptrtype = build_pointer_type (type);
4787 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4788 return build_va_arg_indirect_ref (addr);
4791 /* Aggregate arguments with alignment larger than 8 bytes start at
4792 the next even boundary. Integer and floating point arguments
4793 do so if they are larger than 8 bytes, whether or not they are
4794 also aligned larger than 8 bytes. */
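/* The computation below is essentially valist = (valist + 15) & -16;
   e.g. a va_list pointer ending in 0x28 is bumped to the next 0x30
   boundary before the standard va_arg code runs.  */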
4795 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4796 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4798 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4799 size_int (2 * UNITS_PER_WORD - 1));
4800 t = fold_convert (sizetype, t);
4801 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4802 size_int (-2 * UNITS_PER_WORD));
4803 t = fold_convert (TREE_TYPE (valist), t);
4804 gimplify_assign (unshare_expr (valist), t, pre_p);
4807 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4810 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4811 in a register. */
4813 static bool
4814 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4816 enum machine_mode mode;
4817 enum machine_mode hfa_mode;
4818 HOST_WIDE_INT byte_size;
4820 mode = TYPE_MODE (valtype);
4821 byte_size = GET_MODE_SIZE (mode);
4822 if (mode == BLKmode)
4824 byte_size = int_size_in_bytes (valtype);
4825 if (byte_size < 0)
4826 return true;
4829 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4831 hfa_mode = hfa_element_mode (valtype, 0);
4832 if (hfa_mode != VOIDmode)
4834 int hfa_size = GET_MODE_SIZE (hfa_mode);
4836 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4837 return true;
4838 else
4839 return false;
4841 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4842 return true;
4843 else
4844 return false;
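/* For illustration: an HFA of ten doubles (ten elements) exceeds the
   eight FP return registers and is returned in memory, while a plain
   32-byte non-HFA struct still fits the four integer return slots r8-r11
   and is returned in registers.  */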
4847 /* Return rtx for register that holds the function return value. */
4849 static rtx
4850 ia64_function_value (const_tree valtype,
4851 const_tree fn_decl_or_type,
4852 bool outgoing ATTRIBUTE_UNUSED)
4854 enum machine_mode mode;
4855 enum machine_mode hfa_mode;
4856 int unsignedp;
4857 const_tree func = fn_decl_or_type;
4859 if (fn_decl_or_type
4860 && !DECL_P (fn_decl_or_type))
4861 func = NULL;
4863 mode = TYPE_MODE (valtype);
4864 hfa_mode = hfa_element_mode (valtype, 0);
4866 if (hfa_mode != VOIDmode)
4868 rtx loc[8];
4869 int i;
4870 int hfa_size;
4871 int byte_size;
4872 int offset;
4874 hfa_size = GET_MODE_SIZE (hfa_mode);
4875 byte_size = ((mode == BLKmode)
4876 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4877 offset = 0;
4878 for (i = 0; offset < byte_size; i++)
4880 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4881 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4882 GEN_INT (offset));
4883 offset += hfa_size;
4885 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4887 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4888 return gen_rtx_REG (mode, FR_ARG_FIRST);
4889 else
4891 bool need_parallel = false;
4893 /* In big-endian mode, we need to manage the layout of aggregates
4894 in the registers so that we get the bits properly aligned in
4895 the highpart of the registers. */
4896 if (BYTES_BIG_ENDIAN
4897 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4898 need_parallel = true;
4900 /* Something like struct S { long double x; char a[0] } is not an
4901 HFA structure, and therefore doesn't go in fp registers. But
4902 the middle-end will give it XFmode anyway, and XFmode values
4903 don't normally fit in integer registers. So we need to smuggle
4904 the value inside a parallel. */
4905 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4906 need_parallel = true;
4908 if (need_parallel)
4910 rtx loc[8];
4911 int offset;
4912 int bytesize;
4913 int i;
4915 offset = 0;
4916 bytesize = int_size_in_bytes (valtype);
4917 /* An empty PARALLEL is invalid here, but the return value
4918 doesn't matter for empty structs. */
4919 if (bytesize == 0)
4920 return gen_rtx_REG (mode, GR_RET_FIRST);
4921 for (i = 0; offset < bytesize; i++)
4923 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4924 gen_rtx_REG (DImode,
4925 GR_RET_FIRST + i),
4926 GEN_INT (offset));
4927 offset += UNITS_PER_WORD;
4929 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4932 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4933 func ? TREE_TYPE (func) : NULL_TREE,
4934 true);
4936 return gen_rtx_REG (mode, GR_RET_FIRST);
4940 /* Worker function for TARGET_LIBCALL_VALUE. */
4942 static rtx
4943 ia64_libcall_value (enum machine_mode mode,
4944 const_rtx fun ATTRIBUTE_UNUSED)
4946 return gen_rtx_REG (mode,
4947 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4948 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4949 && (mode) != TFmode)
4950 ? FR_RET_FIRST : GR_RET_FIRST));
4953 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4955 static bool
4956 ia64_function_value_regno_p (const unsigned int regno)
4958 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4959 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4962 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4963 We need to emit DTP-relative relocations. */
4965 static void
4966 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4968 gcc_assert (size == 4 || size == 8);
4969 if (size == 4)
4970 fputs ("\tdata4.ua\t@dtprel(", file);
4971 else
4972 fputs ("\tdata8.ua\t@dtprel(", file);
4973 output_addr_const (file, x);
4974 fputs (")", file);
4977 /* Print a memory address as an operand to reference that memory location. */
4979 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4980 also call this from ia64_print_operand for memory addresses. */
4982 void
4983 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4984 rtx address ATTRIBUTE_UNUSED)
4988 /* Print an operand to an assembler instruction.
4989 C Swap and print a comparison operator.
4990 D Print an FP comparison operator.
4991 E Print 32 - constant, for SImode shifts as extract.
4992 e Print 64 - constant, for DImode rotates.
4993 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4994 a floating point register emitted normally.
4995 G A floating point constant.
4996 I Invert a predicate register by adding 1.
4997 J Select the proper predicate register for a condition.
4998 j Select the inverse predicate register for a condition.
4999 O Append .acq for volatile load.
5000 P Postincrement of a MEM.
5001 Q Append .rel for volatile store.
5002 R Print .s .d or nothing for a single, double or no truncation.
5003 S Shift amount for shladd instruction.
5004 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5005 for Intel assembler.
5006 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5007 for Intel assembler.
5008 X A pair of floating point registers.
5009 r Print register name, or constant 0 as r0. HP compatibility for
5010 Linux kernel.
5011 v Print vector constant value as an 8-byte integer value. */
5013 void
5014 ia64_print_operand (FILE * file, rtx x, int code)
5016 const char *str;
5018 switch (code)
5020 case 0:
5021 /* Handled below. */
5022 break;
5024 case 'C':
5026 enum rtx_code c = swap_condition (GET_CODE (x));
5027 fputs (GET_RTX_NAME (c), file);
5028 return;
5031 case 'D':
5032 switch (GET_CODE (x))
5034 case NE:
5035 str = "neq";
5036 break;
5037 case UNORDERED:
5038 str = "unord";
5039 break;
5040 case ORDERED:
5041 str = "ord";
5042 break;
5043 case UNLT:
5044 str = "nge";
5045 break;
5046 case UNLE:
5047 str = "ngt";
5048 break;
5049 case UNGT:
5050 str = "nle";
5051 break;
5052 case UNGE:
5053 str = "nlt";
5054 break;
5055 default:
5056 str = GET_RTX_NAME (GET_CODE (x));
5057 break;
5059 fputs (str, file);
5060 return;
5062 case 'E':
5063 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5064 return;
5066 case 'e':
5067 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5068 return;
5070 case 'F':
5071 if (x == CONST0_RTX (GET_MODE (x)))
5072 str = reg_names [FR_REG (0)];
5073 else if (x == CONST1_RTX (GET_MODE (x)))
5074 str = reg_names [FR_REG (1)];
5075 else
5077 gcc_assert (GET_CODE (x) == REG);
5078 str = reg_names [REGNO (x)];
5080 fputs (str, file);
5081 return;
5083 case 'G':
5085 long val[4];
5086 REAL_VALUE_TYPE rv;
5087 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5088 real_to_target (val, &rv, GET_MODE (x));
5089 if (GET_MODE (x) == SFmode)
5090 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5091 else if (GET_MODE (x) == DFmode)
5092 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5093 & 0xffffffff,
5094 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5095 & 0xffffffff);
5096 else
5097 output_operand_lossage ("invalid %%G mode");
5099 return;
5101 case 'I':
5102 fputs (reg_names [REGNO (x) + 1], file);
5103 return;
5105 case 'J':
5106 case 'j':
5108 unsigned int regno = REGNO (XEXP (x, 0));
5109 if (GET_CODE (x) == EQ)
5110 regno += 1;
5111 if (code == 'j')
5112 regno ^= 1;
5113 fputs (reg_names [regno], file);
5115 return;
5117 case 'O':
5118 if (MEM_VOLATILE_P (x))
5119 fputs(".acq", file);
5120 return;
5122 case 'P':
5124 HOST_WIDE_INT value;
5126 switch (GET_CODE (XEXP (x, 0)))
5128 default:
5129 return;
5131 case POST_MODIFY:
5132 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5133 if (GET_CODE (x) == CONST_INT)
5134 value = INTVAL (x);
5135 else
5137 gcc_assert (GET_CODE (x) == REG);
5138 fprintf (file, ", %s", reg_names[REGNO (x)]);
5139 return;
5141 break;
5143 case POST_INC:
5144 value = GET_MODE_SIZE (GET_MODE (x));
5145 break;
5147 case POST_DEC:
5148 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5149 break;
5152 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5153 return;
5156 case 'Q':
5157 if (MEM_VOLATILE_P (x))
5158 fputs(".rel", file);
5159 return;
5161 case 'R':
5162 if (x == CONST0_RTX (GET_MODE (x)))
5163 fputs(".s", file);
5164 else if (x == CONST1_RTX (GET_MODE (x)))
5165 fputs(".d", file);
5166 else if (x == CONST2_RTX (GET_MODE (x)))
5167 ;
5168 else
5169 output_operand_lossage ("invalid %%R value");
5170 return;
5172 case 'S':
5173 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5174 return;
5176 case 'T':
5177 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5179 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5180 return;
5182 break;
5184 case 'U':
5185 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5187 const char *prefix = "0x";
5188 if (INTVAL (x) & 0x80000000)
5190 fprintf (file, "0xffffffff");
5191 prefix = "";
5193 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5194 return;
5196 break;
5198 case 'X':
5200 unsigned int regno = REGNO (x);
5201 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5203 return;
5205 case 'r':
5206 /* If this operand is the constant zero, write it as register zero.
5207 Any register, zero, or CONST_INT value is OK here. */
5208 if (GET_CODE (x) == REG)
5209 fputs (reg_names[REGNO (x)], file);
5210 else if (x == CONST0_RTX (GET_MODE (x)))
5211 fputs ("r0", file);
5212 else if (GET_CODE (x) == CONST_INT)
5213 output_addr_const (file, x);
5214 else
5215 output_operand_lossage ("invalid %%r value");
5216 return;
5218 case 'v':
5219 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5220 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5221 break;
5223 case '+':
5225 const char *which;
5227 /* For conditional branches, returns or calls, substitute
5228 sptk, dptk, dpnt, or spnt for %s. */
5229 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5230 if (x)
5232 int pred_val = INTVAL (XEXP (x, 0));
5234 /* Guess top and bottom 2% statically predicted. */
5235 if (pred_val < REG_BR_PROB_BASE / 50
5236 && br_prob_note_reliable_p (x))
5237 which = ".spnt";
5238 else if (pred_val < REG_BR_PROB_BASE / 2)
5239 which = ".dpnt";
5240 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5241 || !br_prob_note_reliable_p (x))
5242 which = ".dptk";
5243 else
5244 which = ".sptk";
5246 else if (GET_CODE (current_output_insn) == CALL_INSN)
5247 which = ".sptk";
5248 else
5249 which = ".dptk";
5251 fputs (which, file);
5252 return;
5255 case ',':
5256 x = current_insn_predicate;
5257 if (x)
5259 unsigned int regno = REGNO (XEXP (x, 0));
5260 if (GET_CODE (x) == EQ)
5261 regno += 1;
5262 fprintf (file, "(%s) ", reg_names [regno]);
5264 return;
5266 default:
5267 output_operand_lossage ("ia64_print_operand: unknown code");
5268 return;
5271 switch (GET_CODE (x))
5273 /* This happens for the spill/restore instructions. */
5274 case POST_INC:
5275 case POST_DEC:
5276 case POST_MODIFY:
5277 x = XEXP (x, 0);
5278 /* ... fall through ... */
5280 case REG:
5281 fputs (reg_names [REGNO (x)], file);
5282 break;
5284 case MEM:
5286 rtx addr = XEXP (x, 0);
5287 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5288 addr = XEXP (addr, 0);
5289 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5290 break;
5293 default:
5294 output_addr_const (file, x);
5295 break;
5298 return;
5301 /* Compute a (partial) cost for rtx X. Return true if the complete
5302 cost has been computed, and false if subexpressions should be
5303 scanned. In either case, *TOTAL contains the cost result. */
5304 /* ??? This is incomplete. */
5306 static bool
5307 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
5308 bool speed ATTRIBUTE_UNUSED)
5310 switch (code)
5312 case CONST_INT:
5313 switch (outer_code)
5315 case SET:
5316 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5317 return true;
5318 case PLUS:
5319 if (satisfies_constraint_I (x))
5320 *total = 0;
5321 else if (satisfies_constraint_J (x))
5322 *total = 1;
5323 else
5324 *total = COSTS_N_INSNS (1);
5325 return true;
5326 default:
5327 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5328 *total = 0;
5329 else
5330 *total = COSTS_N_INSNS (1);
5331 return true;
5334 case CONST_DOUBLE:
5335 *total = COSTS_N_INSNS (1);
5336 return true;
5338 case CONST:
5339 case SYMBOL_REF:
5340 case LABEL_REF:
5341 *total = COSTS_N_INSNS (3);
5342 return true;
5344 case FMA:
5345 *total = COSTS_N_INSNS (4);
5346 return true;
5348 case MULT:
5349 /* For multiplies wider than HImode, we have to go to the FPU,
5350 which normally involves copies. Plus there's the latency
5351 of the multiply itself, and the latency of the instructions to
5352 transfer integer regs to FP regs. */
5353 if (FLOAT_MODE_P (GET_MODE (x)))
5354 *total = COSTS_N_INSNS (4);
5355 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5356 *total = COSTS_N_INSNS (10);
5357 else
5358 *total = COSTS_N_INSNS (2);
5359 return true;
5361 case PLUS:
5362 case MINUS:
5363 if (FLOAT_MODE_P (GET_MODE (x)))
5365 *total = COSTS_N_INSNS (4);
5366 return true;
5368 /* FALLTHRU */
5370 case ASHIFT:
5371 case ASHIFTRT:
5372 case LSHIFTRT:
5373 *total = COSTS_N_INSNS (1);
5374 return true;
5376 case DIV:
5377 case UDIV:
5378 case MOD:
5379 case UMOD:
5380 /* We make divide expensive, so that divide-by-constant will be
5381 optimized to a multiply. */
5382 *total = COSTS_N_INSNS (60);
5383 return true;
5385 default:
5386 return false;
5390 /* Calculate the cost of moving data from a register in class FROM to
5391 one in class TO, using MODE. */
5393 static int
5394 ia64_register_move_cost (enum machine_mode mode, reg_class_t from_i,
5395 reg_class_t to_i)
5397 enum reg_class from = (enum reg_class) from_i;
5398 enum reg_class to = (enum reg_class) to_i;
5400 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5401 if (to == ADDL_REGS)
5402 to = GR_REGS;
5403 if (from == ADDL_REGS)
5404 from = GR_REGS;
5406 /* All costs are symmetric, so reduce cases by putting the
5407 lower number class as the destination. */
5408 if (from < to)
5410 enum reg_class tmp = to;
5411 to = from, from = tmp;
5414 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5415 so that we get secondary memory reloads. Between FR_REGS,
5416 we have to make this at least as expensive as memory_move_cost
5417 to avoid spectacularly poor register class preferencing. */
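/* With the memory costs below, an XFmode or RFmode copy between the GR
   and FR classes therefore costs 4, the same as a memory move for those
   classes, while a GR<->GR copy of the same mode stays at 3.  */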
5418 if (mode == XFmode || mode == RFmode)
5420 if (to != GR_REGS || from != GR_REGS)
5421 return memory_move_cost (mode, to, false);
5422 else
5423 return 3;
5426 switch (to)
5428 case PR_REGS:
5429 /* Moving between PR registers takes two insns. */
5430 if (from == PR_REGS)
5431 return 3;
5432 /* Moving between PR and anything but GR is impossible. */
5433 if (from != GR_REGS)
5434 return memory_move_cost (mode, to, false);
5435 break;
5437 case BR_REGS:
5438 /* Moving between BR and anything but GR is impossible. */
5439 if (from != GR_REGS && from != GR_AND_BR_REGS)
5440 return memory_move_cost (mode, to, false);
5441 break;
5443 case AR_I_REGS:
5444 case AR_M_REGS:
5445 /* Moving between AR and anything but GR is impossible. */
5446 if (from != GR_REGS)
5447 return memory_move_cost (mode, to, false);
5448 break;
5450 case GR_REGS:
5451 case FR_REGS:
5452 case FP_REGS:
5453 case GR_AND_FR_REGS:
5454 case GR_AND_BR_REGS:
5455 case ALL_REGS:
5456 break;
5458 default:
5459 gcc_unreachable ();
5462 return 2;
5465 /* Calculate the cost of moving data of MODE from a register to or from
5466 memory. */
5468 static int
5469 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5470 reg_class_t rclass,
5471 bool in ATTRIBUTE_UNUSED)
5473 if (rclass == GENERAL_REGS
5474 || rclass == FR_REGS
5475 || rclass == FP_REGS
5476 || rclass == GR_AND_FR_REGS)
5477 return 4;
5478 else
5479 return 10;
5482 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5483 on RCLASS to use when copying X into that class. */
5485 static reg_class_t
5486 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5488 switch (rclass)
5490 case FR_REGS:
5491 case FP_REGS:
5492 /* Don't allow volatile mem reloads into floating point registers.
5493 This is defined to force reload to choose the r/m case instead
5494 of the f/f case when reloading (set (reg fX) (mem/v)). */
5495 if (MEM_P (x) && MEM_VOLATILE_P (x))
5496 return NO_REGS;
5498 /* Force all unrecognized constants into the constant pool. */
5499 if (CONSTANT_P (x))
5500 return NO_REGS;
5501 break;
5503 case AR_M_REGS:
5504 case AR_I_REGS:
5505 if (!OBJECT_P (x))
5506 return NO_REGS;
5507 break;
5509 default:
5510 break;
5513 return rclass;
5516 /* This function returns the register class required for a secondary
5517 register when copying between one of the registers in RCLASS, and X,
5518 using MODE. A return value of NO_REGS means that no secondary register
5519 is required. */
5521 enum reg_class
5522 ia64_secondary_reload_class (enum reg_class rclass,
5523 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5525 int regno = -1;
5527 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5528 regno = true_regnum (x);
5530 switch (rclass)
5532 case BR_REGS:
5533 case AR_M_REGS:
5534 case AR_I_REGS:
5535 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5536 interaction. We end up with two pseudos with overlapping lifetimes
5537 both of which are equiv to the same constant, and both of which need
5538 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5539 changes depending on the path length, which means the qty_first_reg
5540 check in make_regs_eqv can give different answers at different times.
5541 At some point I'll probably need a reload_indi pattern to handle
5542 this.
5544 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5545 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5546 non-general registers for good measure. */
5547 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5548 return GR_REGS;
5550 /* This is needed if a pseudo used as a call_operand gets spilled to a
5551 stack slot. */
5552 if (GET_CODE (x) == MEM)
5553 return GR_REGS;
5554 break;
5556 case FR_REGS:
5557 case FP_REGS:
5558 /* Need to go through general registers to get to other class regs. */
5559 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5560 return GR_REGS;
5562 /* This can happen when a paradoxical subreg is an operand to the
5563 muldi3 pattern. */
5564 /* ??? This shouldn't be necessary after instruction scheduling is
5565 enabled, because paradoxical subregs are not accepted by
5566 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5567 stop the paradoxical subreg stupidity in the *_operand functions
5568 in recog.c. */
5569 if (GET_CODE (x) == MEM
5570 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5571 || GET_MODE (x) == QImode))
5572 return GR_REGS;
5574 /* This can happen because of the ior/and/etc patterns that accept FP
5575 registers as operands. If the third operand is a constant, then it
5576 needs to be reloaded into a FP register. */
5577 if (GET_CODE (x) == CONST_INT)
5578 return GR_REGS;
5580 /* This can happen because of register elimination in a muldi3 insn.
5581 E.g. `26107 * (unsigned long)&u'. */
5582 if (GET_CODE (x) == PLUS)
5583 return GR_REGS;
5584 break;
5586 case PR_REGS:
5587 /* ??? This happens if we cse/gcse a BImode value across a call,
5588 and the function has a nonlocal goto. This is because global
5589 does not allocate call crossing pseudos to hard registers when
5590 crtl->has_nonlocal_goto is true. This is relatively
5591 common for C++ programs that use exceptions. To reproduce,
5592 return NO_REGS and compile libstdc++. */
5593 if (GET_CODE (x) == MEM)
5594 return GR_REGS;
5596 /* This can happen when we take a BImode subreg of a DImode value,
5597 and that DImode value winds up in some non-GR register. */
5598 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5599 return GR_REGS;
5600 break;
5602 default:
5603 break;
5606 return NO_REGS;
5610 /* Implement targetm.unspec_may_trap_p hook. */
5611 static int
5612 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5614 if (GET_CODE (x) == UNSPEC)
5616 switch (XINT (x, 1))
5618 case UNSPEC_LDA:
5619 case UNSPEC_LDS:
5620 case UNSPEC_LDSA:
5621 case UNSPEC_LDCCLR:
5622 case UNSPEC_CHKACLR:
5623 case UNSPEC_CHKS:
5624 /* These unspecs are just wrappers. */
5625 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5629 return default_unspec_may_trap_p (x, flags);
5633 /* Parse the -mfixed-range= option string. */
5635 static void
5636 fix_range (const char *const_str)
5638 int i, first, last;
5639 char *str, *dash, *comma;
5641 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5642 REG2 are either register names or register numbers. The effect
5643 of this option is to mark the registers in the range from REG1 to
5644 REG2 as ``fixed'' so they won't be used by the compiler. This is
5645 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5647 i = strlen (const_str);
5648 str = (char *) alloca (i + 1);
5649 memcpy (str, const_str, i + 1);
5651 while (1)
5653 dash = strchr (str, '-');
5654 if (!dash)
5656 warning (0, "value of -mfixed-range must have form REG1-REG2");
5657 return;
5659 *dash = '\0';
5661 comma = strchr (dash + 1, ',');
5662 if (comma)
5663 *comma = '\0';
5665 first = decode_reg_name (str);
5666 if (first < 0)
5668 warning (0, "unknown register name: %s", str);
5669 return;
5672 last = decode_reg_name (dash + 1);
5673 if (last < 0)
5675 warning (0, "unknown register name: %s", dash + 1);
5676 return;
5679 *dash = '-';
5681 if (first > last)
5683 warning (0, "%s-%s is an empty range", str, dash + 1);
5684 return;
5687 for (i = first; i <= last; ++i)
5688 fixed_regs[i] = call_used_regs[i] = 1;
5690 if (!comma)
5691 break;
5693 *comma = ',';
5694 str = comma + 1;
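/* For illustration, suppose the option is -mfixed-range=f32-f127,f2-f5
   (the second range is an arbitrary example).  The loop above then runs
   twice: the dash splits "f32"/"f127", decode_reg_name maps both names to
   hard register numbers, every register in [first, last] gets
   fixed_regs[i] = call_used_regs[i] = 1, and the comma advances str to
   "f2-f5" for the second pass.  */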
5698 /* Implement TARGET_HANDLE_OPTION. */
5700 static bool
5701 ia64_handle_option (size_t code, const char *arg, int value)
5703 switch (code)
5705 case OPT_mfixed_range_:
5706 fix_range (arg);
5707 return true;
5709 case OPT_mtls_size_:
5710 if (value != 14 && value != 22 && value != 64)
5711 error ("bad value %<%s%> for -mtls-size= switch", arg);
5712 return true;
5714 case OPT_mtune_:
5716 static struct pta
5718 const char *name; /* processor name or nickname. */
5719 enum processor_type processor;
5721 const processor_alias_table[] =
5723 {"itanium2", PROCESSOR_ITANIUM2},
5724 {"mckinley", PROCESSOR_ITANIUM2},
5726 int const pta_size = ARRAY_SIZE (processor_alias_table);
5727 int i;
5729 for (i = 0; i < pta_size; i++)
5730 if (!strcmp (arg, processor_alias_table[i].name))
5732 ia64_tune = processor_alias_table[i].processor;
5733 break;
5735 if (i == pta_size)
5736 error ("bad value %<%s%> for -mtune= switch", arg);
5737 return true;
5740 default:
5741 return true;
5745 /* Implement TARGET_OPTION_OVERRIDE. */
5747 static void
5748 ia64_option_override (void)
5750 if (TARGET_AUTO_PIC)
5751 target_flags |= MASK_CONST_GP;
5753 /* Numerous experiments show that IRA-based loop pressure
5754 calculation works better for RTL loop invariant motion on targets
5755 with enough (>= 32) registers. It is an expensive optimization,
5756 so it is enabled only when optimizing for peak performance. */
5757 if (optimize >= 3)
5758 flag_ira_loop_pressure = 1;
5761 ia64_section_threshold = (global_options_set.x_g_switch_value
5762 ? g_switch_value
5763 : IA64_DEFAULT_GVALUE);
5765 init_machine_status = ia64_init_machine_status;
5767 if (align_functions <= 0)
5768 align_functions = 64;
5769 if (align_loops <= 0)
5770 align_loops = 32;
5771 if (TARGET_ABI_OPEN_VMS)
5772 flag_no_common = 1;
5774 ia64_override_options_after_change();
5777 /* Implement targetm.override_options_after_change. */
5779 static void
5780 ia64_override_options_after_change (void)
5782 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5783 flag_schedule_insns_after_reload = 0;
5785 if (optimize >= 3
5786 && !global_options_set.x_flag_selective_scheduling
5787 && !global_options_set.x_flag_selective_scheduling2)
5789 flag_selective_scheduling2 = 1;
5790 flag_sel_sched_pipelining = 1;
5792 if (mflag_sched_control_spec == 2)
5794 /* Control speculation is on by default for the selective scheduler,
5795 but not for the Haifa scheduler. */
5796 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5798 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5800 /* FIXME: remove this when we'd implement breaking autoinsns as
5801 a transformation. */
5802 flag_auto_inc_dec = 0;
5806 /* Initialize the record of emitted frame related registers. */
5808 void ia64_init_expanders (void)
5810 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5813 static struct machine_function *
5814 ia64_init_machine_status (void)
5816 return ggc_alloc_cleared_machine_function ();
5819 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5820 static enum attr_type ia64_safe_type (rtx);
5822 static enum attr_itanium_class
5823 ia64_safe_itanium_class (rtx insn)
5825 if (recog_memoized (insn) >= 0)
5826 return get_attr_itanium_class (insn);
5827 else if (DEBUG_INSN_P (insn))
5828 return ITANIUM_CLASS_IGNORE;
5829 else
5830 return ITANIUM_CLASS_UNKNOWN;
5833 static enum attr_type
5834 ia64_safe_type (rtx insn)
5836 if (recog_memoized (insn) >= 0)
5837 return get_attr_type (insn);
5838 else
5839 return TYPE_UNKNOWN;
5842 /* The following collection of routines emit instruction group stop bits as
5843 necessary to avoid dependencies. */
5845 /* Need to track some additional registers as far as serialization is
5846 concerned so we can properly handle br.call and br.ret. We could
5847 make these registers visible to gcc, but since these registers are
5848 never explicitly used in gcc generated code, it seems wasteful to
5849 do so (plus it would make the call and return patterns needlessly
5850 complex). */
5851 #define REG_RP (BR_REG (0))
5852 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5853 /* This is used for volatile asms which may require a stop bit immediately
5854 before and after them. */
5855 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5856 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5857 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
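/* A sketch of how these fake registers are used (see the CALL and
   ASM_OPERANDS cases of rtx_needs_barrier below): a call is modeled as an
   unconditional write of REG_RP, AR_PFS_REGNUM and REG_AR_CFM, so any
   further access to those registers within the same group reports a
   dependency; a volatile asm is modeled as a write of REG_VOLATILE, which
   (with TARGET_VOL_ASM_STOP) requests a stop bit before the asm and lets
   group_barrier_needed request another one before the following insn.  */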
5859 /* For each register, we keep track of how it has been written in the
5860 current instruction group.
5862 If a register is written unconditionally (no qualifying predicate),
5863 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5865 If a register is written if its qualifying predicate P is true, we
5866 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5867 may be written again by the complement of P (P^1) and when this happens,
5868 WRITE_COUNT gets set to 2.
5870 The result of this is that whenever an insn attempts to write a register
5871 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5873 If a predicate register is written by a floating-point insn, we set
5874 WRITTEN_BY_FP to true.
5876 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5877 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
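/* A worked example of the rules above (register numbers are arbitrary):

     mov r14 = r15       write_count (r14): 0 -> 2, no qualifying predicate
     add r8 = r14, r9    the read of r14 sees write_count == 2, so
                         rws_access_regno returns 1 and group_barrier_needed
                         asks for a stop bit before the add

   A predicated write such as (p6) mov r14 = r15 would instead leave
   write_count == 1 and first_pred == p6.  */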
5879 #if GCC_VERSION >= 4000
5880 #define RWS_FIELD_TYPE __extension__ unsigned short
5881 #else
5882 #define RWS_FIELD_TYPE unsigned int
5883 #endif
5884 struct reg_write_state
5886 RWS_FIELD_TYPE write_count : 2;
5887 RWS_FIELD_TYPE first_pred : 10;
5888 RWS_FIELD_TYPE written_by_fp : 1;
5889 RWS_FIELD_TYPE written_by_and : 1;
5890 RWS_FIELD_TYPE written_by_or : 1;
5893 /* Cumulative info for the current instruction group. */
5894 struct reg_write_state rws_sum[NUM_REGS];
5895 #ifdef ENABLE_CHECKING
5896 /* Bitmap whether a register has been written in the current insn. */
5897 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5898 / HOST_BITS_PER_WIDEST_FAST_INT];
5900 static inline void
5901 rws_insn_set (int regno)
5903 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5904 SET_HARD_REG_BIT (rws_insn, regno);
5907 static inline int
5908 rws_insn_test (int regno)
5910 return TEST_HARD_REG_BIT (rws_insn, regno);
5912 #else
5913 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5914 unsigned char rws_insn[2];
5916 static inline void
5917 rws_insn_set (int regno)
5919 if (regno == REG_AR_CFM)
5920 rws_insn[0] = 1;
5921 else if (regno == REG_VOLATILE)
5922 rws_insn[1] = 1;
5925 static inline int
5926 rws_insn_test (int regno)
5928 if (regno == REG_AR_CFM)
5929 return rws_insn[0];
5930 if (regno == REG_VOLATILE)
5931 return rws_insn[1];
5932 return 0;
5934 #endif
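/* Tracking only these two registers suffices here because rws_insn_test
   is consulted just twice in rtx_needs_barrier below: for REG_AR_CFM in
   the CALL case and for REG_VOLATILE in the ASM_OPERANDS case.  The full
   bitmap above additionally backs the assertion in rws_insn_set when
   checking is enabled.  */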
5936 /* Indicates whether this is the first instruction after a stop bit,
5937 in which case we don't need another stop bit. Without this,
5938 ia64_variable_issue will die when scheduling an alloc. */
5939 static int first_instruction;
5941 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5942 RTL for one instruction. */
5943 struct reg_flags
5945 unsigned int is_write : 1; /* Is register being written? */
5946 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5947 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5948 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5949 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5950 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5953 static void rws_update (int, struct reg_flags, int);
5954 static int rws_access_regno (int, struct reg_flags, int);
5955 static int rws_access_reg (rtx, struct reg_flags, int);
5956 static void update_set_flags (rtx, struct reg_flags *);
5957 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5958 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5959 static void init_insn_group_barriers (void);
5960 static int group_barrier_needed (rtx);
5961 static int safe_group_barrier_needed (rtx);
5962 static int in_safe_group_barrier;
5964 /* Update *RWS for REGNO, which is being written by the current instruction,
5965 with predicate PRED, and associated register flags in FLAGS. */
5967 static void
5968 rws_update (int regno, struct reg_flags flags, int pred)
5970 if (pred)
5971 rws_sum[regno].write_count++;
5972 else
5973 rws_sum[regno].write_count = 2;
5974 rws_sum[regno].written_by_fp |= flags.is_fp;
5975 /* ??? Not tracking and/or across differing predicates. */
5976 rws_sum[regno].written_by_and = flags.is_and;
5977 rws_sum[regno].written_by_or = flags.is_or;
5978 rws_sum[regno].first_pred = pred;
5981 /* Handle an access to register REGNO of type FLAGS using predicate register
5982 PRED. Update rws_sum array. Return 1 if this access creates
5983 a dependency with an earlier instruction in the same group. */
5985 static int
5986 rws_access_regno (int regno, struct reg_flags flags, int pred)
5988 int need_barrier = 0;
5990 gcc_assert (regno < NUM_REGS);
5992 if (! PR_REGNO_P (regno))
5993 flags.is_and = flags.is_or = 0;
5995 if (flags.is_write)
5997 int write_count;
5999 rws_insn_set (regno);
6000 write_count = rws_sum[regno].write_count;
6002 switch (write_count)
6004 case 0:
6005 /* The register has not been written yet. */
6006 if (!in_safe_group_barrier)
6007 rws_update (regno, flags, pred);
6008 break;
6010 case 1:
6011 /* The register has been written via a predicate. Treat
6012 it like an unconditional write and do not try to check
6013 for a complementary pred reg in the earlier write. */
6014 if (flags.is_and && rws_sum[regno].written_by_and)
6016 else if (flags.is_or && rws_sum[regno].written_by_or)
6018 else
6019 need_barrier = 1;
6020 if (!in_safe_group_barrier)
6021 rws_update (regno, flags, pred);
6022 break;
6024 case 2:
6025 /* The register has been unconditionally written already. We
6026 need a barrier. */
6027 if (flags.is_and && rws_sum[regno].written_by_and)
6029 else if (flags.is_or && rws_sum[regno].written_by_or)
6031 else
6032 need_barrier = 1;
6033 if (!in_safe_group_barrier)
6035 rws_sum[regno].written_by_and = flags.is_and;
6036 rws_sum[regno].written_by_or = flags.is_or;
6038 break;
6040 default:
6041 gcc_unreachable ();
6044 else
6046 if (flags.is_branch)
6048 /* Branches have several RAW exceptions that allow us to avoid
6049 barriers. */
6051 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6052 /* RAW dependencies on branch regs are permissible as long
6053 as the writer is a non-branch instruction. Since we
6054 never generate code that uses a branch register written
6055 by a branch instruction, handling this case is
6056 easy. */
6057 return 0;
6059 if (REGNO_REG_CLASS (regno) == PR_REGS
6060 && ! rws_sum[regno].written_by_fp)
6061 /* The predicates of a branch are available within the
6062 same insn group as long as the predicate was written by
6063 something other than a floating-point instruction. */
6064 return 0;
6067 if (flags.is_and && rws_sum[regno].written_by_and)
6068 return 0;
6069 if (flags.is_or && rws_sum[regno].written_by_or)
6070 return 0;
6072 switch (rws_sum[regno].write_count)
6074 case 0:
6075 /* The register has not been written yet. */
6076 break;
6078 case 1:
6079 /* The register has been written via a predicate; assume we
6080 need a barrier (don't check for complementary regs). */
6081 need_barrier = 1;
6082 break;
6084 case 2:
6085 /* The register has been unconditionally written already. We
6086 need a barrier. */
6087 need_barrier = 1;
6088 break;
6090 default:
6091 gcc_unreachable ();
6095 return need_barrier;
6098 static int
6099 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6101 int regno = REGNO (reg);
6102 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6104 if (n == 1)
6105 return rws_access_regno (regno, flags, pred);
6106 else
6108 int need_barrier = 0;
6109 while (--n >= 0)
6110 need_barrier |= rws_access_regno (regno + n, flags, pred);
6111 return need_barrier;
6115 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6116 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6118 static void
6119 update_set_flags (rtx x, struct reg_flags *pflags)
6121 rtx src = SET_SRC (x);
6123 switch (GET_CODE (src))
6125 case CALL:
6126 return;
6128 case IF_THEN_ELSE:
6129 /* There are four cases here:
6130 (1) The destination is (pc), in which case this is a branch,
6131 nothing here applies.
6132 (2) The destination is ar.lc, in which case this is a
6133 doloop_end_internal,
6134 (3) The destination is an fp register, in which case this is
6135 an fselect instruction.
6136 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6137 this is a check load.
6138 In all cases, nothing we do in this function applies. */
6139 return;
6141 default:
6142 if (COMPARISON_P (src)
6143 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6144 /* Set pflags->is_fp to 1 so that we know we're dealing
6145 with a floating point comparison when processing the
6146 destination of the SET. */
6147 pflags->is_fp = 1;
6149 /* Discover if this is a parallel comparison. We only handle
6150 and.orcm and or.andcm at present, since we must retain a
6151 strict inverse on the predicate pair. */
6152 else if (GET_CODE (src) == AND)
6153 pflags->is_and = 1;
6154 else if (GET_CODE (src) == IOR)
6155 pflags->is_or = 1;
6157 break;
6161 /* Subroutine of rtx_needs_barrier; this function determines whether the
6162 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6163 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6164 for this insn. */
6166 static int
6167 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6169 int need_barrier = 0;
6170 rtx dst;
6171 rtx src = SET_SRC (x);
6173 if (GET_CODE (src) == CALL)
6174 /* We don't need to worry about the result registers that
6175 get written by subroutine call. */
6176 return rtx_needs_barrier (src, flags, pred);
6177 else if (SET_DEST (x) == pc_rtx)
6179 /* X is a conditional branch. */
6180 /* ??? This seems redundant, as the caller sets this bit for
6181 all JUMP_INSNs. */
6182 if (!ia64_spec_check_src_p (src))
6183 flags.is_branch = 1;
6184 return rtx_needs_barrier (src, flags, pred);
6187 if (ia64_spec_check_src_p (src))
6188 /* Avoid checking one register twice (in condition
6189 and in 'then' section) for ldc pattern. */
6191 gcc_assert (REG_P (XEXP (src, 2)));
6192 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6194 /* We process MEM below. */
6195 src = XEXP (src, 1);
6198 need_barrier |= rtx_needs_barrier (src, flags, pred);
6200 dst = SET_DEST (x);
6201 if (GET_CODE (dst) == ZERO_EXTRACT)
6203 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6204 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6206 return need_barrier;
6209 /* Handle an access to rtx X of type FLAGS using predicate register
6210 PRED. Return 1 if this access creates a dependency with an earlier
6211 instruction in the same group. */
6213 static int
6214 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6216 int i, j;
6217 int is_complemented = 0;
6218 int need_barrier = 0;
6219 const char *format_ptr;
6220 struct reg_flags new_flags;
6221 rtx cond;
6223 if (! x)
6224 return 0;
6226 new_flags = flags;
6228 switch (GET_CODE (x))
6230 case SET:
6231 update_set_flags (x, &new_flags);
6232 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6233 if (GET_CODE (SET_SRC (x)) != CALL)
6235 new_flags.is_write = 1;
6236 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6238 break;
6240 case CALL:
6241 new_flags.is_write = 0;
6242 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6244 /* Avoid multiple register writes, in case this is a pattern with
6245 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6246 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6248 new_flags.is_write = 1;
6249 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6250 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6251 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6253 break;
6255 case COND_EXEC:
6256 /* X is a predicated instruction. */
6258 cond = COND_EXEC_TEST (x);
6259 gcc_assert (!pred);
6260 need_barrier = rtx_needs_barrier (cond, flags, 0);
6262 if (GET_CODE (cond) == EQ)
6263 is_complemented = 1;
6264 cond = XEXP (cond, 0);
6265 gcc_assert (GET_CODE (cond) == REG
6266 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6267 pred = REGNO (cond);
6268 if (is_complemented)
6269 ++pred;
6271 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6272 return need_barrier;
6274 case CLOBBER:
6275 case USE:
6276 /* Clobber & use are for earlier compiler-phases only. */
6277 break;
6279 case ASM_OPERANDS:
6280 case ASM_INPUT:
6281 /* We always emit stop bits for traditional asms. We emit stop bits
6282 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6283 if (GET_CODE (x) != ASM_OPERANDS
6284 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6286 /* Avoid writing the register multiple times if we have multiple
6287 asm outputs. This avoids a failure in rws_access_reg. */
6288 if (! rws_insn_test (REG_VOLATILE))
6290 new_flags.is_write = 1;
6291 rws_access_regno (REG_VOLATILE, new_flags, pred);
6293 return 1;
6296 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6297 We cannot just fall through here since then we would be confused
6298 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
6299 a traditional asm, unlike its normal usage. */
6301 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6302 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6303 need_barrier = 1;
6304 break;
6306 case PARALLEL:
6307 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6309 rtx pat = XVECEXP (x, 0, i);
6310 switch (GET_CODE (pat))
6312 case SET:
6313 update_set_flags (pat, &new_flags);
6314 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6315 break;
6317 case USE:
6318 case CALL:
6319 case ASM_OPERANDS:
6320 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6321 break;
6323 case CLOBBER:
6324 case RETURN:
6325 break;
6327 default:
6328 gcc_unreachable ();
6331 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6333 rtx pat = XVECEXP (x, 0, i);
6334 if (GET_CODE (pat) == SET)
6336 if (GET_CODE (SET_SRC (pat)) != CALL)
6338 new_flags.is_write = 1;
6339 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6340 pred);
6343 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6344 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6346 break;
6348 case SUBREG:
6349 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6350 break;
6351 case REG:
6352 if (REGNO (x) == AR_UNAT_REGNUM)
6354 for (i = 0; i < 64; ++i)
6355 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6357 else
6358 need_barrier = rws_access_reg (x, flags, pred);
6359 break;
6361 case MEM:
6362 /* Find the regs used in memory address computation. */
6363 new_flags.is_write = 0;
6364 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6365 break;
6367 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6368 case SYMBOL_REF: case LABEL_REF: case CONST:
6369 break;
6371 /* Operators with side-effects. */
6372 case POST_INC: case POST_DEC:
6373 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6375 new_flags.is_write = 0;
6376 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6377 new_flags.is_write = 1;
6378 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6379 break;
6381 case POST_MODIFY:
6382 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6384 new_flags.is_write = 0;
6385 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6386 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6387 new_flags.is_write = 1;
6388 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6389 break;
6391 /* Handle common unary and binary ops for efficiency. */
6392 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6393 case MOD: case UDIV: case UMOD: case AND: case IOR:
6394 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6395 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6396 case NE: case EQ: case GE: case GT: case LE:
6397 case LT: case GEU: case GTU: case LEU: case LTU:
6398 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6399 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6400 break;
6402 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6403 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6404 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6405 case SQRT: case FFS: case POPCOUNT:
6406 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6407 break;
6409 case VEC_SELECT:
6410 /* VEC_SELECT's second argument is a PARALLEL with integers that
6411 describe the elements selected. On ia64, those integers are
6412 always constants. Avoid walking the PARALLEL so that we don't
6413 get confused with "normal" parallels and then die. */
6414 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6415 break;
6417 case UNSPEC:
6418 switch (XINT (x, 1))
6420 case UNSPEC_LTOFF_DTPMOD:
6421 case UNSPEC_LTOFF_DTPREL:
6422 case UNSPEC_DTPREL:
6423 case UNSPEC_LTOFF_TPREL:
6424 case UNSPEC_TPREL:
6425 case UNSPEC_PRED_REL_MUTEX:
6426 case UNSPEC_PIC_CALL:
6427 case UNSPEC_MF:
6428 case UNSPEC_FETCHADD_ACQ:
6429 case UNSPEC_BSP_VALUE:
6430 case UNSPEC_FLUSHRS:
6431 case UNSPEC_BUNDLE_SELECTOR:
6432 break;
6434 case UNSPEC_GR_SPILL:
6435 case UNSPEC_GR_RESTORE:
6437 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6438 HOST_WIDE_INT bit = (offset >> 3) & 63;
6440 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6441 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6442 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6443 new_flags, pred);
6444 break;
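/* For example, a spill at frame offset 24 gives bit = (24 >> 3) & 63 = 3
   and is modeled as an access to AR_UNAT_BIT_0 + 3; each 8-byte spill
   slot thus gets its own simulated UNAT bit, so only accesses to the
   same slot within one instruction group are treated as conflicting.  */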
6447 case UNSPEC_FR_SPILL:
6448 case UNSPEC_FR_RESTORE:
6449 case UNSPEC_GETF_EXP:
6450 case UNSPEC_SETF_EXP:
6451 case UNSPEC_ADDP4:
6452 case UNSPEC_FR_SQRT_RECIP_APPROX:
6453 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6454 case UNSPEC_LDA:
6455 case UNSPEC_LDS:
6456 case UNSPEC_LDS_A:
6457 case UNSPEC_LDSA:
6458 case UNSPEC_CHKACLR:
6459 case UNSPEC_CHKS:
6460 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6461 break;
6463 case UNSPEC_FR_RECIP_APPROX:
6464 case UNSPEC_SHRP:
6465 case UNSPEC_COPYSIGN:
6466 case UNSPEC_FR_RECIP_APPROX_RES:
6467 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6468 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6469 break;
6471 case UNSPEC_CMPXCHG_ACQ:
6472 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6473 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6474 break;
6476 default:
6477 gcc_unreachable ();
6479 break;
6481 case UNSPEC_VOLATILE:
6482 switch (XINT (x, 1))
6484 case UNSPECV_ALLOC:
6485 /* Alloc must always be the first instruction of a group.
6486 We force this by always returning true. */
6487 /* ??? We might get better scheduling if we explicitly check for
6488 input/local/output register dependencies, and modify the
6489 scheduler so that alloc is always reordered to the start of
6490 the current group. We could then eliminate all of the
6491 first_instruction code. */
6492 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6494 new_flags.is_write = 1;
6495 rws_access_regno (REG_AR_CFM, new_flags, pred);
6496 return 1;
6498 case UNSPECV_SET_BSP:
6499 need_barrier = 1;
6500 break;
6502 case UNSPECV_BLOCKAGE:
6503 case UNSPECV_INSN_GROUP_BARRIER:
6504 case UNSPECV_BREAK:
6505 case UNSPECV_PSAC_ALL:
6506 case UNSPECV_PSAC_NORMAL:
6507 return 0;
6509 default:
6510 gcc_unreachable ();
6512 break;
6514 case RETURN:
6515 new_flags.is_write = 0;
6516 need_barrier = rws_access_regno (REG_RP, flags, pred);
6517 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6519 new_flags.is_write = 1;
6520 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6521 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6522 break;
6524 default:
6525 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6526 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6527 switch (format_ptr[i])
6529 case '0': /* unused field */
6530 case 'i': /* integer */
6531 case 'n': /* note */
6532 case 'w': /* wide integer */
6533 case 's': /* pointer to string */
6534 case 'S': /* optional pointer to string */
6535 break;
6537 case 'e':
6538 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6539 need_barrier = 1;
6540 break;
6542 case 'E':
6543 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6544 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6545 need_barrier = 1;
6546 break;
6548 default:
6549 gcc_unreachable ();
6551 break;
6553 return need_barrier;
6556 /* Clear out the state for group_barrier_needed at the start of a
6557 sequence of insns. */
6559 static void
6560 init_insn_group_barriers (void)
6562 memset (rws_sum, 0, sizeof (rws_sum));
6563 first_instruction = 1;
6566 /* Given the current state, determine whether a group barrier (a stop bit) is
6567 necessary before INSN. Return nonzero if so. This modifies the state to
6568 include the effects of INSN as a side-effect. */
6570 static int
6571 group_barrier_needed (rtx insn)
6573 rtx pat;
6574 int need_barrier = 0;
6575 struct reg_flags flags;
6577 memset (&flags, 0, sizeof (flags));
6578 switch (GET_CODE (insn))
6580 case NOTE:
6581 case DEBUG_INSN:
6582 break;
6584 case BARRIER:
6585 /* A barrier doesn't imply an instruction group boundary. */
6586 break;
6588 case CODE_LABEL:
6589 memset (rws_insn, 0, sizeof (rws_insn));
6590 return 1;
6592 case CALL_INSN:
6593 flags.is_branch = 1;
6594 flags.is_sibcall = SIBLING_CALL_P (insn);
6595 memset (rws_insn, 0, sizeof (rws_insn));
6597 /* Don't bundle a call following another call. */
6598 if ((pat = prev_active_insn (insn))
6599 && GET_CODE (pat) == CALL_INSN)
6601 need_barrier = 1;
6602 break;
6605 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6606 break;
6608 case JUMP_INSN:
6609 if (!ia64_spec_check_p (insn))
6610 flags.is_branch = 1;
6612 /* Don't bundle a jump following a call. */
6613 if ((pat = prev_active_insn (insn))
6614 && GET_CODE (pat) == CALL_INSN)
6616 need_barrier = 1;
6617 break;
6619 /* FALLTHRU */
6621 case INSN:
6622 if (GET_CODE (PATTERN (insn)) == USE
6623 || GET_CODE (PATTERN (insn)) == CLOBBER)
6624 /* Don't care about USE and CLOBBER "insns"---those are used to
6625 indicate to the optimizer that it shouldn't get rid of
6626 certain operations. */
6627 break;
6629 pat = PATTERN (insn);
6631 /* Ug. Hack hacks hacked elsewhere. */
6632 switch (recog_memoized (insn))
6634 /* We play dependency tricks with the epilogue in order
6635 to get proper schedules. Undo this for dv analysis. */
6636 case CODE_FOR_epilogue_deallocate_stack:
6637 case CODE_FOR_prologue_allocate_stack:
6638 pat = XVECEXP (pat, 0, 0);
6639 break;
6641 /* The pattern we use for br.cloop confuses the code above.
6642 The second element of the vector is representative. */
6643 case CODE_FOR_doloop_end_internal:
6644 pat = XVECEXP (pat, 0, 1);
6645 break;
6647 /* Doesn't generate code. */
6648 case CODE_FOR_pred_rel_mutex:
6649 case CODE_FOR_prologue_use:
6650 return 0;
6652 default:
6653 break;
6656 memset (rws_insn, 0, sizeof (rws_insn));
6657 need_barrier = rtx_needs_barrier (pat, flags, 0);
6659 /* Check to see if the previous instruction was a volatile
6660 asm. */
6661 if (! need_barrier)
6662 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6664 break;
6666 default:
6667 gcc_unreachable ();
6670 if (first_instruction && INSN_P (insn)
6671 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6672 && GET_CODE (PATTERN (insn)) != USE
6673 && GET_CODE (PATTERN (insn)) != CLOBBER)
6675 need_barrier = 0;
6676 first_instruction = 0;
6679 return need_barrier;
6682 /* Like group_barrier_needed, but do not clobber the current state. */
6684 static int
6685 safe_group_barrier_needed (rtx insn)
6687 int saved_first_instruction;
6688 int t;
6690 saved_first_instruction = first_instruction;
6691 in_safe_group_barrier = 1;
6693 t = group_barrier_needed (insn);
6695 first_instruction = saved_first_instruction;
6696 in_safe_group_barrier = 0;
6698 return t;
6701 /* Scan the current function and insert stop bits as necessary to
6702 eliminate dependencies. This function assumes that a final
6703 instruction scheduling pass has been run which has already
6704 inserted most of the necessary stop bits. This function only
6705 inserts new ones at basic block boundaries, since these are
6706 invisible to the scheduler. */
6708 static void
6709 emit_insn_group_barriers (FILE *dump)
6711 rtx insn;
6712 rtx last_label = 0;
6713 int insns_since_last_label = 0;
6715 init_insn_group_barriers ();
6717 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6719 if (GET_CODE (insn) == CODE_LABEL)
6721 if (insns_since_last_label)
6722 last_label = insn;
6723 insns_since_last_label = 0;
6725 else if (GET_CODE (insn) == NOTE
6726 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6728 if (insns_since_last_label)
6729 last_label = insn;
6730 insns_since_last_label = 0;
6732 else if (GET_CODE (insn) == INSN
6733 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6734 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6736 init_insn_group_barriers ();
6737 last_label = 0;
6739 else if (NONDEBUG_INSN_P (insn))
6741 insns_since_last_label = 1;
6743 if (group_barrier_needed (insn))
6745 if (last_label)
6747 if (dump)
6748 fprintf (dump, "Emitting stop before label %d\n",
6749 INSN_UID (last_label));
6750 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6751 insn = last_label;
6753 init_insn_group_barriers ();
6754 last_label = 0;
6761 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6762 This function has to emit all necessary group barriers. */
6764 static void
6765 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6767 rtx insn;
6769 init_insn_group_barriers ();
6771 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6773 if (GET_CODE (insn) == BARRIER)
6775 rtx last = prev_active_insn (insn);
6777 if (! last)
6778 continue;
6779 if (GET_CODE (last) == JUMP_INSN
6780 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6781 last = prev_active_insn (last);
6782 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6783 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6785 init_insn_group_barriers ();
6787 else if (NONDEBUG_INSN_P (insn))
6789 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6790 init_insn_group_barriers ();
6791 else if (group_barrier_needed (insn))
6793 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6794 init_insn_group_barriers ();
6795 group_barrier_needed (insn);
6803 /* Instruction scheduling support. */
6805 #define NR_BUNDLES 10
6807 /* A list of names of all available bundles. */
6809 static const char *bundle_name [NR_BUNDLES] =
6811 ".mii",
6812 ".mmi",
6813 ".mfi",
6814 ".mmf",
6815 #if NR_BUNDLES == 10
6816 ".bbb",
6817 ".mbb",
6818 #endif
6819 ".mib",
6820 ".mmb",
6821 ".mfb",
6822 ".mlx"
6825 /* Nonzero if we should insert stop bits into the schedule. */
6827 int ia64_final_schedule = 0;
6829 /* Codes of the corresponding queried units: */
6831 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6832 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6834 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6835 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6837 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6839 /* The following variable value is an insn group barrier. */
6841 static rtx dfa_stop_insn;
6843 /* The following variable value is the last issued insn. */
6845 static rtx last_scheduled_insn;
6847 /* The following variable value is a pointer to a DFA state used as
6848 a temporary variable. */
6850 static state_t temp_dfa_state = NULL;
6852 /* The following variable value is the DFA state after issuing the last
6853 insn. */
6855 static state_t prev_cycle_state = NULL;
6857 /* The following array element values are TRUE if the corresponding
6858 insn requires stop bits to be added before it. */
6860 static char *stops_p = NULL;
6862 /* The following variable is used to set up the above-mentioned array. */
6864 static int stop_before_p = 0;
6866 /* The following variable value is the length of the arrays `clocks' and
6867 `add_cycles'. */
6869 static int clocks_length;
6871 /* The following variable value is the number of data speculations in progress. */
6872 static int pending_data_specs = 0;
6874 /* Number of memory references on the current and the three following processor cycles. */
6875 static char mem_ops_in_group[4];
6877 /* Number of current processor cycle (from scheduler's point of view). */
6878 static int current_cycle;
6880 static rtx ia64_single_set (rtx);
6881 static void ia64_emit_insn_before (rtx, rtx);
6883 /* Map a bundle number to its pseudo-op. */
6885 const char *
6886 get_bundle_name (int b)
6888 return bundle_name[b];
6892 /* Return the maximum number of instructions a cpu can issue. */
6894 static int
6895 ia64_issue_rate (void)
6897 return 6;
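/* The value 6 corresponds to the two three-slot bundles an Itanium core
   can issue per clock cycle.  */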
6900 /* Helper function - like single_set, but look inside COND_EXEC. */
6902 static rtx
6903 ia64_single_set (rtx insn)
6905 rtx x = PATTERN (insn), ret;
6906 if (GET_CODE (x) == COND_EXEC)
6907 x = COND_EXEC_CODE (x);
6908 if (GET_CODE (x) == SET)
6909 return x;
6911 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6912 Although they are not classical single sets, the second set is there just
6913 to protect the first one from moving past FP-relative stack accesses. */
6914 switch (recog_memoized (insn))
6916 case CODE_FOR_prologue_allocate_stack:
6917 case CODE_FOR_epilogue_deallocate_stack:
6918 ret = XVECEXP (x, 0, 0);
6919 break;
6921 default:
6922 ret = single_set_2 (insn, x);
6923 break;
6926 return ret;
6929 /* Adjust the cost of a scheduling dependency.
6930 Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
6931 COST is the current cost, DW is dependency weakness. */
6932 static int
6933 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6935 enum reg_note dep_type = (enum reg_note) dep_type1;
6936 enum attr_itanium_class dep_class;
6937 enum attr_itanium_class insn_class;
6939 insn_class = ia64_safe_itanium_class (insn);
6940 dep_class = ia64_safe_itanium_class (dep_insn);
6942 /* Treat true memory dependencies separately. Ignore apparent true
6943 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6944 if (dep_type == REG_DEP_TRUE
6945 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6946 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6947 return 0;
6949 if (dw == MIN_DEP_WEAK)
6950 /* Store and load are likely to alias, use higher cost to avoid stall. */
6951 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6952 else if (dw > MIN_DEP_WEAK)
6954 /* Store and load are less likely to alias. */
6955 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6956 /* Assume there will be no cache conflict for floating-point data.
6957 For integer data, L1 conflict penalty is huge (17 cycles), so we
6958 never assume it will not cause a conflict. */
6959 return 0;
6960 else
6961 return cost;
6964 if (dep_type != REG_DEP_OUTPUT)
6965 return cost;
6967 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6968 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6969 return 0;
6971 return cost;
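/* Illustration of the cases above: when a memory access truly depends on
   an earlier store, dw estimates how likely the two references are to
   alias; dw == MIN_DEP_WEAK (likely aliasing) yields the
   PARAM_SCHED_MEM_TRUE_DEP_COST latency, while a weaker dependence on a
   floating-point store is treated as free if
   mflag_sched_fp_mem_deps_zero_cost is set.  An output dependence in
   which either insn is a store likewise gets cost 0.  */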
6974 /* Like emit_insn_before, but skip cycle_display notes.
6975 ??? When cycle display notes are implemented, update this. */
6977 static void
6978 ia64_emit_insn_before (rtx insn, rtx before)
6980 emit_insn_before (insn, before);
6983 /* The following function marks insns that produce addresses for load
6984 and store insns. Such insns will be placed into M slots because this
6985 decreases latency for Itanium 1 (see function
6986 `ia64_produce_address_p' and the DFA descriptions). */
6988 static void
6989 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6991 rtx insn, next, next_tail;
6993 /* Before reload, which_alternative is not set, which means that
6994 ia64_safe_itanium_class will produce wrong results for (at least)
6995 move instructions. */
6996 if (!reload_completed)
6997 return;
6999 next_tail = NEXT_INSN (tail);
7000 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7001 if (INSN_P (insn))
7002 insn->call = 0;
7003 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7004 if (INSN_P (insn)
7005 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7007 sd_iterator_def sd_it;
7008 dep_t dep;
7009 bool has_mem_op_consumer_p = false;
7011 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7013 enum attr_itanium_class c;
7015 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7016 continue;
7018 next = DEP_CON (dep);
7019 c = ia64_safe_itanium_class (next);
7020 if ((c == ITANIUM_CLASS_ST
7021 || c == ITANIUM_CLASS_STF)
7022 && ia64_st_address_bypass_p (insn, next))
7024 has_mem_op_consumer_p = true;
7025 break;
7027 else if ((c == ITANIUM_CLASS_LD
7028 || c == ITANIUM_CLASS_FLD
7029 || c == ITANIUM_CLASS_FLDP)
7030 && ia64_ld_address_bypass_p (insn, next))
7032 has_mem_op_consumer_p = true;
7033 break;
7037 insn->call = has_mem_op_consumer_p;
7041 /* We're beginning a new block. Initialize data structures as necessary. */
7043 static void
7044 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7045 int sched_verbose ATTRIBUTE_UNUSED,
7046 int max_ready ATTRIBUTE_UNUSED)
7048 #ifdef ENABLE_CHECKING
7049 rtx insn;
7051 if (!sel_sched_p () && reload_completed)
7052 for (insn = NEXT_INSN (current_sched_info->prev_head);
7053 insn != current_sched_info->next_tail;
7054 insn = NEXT_INSN (insn))
7055 gcc_assert (!SCHED_GROUP_P (insn));
7056 #endif
7057 last_scheduled_insn = NULL_RTX;
7058 init_insn_group_barriers ();
7060 current_cycle = 0;
7061 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7064 /* We're beginning a scheduling pass. Check assertion. */
7066 static void
7067 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7068 int sched_verbose ATTRIBUTE_UNUSED,
7069 int max_ready ATTRIBUTE_UNUSED)
7071 gcc_assert (pending_data_specs == 0);
7074 /* Scheduling pass is now finished. Free/reset static variable. */
7075 static void
7076 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7077 int sched_verbose ATTRIBUTE_UNUSED)
7079 gcc_assert (pending_data_specs == 0);
7082 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7083 speculation check), FALSE otherwise. */
7084 static bool
7085 is_load_p (rtx insn)
7087 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7089 return
7090 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7091 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7094 /* If INSN is a memory reference, memoize it in MEM_OPS_IN_GROUP global array
7095 (taking account for 3-cycle cache reference postponing for stores: Intel
7096 Itanium 2 Reference Manual for Software Development and Optimization,
7097 6.7.3.1). */
7098 static void
7099 record_memory_reference (rtx insn)
7101 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7103 switch (insn_class) {
7104 case ITANIUM_CLASS_FLD:
7105 case ITANIUM_CLASS_LD:
7106 mem_ops_in_group[current_cycle % 4]++;
7107 break;
7108 case ITANIUM_CLASS_STF:
7109 case ITANIUM_CLASS_ST:
7110 mem_ops_in_group[(current_cycle + 3) % 4]++;
7111 break;
7112 default:;
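/* For example, a load issued on cycle 5 increments mem_ops_in_group[1]
   (5 % 4), while a store issued on the same cycle increments
   mem_ops_in_group[0] ((5 + 3) % 4), reflecting the three-cycle delay
   before the store actually references the cache.  The counters are
   compared against ia64_max_memory_insns in the reorder and
   lookahead-guard hooks below.  */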
7116 /* We are about to begin issuing insns for this clock cycle.
7117 Override the default sort algorithm to better slot instructions. */
7119 static int
7120 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7121 int *pn_ready, int clock_var,
7122 int reorder_type)
7124 int n_asms;
7125 int n_ready = *pn_ready;
7126 rtx *e_ready = ready + n_ready;
7127 rtx *insnp;
7129 if (sched_verbose)
7130 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7132 if (reorder_type == 0)
7134 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7135 n_asms = 0;
7136 for (insnp = ready; insnp < e_ready; insnp++)
7137 if (insnp < e_ready)
7139 rtx insn = *insnp;
7140 enum attr_type t = ia64_safe_type (insn);
7141 if (t == TYPE_UNKNOWN)
7143 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7144 || asm_noperands (PATTERN (insn)) >= 0)
7146 rtx lowest = ready[n_asms];
7147 ready[n_asms] = insn;
7148 *insnp = lowest;
7149 n_asms++;
7151 else
7153 rtx highest = ready[n_ready - 1];
7154 ready[n_ready - 1] = insn;
7155 *insnp = highest;
7156 return 1;
7161 if (n_asms < n_ready)
7163 /* Some normal insns to process. Skip the asms. */
7164 ready += n_asms;
7165 n_ready -= n_asms;
7167 else if (n_ready > 0)
7168 return 1;
7171 if (ia64_final_schedule)
7173 int deleted = 0;
7174 int nr_need_stop = 0;
7176 for (insnp = ready; insnp < e_ready; insnp++)
7177 if (safe_group_barrier_needed (*insnp))
7178 nr_need_stop++;
7180 if (reorder_type == 1 && n_ready == nr_need_stop)
7181 return 0;
7182 if (reorder_type == 0)
7183 return 1;
7184 insnp = e_ready;
7185 /* Move down everything that needs a stop bit, preserving
7186 relative order. */
7187 while (insnp-- > ready + deleted)
7188 while (insnp >= ready + deleted)
7190 rtx insn = *insnp;
7191 if (! safe_group_barrier_needed (insn))
7192 break;
7193 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7194 *ready = insn;
7195 deleted++;
7197 n_ready -= deleted;
7198 ready += deleted;
7201 current_cycle = clock_var;
7202 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7204 int moved = 0;
7206 insnp = e_ready;
7207 /* Move down loads/stores, preserving relative order. */
7208 while (insnp-- > ready + moved)
7209 while (insnp >= ready + moved)
7211 rtx insn = *insnp;
7212 if (! is_load_p (insn))
7213 break;
7214 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7215 *ready = insn;
7216 moved++;
7218 n_ready -= moved;
7219 ready += moved;
7222 return 1;
7225 /* We are about to begin issuing insns for this clock cycle. Override
7226 the default sort algorithm to better slot instructions. */
7228 static int
7229 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7230 int clock_var)
7232 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7233 pn_ready, clock_var, 0);
7236 /* Like ia64_sched_reorder, but called after issuing each insn.
7237 Override the default sort algorithm to better slot instructions. */
7239 static int
7240 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7241 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7242 int *pn_ready, int clock_var)
7244 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7245 clock_var, 1);
7248 /* We are about to issue INSN. Return the number of insns left on the
7249 ready queue that can be issued this cycle. */
7251 static int
7252 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7253 int sched_verbose ATTRIBUTE_UNUSED,
7254 rtx insn ATTRIBUTE_UNUSED,
7255 int can_issue_more ATTRIBUTE_UNUSED)
7257 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7258 /* Modulo scheduling does not extend h_i_d when emitting
7259 new instructions. Don't use h_i_d if we don't have to. */
7261 if (DONE_SPEC (insn) & BEGIN_DATA)
7262 pending_data_specs++;
7263 if (CHECK_SPEC (insn) & BEGIN_DATA)
7264 pending_data_specs--;
7267 if (DEBUG_INSN_P (insn))
7268 return 1;
7270 last_scheduled_insn = insn;
7271 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7272 if (reload_completed)
7274 int needed = group_barrier_needed (insn);
7276 gcc_assert (!needed);
7277 if (GET_CODE (insn) == CALL_INSN)
7278 init_insn_group_barriers ();
7279 stops_p [INSN_UID (insn)] = stop_before_p;
7280 stop_before_p = 0;
7282 record_memory_reference (insn);
7284 return 1;
7287 /* We are choosing insn from the ready queue. Return nonzero if INSN
7288 can be chosen. */
7290 static int
7291 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7293 gcc_assert (insn && INSN_P (insn));
7294 return ((!reload_completed
7295 || !safe_group_barrier_needed (insn))
7296 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7297 && (!mflag_sched_mem_insns_hard_limit
7298 || !is_load_p (insn)
7299 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7302 /* We are choosing insn from the ready queue. Return nonzero if INSN
7303 can be chosen. */
7305 static bool
7306 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7308 gcc_assert (insn && INSN_P (insn));
7309 /* The size of the ALAT is 32. Since we perform conservative data
7310 speculation, we keep the ALAT half-empty. */
7311 return (pending_data_specs < 16
7312 || !(TODO_SPEC (insn) & BEGIN_DATA));
7315 /* The following variable value is a pseudo-insn used by the DFA insn
7316 scheduler to change the DFA state when the simulated clock is
7317 increased. */
7319 static rtx dfa_pre_cycle_insn;
7321 /* Returns 1 when a meaningful insn was scheduled between the last group
7322 barrier and LAST. */
7323 static int
7324 scheduled_good_insn (rtx last)
7326 if (last && recog_memoized (last) >= 0)
7327 return 1;
7329 for ( ;
7330 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7331 && !stops_p[INSN_UID (last)];
7332 last = PREV_INSN (last))
7333 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7334 the ebb we're scheduling. */
7335 if (INSN_P (last) && recog_memoized (last) >= 0)
7336 return 1;
7338 return 0;
7341 /* We are about to begin issuing INSN. Return nonzero if we cannot
7342 issue it on the given cycle CLOCK, and return zero if we should not sort
7343 the ready queue on the next clock start. */
7345 static int
7346 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7347 int clock, int *sort_p)
7349 gcc_assert (insn && INSN_P (insn));
7351 if (DEBUG_INSN_P (insn))
7352 return 0;
7354 /* When a group barrier is needed for insn, last_scheduled_insn
7355 should be set. */
7356 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7357 || last_scheduled_insn);
7359 if ((reload_completed
7360 && (safe_group_barrier_needed (insn)
7361 || (mflag_sched_stop_bits_after_every_cycle
7362 && last_clock != clock
7363 && last_scheduled_insn
7364 && scheduled_good_insn (last_scheduled_insn))))
7365 || (last_scheduled_insn
7366 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7367 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7368 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7370 init_insn_group_barriers ();
7372 if (verbose && dump)
7373 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7374 last_clock == clock ? " + cycle advance" : "");
7376 stop_before_p = 1;
7377 current_cycle = clock;
7378 mem_ops_in_group[current_cycle % 4] = 0;
7380 if (last_clock == clock)
7382 state_transition (curr_state, dfa_stop_insn);
7383 if (TARGET_EARLY_STOP_BITS)
7384 *sort_p = (last_scheduled_insn == NULL_RTX
7385 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7386 else
7387 *sort_p = 0;
7388 return 1;
7391 if (last_scheduled_insn)
7393 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7394 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7395 state_reset (curr_state);
7396 else
7398 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7399 state_transition (curr_state, dfa_stop_insn);
7400 state_transition (curr_state, dfa_pre_cycle_insn);
7401 state_transition (curr_state, NULL);
7405 return 0;
7408 /* Implement targetm.sched.h_i_d_extended hook.
7409 Extend internal data structures. */
7410 static void
7411 ia64_h_i_d_extended (void)
7413 if (stops_p != NULL)
7415 int new_clocks_length = get_max_uid () * 3 / 2;
7416 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7417 clocks_length = new_clocks_length;
7422 /* This structure describes the data used by the backend to guide scheduling.
7423 When the current scheduling point is switched, this data should be saved
7424 and restored later, if the scheduler returns to this point. */
7425 struct _ia64_sched_context
7427 state_t prev_cycle_state;
7428 rtx last_scheduled_insn;
7429 struct reg_write_state rws_sum[NUM_REGS];
7430 struct reg_write_state rws_insn[NUM_REGS];
7431 int first_instruction;
7432 int pending_data_specs;
7433 int current_cycle;
7434 char mem_ops_in_group[4];
7436 typedef struct _ia64_sched_context *ia64_sched_context_t;
7438 /* Allocates a scheduling context. */
7439 static void *
7440 ia64_alloc_sched_context (void)
7442 return xmalloc (sizeof (struct _ia64_sched_context));
7445 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7446 the global context otherwise. */
7447 static void
7448 ia64_init_sched_context (void *_sc, bool clean_p)
7450 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7452 sc->prev_cycle_state = xmalloc (dfa_state_size);
7453 if (clean_p)
7455 state_reset (sc->prev_cycle_state);
7456 sc->last_scheduled_insn = NULL_RTX;
7457 memset (sc->rws_sum, 0, sizeof (rws_sum));
7458 memset (sc->rws_insn, 0, sizeof (rws_insn));
7459 sc->first_instruction = 1;
7460 sc->pending_data_specs = 0;
7461 sc->current_cycle = 0;
7462 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7464 else
7466 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7467 sc->last_scheduled_insn = last_scheduled_insn;
7468 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7469 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7470 sc->first_instruction = first_instruction;
7471 sc->pending_data_specs = pending_data_specs;
7472 sc->current_cycle = current_cycle;
7473 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7477 /* Sets the global scheduling context to the one pointed to by _SC. */
7478 static void
7479 ia64_set_sched_context (void *_sc)
7481 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7483 gcc_assert (sc != NULL);
7485 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7486 last_scheduled_insn = sc->last_scheduled_insn;
7487 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7488 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7489 first_instruction = sc->first_instruction;
7490 pending_data_specs = sc->pending_data_specs;
7491 current_cycle = sc->current_cycle;
7492 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7495 /* Clears the data in the _SC scheduling context. */
7496 static void
7497 ia64_clear_sched_context (void *_sc)
7499 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7501 free (sc->prev_cycle_state);
7502 sc->prev_cycle_state = NULL;
7505 /* Frees the _SC scheduling context. */
7506 static void
7507 ia64_free_sched_context (void *_sc)
7509 gcc_assert (_sc != NULL);
7511 free (_sc);
7514 typedef rtx (* gen_func_t) (rtx, rtx);
7516 /* Return a function that will generate a load of mode MODE_NO
7517 with speculation types TS. */
7518 static gen_func_t
7519 get_spec_load_gen_function (ds_t ts, int mode_no)
7521 static gen_func_t gen_ld_[] = {
7522 gen_movbi,
7523 gen_movqi_internal,
7524 gen_movhi_internal,
7525 gen_movsi_internal,
7526 gen_movdi_internal,
7527 gen_movsf_internal,
7528 gen_movdf_internal,
7529 gen_movxf_internal,
7530 gen_movti_internal,
7531 gen_zero_extendqidi2,
7532 gen_zero_extendhidi2,
7533 gen_zero_extendsidi2,
7536 static gen_func_t gen_ld_a[] = {
7537 gen_movbi_advanced,
7538 gen_movqi_advanced,
7539 gen_movhi_advanced,
7540 gen_movsi_advanced,
7541 gen_movdi_advanced,
7542 gen_movsf_advanced,
7543 gen_movdf_advanced,
7544 gen_movxf_advanced,
7545 gen_movti_advanced,
7546 gen_zero_extendqidi2_advanced,
7547 gen_zero_extendhidi2_advanced,
7548 gen_zero_extendsidi2_advanced,
7550 static gen_func_t gen_ld_s[] = {
7551 gen_movbi_speculative,
7552 gen_movqi_speculative,
7553 gen_movhi_speculative,
7554 gen_movsi_speculative,
7555 gen_movdi_speculative,
7556 gen_movsf_speculative,
7557 gen_movdf_speculative,
7558 gen_movxf_speculative,
7559 gen_movti_speculative,
7560 gen_zero_extendqidi2_speculative,
7561 gen_zero_extendhidi2_speculative,
7562 gen_zero_extendsidi2_speculative,
7564 static gen_func_t gen_ld_sa[] = {
7565 gen_movbi_speculative_advanced,
7566 gen_movqi_speculative_advanced,
7567 gen_movhi_speculative_advanced,
7568 gen_movsi_speculative_advanced,
7569 gen_movdi_speculative_advanced,
7570 gen_movsf_speculative_advanced,
7571 gen_movdf_speculative_advanced,
7572 gen_movxf_speculative_advanced,
7573 gen_movti_speculative_advanced,
7574 gen_zero_extendqidi2_speculative_advanced,
7575 gen_zero_extendhidi2_speculative_advanced,
7576 gen_zero_extendsidi2_speculative_advanced,
7578 static gen_func_t gen_ld_s_a[] = {
7579 gen_movbi_speculative_a,
7580 gen_movqi_speculative_a,
7581 gen_movhi_speculative_a,
7582 gen_movsi_speculative_a,
7583 gen_movdi_speculative_a,
7584 gen_movsf_speculative_a,
7585 gen_movdf_speculative_a,
7586 gen_movxf_speculative_a,
7587 gen_movti_speculative_a,
7588 gen_zero_extendqidi2_speculative_a,
7589 gen_zero_extendhidi2_speculative_a,
7590 gen_zero_extendsidi2_speculative_a,
7593 gen_func_t *gen_ld;
7595 if (ts & BEGIN_DATA)
7597 if (ts & BEGIN_CONTROL)
7598 gen_ld = gen_ld_sa;
7599 else
7600 gen_ld = gen_ld_a;
7602 else if (ts & BEGIN_CONTROL)
7604 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7605 || ia64_needs_block_p (ts))
7606 gen_ld = gen_ld_s;
7607 else
7608 gen_ld = gen_ld_s_a;
7610 else if (ts == 0)
7611 gen_ld = gen_ld_;
7612 else
7613 gcc_unreachable ();
7615 return gen_ld[mode_no];
7618 /* Constants that help mapping 'enum machine_mode' to int. */
7619 enum SPEC_MODES
7621 SPEC_MODE_INVALID = -1,
7622 SPEC_MODE_FIRST = 0,
7623 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7624 SPEC_MODE_FOR_EXTEND_LAST = 3,
7625 SPEC_MODE_LAST = 8
7628 enum
7630 /* Offset to reach ZERO_EXTEND patterns. */
7631 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7634 /* Return index of the MODE. */
7635 static int
7636 ia64_mode_to_int (enum machine_mode mode)
7638 switch (mode)
7640 case BImode: return 0; /* SPEC_MODE_FIRST */
7641 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7642 case HImode: return 2;
7643 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7644 case DImode: return 4;
7645 case SFmode: return 5;
7646 case DFmode: return 6;
7647 case XFmode: return 7;
7648 case TImode:
7649 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7650 mentioned in itanium[12].md. Predicate fp_register_operand also
7651 needs to be defined. Bottom line: better disable for now. */
7652 return SPEC_MODE_INVALID;
7653 default: return SPEC_MODE_INVALID;
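
/* For illustration, the index computed here selects a slot in the
   12-entry generator tables above: e.g. DImode maps to 4, picking
   gen_movdi_internal / gen_movdi_speculative / etc., while a QImode
   load that is zero-extended to DImode maps to
   1 + SPEC_GEN_EXTEND_OFFSET = 9, picking the gen_zero_extendqidi2*
   entries.  */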
7657 /* Provide information about speculation capabilities. */
7658 static void
7659 ia64_set_sched_flags (spec_info_t spec_info)
7661 unsigned int *flags = &(current_sched_info->flags);
7663 if (*flags & SCHED_RGN
7664 || *flags & SCHED_EBB
7665 || *flags & SEL_SCHED)
7667 int mask = 0;
7669 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7670 || (mflag_sched_ar_data_spec && reload_completed))
7672 mask |= BEGIN_DATA;
7674 if (!sel_sched_p ()
7675 && ((mflag_sched_br_in_data_spec && !reload_completed)
7676 || (mflag_sched_ar_in_data_spec && reload_completed)))
7677 mask |= BE_IN_DATA;
7680 if (mflag_sched_control_spec
7681 && (!sel_sched_p ()
7682 || reload_completed))
7684 mask |= BEGIN_CONTROL;
7686 if (!sel_sched_p () && mflag_sched_in_control_spec)
7687 mask |= BE_IN_CONTROL;
7690 spec_info->mask = mask;
7692 if (mask)
7694 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7696 if (mask & BE_IN_SPEC)
7697 *flags |= NEW_BBS;
7699 spec_info->flags = 0;
7701 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7702 spec_info->flags |= PREFER_NON_DATA_SPEC;
7704 if (mask & CONTROL_SPEC)
7706 if (mflag_sched_prefer_non_control_spec_insns)
7707 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7709 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7710 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7713 if (sched_verbose >= 1)
7714 spec_info->dump = sched_dump;
7715 else
7716 spec_info->dump = 0;
7718 if (mflag_sched_count_spec_in_critical_path)
7719 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7722 else
7723 spec_info->mask = 0;
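
/* In IA-64 terms (for orientation): BEGIN_DATA corresponds to advanced
   loads (ld.a, checked against the ALAT with chk.a or ld.c), and
   BEGIN_CONTROL to control-speculative loads (ld.s, checked with chk.s
   on the NaT bit).  The BE_IN_* bits roughly mean that an insn may be
   moved into an already speculative region; as the conditions above
   show, they are not used by the selective scheduler.  */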
7726 /* If INSN is an appropriate load, return the index of its mode
7727 (as computed by ia64_mode_to_int). Return -1 otherwise. */
7728 static int
7729 get_mode_no_for_insn (rtx insn)
7731 rtx reg, mem, mode_rtx;
7732 int mode_no;
7733 bool extend_p;
7735 extract_insn_cached (insn);
7737 /* We use WHICH_ALTERNATIVE only after reload. This will
7738 guarantee that reload won't touch a speculative insn. */
7740 if (recog_data.n_operands != 2)
7741 return -1;
7743 reg = recog_data.operand[0];
7744 mem = recog_data.operand[1];
7746 /* We should use MEM's mode since REG's mode in the presence of
7747 ZERO_EXTEND will always be DImode. */
7748 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7749 /* Process non-speculative ld. */
7751 if (!reload_completed)
7753 /* Do not speculate into regs like ar.lc. */
7754 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7755 return -1;
7757 if (!MEM_P (mem))
7758 return -1;
7761 rtx mem_reg = XEXP (mem, 0);
7763 if (!REG_P (mem_reg))
7764 return -1;
7767 mode_rtx = mem;
7769 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7771 gcc_assert (REG_P (reg) && MEM_P (mem));
7772 mode_rtx = mem;
7774 else
7775 return -1;
7777 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7778 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7779 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7780 /* Process speculative ld or ld.c. */
7782 gcc_assert (REG_P (reg) && MEM_P (mem));
7783 mode_rtx = mem;
7785 else
7787 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7789 if (attr_class == ITANIUM_CLASS_CHK_A
7790 || attr_class == ITANIUM_CLASS_CHK_S_I
7791 || attr_class == ITANIUM_CLASS_CHK_S_F)
7792 /* Process chk. */
7793 mode_rtx = reg;
7794 else
7795 return -1;
7798 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7800 if (mode_no == SPEC_MODE_INVALID)
7801 return -1;
7803 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7805 if (extend_p)
7807 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7808 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7809 return -1;
7811 mode_no += SPEC_GEN_EXTEND_OFFSET;
7814 return mode_no;
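
/* For example, a plain "ld8 r14 = [r15]" yields mode_no 4, and a load
   that zero-extends a SImode memory value into a DImode register yields
   3 + SPEC_GEN_EXTEND_OFFSET = 11.  Before reload, loads into
   application registers such as ar.lc or loads whose address is not a
   plain register are rejected with -1, and TImode loads are always
   rejected (see ia64_mode_to_int above).  */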
7817 /* If X is an unspec part of a speculative load, return its code.
7818 Return -1 otherwise. */
7819 static int
7820 get_spec_unspec_code (const_rtx x)
7822 if (GET_CODE (x) != UNSPEC)
7823 return -1;
7826 int code;
7828 code = XINT (x, 1);
7830 switch (code)
7832 case UNSPEC_LDA:
7833 case UNSPEC_LDS:
7834 case UNSPEC_LDS_A:
7835 case UNSPEC_LDSA:
7836 return code;
7838 default:
7839 return -1;
7844 /* Implement skip_rtx_p hook. */
7845 static bool
7846 ia64_skip_rtx_p (const_rtx x)
7848 return get_spec_unspec_code (x) != -1;
7851 /* If INSN is a speculative load, return its UNSPEC code.
7852 Return -1 otherwise. */
7853 static int
7854 get_insn_spec_code (const_rtx insn)
7856 rtx pat, reg, mem;
7858 pat = PATTERN (insn);
7860 if (GET_CODE (pat) == COND_EXEC)
7861 pat = COND_EXEC_CODE (pat);
7863 if (GET_CODE (pat) != SET)
7864 return -1;
7866 reg = SET_DEST (pat);
7867 if (!REG_P (reg))
7868 return -1;
7870 mem = SET_SRC (pat);
7871 if (GET_CODE (mem) == ZERO_EXTEND)
7872 mem = XEXP (mem, 0);
7874 return get_spec_unspec_code (mem);
7877 /* If INSN is a speculative load, return a ds with the speculation types.
7878 Otherwise [if INSN is a normal instruction] return 0. */
7879 static ds_t
7880 ia64_get_insn_spec_ds (rtx insn)
7882 int code = get_insn_spec_code (insn);
7884 switch (code)
7886 case UNSPEC_LDA:
7887 return BEGIN_DATA;
7889 case UNSPEC_LDS:
7890 case UNSPEC_LDS_A:
7891 return BEGIN_CONTROL;
7893 case UNSPEC_LDSA:
7894 return BEGIN_DATA | BEGIN_CONTROL;
7896 default:
7897 return 0;
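
/* For reference, these unspecs mark the IA-64 speculative load forms:
   UNSPEC_LDA is an advanced load (ld.a, data speculation only),
   UNSPEC_LDS is a control-speculative load (ld.s), UNSPEC_LDSA is the
   combined form (ld.sa), and UNSPEC_LDS_A marks a control-speculative
   load that will nevertheless be checked like an advanced load (compare
   ia64_get_insn_checked_ds below).  */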
7901 /* If INSN is a speculative load, return a ds with the speculation types that
7902 will be checked.
7903 Otherwise [if INSN is a normal instruction] return 0. */
7904 static ds_t
7905 ia64_get_insn_checked_ds (rtx insn)
7907 int code = get_insn_spec_code (insn);
7909 switch (code)
7911 case UNSPEC_LDA:
7912 return BEGIN_DATA | BEGIN_CONTROL;
7914 case UNSPEC_LDS:
7915 return BEGIN_CONTROL;
7917 case UNSPEC_LDS_A:
7918 case UNSPEC_LDSA:
7919 return BEGIN_DATA | BEGIN_CONTROL;
7921 default:
7922 return 0;
7926 /* Return a speculative pattern for INSN with speculation type TS and
7927 machine mode index MODE_NO. Zero-extending variants are selected
7928 through MODE_NO itself (see SPEC_GEN_EXTEND_OFFSET), so no separate
7929 extend flag is needed. */
7930 static rtx
7931 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7933 rtx pat, new_pat;
7934 gen_func_t gen_load;
7936 gen_load = get_spec_load_gen_function (ts, mode_no);
7938 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7939 copy_rtx (recog_data.operand[1]));
7941 pat = PATTERN (insn);
7942 if (GET_CODE (pat) == COND_EXEC)
7943 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7944 new_pat);
7946 return new_pat;
7949 static bool
7950 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7951 ds_t ds ATTRIBUTE_UNUSED)
7953 return false;
7956 /* Implement targetm.sched.speculate_insn hook.
7957 Check whether INSN can be made TS-speculative.
7958 If it cannot, return -1.
7959 If it can, generate the speculative pattern in *NEW_PAT and return 1.
7960 If the current pattern of INSN already provides TS speculation,
7961 return 0. */
7962 static int
7963 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7965 int mode_no;
7966 int res;
7968 gcc_assert (!(ts & ~SPECULATIVE));
7970 if (ia64_spec_check_p (insn))
7971 return -1;
7973 if ((ts & BE_IN_SPEC)
7974 && !insn_can_be_in_speculative_p (insn, ts))
7975 return -1;
7977 mode_no = get_mode_no_for_insn (insn);
7979 if (mode_no != SPEC_MODE_INVALID)
7981 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7982 res = 0;
7983 else
7985 res = 1;
7986 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7989 else
7990 res = -1;
7992 return res;
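
/* A sketch of the flow (not an additional code path): for a DImode load
   and TS == BEGIN_CONTROL, get_spec_load_gen_function typically returns
   gen_movdi_speculative (or gen_movdi_speculative_a when a simple ld.c
   check will be used), so *NEW_PAT rewrites the plain load into its
   ld.s/ld.sa form; the matching check is produced later by
   ia64_gen_spec_check.  */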
7995 /* Return a function that will generate a check for speculation TS with mode
7996 MODE_NO.
7997 If a simple check is needed, pass true for SIMPLE_CHECK_P.
7998 If a clearing check is needed, pass true for CLEARING_CHECK_P. */
7999 static gen_func_t
8000 get_spec_check_gen_function (ds_t ts, int mode_no,
8001 bool simple_check_p, bool clearing_check_p)
8003 static gen_func_t gen_ld_c_clr[] = {
8004 gen_movbi_clr,
8005 gen_movqi_clr,
8006 gen_movhi_clr,
8007 gen_movsi_clr,
8008 gen_movdi_clr,
8009 gen_movsf_clr,
8010 gen_movdf_clr,
8011 gen_movxf_clr,
8012 gen_movti_clr,
8013 gen_zero_extendqidi2_clr,
8014 gen_zero_extendhidi2_clr,
8015 gen_zero_extendsidi2_clr,
8017 static gen_func_t gen_ld_c_nc[] = {
8018 gen_movbi_nc,
8019 gen_movqi_nc,
8020 gen_movhi_nc,
8021 gen_movsi_nc,
8022 gen_movdi_nc,
8023 gen_movsf_nc,
8024 gen_movdf_nc,
8025 gen_movxf_nc,
8026 gen_movti_nc,
8027 gen_zero_extendqidi2_nc,
8028 gen_zero_extendhidi2_nc,
8029 gen_zero_extendsidi2_nc,
8031 static gen_func_t gen_chk_a_clr[] = {
8032 gen_advanced_load_check_clr_bi,
8033 gen_advanced_load_check_clr_qi,
8034 gen_advanced_load_check_clr_hi,
8035 gen_advanced_load_check_clr_si,
8036 gen_advanced_load_check_clr_di,
8037 gen_advanced_load_check_clr_sf,
8038 gen_advanced_load_check_clr_df,
8039 gen_advanced_load_check_clr_xf,
8040 gen_advanced_load_check_clr_ti,
8041 gen_advanced_load_check_clr_di,
8042 gen_advanced_load_check_clr_di,
8043 gen_advanced_load_check_clr_di,
8045 static gen_func_t gen_chk_a_nc[] = {
8046 gen_advanced_load_check_nc_bi,
8047 gen_advanced_load_check_nc_qi,
8048 gen_advanced_load_check_nc_hi,
8049 gen_advanced_load_check_nc_si,
8050 gen_advanced_load_check_nc_di,
8051 gen_advanced_load_check_nc_sf,
8052 gen_advanced_load_check_nc_df,
8053 gen_advanced_load_check_nc_xf,
8054 gen_advanced_load_check_nc_ti,
8055 gen_advanced_load_check_nc_di,
8056 gen_advanced_load_check_nc_di,
8057 gen_advanced_load_check_nc_di,
8059 static gen_func_t gen_chk_s[] = {
8060 gen_speculation_check_bi,
8061 gen_speculation_check_qi,
8062 gen_speculation_check_hi,
8063 gen_speculation_check_si,
8064 gen_speculation_check_di,
8065 gen_speculation_check_sf,
8066 gen_speculation_check_df,
8067 gen_speculation_check_xf,
8068 gen_speculation_check_ti,
8069 gen_speculation_check_di,
8070 gen_speculation_check_di,
8071 gen_speculation_check_di,
8074 gen_func_t *gen_check;
8076 if (ts & BEGIN_DATA)
8078 /* We don't need recovery here: even if this is an ld.sa, the
8079 ALAT entry will have been allocated only if the NaT bit is zero,
8080 so an ld.c check is sufficient. */
8082 if (simple_check_p)
8084 gcc_assert (mflag_sched_spec_ldc);
8086 if (clearing_check_p)
8087 gen_check = gen_ld_c_clr;
8088 else
8089 gen_check = gen_ld_c_nc;
8091 else
8093 if (clearing_check_p)
8094 gen_check = gen_chk_a_clr;
8095 else
8096 gen_check = gen_chk_a_nc;
8099 else if (ts & BEGIN_CONTROL)
8101 if (simple_check_p)
8102 /* We might want to use ld.sa -> ld.c instead of
8103 ld.s -> chk.s. */
8105 gcc_assert (!ia64_needs_block_p (ts));
8107 if (clearing_check_p)
8108 gen_check = gen_ld_c_clr;
8109 else
8110 gen_check = gen_ld_c_nc;
8112 else
8114 gen_check = gen_chk_s;
8117 else
8118 gcc_unreachable ();
8120 gcc_assert (mode_no >= 0);
8121 return gen_check[mode_no];
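
/* For example, a data-speculative (BEGIN_DATA) DImode load with a simple
   clearing check uses gen_movdi_clr, i.e. an ld.c that redoes the load if
   the ALAT entry was invalidated, while a control-speculative load that
   needs branchy recovery ends up with gen_speculation_check_di, i.e. a
   chk.s branching to the recovery block.  */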
8124 /* Return nonzero if speculation TS needs a branchy recovery check. */
8125 static bool
8126 ia64_needs_block_p (ds_t ts)
8128 if (ts & BEGIN_DATA)
8129 return !mflag_sched_spec_ldc;
8131 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8133 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8136 /* Generate a recovery check for INSN.
8137 If LABEL is nonzero, generate a branchy recovery check.
8138 Otherwise, generate a simple check. */
8139 static rtx
8140 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8142 rtx op1, pat, check_pat;
8143 gen_func_t gen_check;
8144 int mode_no;
8146 mode_no = get_mode_no_for_insn (insn);
8147 gcc_assert (mode_no >= 0);
8149 if (label)
8150 op1 = label;
8151 else
8153 gcc_assert (!ia64_needs_block_p (ds));
8154 op1 = copy_rtx (recog_data.operand[1]);
8157 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8158 true);
8160 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8162 pat = PATTERN (insn);
8163 if (GET_CODE (pat) == COND_EXEC)
8164 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8165 check_pat);
8167 return check_pat;
8170 /* Return nonzero if X is a speculation recovery check. */
8171 static int
8172 ia64_spec_check_p (rtx x)
8174 x = PATTERN (x);
8175 if (GET_CODE (x) == COND_EXEC)
8176 x = COND_EXEC_CODE (x);
8177 if (GET_CODE (x) == SET)
8178 return ia64_spec_check_src_p (SET_SRC (x));
8179 return 0;
8182 /* Return nonzero if SRC belongs to a recovery check. */
8183 static int
8184 ia64_spec_check_src_p (rtx src)
8186 if (GET_CODE (src) == IF_THEN_ELSE)
8188 rtx t;
8190 t = XEXP (src, 0);
8191 if (GET_CODE (t) == NE)
8193 t = XEXP (t, 0);
8195 if (GET_CODE (t) == UNSPEC)
8197 int code;
8199 code = XINT (t, 1);
8201 if (code == UNSPEC_LDCCLR
8202 || code == UNSPEC_LDCNC
8203 || code == UNSPEC_CHKACLR
8204 || code == UNSPEC_CHKANC
8205 || code == UNSPEC_CHKS)
8207 gcc_assert (code != 0);
8208 return code;
8213 return 0;
8217 /* The following page contains abstract data `bundle states' which are
8218 used for bundling insns (inserting nops and template generation). */
8220 /* The following describes the state of insn bundling. */
8222 struct bundle_state
8224 /* Unique bundle state number to identify the state in debugging
8225 output. */
8226 int unique_num;
8227 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8228 /* number of nops before and after the insn */
8229 short before_nops_num, after_nops_num;
8230 int insn_num; /* insn number (0 - for the initial state, 1 - for the 1st
8231 insn) */
8232 int cost; /* cost of the state in cycles */
8233 int accumulated_insns_num; /* number of all previous insns including
8234 nops; an L insn is counted as 2 insns */
8235 int branch_deviation; /* deviation of previous branches from 3rd slots */
8236 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8237 struct bundle_state *next; /* next state with the same insn_num */
8238 struct bundle_state *originator; /* originator (previous insn state) */
8239 /* All bundle states are in the following chain. */
8240 struct bundle_state *allocated_states_chain;
8241 /* The DFA State after issuing the insn and the nops. */
8242 state_t dfa_state;
8245 /* The following maps an insn number to the corresponding bundle state. */
8247 static struct bundle_state **index_to_bundle_states;
8249 /* The unique number of next bundle state. */
8251 static int bundle_states_num;
8253 /* All allocated bundle states are in the following chain. */
8255 static struct bundle_state *allocated_bundle_states_chain;
8257 /* All allocated but not used bundle states are in the following
8258 chain. */
8260 static struct bundle_state *free_bundle_state_chain;
8263 /* The following function returns a free bundle state. */
8265 static struct bundle_state *
8266 get_free_bundle_state (void)
8268 struct bundle_state *result;
8270 if (free_bundle_state_chain != NULL)
8272 result = free_bundle_state_chain;
8273 free_bundle_state_chain = result->next;
8275 else
8277 result = XNEW (struct bundle_state);
8278 result->dfa_state = xmalloc (dfa_state_size);
8279 result->allocated_states_chain = allocated_bundle_states_chain;
8280 allocated_bundle_states_chain = result;
8282 result->unique_num = bundle_states_num++;
8283 return result;
8287 /* The following function frees given bundle state. */
8289 static void
8290 free_bundle_state (struct bundle_state *state)
8292 state->next = free_bundle_state_chain;
8293 free_bundle_state_chain = state;
8296 /* Start work with abstract data `bundle states'. */
8298 static void
8299 initiate_bundle_states (void)
8301 bundle_states_num = 0;
8302 free_bundle_state_chain = NULL;
8303 allocated_bundle_states_chain = NULL;
8306 /* Finish work with abstract data `bundle states'. */
8308 static void
8309 finish_bundle_states (void)
8311 struct bundle_state *curr_state, *next_state;
8313 for (curr_state = allocated_bundle_states_chain;
8314 curr_state != NULL;
8315 curr_state = next_state)
8317 next_state = curr_state->allocated_states_chain;
8318 free (curr_state->dfa_state);
8319 free (curr_state);
8323 /* Hash table of the bundle states. The key is dfa_state and insn_num
8324 of the bundle states. */
8326 static htab_t bundle_state_table;
8328 /* The function returns hash of BUNDLE_STATE. */
8330 static unsigned
8331 bundle_state_hash (const void *bundle_state)
8333 const struct bundle_state *const state
8334 = (const struct bundle_state *) bundle_state;
8335 unsigned result, i;
8337 for (result = i = 0; i < dfa_state_size; i++)
8338 result += (((unsigned char *) state->dfa_state) [i]
8339 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8340 return result + state->insn_num;
8343 /* The function returns nonzero if the bundle state keys are equal. */
8345 static int
8346 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8348 const struct bundle_state *const state1
8349 = (const struct bundle_state *) bundle_state_1;
8350 const struct bundle_state *const state2
8351 = (const struct bundle_state *) bundle_state_2;
8353 return (state1->insn_num == state2->insn_num
8354 && memcmp (state1->dfa_state, state2->dfa_state,
8355 dfa_state_size) == 0);
8358 /* The function inserts the BUNDLE_STATE into the hash table. The
8359 function returns nonzero if the bundle has been inserted into the
8360 table. The table contains the best bundle state with the given key. */
8362 static int
8363 insert_bundle_state (struct bundle_state *bundle_state)
8365 void **entry_ptr;
8367 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8368 if (*entry_ptr == NULL)
8370 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8371 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8372 *entry_ptr = (void *) bundle_state;
8373 return TRUE;
8375 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8376 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8377 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8378 > bundle_state->accumulated_insns_num
8379 || (((struct bundle_state *)
8380 *entry_ptr)->accumulated_insns_num
8381 == bundle_state->accumulated_insns_num
8382 && (((struct bundle_state *)
8383 *entry_ptr)->branch_deviation
8384 > bundle_state->branch_deviation
8385 || (((struct bundle_state *)
8386 *entry_ptr)->branch_deviation
8387 == bundle_state->branch_deviation
8388 && ((struct bundle_state *)
8389 *entry_ptr)->middle_bundle_stops
8390 > bundle_state->middle_bundle_stops))))))
8393 struct bundle_state temp;
8395 temp = *(struct bundle_state *) *entry_ptr;
8396 *(struct bundle_state *) *entry_ptr = *bundle_state;
8397 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8398 *bundle_state = temp;
8400 return FALSE;
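
/* The comparison above is a lexicographic "better than" test on
   (cost, accumulated_insns_num, branch_deviation, middle_bundle_stops):
   for a given (dfa_state, insn_num) key the table keeps the state that
   is minimal in this order.  */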
8403 /* Start work with the hash table. */
8405 static void
8406 initiate_bundle_state_table (void)
8408 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8409 (htab_del) 0);
8412 /* Finish work with the hash table. */
8414 static void
8415 finish_bundle_state_table (void)
8417 htab_delete (bundle_state_table);
8422 /* The following variable is an insn `nop' used to check bundle states
8423 with different numbers of inserted nops. */
8425 static rtx ia64_nop;
8427 /* The following function tries to issue NOPS_NUM nops for the current
8428 state without advancing the processor cycle. If it fails, the
8429 function returns FALSE and frees the current state. */
8431 static int
8432 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8434 int i;
8436 for (i = 0; i < nops_num; i++)
8437 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8439 free_bundle_state (curr_state);
8440 return FALSE;
8442 return TRUE;
8445 /* The following function tries to issue INSN for the current
8446 state without advancing the processor cycle. If it fails, the
8447 function returns FALSE and frees the current state. */
8449 static int
8450 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8452 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8454 free_bundle_state (curr_state);
8455 return FALSE;
8457 return TRUE;
8460 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8461 starting with ORIGINATOR without advancing the processor cycle. If
8462 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8463 ONLY_BUNDLE_END_P is TRUE) tries to issue enough nops to fill the whole bundle.
8464 If successful, the function creates a new bundle state and
8465 inserts it into the hash table and into `index_to_bundle_states'. */
8467 static void
8468 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8469 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8471 struct bundle_state *curr_state;
8473 curr_state = get_free_bundle_state ();
8474 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8475 curr_state->insn = insn;
8476 curr_state->insn_num = originator->insn_num + 1;
8477 curr_state->cost = originator->cost;
8478 curr_state->originator = originator;
8479 curr_state->before_nops_num = before_nops_num;
8480 curr_state->after_nops_num = 0;
8481 curr_state->accumulated_insns_num
8482 = originator->accumulated_insns_num + before_nops_num;
8483 curr_state->branch_deviation = originator->branch_deviation;
8484 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8485 gcc_assert (insn);
8486 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8488 gcc_assert (GET_MODE (insn) != TImode);
8489 if (!try_issue_nops (curr_state, before_nops_num))
8490 return;
8491 if (!try_issue_insn (curr_state, insn))
8492 return;
8493 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8494 if (curr_state->accumulated_insns_num % 3 != 0)
8495 curr_state->middle_bundle_stops++;
8496 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8497 && curr_state->accumulated_insns_num % 3 != 0)
8499 free_bundle_state (curr_state);
8500 return;
8503 else if (GET_MODE (insn) != TImode)
8505 if (!try_issue_nops (curr_state, before_nops_num))
8506 return;
8507 if (!try_issue_insn (curr_state, insn))
8508 return;
8509 curr_state->accumulated_insns_num++;
8510 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8511 && asm_noperands (PATTERN (insn)) < 0);
8513 if (ia64_safe_type (insn) == TYPE_L)
8514 curr_state->accumulated_insns_num++;
8516 else
8518 /* If this is an insn that must be first in a group, then don't allow
8519 nops to be emitted before it. Currently, alloc is the only such
8520 supported instruction. */
8521 /* ??? The bundling automatons should handle this for us, but they do
8522 not yet have support for the first_insn attribute. */
8523 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8525 free_bundle_state (curr_state);
8526 return;
8529 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8530 state_transition (curr_state->dfa_state, NULL);
8531 curr_state->cost++;
8532 if (!try_issue_nops (curr_state, before_nops_num))
8533 return;
8534 if (!try_issue_insn (curr_state, insn))
8535 return;
8536 curr_state->accumulated_insns_num++;
8537 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8538 || asm_noperands (PATTERN (insn)) >= 0)
8540 /* Finish bundle containing asm insn. */
8541 curr_state->after_nops_num
8542 = 3 - curr_state->accumulated_insns_num % 3;
8543 curr_state->accumulated_insns_num
8544 += 3 - curr_state->accumulated_insns_num % 3;
8546 else if (ia64_safe_type (insn) == TYPE_L)
8547 curr_state->accumulated_insns_num++;
8549 if (ia64_safe_type (insn) == TYPE_B)
8550 curr_state->branch_deviation
8551 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8552 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8554 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8556 state_t dfa_state;
8557 struct bundle_state *curr_state1;
8558 struct bundle_state *allocated_states_chain;
8560 curr_state1 = get_free_bundle_state ();
8561 dfa_state = curr_state1->dfa_state;
8562 allocated_states_chain = curr_state1->allocated_states_chain;
8563 *curr_state1 = *curr_state;
8564 curr_state1->dfa_state = dfa_state;
8565 curr_state1->allocated_states_chain = allocated_states_chain;
8566 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8567 dfa_state_size);
8568 curr_state = curr_state1;
8570 if (!try_issue_nops (curr_state,
8571 3 - curr_state->accumulated_insns_num % 3))
8572 return;
8573 curr_state->after_nops_num
8574 = 3 - curr_state->accumulated_insns_num % 3;
8575 curr_state->accumulated_insns_num
8576 += 3 - curr_state->accumulated_insns_num % 3;
8578 if (!insert_bundle_state (curr_state))
8579 free_bundle_state (curr_state);
8580 return;
8583 /* The following function returns the position in the two-bundle window
8584 for the given STATE. */
8586 static int
8587 get_max_pos (state_t state)
8589 if (cpu_unit_reservation_p (state, pos_6))
8590 return 6;
8591 else if (cpu_unit_reservation_p (state, pos_5))
8592 return 5;
8593 else if (cpu_unit_reservation_p (state, pos_4))
8594 return 4;
8595 else if (cpu_unit_reservation_p (state, pos_3))
8596 return 3;
8597 else if (cpu_unit_reservation_p (state, pos_2))
8598 return 2;
8599 else if (cpu_unit_reservation_p (state, pos_1))
8600 return 1;
8601 else
8602 return 0;
8605 /* The function returns the code of a possible template for the given
8606 position and state. It should only be called with a position
8607 equal to 3 or 6. We avoid generating F NOPs by putting
8608 templates containing F insns at the end of the template search,
8609 because of an undocumented anomaly in McKinley-derived cores which can
8610 cause stalls if an F-unit insn (including a NOP) is issued within a
8611 six-cycle window after reading certain application registers (such
8612 as ar.bsp). Furthermore, power considerations also argue against
8613 the use of F-unit instructions unless they're really needed. */
8615 static int
8616 get_template (state_t state, int pos)
8618 switch (pos)
8620 case 3:
8621 if (cpu_unit_reservation_p (state, _0mmi_))
8622 return 1;
8623 else if (cpu_unit_reservation_p (state, _0mii_))
8624 return 0;
8625 else if (cpu_unit_reservation_p (state, _0mmb_))
8626 return 7;
8627 else if (cpu_unit_reservation_p (state, _0mib_))
8628 return 6;
8629 else if (cpu_unit_reservation_p (state, _0mbb_))
8630 return 5;
8631 else if (cpu_unit_reservation_p (state, _0bbb_))
8632 return 4;
8633 else if (cpu_unit_reservation_p (state, _0mmf_))
8634 return 3;
8635 else if (cpu_unit_reservation_p (state, _0mfi_))
8636 return 2;
8637 else if (cpu_unit_reservation_p (state, _0mfb_))
8638 return 8;
8639 else if (cpu_unit_reservation_p (state, _0mlx_))
8640 return 9;
8641 else
8642 gcc_unreachable ();
8643 case 6:
8644 if (cpu_unit_reservation_p (state, _1mmi_))
8645 return 1;
8646 else if (cpu_unit_reservation_p (state, _1mii_))
8647 return 0;
8648 else if (cpu_unit_reservation_p (state, _1mmb_))
8649 return 7;
8650 else if (cpu_unit_reservation_p (state, _1mib_))
8651 return 6;
8652 else if (cpu_unit_reservation_p (state, _1mbb_))
8653 return 5;
8654 else if (cpu_unit_reservation_p (state, _1bbb_))
8655 return 4;
8656 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8657 return 3;
8658 else if (cpu_unit_reservation_p (state, _1mfi_))
8659 return 2;
8660 else if (cpu_unit_reservation_p (state, _1mfb_))
8661 return 8;
8662 else if (cpu_unit_reservation_p (state, _1mlx_))
8663 return 9;
8664 else
8665 gcc_unreachable ();
8666 default:
8667 gcc_unreachable ();
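
/* The codes returned above are the template numbers passed to
   gen_bundle_selector; reading them off the unit reservations gives:
     0 .mii   1 .mmi   2 .mfi   3 .mmf   4 .bbb
     5 .mbb   6 .mib   7 .mmb   8 .mfb   9 .mlx  */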
8671 /* True when INSN is important for bundling. */
8672 static bool
8673 important_for_bundling_p (rtx insn)
8675 return (INSN_P (insn)
8676 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8677 && GET_CODE (PATTERN (insn)) != USE
8678 && GET_CODE (PATTERN (insn)) != CLOBBER);
8681 /* The following function returns the first insn important for insn
8682 bundling that follows INSN and comes before TAIL. */
8684 static rtx
8685 get_next_important_insn (rtx insn, rtx tail)
8687 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8688 if (important_for_bundling_p (insn))
8689 return insn;
8690 return NULL_RTX;
8693 /* Add a bundle selector TEMPLATE0 before INSN. */
8695 static void
8696 ia64_add_bundle_selector_before (int template0, rtx insn)
8698 rtx b = gen_bundle_selector (GEN_INT (template0));
8700 ia64_emit_insn_before (b, insn);
8701 #if NR_BUNDLES == 10
8702 if ((template0 == 4 || template0 == 5)
8703 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8705 int i;
8706 rtx note = NULL_RTX;
8708 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8709 first or second slot. If it is and has a REG_EH_REGION note, copy the note
8710 to the following nops, as br.call sets rp to the address of the following
8711 bundle and therefore an EH region end must be on a bundle
8712 boundary. */
8713 insn = PREV_INSN (insn);
8714 for (i = 0; i < 3; i++)
8717 insn = next_active_insn (insn);
8718 while (GET_CODE (insn) == INSN
8719 && get_attr_empty (insn) == EMPTY_YES);
8720 if (GET_CODE (insn) == CALL_INSN)
8721 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8722 else if (note)
8724 int code;
8726 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8727 || code == CODE_FOR_nop_b);
8728 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8729 note = NULL_RTX;
8730 else
8731 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8735 #endif
8738 /* The following function does insn bundling. Bundling means
8739 inserting templates and nop insns to fit insn groups into permitted
8740 templates. Instruction scheduling uses an NDFA (non-deterministic
8741 finite automaton) encoding information about the templates and the
8742 inserted nops. Nondeterminism of the automaton permits following
8743 all possible insn sequences very quickly.
8745 Unfortunately it is not possible to get information about inserting
8746 nop insns and used templates from the automaton's states. The
8747 automaton only says that we can issue an insn, possibly inserting
8748 some nops before it and using some template. Therefore insn
8749 bundling in this function is implemented by using a DFA
8750 (deterministic finite automaton). We follow all possible insn
8751 sequences by inserting 0-2 nops (that is what the NDFA describes for
8752 insn scheduling) before/after each insn being bundled. We know the
8753 start of simulated processor cycle from insn scheduling (insn
8754 starting a new cycle has TImode).
8756 A simple implementation of insn bundling would create an enormous
8757 number of possible insn sequences satisfying the information about new
8758 cycle ticks taken from the insn scheduling. To make the algorithm
8759 practical we use dynamic programming. Each decision (about
8760 inserting nops and implicitly about previous decisions) is described
8761 by the structure bundle_state (see above). If we generate the same
8762 bundle state (the key is the automaton state after issuing the insns and
8763 nops for it), we reuse the already generated one. As a consequence we
8764 reject some decisions which cannot improve the solution and
8765 reduce the memory needed by the algorithm.
8767 When we reach the end of the EBB (extended basic block), we choose the
8768 best sequence and then, moving back through the EBB, insert templates for
8769 the best alternative. The templates are taken by querying the
8770 automaton state for each insn in the chosen bundle states.
8772 So the algorithm makes two (forward and backward) passes through
8773 the EBB. */
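
/* A compressed sketch of the forward pass described above (using the
   helpers defined earlier; this is a summary, not extra code):

     for each important insn I in the EBB:
       for each bundle state S with insn_num == index(I) - 1:
         issue_nops_and_insn (S, 2, I, ...);   - only for F/B/L/S insns
         issue_nops_and_insn (S, 1, I, ...);
         issue_nops_and_insn (S, 0, I, ...);

   Each call hashes the resulting (dfa_state, insn_num) pair through
   insert_bundle_state, so equivalent partial solutions are merged.  */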
8775 static void
8776 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8778 struct bundle_state *curr_state, *next_state, *best_state;
8779 rtx insn, next_insn;
8780 int insn_num;
8781 int i, bundle_end_p, only_bundle_end_p, asm_p;
8782 int pos = 0, max_pos, template0, template1;
8783 rtx b;
8784 rtx nop;
8785 enum attr_type type;
8787 insn_num = 0;
8788 /* Count insns in the EBB. */
8789 for (insn = NEXT_INSN (prev_head_insn);
8790 insn && insn != tail;
8791 insn = NEXT_INSN (insn))
8792 if (INSN_P (insn))
8793 insn_num++;
8794 if (insn_num == 0)
8795 return;
8796 bundling_p = 1;
8797 dfa_clean_insn_cache ();
8798 initiate_bundle_state_table ();
8799 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8800 /* First (forward) pass -- generation of bundle states. */
8801 curr_state = get_free_bundle_state ();
8802 curr_state->insn = NULL;
8803 curr_state->before_nops_num = 0;
8804 curr_state->after_nops_num = 0;
8805 curr_state->insn_num = 0;
8806 curr_state->cost = 0;
8807 curr_state->accumulated_insns_num = 0;
8808 curr_state->branch_deviation = 0;
8809 curr_state->middle_bundle_stops = 0;
8810 curr_state->next = NULL;
8811 curr_state->originator = NULL;
8812 state_reset (curr_state->dfa_state);
8813 index_to_bundle_states [0] = curr_state;
8814 insn_num = 0;
8815 /* Shift cycle mark if it is put on insn which could be ignored. */
8816 for (insn = NEXT_INSN (prev_head_insn);
8817 insn != tail;
8818 insn = NEXT_INSN (insn))
8819 if (INSN_P (insn)
8820 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8821 || GET_CODE (PATTERN (insn)) == USE
8822 || GET_CODE (PATTERN (insn)) == CLOBBER)
8823 && GET_MODE (insn) == TImode)
8825 PUT_MODE (insn, VOIDmode);
8826 for (next_insn = NEXT_INSN (insn);
8827 next_insn != tail;
8828 next_insn = NEXT_INSN (next_insn))
8829 if (INSN_P (next_insn)
8830 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8831 && GET_CODE (PATTERN (next_insn)) != USE
8832 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8833 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8835 PUT_MODE (next_insn, TImode);
8836 break;
8839 /* Forward pass: generation of bundle states. */
8840 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8841 insn != NULL_RTX;
8842 insn = next_insn)
8844 gcc_assert (INSN_P (insn)
8845 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8846 && GET_CODE (PATTERN (insn)) != USE
8847 && GET_CODE (PATTERN (insn)) != CLOBBER);
8848 type = ia64_safe_type (insn);
8849 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8850 insn_num++;
8851 index_to_bundle_states [insn_num] = NULL;
8852 for (curr_state = index_to_bundle_states [insn_num - 1];
8853 curr_state != NULL;
8854 curr_state = next_state)
8856 pos = curr_state->accumulated_insns_num % 3;
8857 next_state = curr_state->next;
8858 /* We must fill up the current bundle in order to start a
8859 subsequent asm insn in a new bundle. An asm insn is always
8860 placed in a separate bundle. */
8861 only_bundle_end_p
8862 = (next_insn != NULL_RTX
8863 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8864 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8865 /* We may fill up the current bundle if it is the cycle end
8866 without a group barrier. */
8867 bundle_end_p
8868 = (only_bundle_end_p || next_insn == NULL_RTX
8869 || (GET_MODE (next_insn) == TImode
8870 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8871 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8872 || type == TYPE_S)
8873 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8874 only_bundle_end_p);
8875 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8876 only_bundle_end_p);
8877 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8878 only_bundle_end_p);
8880 gcc_assert (index_to_bundle_states [insn_num]);
8881 for (curr_state = index_to_bundle_states [insn_num];
8882 curr_state != NULL;
8883 curr_state = curr_state->next)
8884 if (verbose >= 2 && dump)
8886 /* This structure is taken from the generated code of the
8887 pipeline hazard recognizer (see file insn-attrtab.c).
8888 Please don't forget to change the structure if a new
8889 automaton is added to the .md file. */
8890 struct DFA_chip
8892 unsigned short one_automaton_state;
8893 unsigned short oneb_automaton_state;
8894 unsigned short two_automaton_state;
8895 unsigned short twob_automaton_state;
8898 fprintf
8899 (dump,
8900 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8901 curr_state->unique_num,
8902 (curr_state->originator == NULL
8903 ? -1 : curr_state->originator->unique_num),
8904 curr_state->cost,
8905 curr_state->before_nops_num, curr_state->after_nops_num,
8906 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8907 curr_state->middle_bundle_stops,
8908 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8909 INSN_UID (insn));
8913 /* We should find a solution because the 2nd insn scheduling has
8914 found one. */
8915 gcc_assert (index_to_bundle_states [insn_num]);
8916 /* Find a state corresponding to the best insn sequence. */
8917 best_state = NULL;
8918 for (curr_state = index_to_bundle_states [insn_num];
8919 curr_state != NULL;
8920 curr_state = curr_state->next)
8921 /* We are only looking at the states with a fully filled up last
8922 bundle. First we prefer insn sequences with minimal cost,
8923 then those with minimal inserted nops, and finally those with branch insns
8924 placed in the 3rd slots. */
8925 if (curr_state->accumulated_insns_num % 3 == 0
8926 && (best_state == NULL || best_state->cost > curr_state->cost
8927 || (best_state->cost == curr_state->cost
8928 && (curr_state->accumulated_insns_num
8929 < best_state->accumulated_insns_num
8930 || (curr_state->accumulated_insns_num
8931 == best_state->accumulated_insns_num
8932 && (curr_state->branch_deviation
8933 < best_state->branch_deviation
8934 || (curr_state->branch_deviation
8935 == best_state->branch_deviation
8936 && curr_state->middle_bundle_stops
8937 < best_state->middle_bundle_stops)))))))
8938 best_state = curr_state;
8939 /* Second (backward) pass: adding nops and templates. */
8940 gcc_assert (best_state);
8941 insn_num = best_state->before_nops_num;
8942 template0 = template1 = -1;
8943 for (curr_state = best_state;
8944 curr_state->originator != NULL;
8945 curr_state = curr_state->originator)
8947 insn = curr_state->insn;
8948 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8949 || asm_noperands (PATTERN (insn)) >= 0);
8950 insn_num++;
8951 if (verbose >= 2 && dump)
8953 struct DFA_chip
8955 unsigned short one_automaton_state;
8956 unsigned short oneb_automaton_state;
8957 unsigned short two_automaton_state;
8958 unsigned short twob_automaton_state;
8961 fprintf
8962 (dump,
8963 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8964 curr_state->unique_num,
8965 (curr_state->originator == NULL
8966 ? -1 : curr_state->originator->unique_num),
8967 curr_state->cost,
8968 curr_state->before_nops_num, curr_state->after_nops_num,
8969 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8970 curr_state->middle_bundle_stops,
8971 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8972 INSN_UID (insn));
8974 /* Find the position in the current bundle window. The window can
8975 contain at most two bundles. A two-bundle window means that
8976 the processor will make two bundle rotations. */
8977 max_pos = get_max_pos (curr_state->dfa_state);
8978 if (max_pos == 6
8979 /* The following (negative template number) means that the
8980 processor did one bundle rotation. */
8981 || (max_pos == 3 && template0 < 0))
8983 /* We are at the end of the window -- find template(s) for
8984 its bundle(s). */
8985 pos = max_pos;
8986 if (max_pos == 3)
8987 template0 = get_template (curr_state->dfa_state, 3);
8988 else
8990 template1 = get_template (curr_state->dfa_state, 3);
8991 template0 = get_template (curr_state->dfa_state, 6);
8994 if (max_pos > 3 && template1 < 0)
8995 /* This may happen when we have a stop inside a bundle. */
8997 gcc_assert (pos <= 3);
8998 template1 = get_template (curr_state->dfa_state, 3);
8999 pos += 3;
9001 if (!asm_p)
9002 /* Emit nops after the current insn. */
9003 for (i = 0; i < curr_state->after_nops_num; i++)
9005 nop = gen_nop ();
9006 emit_insn_after (nop, insn);
9007 pos--;
9008 gcc_assert (pos >= 0);
9009 if (pos % 3 == 0)
9011 /* We are at the start of a bundle: emit the template
9012 (it should be defined). */
9013 gcc_assert (template0 >= 0);
9014 ia64_add_bundle_selector_before (template0, nop);
9015 /* If we have a two-bundle window, we make one bundle
9016 rotation. Otherwise template0 will be undefined
9017 (a negative value). */
9018 template0 = template1;
9019 template1 = -1;
9022 /* Move the position backward in the window. A group barrier has
9023 no slot. An asm insn takes a whole bundle. */
9024 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9025 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9026 && asm_noperands (PATTERN (insn)) < 0)
9027 pos--;
9028 /* Long insn takes 2 slots. */
9029 if (ia64_safe_type (insn) == TYPE_L)
9030 pos--;
9031 gcc_assert (pos >= 0);
9032 if (pos % 3 == 0
9033 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9034 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9035 && asm_noperands (PATTERN (insn)) < 0)
9037 /* The current insn is at the bundle start: emit the
9038 template. */
9039 gcc_assert (template0 >= 0);
9040 ia64_add_bundle_selector_before (template0, insn);
9041 b = PREV_INSN (insn);
9042 insn = b;
9043 /* See comment above in analogous place for emitting nops
9044 after the insn. */
9045 template0 = template1;
9046 template1 = -1;
9048 /* Emit nops before the current insn. */
9049 for (i = 0; i < curr_state->before_nops_num; i++)
9051 nop = gen_nop ();
9052 ia64_emit_insn_before (nop, insn);
9053 nop = PREV_INSN (insn);
9054 insn = nop;
9055 pos--;
9056 gcc_assert (pos >= 0);
9057 if (pos % 3 == 0)
9059 /* See comment above in analogous place for emitting nops
9060 after the insn. */
9061 gcc_assert (template0 >= 0);
9062 ia64_add_bundle_selector_before (template0, insn);
9063 b = PREV_INSN (insn);
9064 insn = b;
9065 template0 = template1;
9066 template1 = -1;
9071 #ifdef ENABLE_CHECKING
9073 /* Assert the correct calculation of middle_bundle_stops. */
9074 int num = best_state->middle_bundle_stops;
9075 bool start_bundle = true, end_bundle = false;
9077 for (insn = NEXT_INSN (prev_head_insn);
9078 insn && insn != tail;
9079 insn = NEXT_INSN (insn))
9081 if (!INSN_P (insn))
9082 continue;
9083 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9084 start_bundle = true;
9085 else
9087 rtx next_insn;
9089 for (next_insn = NEXT_INSN (insn);
9090 next_insn && next_insn != tail;
9091 next_insn = NEXT_INSN (next_insn))
9092 if (INSN_P (next_insn)
9093 && (ia64_safe_itanium_class (next_insn)
9094 != ITANIUM_CLASS_IGNORE
9095 || recog_memoized (next_insn)
9096 == CODE_FOR_bundle_selector)
9097 && GET_CODE (PATTERN (next_insn)) != USE
9098 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9099 break;
9101 end_bundle = next_insn == NULL_RTX
9102 || next_insn == tail
9103 || (INSN_P (next_insn)
9104 && recog_memoized (next_insn)
9105 == CODE_FOR_bundle_selector);
9106 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9107 && !start_bundle && !end_bundle
9108 && next_insn
9109 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9110 && asm_noperands (PATTERN (next_insn)) < 0)
9111 num--;
9113 start_bundle = false;
9117 gcc_assert (num == 0);
9119 #endif
9121 free (index_to_bundle_states);
9122 finish_bundle_state_table ();
9123 bundling_p = 0;
9124 dfa_clean_insn_cache ();
9127 /* The following function is called at the end of scheduling BB or
9128 EBB. After reload, it inserts stop bits and does insn bundling. */
9130 static void
9131 ia64_sched_finish (FILE *dump, int sched_verbose)
9133 if (sched_verbose)
9134 fprintf (dump, "// Finishing schedule.\n");
9135 if (!reload_completed)
9136 return;
9137 if (reload_completed)
9139 final_emit_insn_group_barriers (dump);
9140 bundling (dump, sched_verbose, current_sched_info->prev_head,
9141 current_sched_info->next_tail);
9142 if (sched_verbose && dump)
9143 fprintf (dump, "// finishing %d-%d\n",
9144 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9145 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9147 return;
9151 /* The following function inserts stop bits in scheduled BB or EBB. */
9153 static void
9154 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9156 rtx insn;
9157 int need_barrier_p = 0;
9158 int seen_good_insn = 0;
9160 init_insn_group_barriers ();
9162 for (insn = NEXT_INSN (current_sched_info->prev_head);
9163 insn != current_sched_info->next_tail;
9164 insn = NEXT_INSN (insn))
9166 if (GET_CODE (insn) == BARRIER)
9168 rtx last = prev_active_insn (insn);
9170 if (! last)
9171 continue;
9172 if (GET_CODE (last) == JUMP_INSN
9173 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9174 last = prev_active_insn (last);
9175 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9176 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9178 init_insn_group_barriers ();
9179 seen_good_insn = 0;
9180 need_barrier_p = 0;
9182 else if (NONDEBUG_INSN_P (insn))
9184 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9186 init_insn_group_barriers ();
9187 seen_good_insn = 0;
9188 need_barrier_p = 0;
9190 else if (need_barrier_p || group_barrier_needed (insn)
9191 || (mflag_sched_stop_bits_after_every_cycle
9192 && GET_MODE (insn) == TImode
9193 && seen_good_insn))
9195 if (TARGET_EARLY_STOP_BITS)
9197 rtx last;
9199 for (last = insn;
9200 last != current_sched_info->prev_head;
9201 last = PREV_INSN (last))
9202 if (INSN_P (last) && GET_MODE (last) == TImode
9203 && stops_p [INSN_UID (last)])
9204 break;
9205 if (last == current_sched_info->prev_head)
9206 last = insn;
9207 last = prev_active_insn (last);
9208 if (last
9209 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9210 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9211 last);
9212 init_insn_group_barriers ();
9213 for (last = NEXT_INSN (last);
9214 last != insn;
9215 last = NEXT_INSN (last))
9216 if (INSN_P (last))
9218 group_barrier_needed (last);
9219 if (recog_memoized (last) >= 0
9220 && important_for_bundling_p (last))
9221 seen_good_insn = 1;
9224 else
9226 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9227 insn);
9228 init_insn_group_barriers ();
9229 seen_good_insn = 0;
9231 group_barrier_needed (insn);
9232 if (recog_memoized (insn) >= 0
9233 && important_for_bundling_p (insn))
9234 seen_good_insn = 1;
9236 else if (recog_memoized (insn) >= 0
9237 && important_for_bundling_p (insn))
9238 seen_good_insn = 1;
9239 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9240 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9241 || asm_noperands (PATTERN (insn)) >= 0);
9248 /* The following function returns the lookahead depth used by the
9249 first-cycle multipass DFA scheduling. */
9251 static int
9252 ia64_first_cycle_multipass_dfa_lookahead (void)
9254 return (reload_completed ? 6 : 4);
9257 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9259 static void
9260 ia64_init_dfa_pre_cycle_insn (void)
9262 if (temp_dfa_state == NULL)
9264 dfa_state_size = state_size ();
9265 temp_dfa_state = xmalloc (dfa_state_size);
9266 prev_cycle_state = xmalloc (dfa_state_size);
9268 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9269 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9270 recog_memoized (dfa_pre_cycle_insn);
9271 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9272 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9273 recog_memoized (dfa_stop_insn);
9276 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9277 used by the DFA insn scheduler. */
9279 static rtx
9280 ia64_dfa_pre_cycle_insn (void)
9282 return dfa_pre_cycle_insn;
9285 /* The following function returns TRUE if PRODUCER (of type ilog or
9286 ld) produces an address for CONSUMER (of type st or stf). */
9289 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9291 rtx dest, reg, mem;
9293 gcc_assert (producer && consumer);
9294 dest = ia64_single_set (producer);
9295 gcc_assert (dest);
9296 reg = SET_DEST (dest);
9297 gcc_assert (reg);
9298 if (GET_CODE (reg) == SUBREG)
9299 reg = SUBREG_REG (reg);
9300 gcc_assert (GET_CODE (reg) == REG);
9302 dest = ia64_single_set (consumer);
9303 gcc_assert (dest);
9304 mem = SET_DEST (dest);
9305 gcc_assert (mem && GET_CODE (mem) == MEM);
9306 return reg_mentioned_p (reg, mem);
9309 /* The following function returns TRUE if PRODUCER (of type ilog or
9310 ld) produces an address for CONSUMER (of type ld or fld). */
9313 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9315 rtx dest, src, reg, mem;
9317 gcc_assert (producer && consumer);
9318 dest = ia64_single_set (producer);
9319 gcc_assert (dest);
9320 reg = SET_DEST (dest);
9321 gcc_assert (reg);
9322 if (GET_CODE (reg) == SUBREG)
9323 reg = SUBREG_REG (reg);
9324 gcc_assert (GET_CODE (reg) == REG);
9326 src = ia64_single_set (consumer);
9327 gcc_assert (src);
9328 mem = SET_SRC (src);
9329 gcc_assert (mem);
9331 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9332 mem = XVECEXP (mem, 0, 0);
9333 else if (GET_CODE (mem) == IF_THEN_ELSE)
9334 /* ??? Is this bypass necessary for ld.c? */
9336 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9337 mem = XEXP (mem, 1);
9340 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9341 mem = XEXP (mem, 0);
9343 if (GET_CODE (mem) == UNSPEC)
9345 int c = XINT (mem, 1);
9347 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9348 || c == UNSPEC_LDSA);
9349 mem = XVECEXP (mem, 0, 0);
9352 /* Note that LO_SUM is used for GOT loads. */
9353 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9355 return reg_mentioned_p (reg, mem);
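
/* For example, with PRODUCER "add r14 = r15, r16" and CONSUMER
   "ld8 r17 = [r14]", the producer's destination r14 is mentioned in the
   consumer's address, so the bypass applies; the unwrapping above merely
   strips the UNSPEC / ZERO_EXTEND / IF_THEN_ELSE decoration that
   speculative and checked loads put around the address.  */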
9358 /* The following function returns TRUE if INSN produces an address for a
9359 load/store insn. We will place such insns into an M slot because that
9360 decreases its latency. */
9363 ia64_produce_address_p (rtx insn)
9365 return insn->call;
9369 /* Emit pseudo-ops for the assembler to describe predicate relations.
9370 At present this assumes that we only consider predicate pairs to
9371 be mutex, and that the assembler can deduce proper values from
9372 straight-line code. */
9374 static void
9375 emit_predicate_relation_info (void)
9377 basic_block bb;
9379 FOR_EACH_BB_REVERSE (bb)
9381 int r;
9382 rtx head = BB_HEAD (bb);
9384 /* We only need such notes at code labels. */
9385 if (GET_CODE (head) != CODE_LABEL)
9386 continue;
9387 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9388 head = NEXT_INSN (head);
9390 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9391 grabbing the entire block of predicate registers. */
9392 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9393 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9395 rtx p = gen_rtx_REG (BImode, r);
9396 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9397 if (head == BB_END (bb))
9398 BB_END (bb) = n;
9399 head = n;
9403 /* Look for conditional calls that do not return, and protect predicate
9404 relations around them. Otherwise the assembler will assume the call
9405 returns, and complain about uses of call-clobbered predicates after
9406 the call. */
9407 FOR_EACH_BB_REVERSE (bb)
9409 rtx insn = BB_HEAD (bb);
9411 while (1)
9413 if (GET_CODE (insn) == CALL_INSN
9414 && GET_CODE (PATTERN (insn)) == COND_EXEC
9415 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9417 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9418 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9419 if (BB_HEAD (bb) == insn)
9420 BB_HEAD (bb) = b;
9421 if (BB_END (bb) == insn)
9422 BB_END (bb) = a;
9425 if (insn == BB_END (bb))
9426 break;
9427 insn = NEXT_INSN (insn);
9432 /* Perform machine dependent operations on the rtl chain INSNS. */
9434 static void
9435 ia64_reorg (void)
9437 /* We are freeing block_for_insn in the toplev to keep compatibility
9438 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9439 compute_bb_for_insn ();
9441 /* If optimizing, we'll have split before scheduling. */
9442 if (optimize == 0)
9443 split_all_insns ();
9445 if (optimize && ia64_flag_schedule_insns2
9446 && dbg_cnt (ia64_sched2))
9448 timevar_push (TV_SCHED2);
9449 ia64_final_schedule = 1;
9451 initiate_bundle_states ();
9452 ia64_nop = make_insn_raw (gen_nop ());
9453 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9454 recog_memoized (ia64_nop);
9455 clocks_length = get_max_uid () + 1;
9456 stops_p = XCNEWVEC (char, clocks_length);
9458 if (ia64_tune == PROCESSOR_ITANIUM2)
9460 pos_1 = get_cpu_unit_code ("2_1");
9461 pos_2 = get_cpu_unit_code ("2_2");
9462 pos_3 = get_cpu_unit_code ("2_3");
9463 pos_4 = get_cpu_unit_code ("2_4");
9464 pos_5 = get_cpu_unit_code ("2_5");
9465 pos_6 = get_cpu_unit_code ("2_6");
9466 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9467 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9468 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9469 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9470 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9471 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9472 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9473 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9474 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9475 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9476 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9477 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9478 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9479 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9480 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9481 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9482 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9483 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9484 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9485 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9487 else
9489 pos_1 = get_cpu_unit_code ("1_1");
9490 pos_2 = get_cpu_unit_code ("1_2");
9491 pos_3 = get_cpu_unit_code ("1_3");
9492 pos_4 = get_cpu_unit_code ("1_4");
9493 pos_5 = get_cpu_unit_code ("1_5");
9494 pos_6 = get_cpu_unit_code ("1_6");
9495 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9496 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9497 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9498 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9499 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9500 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9501 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9502 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9503 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9504 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9505 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9506 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9507 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9508 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9509 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9510 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9511 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9512 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9513 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9514 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9517 if (flag_selective_scheduling2
9518 && !maybe_skip_selective_scheduling ())
9519 run_selective_scheduling ();
9520 else
9521 schedule_ebbs ();
9523 /* Redo the alignment computation, as it might have gone wrong. */
9524 compute_alignments ();
9526 /* We cannot reuse this one because it has been corrupted by the
9527 evil glat. */
9528 finish_bundle_states ();
9529 free (stops_p);
9530 stops_p = NULL;
9531 emit_insn_group_barriers (dump_file);
9533 ia64_final_schedule = 0;
9534 timevar_pop (TV_SCHED2);
9536 else
9537 emit_all_insn_group_barriers (dump_file);
9539 df_analyze ();
9541 /* A call must not be the last instruction in a function, so that the
9542 return address is still within the function, so that unwinding works
9543 properly. Note that IA-64 differs from dwarf2 on this point. */
9544 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9546 rtx insn;
9547 int saw_stop = 0;
9549 insn = get_last_insn ();
9550 if (! INSN_P (insn))
9551 insn = prev_active_insn (insn);
9552 if (insn)
9554 /* Skip over insns that expand to nothing. */
9555 while (GET_CODE (insn) == INSN
9556 && get_attr_empty (insn) == EMPTY_YES)
9558 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9559 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9560 saw_stop = 1;
9561 insn = prev_active_insn (insn);
9563 if (GET_CODE (insn) == CALL_INSN)
9565 if (! saw_stop)
9566 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9567 emit_insn (gen_break_f ());
9568 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9573 emit_predicate_relation_info ();
9575 if (ia64_flag_var_tracking)
9577 timevar_push (TV_VAR_TRACKING);
9578 variable_tracking_main ();
9579 timevar_pop (TV_VAR_TRACKING);
9581 df_finish_pass (false);
9584 /* Return true if REGNO is used by the epilogue. */
9587 ia64_epilogue_uses (int regno)
9589 switch (regno)
9591 case R_GR (1):
9592 /* With a call to a function in another module, we will write a new
9593 value to "gp". After returning from such a call, we need to make
9594 sure the function restores the original gp-value, even if the
9595 function itself does not use the gp anymore. */
9596 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9598 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9599 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9600 /* For functions defined with the syscall_linkage attribute, all
9601 input registers are marked as live at all function exits. This
9602 prevents the register allocator from using the input registers,
9603 which in turn makes it possible to restart a system call after
9604 an interrupt without having to save/restore the input registers.
9605 This also prevents kernel data from leaking to application code. */
9606 return lookup_attribute ("syscall_linkage",
9607 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
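/* Illustrative sketch, not part of this file: a function relying on the
   attribute described above might be declared as
     extern long my_syscall_stub (long a0, long a1)
       __attribute__ ((syscall_linkage));
   (the function name is made up); its in0-in7 then stay live at every
   exit.  */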
9609 case R_BR (0):
9610 /* Conditional return patterns can't represent the use of `b0' as
9611 the return address, so we force the value live this way. */
9612 return 1;
9614 case AR_PFS_REGNUM:
9615 /* Likewise for ar.pfs, which is used by br.ret. */
9616 return 1;
9618 default:
9619 return 0;
9623 /* Return true if REGNO is used by the frame unwinder. */
9626 ia64_eh_uses (int regno)
9628 unsigned int r;
9630 if (! reload_completed)
9631 return 0;
9633 if (regno == 0)
9634 return 0;
9636 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9637 if (regno == current_frame_info.r[r]
9638 || regno == emitted_frame_related_regs[r])
9639 return 1;
9641 return 0;
9644 /* Return true if this goes in small data/bss. */
9646 /* ??? We could also support our own long data here, generating movl/add/ld8
9647 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9648 code faster because there is one less load. This would also cover incomplete
9649 types, which can't go in sdata/sbss. */
9651 static bool
9652 ia64_in_small_data_p (const_tree exp)
9654 if (TARGET_NO_SDATA)
9655 return false;
9657 /* We want to merge strings, so we never consider them small data. */
9658 if (TREE_CODE (exp) == STRING_CST)
9659 return false;
9661 /* Functions are never small data. */
9662 if (TREE_CODE (exp) == FUNCTION_DECL)
9663 return false;
9665 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9667 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9669 if (strcmp (section, ".sdata") == 0
9670 || strncmp (section, ".sdata.", 7) == 0
9671 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9672 || strcmp (section, ".sbss") == 0
9673 || strncmp (section, ".sbss.", 6) == 0
9674 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9675 return true;
9677 else
9679 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9681 /* If this is an incomplete type with size 0, then we can't put it
9682 in sdata because it might be too big when completed. */
9683 if (size > 0 && size <= ia64_section_threshold)
9684 return true;
9687 return false;
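/* Illustrative sketch, not part of this file (identifiers made up): assuming
   a small-data threshold (-G) of at least four bytes,
     static int counter;
   satisfies the size test above and goes through the small-data machinery,
   while
     int big[64] __attribute__ ((section (".sdata.big")));
   is treated as small data purely because of its explicit section name.  */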
9690 /* Output assembly directives for prologue regions. */
9692 /* The current basic block number. */
9694 static bool last_block;
9696 /* True if we need a copy_state command at the start of the next block. */
9698 static bool need_copy_state;
9700 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9701 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9702 #endif
9704 /* Emit a debugging label after a call-frame-related insn. We'd
9705 rather output the label right away, but we'd have to output it
9706 after, not before, the instruction, and the instruction has not
9707 been output yet. So we emit the label after the insn, delete it to
9708 avoid introducing basic blocks, and mark it as preserved, such that
9709 it is still output, given that it is referenced in debug info. */
9711 static const char *
9712 ia64_emit_deleted_label_after_insn (rtx insn)
9714 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9715 rtx lb = gen_label_rtx ();
9716 rtx label_insn = emit_label_after (lb, insn);
9718 LABEL_PRESERVE_P (lb) = 1;
9720 delete_insn (label_insn);
9722 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9724 return xstrdup (label);
9727 /* Define the CFA after INSN with the steady-state definition. */
9729 static void
9730 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9732 rtx fp = frame_pointer_needed
9733 ? hard_frame_pointer_rtx
9734 : stack_pointer_rtx;
9735 const char *label = ia64_emit_deleted_label_after_insn (insn);
9737 if (!frame)
9738 return;
9740 dwarf2out_def_cfa
9741 (label, REGNO (fp),
9742 ia64_initial_elimination_offset
9743 (REGNO (arg_pointer_rtx), REGNO (fp))
9744 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9747 /* All we need to do here is avoid a crash in the generic dwarf2
9748 processing. The real CFA definition is set up above. */
9750 static void
9751 ia64_dwarf_handle_frame_unspec (const char * ARG_UNUSED (label),
9752 rtx ARG_UNUSED (pattern),
9753 int index)
9755 gcc_assert (index == UNSPECV_ALLOC);
9758 /* The generic dwarf2 frame debug info generator does not define a
9759 separate region for the very end of the epilogue, so refrain from
9760 doing so in the IA64-specific code as well. */
9762 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9764 /* This function emits unwind directives for the start of an epilogue. */
9766 static void
9767 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9769 /* If this isn't the last block of the function, then we need to label the
9770 current state, and copy it back in at the start of the next block. */
9772 if (!last_block)
9774 if (unwind)
9775 fprintf (asm_out_file, "\t.label_state %d\n",
9776 ++cfun->machine->state_num);
9777 need_copy_state = true;
9780 if (unwind)
9781 fprintf (asm_out_file, "\t.restore sp\n");
9782 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9783 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9784 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
9787 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9789 static void
9790 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9791 bool unwind, bool frame)
9793 rtx dest = SET_DEST (pat);
9794 rtx src = SET_SRC (pat);
9796 if (dest == stack_pointer_rtx)
9798 if (GET_CODE (src) == PLUS)
9800 rtx op0 = XEXP (src, 0);
9801 rtx op1 = XEXP (src, 1);
9803 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9805 if (INTVAL (op1) < 0)
9807 gcc_assert (!frame_pointer_needed);
9808 if (unwind)
9809 fprintf (asm_out_file,
9810 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9811 -INTVAL (op1));
9812 ia64_dwarf2out_def_steady_cfa (insn, frame);
9814 else
9815 process_epilogue (asm_out_file, insn, unwind, frame);
9817 else
9819 gcc_assert (src == hard_frame_pointer_rtx);
9820 process_epilogue (asm_out_file, insn, unwind, frame);
9823 else if (dest == hard_frame_pointer_rtx)
9825 gcc_assert (src == stack_pointer_rtx);
9826 gcc_assert (frame_pointer_needed);
9828 if (unwind)
9829 fprintf (asm_out_file, "\t.vframe r%d\n",
9830 ia64_dbx_register_number (REGNO (dest)));
9831 ia64_dwarf2out_def_steady_cfa (insn, frame);
9833 else
9834 gcc_unreachable ();
9837 /* This function processes a SET pattern for REG_CFA_REGISTER. */
9839 static void
9840 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9842 rtx dest = SET_DEST (pat);
9843 rtx src = SET_SRC (pat);
9845 int dest_regno = REGNO (dest);
9846 int src_regno = REGNO (src);
9848 switch (src_regno)
9850 case BR_REG (0):
9851 /* Saving return address pointer. */
9852 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9853 if (unwind)
9854 fprintf (asm_out_file, "\t.save rp, r%d\n",
9855 ia64_dbx_register_number (dest_regno));
9856 break;
9858 case PR_REG (0):
9859 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9860 if (unwind)
9861 fprintf (asm_out_file, "\t.save pr, r%d\n",
9862 ia64_dbx_register_number (dest_regno));
9863 break;
9865 case AR_UNAT_REGNUM:
9866 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9867 if (unwind)
9868 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9869 ia64_dbx_register_number (dest_regno));
9870 break;
9872 case AR_LC_REGNUM:
9873 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9874 if (unwind)
9875 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9876 ia64_dbx_register_number (dest_regno));
9877 break;
9879 default:
9880 /* Everything else should indicate being stored to memory. */
9881 gcc_unreachable ();
9885 /* This function processes a SET pattern for REG_CFA_OFFSET. */
9887 static void
9888 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9890 rtx dest = SET_DEST (pat);
9891 rtx src = SET_SRC (pat);
9892 int src_regno = REGNO (src);
9893 const char *saveop;
9894 HOST_WIDE_INT off;
9895 rtx base;
9897 gcc_assert (MEM_P (dest));
9898 if (GET_CODE (XEXP (dest, 0)) == REG)
9900 base = XEXP (dest, 0);
9901 off = 0;
9903 else
9905 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9906 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9907 base = XEXP (XEXP (dest, 0), 0);
9908 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9911 if (base == hard_frame_pointer_rtx)
9913 saveop = ".savepsp";
9914 off = - off;
9916 else
9918 gcc_assert (base == stack_pointer_rtx);
9919 saveop = ".savesp";
9922 src_regno = REGNO (src);
9923 switch (src_regno)
9925 case BR_REG (0):
9926 gcc_assert (!current_frame_info.r[reg_save_b0]);
9927 if (unwind)
9928 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9929 saveop, off);
9930 break;
9932 case PR_REG (0):
9933 gcc_assert (!current_frame_info.r[reg_save_pr]);
9934 if (unwind)
9935 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9936 saveop, off);
9937 break;
9939 case AR_LC_REGNUM:
9940 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9941 if (unwind)
9942 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9943 saveop, off);
9944 break;
9946 case AR_PFS_REGNUM:
9947 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9948 if (unwind)
9949 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9950 saveop, off);
9951 break;
9953 case AR_UNAT_REGNUM:
9954 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9955 if (unwind)
9956 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9957 saveop, off);
9958 break;
9960 case GR_REG (4):
9961 case GR_REG (5):
9962 case GR_REG (6):
9963 case GR_REG (7):
9964 if (unwind)
9965 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9966 1 << (src_regno - GR_REG (4)));
9967 break;
9969 case BR_REG (1):
9970 case BR_REG (2):
9971 case BR_REG (3):
9972 case BR_REG (4):
9973 case BR_REG (5):
9974 if (unwind)
9975 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9976 1 << (src_regno - BR_REG (1)));
9977 break;
9979 case FR_REG (2):
9980 case FR_REG (3):
9981 case FR_REG (4):
9982 case FR_REG (5):
9983 if (unwind)
9984 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9985 1 << (src_regno - FR_REG (2)));
9986 break;
9988 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9989 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9990 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9991 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9992 if (unwind)
9993 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9994 1 << (src_regno - FR_REG (12)));
9995 break;
9997 default:
9998 /* ??? For some reason we mark other general registers, even those
9999 we can't represent in the unwind info. Ignore them. */
10000 break;
10004 /* This function looks at a single insn and emits any directives
10005 required to unwind this insn. */
10007 static void
10008 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
10010 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10011 bool frame = dwarf2out_do_frame ();
10012 rtx note, pat;
10013 bool handled_one;
10015 if (!unwind && !frame)
10016 return;
10018 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10020 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
10022 /* Restore unwind state from immediately before the epilogue. */
10023 if (need_copy_state)
10025 if (unwind)
10027 fprintf (asm_out_file, "\t.body\n");
10028 fprintf (asm_out_file, "\t.copy_state %d\n",
10029 cfun->machine->state_num);
10031 if (IA64_CHANGE_CFA_IN_EPILOGUE)
10032 ia64_dwarf2out_def_steady_cfa (insn, frame);
10033 need_copy_state = false;
10037 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
10038 return;
10040 /* Look for the ALLOC insn. */
10041 if (INSN_CODE (insn) == CODE_FOR_alloc)
10043 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10044 int dest_regno = REGNO (dest);
10046 /* If this is the final destination for ar.pfs, then this must
10047 be the alloc in the prologue. */
10048 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10050 if (unwind)
10051 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10052 ia64_dbx_register_number (dest_regno));
10054 else
10056 /* This must be an alloc before a sibcall. We must drop the
10057 old frame info. The easiest way to drop the old frame
10058 info is to ensure we had a ".restore sp" directive
10059 followed by a new prologue. If the procedure doesn't
10060 have a memory-stack frame, we'll issue a dummy ".restore
10061 sp" now. */
10062 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10063 /* If we haven't done process_epilogue () yet, do it now. */
10064 process_epilogue (asm_out_file, insn, unwind, frame);
10065 if (unwind)
10066 fprintf (asm_out_file, "\t.prologue\n");
10068 return;
10071 handled_one = false;
10072 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10073 switch (REG_NOTE_KIND (note))
10075 case REG_CFA_ADJUST_CFA:
10076 pat = XEXP (note, 0);
10077 if (pat == NULL)
10078 pat = PATTERN (insn);
10079 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10080 handled_one = true;
10081 break;
10083 case REG_CFA_OFFSET:
10084 pat = XEXP (note, 0);
10085 if (pat == NULL)
10086 pat = PATTERN (insn);
10087 process_cfa_offset (asm_out_file, pat, unwind);
10088 handled_one = true;
10089 break;
10091 case REG_CFA_REGISTER:
10092 pat = XEXP (note, 0);
10093 if (pat == NULL)
10094 pat = PATTERN (insn);
10095 process_cfa_register (asm_out_file, pat, unwind);
10096 handled_one = true;
10097 break;
10099 case REG_FRAME_RELATED_EXPR:
10100 case REG_CFA_DEF_CFA:
10101 case REG_CFA_EXPRESSION:
10102 case REG_CFA_RESTORE:
10103 case REG_CFA_SET_VDRAP:
10104 /* Not used in the ia64 port. */
10105 gcc_unreachable ();
10107 default:
10108 /* Not a frame-related note. */
10109 break;
10112 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10113 explicit action to take. No guessing required. */
10114 gcc_assert (handled_one);
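/* Illustrative sketch only: for a routine with a register frame, a saved
   return pointer and a small memory stack, the routines above typically
   emit a directive sequence along the lines of
	.prologue
	.save ar.pfs, r35
	.fframe 16
	.save rp, r34
	.body
	...
	.restore sp
   (register numbers and frame size are made up).  */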
10117 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10119 static void
10120 ia64_asm_emit_except_personality (rtx personality)
10122 fputs ("\t.personality\t", asm_out_file);
10123 output_addr_const (asm_out_file, personality);
10124 fputc ('\n', asm_out_file);
10127 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10129 static void
10130 ia64_asm_init_sections (void)
10132 exception_section = get_unnamed_section (0, output_section_asm_op,
10133 "\t.handlerdata");
10136 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10138 static enum unwind_info_type
10139 ia64_debug_unwind_info (void)
10141 return UI_TARGET;
10144 /* Implement TARGET_EXCEPT_UNWIND_INFO. */
10146 static enum unwind_info_type
10147 ia64_except_unwind_info (struct gcc_options *opts)
10149 /* Honor the --enable-sjlj-exceptions configure switch. */
10150 #ifdef CONFIG_UNWIND_EXCEPTIONS
10151 if (CONFIG_UNWIND_EXCEPTIONS)
10152 return UI_SJLJ;
10153 #endif
10155 /* For simplicity elsewhere in this file, indicate that all unwind
10156 info is disabled if we're not emitting unwind tables. */
10157 if (!opts->x_flag_exceptions && !opts->x_flag_unwind_tables)
10158 return UI_NONE;
10160 return UI_TARGET;
10163 enum ia64_builtins
10165 IA64_BUILTIN_BSP,
10166 IA64_BUILTIN_COPYSIGNQ,
10167 IA64_BUILTIN_FABSQ,
10168 IA64_BUILTIN_FLUSHRS,
10169 IA64_BUILTIN_INFQ,
10170 IA64_BUILTIN_HUGE_VALQ,
10171 IA64_BUILTIN_max
10174 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10176 void
10177 ia64_init_builtins (void)
10179 tree fpreg_type;
10180 tree float80_type;
10181 tree decl;
10183 /* The __fpreg type. */
10184 fpreg_type = make_node (REAL_TYPE);
10185 TYPE_PRECISION (fpreg_type) = 82;
10186 layout_type (fpreg_type);
10187 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10189 /* The __float80 type. */
10190 float80_type = make_node (REAL_TYPE);
10191 TYPE_PRECISION (float80_type) = 80;
10192 layout_type (float80_type);
10193 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10195 /* The __float128 type. */
10196 if (!TARGET_HPUX)
10198 tree ftype;
10199 tree float128_type = make_node (REAL_TYPE);
10201 TYPE_PRECISION (float128_type) = 128;
10202 layout_type (float128_type);
10203 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10205 /* TFmode support builtins. */
10206 ftype = build_function_type (float128_type, void_list_node);
10207 decl = add_builtin_function ("__builtin_infq", ftype,
10208 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10209 NULL, NULL_TREE);
10210 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10212 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10213 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10214 NULL, NULL_TREE);
10215 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10217 ftype = build_function_type_list (float128_type,
10218 float128_type,
10219 NULL_TREE);
10220 decl = add_builtin_function ("__builtin_fabsq", ftype,
10221 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10222 "__fabstf2", NULL_TREE);
10223 TREE_READONLY (decl) = 1;
10224 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10226 ftype = build_function_type_list (float128_type,
10227 float128_type,
10228 float128_type,
10229 NULL_TREE);
10230 decl = add_builtin_function ("__builtin_copysignq", ftype,
10231 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10232 "__copysigntf3", NULL_TREE);
10233 TREE_READONLY (decl) = 1;
10234 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10236 else
10237 /* Under HPUX, this is a synonym for "long double". */
10238 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10239 "__float128");
10241 /* Fwrite on VMS is non-standard. */
10242 if (TARGET_ABI_OPEN_VMS)
10244 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
10245 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
10248 #define def_builtin(name, type, code) \
10249 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10250 NULL, NULL_TREE)
10252 decl = def_builtin ("__builtin_ia64_bsp",
10253 build_function_type (ptr_type_node, void_list_node),
10254 IA64_BUILTIN_BSP);
10255 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10257 decl = def_builtin ("__builtin_ia64_flushrs",
10258 build_function_type (void_type_node, void_list_node),
10259 IA64_BUILTIN_FLUSHRS);
10260 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10262 #undef def_builtin
10264 if (TARGET_HPUX)
10266 if (built_in_decls [BUILT_IN_FINITE])
10267 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
10268 "_Isfinite");
10269 if (built_in_decls [BUILT_IN_FINITEF])
10270 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
10271 "_Isfinitef");
10272 if (built_in_decls [BUILT_IN_FINITEL])
10273 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
10274 "_Isfinitef128");
10279 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10280 enum machine_mode mode ATTRIBUTE_UNUSED,
10281 int ignore ATTRIBUTE_UNUSED)
10283 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10284 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10286 switch (fcode)
10288 case IA64_BUILTIN_BSP:
10289 if (! target || ! register_operand (target, DImode))
10290 target = gen_reg_rtx (DImode);
10291 emit_insn (gen_bsp_value (target));
10292 #ifdef POINTERS_EXTEND_UNSIGNED
10293 target = convert_memory_address (ptr_mode, target);
10294 #endif
10295 return target;
10297 case IA64_BUILTIN_FLUSHRS:
10298 emit_insn (gen_flushrs ());
10299 return const0_rtx;
10301 case IA64_BUILTIN_INFQ:
10302 case IA64_BUILTIN_HUGE_VALQ:
10304 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10305 REAL_VALUE_TYPE inf;
10306 rtx tmp;
10308 real_inf (&inf);
10309 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10311 tmp = validize_mem (force_const_mem (target_mode, tmp));
10313 if (target == 0)
10314 target = gen_reg_rtx (target_mode);
10316 emit_move_insn (target, tmp);
10317 return target;
10320 case IA64_BUILTIN_FABSQ:
10321 case IA64_BUILTIN_COPYSIGNQ:
10322 return expand_call (exp, target, ignore);
10324 default:
10325 gcc_unreachable ();
10328 return NULL_RTX;
10331 /* Return the ia64 builtin for CODE. */
10333 static tree
10334 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10336 if (code >= IA64_BUILTIN_max)
10337 return error_mark_node;
10339 return ia64_builtins[code];
10342 /* For HP-UX IA64, aggregate parameters are passed in the
10343 most significant bits of the stack slot. */
10345 enum direction
10346 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10348 /* Exception to normal case for structures/unions/etc. */
10350 if (type && AGGREGATE_TYPE_P (type)
10351 && int_size_in_bytes (type) < UNITS_PER_WORD)
10352 return upward;
10354 /* Fall back to the default. */
10355 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
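/* Illustrative sketch only: under the rule above, a four-byte aggregate
   such as
     struct s { int i; };
   passed on the stack occupies the most significant bytes of its
   eight-byte slot (direction "upward"), while scalars fall back to the
   default padding.  */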
10358 /* Emit text to declare externally defined variables and functions, because
10359 the Intel assembler does not support undefined externals. */
10361 void
10362 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10364 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10365 set in order to avoid putting out names that are never really
10366 used. */
10367 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10369 /* maybe_assemble_visibility will return 1 if the assembler
10370 visibility directive is output. */
10371 int need_visibility = ((*targetm.binds_local_p) (decl)
10372 && maybe_assemble_visibility (decl));
10374 #ifdef DO_CRTL_NAMES
10375 DO_CRTL_NAMES;
10376 #endif
10378 /* GNU as does not need anything here, but the HP linker does
10379 need something for external functions. */
10380 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10381 && TREE_CODE (decl) == FUNCTION_DECL)
10382 (*targetm.asm_out.globalize_decl_name) (file, decl);
10383 else if (need_visibility && !TARGET_GNU_AS)
10384 (*targetm.asm_out.globalize_label) (file, name);
10388 /* Set SImode div/mod functions, since init_integral_libfuncs only initializes
10389 modes of word_mode and larger. Rename the TFmode libfuncs using the
10390 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10391 backward compatibility. */
10393 static void
10394 ia64_init_libfuncs (void)
10396 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10397 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10398 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10399 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10401 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10402 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10403 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10404 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10405 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10407 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10408 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10409 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10410 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10411 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10412 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10414 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10415 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10416 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10417 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10418 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10420 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10421 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10422 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10423 /* HP-UX 11.23 libc does not have a function for unsigned
10424 SImode-to-TFmode conversion. */
10425 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
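/* Illustrative sketch only: after the renamings above, a TFmode division
   in user code, e.g.
     __float128 q = x / y;
   is emitted as a call to _U_Qfdiv rather than the default __divtf3
   (variable names are made up).  */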
10428 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10430 static void
10431 ia64_hpux_init_libfuncs (void)
10433 ia64_init_libfuncs ();
10435 /* The HP SI millicode division and mod functions expect DI arguments.
10436 By turning them off completely we avoid using both libgcc and the
10437 non-standard millicode routines and use the HP DI millicode routines
10438 instead. */
10440 set_optab_libfunc (sdiv_optab, SImode, 0);
10441 set_optab_libfunc (udiv_optab, SImode, 0);
10442 set_optab_libfunc (smod_optab, SImode, 0);
10443 set_optab_libfunc (umod_optab, SImode, 0);
10445 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10446 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10447 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10448 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10450 /* HP-UX libc has TF min/max/abs routines in it. */
10451 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10452 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10453 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10455 /* ia64_expand_compare uses this. */
10456 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10458 /* These should never be used. */
10459 set_optab_libfunc (eq_optab, TFmode, 0);
10460 set_optab_libfunc (ne_optab, TFmode, 0);
10461 set_optab_libfunc (gt_optab, TFmode, 0);
10462 set_optab_libfunc (ge_optab, TFmode, 0);
10463 set_optab_libfunc (lt_optab, TFmode, 0);
10464 set_optab_libfunc (le_optab, TFmode, 0);
10467 /* Rename the division and modulus functions in VMS. */
10469 static void
10470 ia64_vms_init_libfuncs (void)
10472 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10473 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10474 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10475 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10476 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10477 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10478 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10479 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10480 abort_libfunc = init_one_libfunc ("decc$abort");
10481 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10482 #ifdef MEM_LIBFUNCS_INIT
10483 MEM_LIBFUNCS_INIT;
10484 #endif
10487 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10488 the HPUX conventions. */
10490 static void
10491 ia64_sysv4_init_libfuncs (void)
10493 ia64_init_libfuncs ();
10495 /* These functions are not part of the HPUX TFmode interface. We
10496 use them instead of _U_Qfcmp, which doesn't work the way we
10497 expect. */
10498 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10499 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10500 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10501 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10502 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10503 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10505 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10506 glibc doesn't have them. */
10509 /* Use soft-fp. */
10511 static void
10512 ia64_soft_fp_init_libfuncs (void)
10516 static bool
10517 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10519 return (mode == SImode || mode == DImode);
10522 /* For HPUX, it is illegal to have relocations in shared segments. */
10524 static int
10525 ia64_hpux_reloc_rw_mask (void)
10527 return 3;
10530 /* For others, relax this so that relocations to local data go in
10531 read-only segments, but we still cannot allow global relocations
10532 in read-only segments. */
10534 static int
10535 ia64_reloc_rw_mask (void)
10537 return flag_pic ? 3 : 2;
10540 /* Return the section to use for X. The only special thing we do here
10541 is to honor small data. */
10543 static section *
10544 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10545 unsigned HOST_WIDE_INT align)
10547 if (GET_MODE_SIZE (mode) > 0
10548 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10549 && !TARGET_NO_SDATA)
10550 return sdata_section;
10551 else
10552 return default_elf_select_rtx_section (mode, x, align);
10555 static unsigned int
10556 ia64_section_type_flags (tree decl, const char *name, int reloc)
10558 unsigned int flags = 0;
10560 if (strcmp (name, ".sdata") == 0
10561 || strncmp (name, ".sdata.", 7) == 0
10562 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10563 || strncmp (name, ".sdata2.", 8) == 0
10564 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10565 || strcmp (name, ".sbss") == 0
10566 || strncmp (name, ".sbss.", 6) == 0
10567 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10568 flags = SECTION_SMALL;
10570 #if TARGET_ABI_OPEN_VMS
10571 if (decl && DECL_ATTRIBUTES (decl)
10572 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10573 flags |= SECTION_VMS_OVERLAY;
10574 #endif
10576 flags |= default_section_type_flags (decl, name, reloc);
10577 return flags;
10580 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10581 structure type and that the address of that type should be passed
10582 in out0, rather than in r8. */
10584 static bool
10585 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10587 tree ret_type = TREE_TYPE (fntype);
10589 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10590 as the structure return address parameter, if the return value
10591 type has a non-trivial copy constructor or destructor. It is not
10592 clear if this same convention should be used for other
10593 programming languages. Until G++ 3.4, we incorrectly used r8 for
10594 these return values. */
10595 return (abi_version_at_least (2)
10596 && ret_type
10597 && TYPE_MODE (ret_type) == BLKmode
10598 && TREE_ADDRESSABLE (ret_type)
10599 && strcmp (lang_hooks.name, "GNU C++") == 0);
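/* Illustrative sketch only: for a C++ return type with a non-trivial
   destructor, e.g.
     struct S { ~S (); };
     S f ();
   the test above is true and the return-slot address is passed in out0
   rather than r8 (names are made up).  */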
10602 /* Output the assembler code for a thunk function. THUNK_DECL is the
10603 declaration for the thunk function itself, FUNCTION is the decl for
10604 the target function. DELTA is an immediate constant offset to be
10605 added to THIS. If VCALL_OFFSET is nonzero, the word at
10606 *(*this + vcall_offset) should be added to THIS. */
10608 static void
10609 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10610 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10611 tree function)
10613 rtx this_rtx, insn, funexp;
10614 unsigned int this_parmno;
10615 unsigned int this_regno;
10616 rtx delta_rtx;
10618 reload_completed = 1;
10619 epilogue_completed = 1;
10621 /* Set things up as ia64_expand_prologue might. */
10622 last_scratch_gr_reg = 15;
10624 memset (&current_frame_info, 0, sizeof (current_frame_info));
10625 current_frame_info.spill_cfa_off = -16;
10626 current_frame_info.n_input_regs = 1;
10627 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10629 /* Mark the end of the (empty) prologue. */
10630 emit_note (NOTE_INSN_PROLOGUE_END);
10632 /* Figure out whether "this" will be the first parameter (the
10633 typical case) or the second parameter (as happens when the
10634 virtual function returns certain class objects). */
10635 this_parmno
10636 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10637 ? 1 : 0);
10638 this_regno = IN_REG (this_parmno);
10639 if (!TARGET_REG_NAMES)
10640 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10642 this_rtx = gen_rtx_REG (Pmode, this_regno);
10644 /* Apply the constant offset, if required. */
10645 delta_rtx = GEN_INT (delta);
10646 if (TARGET_ILP32)
10648 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10649 REG_POINTER (tmp) = 1;
10650 if (delta && satisfies_constraint_I (delta_rtx))
10652 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10653 delta = 0;
10655 else
10656 emit_insn (gen_ptr_extend (this_rtx, tmp));
10658 if (delta)
10660 if (!satisfies_constraint_I (delta_rtx))
10662 rtx tmp = gen_rtx_REG (Pmode, 2);
10663 emit_move_insn (tmp, delta_rtx);
10664 delta_rtx = tmp;
10666 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10669 /* Apply the offset from the vtable, if required. */
10670 if (vcall_offset)
10672 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10673 rtx tmp = gen_rtx_REG (Pmode, 2);
10675 if (TARGET_ILP32)
10677 rtx t = gen_rtx_REG (ptr_mode, 2);
10678 REG_POINTER (t) = 1;
10679 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10680 if (satisfies_constraint_I (vcall_offset_rtx))
10682 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10683 vcall_offset = 0;
10685 else
10686 emit_insn (gen_ptr_extend (tmp, t));
10688 else
10689 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10691 if (vcall_offset)
10693 if (!satisfies_constraint_J (vcall_offset_rtx))
10695 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10696 emit_move_insn (tmp2, vcall_offset_rtx);
10697 vcall_offset_rtx = tmp2;
10699 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10702 if (TARGET_ILP32)
10703 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10704 else
10705 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10707 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10710 /* Generate a tail call to the target function. */
10711 if (! TREE_USED (function))
10713 assemble_external (function);
10714 TREE_USED (function) = 1;
10716 funexp = XEXP (DECL_RTL (function), 0);
10717 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10718 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10719 insn = get_last_insn ();
10720 SIBLING_CALL_P (insn) = 1;
10722 /* Code generation for calls relies on splitting. */
10723 reload_completed = 1;
10724 epilogue_completed = 1;
10725 try_split (PATTERN (insn), insn, 0);
10727 emit_barrier ();
10729 /* Run just enough of rest_of_compilation to get the insns emitted.
10730 There's not really enough bulk here to make other passes such as
10731 instruction scheduling worthwhile. Note that use_thunk calls
10732 assemble_start_function and assemble_end_function. */
10734 insn_locators_alloc ();
10735 emit_all_insn_group_barriers (NULL);
10736 insn = get_insns ();
10737 shorten_branches (insn);
10738 final_start_function (insn, file, 1);
10739 final (insn, file, 1);
10740 final_end_function ();
10742 reload_completed = 0;
10743 epilogue_completed = 0;
10746 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10748 static rtx
10749 ia64_struct_value_rtx (tree fntype,
10750 int incoming ATTRIBUTE_UNUSED)
10752 if (TARGET_ABI_OPEN_VMS ||
10753 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10754 return NULL_RTX;
10755 return gen_rtx_REG (Pmode, GR_REG (8));
10758 static bool
10759 ia64_scalar_mode_supported_p (enum machine_mode mode)
10761 switch (mode)
10763 case QImode:
10764 case HImode:
10765 case SImode:
10766 case DImode:
10767 case TImode:
10768 return true;
10770 case SFmode:
10771 case DFmode:
10772 case XFmode:
10773 case RFmode:
10774 return true;
10776 case TFmode:
10777 return true;
10779 default:
10780 return false;
10784 static bool
10785 ia64_vector_mode_supported_p (enum machine_mode mode)
10787 switch (mode)
10789 case V8QImode:
10790 case V4HImode:
10791 case V2SImode:
10792 return true;
10794 case V2SFmode:
10795 return true;
10797 default:
10798 return false;
10802 /* Implement the FUNCTION_PROFILER macro. */
10804 void
10805 ia64_output_function_profiler (FILE *file, int labelno)
10807 bool indirect_call;
10809 /* If the function needs a static chain and the static chain
10810 register is r15, we use an indirect call so as to bypass
10811 the PLT stub in case the executable is dynamically linked,
10812 because the stub clobbers r15 as per 5.3.6 of the psABI.
10813 We don't need to do that in non-canonical PIC mode. */
10815 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10817 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10818 indirect_call = true;
10820 else
10821 indirect_call = false;
10823 if (TARGET_GNU_AS)
10824 fputs ("\t.prologue 4, r40\n", file);
10825 else
10826 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10827 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10829 if (NO_PROFILE_COUNTERS)
10830 fputs ("\tmov out3 = r0\n", file);
10831 else
10833 char buf[20];
10834 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10836 if (TARGET_AUTO_PIC)
10837 fputs ("\tmovl out3 = @gprel(", file);
10838 else
10839 fputs ("\taddl out3 = @ltoff(", file);
10840 assemble_name (file, buf);
10841 if (TARGET_AUTO_PIC)
10842 fputs (")\n", file);
10843 else
10844 fputs ("), r1\n", file);
10847 if (indirect_call)
10848 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10849 fputs ("\t;;\n", file);
10851 fputs ("\t.save rp, r42\n", file);
10852 fputs ("\tmov out2 = b0\n", file);
10853 if (indirect_call)
10854 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10855 fputs ("\t.body\n", file);
10856 fputs ("\tmov out1 = r1\n", file);
10857 if (indirect_call)
10859 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10860 fputs ("\tmov b6 = r16\n", file);
10861 fputs ("\tld8 r1 = [r14]\n", file);
10862 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10864 else
10865 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10868 static GTY(()) rtx mcount_func_rtx;
10869 static rtx
10870 gen_mcount_func_rtx (void)
10872 if (!mcount_func_rtx)
10873 mcount_func_rtx = init_one_libfunc ("_mcount");
10874 return mcount_func_rtx;
10877 void
10878 ia64_profile_hook (int labelno)
10880 rtx label, ip;
10882 if (NO_PROFILE_COUNTERS)
10883 label = const0_rtx;
10884 else
10886 char buf[30];
10887 const char *label_name;
10888 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10889 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10890 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10891 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10893 ip = gen_reg_rtx (Pmode);
10894 emit_insn (gen_ip_value (ip));
10895 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10896 VOIDmode, 3,
10897 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10898 ip, Pmode,
10899 label, Pmode);
10902 /* Return the mangling of TYPE if it is an extended fundamental type. */
10904 static const char *
10905 ia64_mangle_type (const_tree type)
10907 type = TYPE_MAIN_VARIANT (type);
10909 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10910 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10911 return NULL;
10913 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10914 mangled as "e". */
10915 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10916 return "g";
10917 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10918 an extended mangling. Elsewhere, "e" is available since long
10919 double is 80 bits. */
10920 if (TYPE_MODE (type) == XFmode)
10921 return TARGET_HPUX ? "u9__float80" : "e";
10922 if (TYPE_MODE (type) == RFmode)
10923 return "u7__fpreg";
10924 return NULL;
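/* Illustrative sketch only: with the rules above, a declaration such as
     void f (__float80, __fpreg);
   mangles roughly as _Z1feu7__fpreg on Linux and as
   _Z1fu9__float80u7__fpreg on HP-UX.  */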
10927 /* Return the diagnostic message string if conversion from FROMTYPE to
10928 TOTYPE is not allowed, NULL otherwise. */
10929 static const char *
10930 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10932 /* Reject nontrivial conversion to or from __fpreg. */
10933 if (TYPE_MODE (fromtype) == RFmode
10934 && TYPE_MODE (totype) != RFmode
10935 && TYPE_MODE (totype) != VOIDmode)
10936 return N_("invalid conversion from %<__fpreg%>");
10937 if (TYPE_MODE (totype) == RFmode
10938 && TYPE_MODE (fromtype) != RFmode)
10939 return N_("invalid conversion to %<__fpreg%>");
10940 return NULL;
10943 /* Return the diagnostic message string if the unary operation OP is
10944 not permitted on TYPE, NULL otherwise. */
10945 static const char *
10946 ia64_invalid_unary_op (int op, const_tree type)
10948 /* Reject operations on __fpreg other than unary + or &. */
10949 if (TYPE_MODE (type) == RFmode
10950 && op != CONVERT_EXPR
10951 && op != ADDR_EXPR)
10952 return N_("invalid operation on %<__fpreg%>");
10953 return NULL;
10956 /* Return the diagnostic message string if the binary operation OP is
10957 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10958 static const char *
10959 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10961 /* Reject operations on __fpreg. */
10962 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10963 return N_("invalid operation on %<__fpreg%>");
10964 return NULL;
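/* Illustrative sketch only: the three checks above reject code such as
     __fpreg f;
     double d = f;      (invalid conversion from __fpreg)
     f = f + f;         (invalid operation on __fpreg)
   while unary '+' and taking the address of an __fpreg stay legal.  */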
10967 /* Implement TARGET_OPTION_DEFAULT_PARAMS. */
10968 static void
10969 ia64_option_default_params (void)
10971 /* Let the scheduler form additional regions. */
10972 set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
10974 /* Set the default values for cache-related parameters. */
10975 set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
10976 set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
10978 set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
10981 /* HP-UX version_id attribute.
10982 For object foo, if the version_id is set to 1234, put out an alias
10983 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
10984 other than an alias statement because it is an illegal symbol name. */
10986 static tree
10987 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10988 tree name ATTRIBUTE_UNUSED,
10989 tree args,
10990 int flags ATTRIBUTE_UNUSED,
10991 bool *no_add_attrs)
10993 tree arg = TREE_VALUE (args);
10995 if (TREE_CODE (arg) != STRING_CST)
10997 error ("version attribute is not a string");
10998 *no_add_attrs = true;
10999 return NULL_TREE;
11001 return NULL_TREE;
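/* Illustrative sketch only: the attribute handled above is written as
     extern int foo (void) __attribute__ ((version_id ("1234")));
   and results in an alias directive of the '.alias foo "foo{1234}"'
   form described above ("foo" and "1234" are made up).  */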
11004 /* Target hook for c_mode_for_suffix. */
11006 static enum machine_mode
11007 ia64_c_mode_for_suffix (char suffix)
11009 if (suffix == 'q')
11010 return TFmode;
11011 if (suffix == 'w')
11012 return XFmode;
11014 return VOIDmode;
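/* Illustrative sketch only: the suffix mapping above lets user code write
     __float128 a = 1.5q;
     __float80  b = 2.5w;
   for TFmode and XFmode constants respectively.  */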
11017 static enum machine_mode
11018 ia64_promote_function_mode (const_tree type,
11019 enum machine_mode mode,
11020 int *punsignedp,
11021 const_tree funtype,
11022 int for_return)
11024 /* Special processing required for OpenVMS ... */
11026 if (!TARGET_ABI_OPEN_VMS)
11027 return default_promote_function_mode (type, mode, punsignedp, funtype,
11028 for_return);
11030 /* HP OpenVMS Calling Standard dated June, 2004, that describes
11031 HP OpenVMS I64 Version 8.2EFT,
11032 chapter 4 "OpenVMS I64 Conventions"
11033 section 4.7 "Procedure Linkage"
11034 subsection 4.7.5.2, "Normal Register Parameters"
11036 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
11037 values passed in registers are zero-filled; signed integral values as
11038 well as unsigned 32-bit integral values are sign-extended to 64 bits.
11039 For all other types passed in the general registers, unused bits are
11040 undefined." */
11042 if (!AGGREGATE_TYPE_P (type)
11043 && GET_MODE_CLASS (mode) == MODE_INT
11044 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
11046 if (mode == SImode)
11047 *punsignedp = 0;
11048 return DImode;
11050 else
11051 return promote_mode (type, mode, punsignedp);
11054 static GTY(()) rtx ia64_dconst_0_5_rtx;
11057 ia64_dconst_0_5 (void)
11059 if (! ia64_dconst_0_5_rtx)
11061 REAL_VALUE_TYPE rv;
11062 real_from_string (&rv, "0.5");
11063 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11065 return ia64_dconst_0_5_rtx;
11068 static GTY(()) rtx ia64_dconst_0_375_rtx;
11071 ia64_dconst_0_375 (void)
11073 if (! ia64_dconst_0_375_rtx)
11075 REAL_VALUE_TYPE rv;
11076 real_from_string (&rv, "0.375");
11077 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11079 return ia64_dconst_0_375_rtx;
11082 static enum machine_mode
11083 ia64_get_reg_raw_mode (int regno)
11085 if (FR_REGNO_P (regno))
11086 return XFmode;
11087 return default_get_reg_raw_mode (regno);
11090 /* Always default to the .text section until the HP-UX linker is fixed. */
11092 ATTRIBUTE_UNUSED static section *
11093 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11094 enum node_frequency freq ATTRIBUTE_UNUSED,
11095 bool startup ATTRIBUTE_UNUSED,
11096 bool exit ATTRIBUTE_UNUSED)
11098 return NULL;
11101 #include "gt-ia64.h"