/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
   2009, 2010
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
                  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "libfuncs.h"
#include "diagnostic-core.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "gimple.h"
#include "intl.h"
#include "df.h"
#include "debug.h"
#include "params.h"
#include "dbgcnt.h"
#include "tm-constrs.h"
#include "sel-sched.h"
#include "reload.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };

/* Which cpu are we scheduling for.  */
enum processor_type ia64_tune = PROCESSOR_ITANIUM2;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int ia64_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorganization.  */
static int ia64_flag_var_tracking;

/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */

unsigned int ia64_section_threshold;

/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
int bundling_p = 0;
enum ia64_frame_regs
{
   reg_fp,
   reg_save_b0,
   reg_save_pr,
   reg_save_ar_pfs,
   reg_save_ar_unat,
   reg_save_ar_lc,
   reg_save_gp,
   number_of_ia64_frame_regs
};

/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;        /* size of the stack frame, not including
                                      the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;     /* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;        /* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;               /* mask of saved registers.  */
  unsigned int gr_used_mask;       /* mask of registers in use as gr spill
                                      registers or long-term scratches.  */
  int n_spilled;                   /* number of spilled registers.  */
  int r[number_of_ia64_frame_regs];  /* Frame related registers.  */
  int n_input_regs;                /* number of input registers used.  */
  int n_local_regs;                /* number of local registers used.  */
  int n_output_regs;               /* number of output registers used.  */
  int n_rotate_regs;               /* number of rotating registers used.  */

  char need_regstk;                /* true if a .regstk directive needed.  */
  char initialized;                /* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
/* The actual registers that are emitted.  */
static int emitted_frame_related_regs[number_of_ia64_frame_regs];
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static void ia64_h_i_d_extended (void);
static void * ia64_alloc_sched_context (void);
static void ia64_init_sched_context (void *, bool);
static void ia64_set_sched_context (void *);
static void ia64_clear_sched_context (void *);
static void ia64_free_sched_context (void *);
static int ia64_mode_to_int (enum machine_mode);
static void ia64_set_sched_flags (spec_info_t);
static ds_t ia64_get_insn_spec_ds (rtx);
static ds_t ia64_get_insn_checked_ds (rtx);
static bool ia64_skip_rtx_p (const_rtx);
static int ia64_speculate_insn (rtx, ds_t, rtx *);
static bool ia64_needs_block_p (int);
static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
static int ia64_spec_check_p (rtx);
static int ia64_spec_check_src_p (rtx);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static int find_gr_spill (enum ia64_frame_regs, int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static void ia64_option_override (void);
static void ia64_option_default_params (void);
static bool ia64_can_eliminate (const int, const int);
static enum machine_mode hfa_element_mode (const_tree, bool);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                         tree, int *, int);
static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                   tree, bool);
static rtx ia64_function_arg_1 (const CUMULATIVE_ARGS *, enum machine_mode,
                                const_tree, bool, bool);
static rtx ia64_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
                              const_tree, bool);
static rtx ia64_function_incoming_arg (CUMULATIVE_ARGS *,
                                       enum machine_mode, const_tree, bool);
static void ia64_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
                                       const_tree, bool);
static unsigned int ia64_function_arg_boundary (enum machine_mode,
                                                const_tree);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (const_tree, const_tree);
static rtx ia64_function_value (const_tree, const_tree, bool);
static rtx ia64_libcall_value (enum machine_mode, const_rtx);
static bool ia64_function_value_regno_p (const unsigned int);
static int ia64_register_move_cost (enum machine_mode, reg_class_t,
                                    reg_class_t);
static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
                                  bool);
static bool ia64_rtx_costs (rtx, int, int, int *, bool);
static int ia64_unspec_may_trap_p (const_rtx, unsigned);
static void fix_range (const char *);
static bool ia64_handle_option (size_t, const char *, int);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (const_tree);
static void process_epilogue (FILE *, rtx, bool, bool);

static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_init_global (FILE *, int, int);
static void ia64_sched_finish_global (FILE *, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static void ia64_asm_unwind_emit (FILE *, rtx);
static void ia64_asm_emit_except_personality (rtx);
static void ia64_asm_init_sections (void);

static enum unwind_info_type ia64_debug_unwind_info (void);
static enum unwind_info_type ia64_except_unwind_info (struct gcc_options *);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static bool important_for_bundling_p (rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                  HOST_WIDE_INT, tree);
static void ia64_file_start (void);
static void ia64_globalize_decl_name (FILE *, tree);

static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
static section *ia64_select_rtx_section (enum machine_mode, rtx,
                                         unsigned HOST_WIDE_INT);
static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_section_type_flags (tree, const char *, int);
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_soft_fp_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
     ATTRIBUTE_UNUSED;
static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
     ATTRIBUTE_UNUSED;

static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
static bool ia64_vector_mode_supported_p (enum machine_mode mode);
static bool ia64_cannot_force_const_mem (rtx);
static const char *ia64_mangle_type (const_tree);
static const char *ia64_invalid_conversion (const_tree, const_tree);
static const char *ia64_invalid_unary_op (int, const_tree);
static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
static enum machine_mode ia64_c_mode_for_suffix (char);
static enum machine_mode ia64_promote_function_mode (const_tree,
                                                     enum machine_mode,
                                                     int *,
                                                     const_tree,
                                                     int);
static void ia64_trampoline_init (rtx, tree, rtx);
static void ia64_override_options_after_change (void);

static void ia64_dwarf_handle_frame_unspec (const char *, rtx, int);
static tree ia64_builtin_decl (unsigned, bool);

static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
static enum machine_mode ia64_get_reg_raw_mode (int regno);
static section * ia64_hpux_function_section (tree, enum node_frequency,
                                             bool, bool);

/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true, true, NULL },
  { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
#if TARGET_ABI_OPEN_VMS
  { "common_object", 1, 1, true, false, false, ia64_vms_common_object_attribute},
#endif
  { "version_id", 1, 1, true, false, false,
    ia64_handle_version_id_attribute },
  { NULL, 0, 0, false, false, false, NULL }
};

/* Implement overriding of the optimization options.  */
static const struct default_options ia64_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
    SUBTARGET_OPTIMIZATION_OPTIONS,
#endif
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ia64_builtin_decl

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ia64_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE ia64_option_optimization_table
#undef TARGET_OPTION_DEFAULT_PARAMS
#define TARGET_OPTION_DEFAULT_PARAMS ia64_option_default_params

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST_2
#define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_SCHED_H_I_D_EXTENDED
#define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context

#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context

#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context

#undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
#define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context

#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context

#undef TARGET_SCHED_SET_SCHED_FLAGS
#define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags

#undef TARGET_SCHED_GET_INSN_SPEC_DS
#define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds

#undef TARGET_SCHED_GET_INSN_CHECKED_DS
#define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds

#undef TARGET_SCHED_SPECULATE_INSN
#define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn

#undef TARGET_SCHED_NEEDS_BLOCK_P
#define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p

#undef TARGET_SCHED_GEN_SPEC_CHECK
#define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
  ia64_first_cycle_multipass_dfa_lookahead_guard_spec

#undef TARGET_SCHED_SKIP_RTX_P
#define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ia64_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_ASM_GLOBALIZE_DECL_NAME
#define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_UNSPEC_MAY_TRAP_P
#define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
#endif

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ia64_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE ia64_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_GET_RAW_RESULT_MODE
#define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
#undef TARGET_GET_RAW_ARG_MODE
#define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ia64_dwarf_handle_frame_unspec
#undef TARGET_ASM_UNWIND_EMIT
#define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
#undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
#define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
#undef TARGET_EXCEPT_UNWIND_INFO
#define TARGET_EXCEPT_UNWIND_INFO ia64_except_unwind_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p

/* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
   in an order different from the specified program order.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ia64_handle_option

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ia64_mangle_type

#undef TARGET_INVALID_CONVERSION
#define TARGET_INVALID_CONVERSION ia64_invalid_conversion
#undef TARGET_INVALID_UNARY_OP
#define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ia64_can_eliminate

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ia64_trampoline_init

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
struct gcc_target targetm = TARGET_INITIALIZER;

typedef enum
  {
    ADDR_AREA_NORMAL,   /* normal address area */
    ADDR_AREA_SMALL     /* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
        return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}

static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
                             int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning (OPT_Wattributes, "invalid argument of %qE attribute",
               name);
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
           == FUNCTION_DECL)
          && !TREE_STATIC (decl))
        {
          error_at (DECL_SOURCE_LOCATION (decl),
                    "an address area attribute cannot be specified for "
                    "local variables");
          *no_add_attrs = true;
        }
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
        {
          error ("address area of %q+D conflicts with previous "
                 "declaration", decl);
          *no_add_attrs = true;
        }
      break;

    case FUNCTION_DECL:
      error_at (DECL_SOURCE_LOCATION (decl),
                "address area attribute cannot be specified for "
                "functions");
      *no_add_attrs = true;
      break;

    default:
      warning (OPT_Wattributes, "%qE attribute ignored",
               name);
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
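
/* Illustrative note (not part of the original source): the attribute handled
   above is written on a data declaration, e.g.

     static int counter __attribute__ ((model ("small")));

   which asks for COUNTER to be placed in the small ("addl"-reachable)
   address area selected by ia64_get_addr_area.  */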
/* The section must have global and overlaid attributes.  */
#define SECTION_VMS_OVERLAY SECTION_MACH_DEP

/* Part of the low level implementation of DEC Ada pragma Common_Object which
   enables the shared use of variables stored in overlaid linker areas
   corresponding to the use of Fortran COMMON.  */

static tree
ia64_vms_common_object_attribute (tree *node, tree name, tree args,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  tree decl = *node;
  tree id, val;
  if (! DECL_P (decl))
    abort ();

  DECL_COMMON (decl) = 1;
  id = TREE_VALUE (args);
  if (TREE_CODE (id) == IDENTIFIER_NODE)
    val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
  else if (TREE_CODE (id) == STRING_CST)
    val = id;
  else
    {
      warning (OPT_Wattributes,
               "%qE attribute requires a string constant argument", name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  DECL_SECTION_NAME (decl) = val;
  return NULL_TREE;
}
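
/* Illustrative note (not part of the original source): the handler above
   accepts either an identifier or a string as the overlaid area name, so a
   C-level use would look roughly like

     int shared_var __attribute__ ((common_object ("SHARED_BLOCK")));

   which marks SHARED_VAR as common and records the block name as its
   section, mirroring Fortran COMMON / DEC Ada pragma Common_Object.  */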
/* Part of the low level implementation of DEC Ada pragma Common_Object.  */

void
ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
                                     unsigned HOST_WIDE_INT size,
                                     unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);

  /* As the common_object attribute sets DECL_SECTION_NAME, check that
     before looking up the attribute.  */
  if (DECL_SECTION_NAME (decl) && attr)
    attr = lookup_attribute ("common_object", attr);
  else
    attr = NULL_TREE;

  if (!attr)
    {
      /* Code from elfos.h.  */
      fprintf (file, "%s", COMMON_ASM_OP);
      assemble_name (file, name);
      fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
               size, align / BITS_PER_UNIT);
    }
  else
    {
      ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
      ASM_OUTPUT_LABEL (file, name);
      ASM_OUTPUT_SKIP (file, size ? size : 1);
    }
}
/* Definition of TARGET_ASM_NAMED_SECTION for VMS.  */

void
ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
                                tree decl)
{
  if (!(flags & SECTION_VMS_OVERLAY))
    {
      default_elf_asm_named_section (name, flags, decl);
      return;
    }
  if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
    abort ();

  if (flags & SECTION_DECLARED)
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: gcc_unreachable ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}

static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and it must be either 0, 0.0
     or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return satisfies_constraint_G (src);
}

/* Return 1 if the operands are ok for a floating point load pair.  */

int
ia64_load_pair_ok (rtx dst, rtx src)
{
  if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
    return 0;
  if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
    return 0;
  switch (GET_CODE (XEXP (src, 0)))
    {
    case REG:
    case POST_INC:
      break;
    case POST_DEC:
      return 0;
    case POST_MODIFY:
      {
        rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);

        if (GET_CODE (adjust) != CONST_INT
            || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
          return 0;
      }
      break;
    default:
      abort ();
    }
  return 1;
}

int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE(op1)) !=
          basereg_operand (op2, GET_MODE(op2)));
}

/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
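
/* Illustrative example (not part of the original source): for the pattern
   (and:DI (ashift:DI (reg:DI X) (const_int 3)) (const_int 0xf8)) we get
   ROP = 0xf8 and RSHIFT = 3.  After OP >>= 3 the mask is 0x1f, a solid
   block of ones, and exact_log2 (0x1f + 1) returns 5, the dep.z field
   length.  A mask such as 0xe8 fails, since 0xe8 >> 3 == 0x1d is not of
   the form 2**n - 1 and exact_log2 returns -1.  */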
/* Return the TLS model to use for ADDR.  */

static enum tls_model
tls_symbolic_operand_type (rtx addr)
{
  enum tls_model tls_kind = TLS_MODEL_NONE;

  if (GET_CODE (addr) == CONST)
    {
      if (GET_CODE (XEXP (addr, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
        tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    tls_kind = SYMBOL_REF_TLS_MODEL (addr);

  return tls_kind;
}

/* Return true if X is a constant that is valid for some immediate
   field in an instruction.  */

bool
ia64_legitimate_constant_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case LABEL_REF:
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
          || GET_MODE (x) == DFmode)
        return true;
      return satisfies_constraint_G (x);

    case CONST:
    case SYMBOL_REF:
      /* ??? Short term workaround for PR 28490.  We must make the code here
         match the code in ia64_expand_move and move_operand, even though they
         are both technically wrong.  */
      if (tls_symbolic_operand_type (x) == 0)
        {
          HOST_WIDE_INT addend = 0;
          rtx op = x;

          if (GET_CODE (op) == CONST
              && GET_CODE (XEXP (op, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
            {
              addend = INTVAL (XEXP (XEXP (op, 0), 1));
              op = XEXP (XEXP (op, 0), 0);
            }

          if (any_offset_symbol_operand (op, GET_MODE (op))
              || function_operand (op, GET_MODE (op)))
            return true;
          if (aligned_offset_symbol_operand (op, GET_MODE (op)))
            return (addend & 0x3fff) == 0;
          return false;
        }
      return false;

    case CONST_VECTOR:
      {
        enum machine_mode mode = GET_MODE (x);

        if (mode == V2SFmode)
          return satisfies_constraint_Y (x);

        return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
                && GET_MODE_SIZE (mode) <= 8);
      }

    default:
      return false;
    }
}

/* Don't allow TLS addresses to get spilled to memory.  */

static bool
ia64_cannot_force_const_mem (rtx x)
{
  if (GET_MODE (x) == RFmode)
    return true;
  return tls_symbolic_operand_type (x) != 0;
}
/* Expand a symbolic constant load.  */

bool
ia64_expand_load_address (rtx dest, rtx src)
{
  gcc_assert (GET_CODE (dest) == REG);

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
                               byte_lowpart_offset (Pmode, GET_MODE (dest)));

  if (TARGET_NO_PIC)
    return false;
  if (small_addr_symbolic_operand (src, VOIDmode))
    return false;

  if (TARGET_AUTO_PIC)
    emit_insn (gen_load_gprel64 (dest, src));
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    emit_insn (gen_load_fptr (dest, src));
  else if (sdata_symbolic_operand (src, VOIDmode))
    emit_insn (gen_load_gprel (dest, src));
  else
    {
      HOST_WIDE_INT addend = 0;
      rtx tmp;

      /* We did split constant offsets in ia64_expand_move, and we did try
         to keep them split in move_operand, but we also allowed reload to
         rematerialize arbitrary constants rather than spill the value to
         the stack and reload it.  So we have to be prepared here to split
         them apart again.  */
      if (GET_CODE (src) == CONST)
        {
          HOST_WIDE_INT hi, lo;

          hi = INTVAL (XEXP (XEXP (src, 0), 1));
          lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
          hi = hi - lo;

          if (lo != 0)
            {
              addend = lo;
              src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
            }
        }

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      tmp = gen_rtx_LO_SUM (Pmode, dest, src);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      if (addend)
        {
          tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
          emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
        }
    }

  return true;
}
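
/* Illustrative example (not part of the original source): the lo/hi split
   above sign-extends the low 14 bits of the offset.  For hi = 0x12345678,
   lo = ((0x5678 & 0x3fff) ^ 0x2000) - 0x2000 = 0x1678 and hi becomes
   0x12344000, so the low part always fits the signed 14-bit addend range
   (-8192 .. 8191) handled by the final add.  */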
static GTY(()) rtx gen_tls_tga;
static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;
static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
  return thread_pointer_rtx;
}
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
                         rtx orig_op1, HOST_WIDE_INT addend)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
  rtx orig_op0 = op0;
  HOST_WIDE_INT addend_lo, addend_hi;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtpmod (tga_op1, op1));

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtprel (tga_op2, op1));

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
                                         LCT_CONST, Pmode, 2, tga_op1,
                                         Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
        op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic.
         If the call to __tls_get_addr is used only by a single symbol,
         then we should (somehow) move the dtprel to the second arg
         to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtpmod (tga_op1, op1));

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
                                         LCT_CONST, Pmode, 2, tga_op1,
                                         Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
        op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
        {
          emit_insn (gen_load_dtprel (op0, op1));
          emit_insn (gen_adddi3 (op0, tmp, op0));
        }
      else
        emit_insn (gen_add_dtprel (op0, op1, tmp));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
      addend_hi = addend - addend_lo;

      op1 = plus_constant (op1, addend_hi);
      addend = addend_lo;

      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_tprel (tmp, op1));

      if (!register_operand (op0, Pmode))
        op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
        op0 = gen_reg_rtx (Pmode);

      op1 = orig_op1;
      addend = 0;
      if (TARGET_TLS64)
        {
          emit_insn (gen_load_tprel (op0, op1));
          emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
        }
      else
        emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
      break;

    default:
      gcc_unreachable ();
    }

  if (addend)
    op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
                               orig_op0, 1, OPTAB_DIRECT);
  if (orig_op0 == op0)
    return NULL_RTX;
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      HOST_WIDE_INT addend = 0;
      enum tls_model tls_kind;
      rtx sym = op1;

      if (GET_CODE (op1) == CONST
          && GET_CODE (XEXP (op1, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
        {
          addend = INTVAL (XEXP (XEXP (op1, 0), 1));
          sym = XEXP (XEXP (op1, 0), 0);
        }

      tls_kind = tls_symbolic_operand_type (sym);
      if (tls_kind)
        return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);

      if (any_offset_symbol_operand (sym, mode))
        addend = 0;
      else if (aligned_offset_symbol_operand (sym, mode))
        {
          HOST_WIDE_INT addend_lo, addend_hi;

          addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
          addend_hi = addend - addend_lo;

          if (addend_lo != 0)
            {
              op1 = plus_constant (sym, addend_hi);
              addend = addend_lo;
            }
          else
            addend = 0;
        }
      else
        op1 = sym;

      if (reload_completed)
        {
          /* We really should have taken care of this offset earlier.  */
          gcc_assert (addend == 0);
          if (ia64_expand_load_address (op0, op1))
            return NULL_RTX;
        }

      if (addend)
        {
          rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);

          emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));

          op1 = expand_simple_binop (mode, PLUS, subtarget,
                                     GEN_INT (addend), op0, 1, OPTAB_DIRECT);
          if (op0 == op1)
            return NULL_RTX;
        }
    }

  return op1;
}

/* Split a move from OP1 to OP0 conditional on COND.  */

void
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
{
  rtx insn, first = get_last_insn ();

  emit_move_insn (op0, op1);

  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
                                          PATTERN (insn));
}
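
/* Illustrative note (not part of the original source): after the loop above
   a simple move such as (set (reg:DI r14) (reg:DI r15)) becomes

     (cond_exec (ne (reg:BI p6) (const_int 0))
                (set (reg:DI r14) (reg:DI r15)))

   i.e. every insn emitted for the move is predicated on COND (the register
   numbers here are only examples).  */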
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */

static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      gcc_assert (!reversed);

      if (GET_MODE (in) != TFmode)
        split_double (in, &out[0], &out[1]);
      else
        /* split_double does not understand how to split a TFmode
           quantity into a pair of DImode constants.  */
        {
          REAL_VALUE_TYPE r;
          unsigned HOST_WIDE_INT p[2];
          long l[4];  /* TFmode is 128 bits */

          REAL_VALUE_FROM_CONST_DOUBLE (r, in);
          real_to_target (l, &r, TFmode);

          if (FLOAT_WORDS_BIG_ENDIAN)
            {
              p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
              p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
            }
          else
            {
              p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
              p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
            }
          out[0] = GEN_INT (p[0]);
          out[1] = GEN_INT (p[1]);
        }
      break;

    case MEM:
      {
        rtx base = XEXP (in, 0);
        rtx offset;

        switch (GET_CODE (base))
          {
          case REG:
            if (!reversed)
              {
                out[0] = adjust_automodify_address
                  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
                out[1] = adjust_automodify_address
                  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
              }
            else
              {
                /* Reversal requires a pre-increment, which can only
                   be done as a separate insn.  */
                emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
                out[0] = adjust_automodify_address
                  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
                out[1] = adjust_address (in, DImode, 0);
              }
            break;

          case POST_INC:
            gcc_assert (!reversed && !dead);

            /* Just do the increment in two steps.  */
            out[0] = adjust_automodify_address (in, DImode, 0, 0);
            out[1] = adjust_automodify_address (in, DImode, 0, 8);
            break;

          case POST_DEC:
            gcc_assert (!reversed && !dead);

            /* Add 8, subtract 24.  */
            base = XEXP (base, 0);
            out[0] = adjust_automodify_address
              (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
            out[1] = adjust_automodify_address
              (in, DImode,
               gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
               8);
            break;

          case POST_MODIFY:
            gcc_assert (!reversed && !dead);

            /* Extract and adjust the modification.  This case is
               trickier than the others, because we might have an
               index register, or we might have a combined offset that
               doesn't fit a signed 9-bit displacement field.  We can
               assume the incoming expression is already legitimate.  */
            offset = XEXP (base, 1);
            base = XEXP (base, 0);

            out[0] = adjust_automodify_address
              (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

            if (GET_CODE (XEXP (offset, 1)) == REG)
              {
                /* Can't adjust the postmodify to match.  Emit the
                   original, then a separate addition insn.  */
                out[1] = adjust_automodify_address (in, DImode, 0, 8);
                fixup = gen_adddi3 (base, base, GEN_INT (-8));
              }
            else
              {
                gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
                if (INTVAL (XEXP (offset, 1)) < -256 + 8)
                  {
                    /* Again the postmodify cannot be made to match,
                       but in this case it's more efficient to get rid
                       of the postmodify entirely and fix up with an
                       add insn.  */
                    out[1] = adjust_automodify_address (in, DImode, base, 8);
                    fixup = gen_adddi3
                      (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
                  }
                else
                  {
                    /* Combined offset still fits in the displacement field.
                       (We cannot overflow it at the high end.)  */
                    out[1] = adjust_automodify_address
                      (in, DImode, gen_rtx_POST_MODIFY
                       (Pmode, base, gen_rtx_PLUS
                        (Pmode, base,
                         GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
                       8);
                  }
              }
            break;

          default:
            gcc_unreachable ();
          }
        break;
      }

    default:
      gcc_unreachable ();
    }

  return fixup;
}
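
/* Illustrative note (not part of the original source): for a TImode load
   from (mem:TI (reg r2)) with DEAD false, the split above produces code
   along the lines of

     ld8 rX = [r2], 8    ;; POST_INC by 8
     ld8 rY = [r2], -8   ;; POST_DEC restores the pointer

   so no scratch register is needed to form the second address.  */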
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will die.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
        base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
        reversed = true;
      dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)                               \
  if (GET_CODE (EXP) == MEM                                             \
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY                       \
          || GET_CODE (XEXP (EXP, 0)) == POST_INC                       \
          || GET_CODE (XEXP (EXP, 0)) == POST_DEC))                     \
    add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

static rtx
spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, mode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (mode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
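
/* Illustrative note (not part of the original source): the problematic
   (subreg:XF (reg:TI)) typically arises from source such as

     union { long double ld; unsigned __int128 ti; } u;
     u.ti = x;
     use (u.ld);

   where the TImode register holding U is reinterpreted as XFmode; the
   helper above forces the value through a stack slot instead.  */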
/* Expand the movxf or movrf pattern (MODE says which) with the given
   OPERANDS, returning true if the pattern should then invoke
   DONE.  */

bool
ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
{
  rtx op0 = operands[0];

  if (GET_CODE (op0) == SUBREG)
    op0 = SUBREG_REG (op0);

  /* We must support XFmode loads into general registers for stdarg/vararg,
     unprototyped calls, and a rare case where a long double is passed as
     an argument after a float HFA fills the FP registers.  We split them into
     DImode loads for convenience.  We also need to support XFmode stores
     for the last case.  This case does not happen for stdarg/vararg routines,
     because we do a block store to memory of unnamed arguments.  */

  if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
    {
      rtx out[2];

      /* We're hoping to transform everything that deals with XFmode
         quantities and GR registers early in the compiler.  */
      gcc_assert (can_create_pseudo_p ());

      /* Struct to register can just use TImode instead.  */
      if ((GET_CODE (operands[1]) == SUBREG
           && GET_MODE (SUBREG_REG (operands[1])) == TImode)
          || (GET_CODE (operands[1]) == REG
              && GR_REGNO_P (REGNO (operands[1]))))
        {
          rtx op1 = operands[1];

          if (GET_CODE (op1) == SUBREG)
            op1 = SUBREG_REG (op1);
          else
            op1 = gen_rtx_REG (TImode, REGNO (op1));

          emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
          return true;
        }

      if (GET_CODE (operands[1]) == CONST_DOUBLE)
        {
          /* Don't word-swap when reading in the constant.  */
          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
                          operand_subword (operands[1], WORDS_BIG_ENDIAN,
                                           0, mode));
          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
                          operand_subword (operands[1], !WORDS_BIG_ENDIAN,
                                           0, mode));
          return true;
        }

      /* If the quantity is in a register not known to be GR, spill it.  */
      if (register_operand (operands[1], mode))
        operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);

      gcc_assert (GET_CODE (operands[1]) == MEM);

      /* Don't word-swap when reading in the value.  */
      out[0] = gen_rtx_REG (DImode, REGNO (op0));
      out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);

      emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
      emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
      return true;
    }

  if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
    {
      /* We're hoping to transform everything that deals with XFmode
         quantities and GR registers early in the compiler.  */
      gcc_assert (can_create_pseudo_p ());

      /* Op0 can't be a GR_REG here, as that case is handled above.
         If op0 is a register, then we spill op1, so that we now have a
         MEM operand.  This requires creating an XFmode subreg of a TImode reg
         to force the spill.  */
      if (register_operand (operands[0], mode))
        {
          rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
          op1 = gen_rtx_SUBREG (mode, op1, 0);
          operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
        }

      else
        {
          rtx in[2];

          gcc_assert (GET_CODE (operands[0]) == MEM);

          /* Don't word-swap when writing out the value.  */
          in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
          in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);

          emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
          emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
          return true;
        }
    }

  if (!reload_in_progress && !reload_completed)
    {
      operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);

      if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
        {
          rtx memt, memx, in = operands[1];
          if (CONSTANT_P (in))
            in = validize_mem (force_const_mem (mode, in));
          if (GET_CODE (in) == MEM)
            memt = adjust_address (in, TImode, 0);
          else
            {
              memt = assign_stack_temp (TImode, 16, 0);
              memx = adjust_address (memt, mode, 0);
              emit_move_insn (memx, in);
            }
          emit_move_insn (op0, memt);
          return true;
        }

      if (!ia64_move_ok (operands[0], operands[1]))
        operands[1] = force_reg (mode, operands[1]);
    }

  return false;
}
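
/* Illustrative note (not part of the original source): an unprototyped call
   such as

     extern void g ();
     g (1.0L);

   passes the long double argument in general registers, which is one of the
   situations that requires the XFmode-in-GR handling above.  */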
1692 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1693 with the expression that holds the compare result (in VOIDmode). */
1695 static GTY(()) rtx cmptf_libfunc;
1697 void
1698 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1700 enum rtx_code code = GET_CODE (*expr);
1701 rtx cmp;
1703 /* If we have a BImode input, then we already have a compare result, and
1704 do not need to emit another comparison. */
1705 if (GET_MODE (*op0) == BImode)
1707 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1708 cmp = *op0;
1710 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1711 magic number as its third argument, that indicates what to do.
1712 The return value is an integer to be compared against zero. */
1713 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1715 enum qfcmp_magic {
1716 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1717 QCMP_UNORD = 2,
1718 QCMP_EQ = 4,
1719 QCMP_LT = 8,
1720 QCMP_GT = 16
1722 int magic;
1723 enum rtx_code ncode;
1724 rtx ret, insns;
1726 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1727 switch (code)
1729 /* 1 = equal, 0 = not equal. Equality operators do
1730 not raise FP_INVALID when given an SNaN operand. */
1731 case EQ: magic = QCMP_EQ; ncode = NE; break;
1732 case NE: magic = QCMP_EQ; ncode = EQ; break;
1733 /* isunordered() from C99. */
1734 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1735 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1736 /* Relational operators raise FP_INVALID when given
1737 an SNaN operand. */
1738 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1739 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1740 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1741 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1742 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1743 Expanders for buneq etc. would have to be added to ia64.md
1744 for this to be useful. */
1745 default: gcc_unreachable ();
1748 start_sequence ();
1750 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1751 *op0, TFmode, *op1, TFmode,
1752 GEN_INT (magic), DImode);
1753 cmp = gen_reg_rtx (BImode);
1754 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1755 gen_rtx_fmt_ee (ncode, BImode,
1756 ret, const0_rtx)));
1758 insns = get_insns ();
1759 end_sequence ();
1761 emit_libcall_block (insns, cmp, cmp,
1762 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1763 code = NE;
1765 else
1767 cmp = gen_reg_rtx (BImode);
1768 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1769 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1770 code = NE;
1773 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1774 *op0 = cmp;
1775 *op1 = const0_rtx;
1778 /* Generate an integral vector comparison. Return true if the condition has
1779 been reversed, and so the sense of the comparison should be inverted. */
1781 static bool
1782 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1783 rtx dest, rtx op0, rtx op1)
1785 bool negate = false;
1786 rtx x;
1788 /* Canonicalize the comparison to EQ, GT, GTU. */
1789 switch (code)
1791 case EQ:
1792 case GT:
1793 case GTU:
1794 break;
1796 case NE:
1797 case LE:
1798 case LEU:
1799 code = reverse_condition (code);
1800 negate = true;
1801 break;
1803 case GE:
1804 case GEU:
1805 code = reverse_condition (code);
1806 negate = true;
1807 /* FALLTHRU */
1809 case LT:
1810 case LTU:
1811 code = swap_condition (code);
1812 x = op0, op0 = op1, op1 = x;
1813 break;
1815 default:
1816 gcc_unreachable ();
1819 /* Unsigned parallel compare is not supported by the hardware. Play some
1820 tricks to turn this into a signed comparison against 0. */
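  /* Two identities are used below: for 32-bit elements,
     a >u b  iff  (a - 0x80000000) >s (b - 0x80000000), since biasing by
     INT_MIN just flips the sign bit; for 8- and 16-bit elements,
     a >u b  iff  (a -us b) != 0, where -us is unsigned saturating
     subtraction, so the code computes EQ against zero and then negates.  */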
1821 if (code == GTU)
1823 switch (mode)
1825 case V2SImode:
1827 rtx t1, t2, mask;
1829 /* Subtract (-(INT MAX) - 1) from both operands to make
1830 them signed. */
1831 mask = GEN_INT (0x80000000);
1832 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1833 mask = force_reg (mode, mask);
1834 t1 = gen_reg_rtx (mode);
1835 emit_insn (gen_subv2si3 (t1, op0, mask));
1836 t2 = gen_reg_rtx (mode);
1837 emit_insn (gen_subv2si3 (t2, op1, mask));
1838 op0 = t1;
1839 op1 = t2;
1840 code = GT;
1842 break;
1844 case V8QImode:
1845 case V4HImode:
1846 /* Perform a parallel unsigned saturating subtraction. */
1847 x = gen_reg_rtx (mode);
1848 emit_insn (gen_rtx_SET (VOIDmode, x,
1849 gen_rtx_US_MINUS (mode, op0, op1)));
1851 code = EQ;
1852 op0 = x;
1853 op1 = CONST0_RTX (mode);
1854 negate = !negate;
1855 break;
1857 default:
1858 gcc_unreachable ();
1862 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1863 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1865 return negate;
1868 /* Emit an integral vector conditional move. */
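/* The expansion computes the usual mask-select form

        dest = (cmp & op_true) | (~cmp & op_false)

   where CMP is an all-ones or all-zeros mask per element, and the two
   degenerate cases (either arm being the zero vector) collapse to a
   single AND or AND-with-complement.  */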
1870 void
1871 ia64_expand_vecint_cmov (rtx operands[])
1873 enum machine_mode mode = GET_MODE (operands[0]);
1874 enum rtx_code code = GET_CODE (operands[3]);
1875 bool negate;
1876 rtx cmp, x, ot, of;
1878 cmp = gen_reg_rtx (mode);
1879 negate = ia64_expand_vecint_compare (code, mode, cmp,
1880 operands[4], operands[5]);
1882 ot = operands[1+negate];
1883 of = operands[2-negate];
1885 if (ot == CONST0_RTX (mode))
1887 if (of == CONST0_RTX (mode))
1889 emit_move_insn (operands[0], ot);
1890 return;
1893 x = gen_rtx_NOT (mode, cmp);
1894 x = gen_rtx_AND (mode, x, of);
1895 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1897 else if (of == CONST0_RTX (mode))
1899 x = gen_rtx_AND (mode, cmp, ot);
1900 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1902 else
1904 rtx t, f;
1906 t = gen_reg_rtx (mode);
1907 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1908 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1910 f = gen_reg_rtx (mode);
1911 x = gen_rtx_NOT (mode, cmp);
1912 x = gen_rtx_AND (mode, x, operands[2-negate]);
1913 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1915 x = gen_rtx_IOR (mode, t, f);
1916 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1920 /* Emit an integral vector min or max operation. Return true if all done. */
1922 bool
1923 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1924 rtx operands[])
1926 rtx xops[6];
1928 /* These four combinations are supported directly. */
1929 if (mode == V8QImode && (code == UMIN || code == UMAX))
1930 return false;
1931 if (mode == V4HImode && (code == SMIN || code == SMAX))
1932 return false;
1934 /* This combination can be implemented with only saturating subtraction. */
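  /* This relies on the identity  umax (a, b) == (a -us b) + b,  where -us
     is unsigned saturating subtraction: the difference is a - b when
     a >= b and 0 otherwise.  */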
1935 if (mode == V4HImode && code == UMAX)
1937 rtx x, tmp = gen_reg_rtx (mode);
1939 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1940 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1942 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1943 return true;
1946 /* Everything else is implemented via vector comparisons. */
1947 xops[0] = operands[0];
1948 xops[4] = xops[1] = operands[1];
1949 xops[5] = xops[2] = operands[2];
1951 switch (code)
1953 case UMIN:
1954 code = LTU;
1955 break;
1956 case UMAX:
1957 code = GTU;
1958 break;
1959 case SMIN:
1960 code = LT;
1961 break;
1962 case SMAX:
1963 code = GT;
1964 break;
1965 default:
1966 gcc_unreachable ();
1968 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1970 ia64_expand_vecint_cmov (xops);
1971 return true;
1974 /* Emit an integral vector unpack operation. */
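/* The expansion interleaves the narrow source with a second vector that
   carries the extension bits of each element: the zero vector for an
   unsigned unpack, or the op1 < 0 comparison mask (all-ones for negative
   elements) for a signed unpack.  Interleaving the low or high halves
   then produces the zero- or sign-extended wide elements in operands[0].  */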
1976 void
1977 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
1979 enum machine_mode mode = GET_MODE (operands[1]);
1980 rtx (*gen) (rtx, rtx, rtx);
1981 rtx x;
1983 switch (mode)
1985 case V8QImode:
1986 gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
1987 break;
1988 case V4HImode:
1989 gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
1990 break;
1991 default:
1992 gcc_unreachable ();
1995 /* Fill in x with the sign extension of each element in op1. */
1996 if (unsignedp)
1997 x = CONST0_RTX (mode);
1998 else
2000 bool neg;
2002 x = gen_reg_rtx (mode);
2004 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2005 CONST0_RTX (mode));
2006 gcc_assert (!neg);
2009 emit_insn (gen (gen_lowpart (mode, operands[0]), operands[1], x));
2012 /* Emit an integral vector widening sum operation. */
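/* A sketch of the expansion below: op1 is widened into low and high halves
   by interleaving it with its extension vector X (the zero vector, or the
   op1 < 0 mask), and the halves are then folded into the accumulator:

        s   = low_half + op2;
        op0 = high_half + s;

   so each wide lane of op0 accumulates op2 plus the widened elements of
   op1 that land in that lane.  */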
2014 void
2015 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2017 rtx l, h, x, s;
2018 enum machine_mode wmode, mode;
2019 rtx (*unpack_l) (rtx, rtx, rtx);
2020 rtx (*unpack_h) (rtx, rtx, rtx);
2021 rtx (*plus) (rtx, rtx, rtx);
2023 wmode = GET_MODE (operands[0]);
2024 mode = GET_MODE (operands[1]);
2026 switch (mode)
2028 case V8QImode:
2029 unpack_l = gen_vec_interleave_lowv8qi;
2030 unpack_h = gen_vec_interleave_highv8qi;
2031 plus = gen_addv4hi3;
2032 break;
2033 case V4HImode:
2034 unpack_l = gen_vec_interleave_lowv4hi;
2035 unpack_h = gen_vec_interleave_highv4hi;
2036 plus = gen_addv2si3;
2037 break;
2038 default:
2039 gcc_unreachable ();
2042 /* Fill in x with the sign extension of each element in op1. */
2043 if (unsignedp)
2044 x = CONST0_RTX (mode);
2045 else
2047 bool neg;
2049 x = gen_reg_rtx (mode);
2051 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2052 CONST0_RTX (mode));
2053 gcc_assert (!neg);
2056 l = gen_reg_rtx (wmode);
2057 h = gen_reg_rtx (wmode);
2058 s = gen_reg_rtx (wmode);
2060 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
2061 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
2062 emit_insn (plus (s, l, operands[2]));
2063 emit_insn (plus (operands[0], h, s));
2066 void
2067 ia64_expand_widen_mul_v4hi (rtx operands[3], bool unsignedp, bool highp)
2069 rtx l = gen_reg_rtx (V4HImode);
2070 rtx h = gen_reg_rtx (V4HImode);
2071 rtx (*mulhigh)(rtx, rtx, rtx, rtx);
2072 rtx (*interl)(rtx, rtx, rtx);
2074 emit_insn (gen_mulv4hi3 (l, operands[1], operands[2]));
2076 /* For signed, pmpy2.r would appear to more closely match this operation.
2077 However, the vectorizer is more likely to use the LO and HI patterns in
2078 pairs, at which point, with this formulation, the first two insns of
2079 each can be CSEd. */
2080 mulhigh = unsignedp ? gen_pmpyshr2_u : gen_pmpyshr2;
2081 emit_insn (mulhigh (h, operands[1], operands[2], GEN_INT (16)));
2083 interl = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
2084 emit_insn (interl (gen_lowpart (V4HImode, operands[0]), l, h));
2087 /* Emit a signed or unsigned V8QI dot product operation. */
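/* A sketch of the expansion below: both V8QI operands are widened to
   V4HImode low and high halves (l1/l2 and h1/h2), pmpy2.r and pmpy2.l
   produce four V2SImode vectors of 32-bit products, and everything is
   summed together with the V2SImode accumulator, roughly

        operands[0] = (p1 + p2) + operands[3] + (p3 + p4);  */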
2089 void
2090 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2092 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
2094 /* Fill in x1 and x2 with the sign extension of each element. */
2095 if (unsignedp)
2096 x1 = x2 = CONST0_RTX (V8QImode);
2097 else
2099 bool neg;
2101 x1 = gen_reg_rtx (V8QImode);
2102 x2 = gen_reg_rtx (V8QImode);
2104 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
2105 CONST0_RTX (V8QImode));
2106 gcc_assert (!neg);
2107 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
2108 CONST0_RTX (V8QImode));
2109 gcc_assert (!neg);
2112 l1 = gen_reg_rtx (V4HImode);
2113 l2 = gen_reg_rtx (V4HImode);
2114 h1 = gen_reg_rtx (V4HImode);
2115 h2 = gen_reg_rtx (V4HImode);
2117 emit_insn (gen_vec_interleave_lowv8qi
2118 (gen_lowpart (V8QImode, l1), operands[1], x1));
2119 emit_insn (gen_vec_interleave_lowv8qi
2120 (gen_lowpart (V8QImode, l2), operands[2], x2));
2121 emit_insn (gen_vec_interleave_highv8qi
2122 (gen_lowpart (V8QImode, h1), operands[1], x1));
2123 emit_insn (gen_vec_interleave_highv8qi
2124 (gen_lowpart (V8QImode, h2), operands[2], x2));
2126 p1 = gen_reg_rtx (V2SImode);
2127 p2 = gen_reg_rtx (V2SImode);
2128 p3 = gen_reg_rtx (V2SImode);
2129 p4 = gen_reg_rtx (V2SImode);
2130 emit_insn (gen_pmpy2_r (p1, l1, l2));
2131 emit_insn (gen_pmpy2_l (p2, l1, l2));
2132 emit_insn (gen_pmpy2_r (p3, h1, h2));
2133 emit_insn (gen_pmpy2_l (p4, h1, h2));
2135 s1 = gen_reg_rtx (V2SImode);
2136 s2 = gen_reg_rtx (V2SImode);
2137 s3 = gen_reg_rtx (V2SImode);
2138 emit_insn (gen_addv2si3 (s1, p1, p2));
2139 emit_insn (gen_addv2si3 (s2, p3, p4));
2140 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2141 emit_insn (gen_addv2si3 (operands[0], s2, s3));
2144 /* Emit the appropriate sequence for a call. */
2146 void
2147 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2148 int sibcall_p)
2150 rtx insn, b0;
2152 addr = XEXP (addr, 0);
2153 addr = convert_memory_address (DImode, addr);
2154 b0 = gen_rtx_REG (DImode, R_BR (0));
2156 /* ??? Should do this for functions known to bind local too. */
2157 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2159 if (sibcall_p)
2160 insn = gen_sibcall_nogp (addr);
2161 else if (! retval)
2162 insn = gen_call_nogp (addr, b0);
2163 else
2164 insn = gen_call_value_nogp (retval, addr, b0);
2165 insn = emit_call_insn (insn);
2167 else
2169 if (sibcall_p)
2170 insn = gen_sibcall_gp (addr);
2171 else if (! retval)
2172 insn = gen_call_gp (addr, b0);
2173 else
2174 insn = gen_call_value_gp (retval, addr, b0);
2175 insn = emit_call_insn (insn);
2177 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2180 if (sibcall_p)
2181 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2183 if (TARGET_ABI_OPEN_VMS)
2184 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2185 gen_rtx_REG (DImode, GR_REG (25)));
2188 static void
2189 reg_emitted (enum ia64_frame_regs r)
2191 if (emitted_frame_related_regs[r] == 0)
2192 emitted_frame_related_regs[r] = current_frame_info.r[r];
2193 else
2194 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2197 static int
2198 get_reg (enum ia64_frame_regs r)
2200 reg_emitted (r);
2201 return current_frame_info.r[r];
2204 static bool
2205 is_emitted (int regno)
2207 unsigned int r;
2209 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2210 if (emitted_frame_related_regs[r] == regno)
2211 return true;
2212 return false;
2215 void
2216 ia64_reload_gp (void)
2218 rtx tmp;
2220 if (current_frame_info.r[reg_save_gp])
2222 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2224 else
2226 HOST_WIDE_INT offset;
2227 rtx offset_r;
2229 offset = (current_frame_info.spill_cfa_off
2230 + current_frame_info.spill_size);
2231 if (frame_pointer_needed)
2233 tmp = hard_frame_pointer_rtx;
2234 offset = -offset;
2236 else
2238 tmp = stack_pointer_rtx;
2239 offset = current_frame_info.total_size - offset;
2242 offset_r = GEN_INT (offset);
2243 if (satisfies_constraint_I (offset_r))
2244 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2245 else
2247 emit_move_insn (pic_offset_table_rtx, offset_r);
2248 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2249 pic_offset_table_rtx, tmp));
2252 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2255 emit_move_insn (pic_offset_table_rtx, tmp);
2258 void
2259 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2260 rtx scratch_b, int noreturn_p, int sibcall_p)
2262 rtx insn;
2263 bool is_desc = false;
2265 /* If we find we're calling through a register, then we're actually
2266 calling through a descriptor, so load up the values. */
2267 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2269 rtx tmp;
2270 bool addr_dead_p;
2272 /* ??? We are currently constrained to *not* use peep2, because
2273 we can legitimately change the global lifetime of the GP
2274 (in the form of killing where previously live). This is
2275 because a call through a descriptor doesn't use the previous
2276 value of the GP, while a direct call does, and we do not
2277 commit to either form until the split here.
2279 That said, this means that we lack precise life info for
2280 whether ADDR is dead after this call. This is not terribly
2281 important, since we can fix things up essentially for free
2282 with the POST_DEC below, but it's nice to not use it when we
2283 can immediately tell it's not necessary. */
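      /* For reference: an IA-64 function descriptor is two 8-byte words,
         the code entry address followed by the callee's gp value, which is
         why the loads below go through ADDR with a post-increment and,
         when ADDR must be preserved, undo it again with a post-decrement.  */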
2284 addr_dead_p = ((noreturn_p || sibcall_p
2285 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2286 REGNO (addr)))
2287 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2289 /* Load the code address into scratch_b. */
2290 tmp = gen_rtx_POST_INC (Pmode, addr);
2291 tmp = gen_rtx_MEM (Pmode, tmp);
2292 emit_move_insn (scratch_r, tmp);
2293 emit_move_insn (scratch_b, scratch_r);
2295 /* Load the GP address. If ADDR is not dead here, then we must
2296 revert the change made above via the POST_INCREMENT. */
2297 if (!addr_dead_p)
2298 tmp = gen_rtx_POST_DEC (Pmode, addr);
2299 else
2300 tmp = addr;
2301 tmp = gen_rtx_MEM (Pmode, tmp);
2302 emit_move_insn (pic_offset_table_rtx, tmp);
2304 is_desc = true;
2305 addr = scratch_b;
2308 if (sibcall_p)
2309 insn = gen_sibcall_nogp (addr);
2310 else if (retval)
2311 insn = gen_call_value_nogp (retval, addr, retaddr);
2312 else
2313 insn = gen_call_nogp (addr, retaddr);
2314 emit_call_insn (insn);
2316 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2317 ia64_reload_gp ();
2320 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2322 This differs from the generic code in that we know about the zero-extending
2323 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2324 also know that ld.acq+cmpxchg.rel equals a full barrier.
2326 The loop we want to generate looks like
2328 cmp_reg = mem;
2329 label:
2330 old_reg = cmp_reg;
2331 new_reg = cmp_reg op val;
2332 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2333 if (cmp_reg != old_reg)
2334 goto label;
2336 Note that we only do the plain load from memory once. Subsequent
2337 iterations use the value loaded by the compare-and-swap pattern. */
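/* When MEM is SImode or DImode, CODE is PLUS or MINUS, and VAL is one of
   the few immediates accepted by fetchadd_operand, no loop is needed at
   all: the expansion emits a memory barrier followed by a single
   fetchadd4.acq or fetchadd8.acq, and derives NEW_DST, when requested,
   with an ordinary add.  */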
2339 void
2340 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2341 rtx old_dst, rtx new_dst)
2343 enum machine_mode mode = GET_MODE (mem);
2344 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2345 enum insn_code icode;
2347 /* Special case for using fetchadd. */
2348 if ((mode == SImode || mode == DImode)
2349 && (code == PLUS || code == MINUS)
2350 && fetchadd_operand (val, mode))
2352 if (code == MINUS)
2353 val = GEN_INT (-INTVAL (val));
2355 if (!old_dst)
2356 old_dst = gen_reg_rtx (mode);
2358 emit_insn (gen_memory_barrier ());
2360 if (mode == SImode)
2361 icode = CODE_FOR_fetchadd_acq_si;
2362 else
2363 icode = CODE_FOR_fetchadd_acq_di;
2364 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2366 if (new_dst)
2368 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2369 true, OPTAB_WIDEN);
2370 if (new_reg != new_dst)
2371 emit_move_insn (new_dst, new_reg);
2373 return;
2376 /* Because of the volatile mem read, we get an ld.acq, which is the
2377 front half of the full barrier. The end half is the cmpxchg.rel. */
2378 gcc_assert (MEM_VOLATILE_P (mem));
2380 old_reg = gen_reg_rtx (DImode);
2381 cmp_reg = gen_reg_rtx (DImode);
2382 label = gen_label_rtx ();
2384 if (mode != DImode)
2386 val = simplify_gen_subreg (DImode, val, mode, 0);
2387 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2389 else
2390 emit_move_insn (cmp_reg, mem);
2392 emit_label (label);
2394 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2395 emit_move_insn (old_reg, cmp_reg);
2396 emit_move_insn (ar_ccv, cmp_reg);
2398 if (old_dst)
2399 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2401 new_reg = cmp_reg;
2402 if (code == NOT)
2404 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2405 true, OPTAB_DIRECT);
2406 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2408 else
2409 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2410 true, OPTAB_DIRECT);
2412 if (mode != DImode)
2413 new_reg = gen_lowpart (mode, new_reg);
2414 if (new_dst)
2415 emit_move_insn (new_dst, new_reg);
2417 switch (mode)
2419 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2420 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2421 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2422 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2423 default:
2424 gcc_unreachable ();
2427 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2429 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2432 /* Begin the assembly file. */
2434 static void
2435 ia64_file_start (void)
2437 /* Variable tracking should be run after all optimizations which change order
2438 of insns. It also needs a valid CFG. This can't be done in
2439 ia64_option_override, because flag_var_tracking is finalized after
2440 that. */
2441 ia64_flag_var_tracking = flag_var_tracking;
2442 flag_var_tracking = 0;
2444 default_file_start ();
2445 emit_safe_across_calls ();
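/* Emit the .pred.safe_across_calls directive naming the predicate registers
   that are preserved across calls.  With the usual IA-64 conventions this
   typically comes out as something like

        .pred.safe_across_calls p1-p5,p16-p63

   though the exact ranges depend on the call_used_regs configuration.  */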
2448 void
2449 emit_safe_across_calls (void)
2451 unsigned int rs, re;
2452 int out_state;
2454 rs = 1;
2455 out_state = 0;
2456 while (1)
2458 while (rs < 64 && call_used_regs[PR_REG (rs)])
2459 rs++;
2460 if (rs >= 64)
2461 break;
2462 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2463 continue;
2464 if (out_state == 0)
2466 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2467 out_state = 1;
2469 else
2470 fputc (',', asm_out_file);
2471 if (re == rs + 1)
2472 fprintf (asm_out_file, "p%u", rs);
2473 else
2474 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2475 rs = re + 1;
2477 if (out_state)
2478 fputc ('\n', asm_out_file);
2481 /* Globalize a declaration. */
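/* When the decl carries a version_id attribute, e.g. (hypothetical string)

        void foo (void) __attribute__((version_id ("x1")));

   the code below also emits an alias directive of the form

        .alias foo#, "foo{x1}"

   before globalizing the label.  */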
2483 static void
2484 ia64_globalize_decl_name (FILE * stream, tree decl)
2486 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2487 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2488 if (version_attr)
2490 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2491 const char *p = TREE_STRING_POINTER (v);
2492 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2494 targetm.asm_out.globalize_label (stream, name);
2495 if (TREE_CODE (decl) == FUNCTION_DECL)
2496 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2499 /* Helper function for ia64_compute_frame_size: find an appropriate general
2500 register to spill some special register to. The GR0 to GR31 registers
2501 already allocated by this routine are tracked in current_frame_info.gr_used_mask.
2502 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2504 static int
2505 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2507 int regno;
2509 if (emitted_frame_related_regs[r] != 0)
2511 regno = emitted_frame_related_regs[r];
2512 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2513 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2514 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2515 else if (current_function_is_leaf
2516 && regno >= GR_REG (1) && regno <= GR_REG (31))
2517 current_frame_info.gr_used_mask |= 1 << regno;
2519 return regno;
2522 /* If this is a leaf function, first try an otherwise unused
2523 call-clobbered register. */
2524 if (current_function_is_leaf)
2526 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2527 if (! df_regs_ever_live_p (regno)
2528 && call_used_regs[regno]
2529 && ! fixed_regs[regno]
2530 && ! global_regs[regno]
2531 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2532 && ! is_emitted (regno))
2534 current_frame_info.gr_used_mask |= 1 << regno;
2535 return regno;
2539 if (try_locals)
2541 regno = current_frame_info.n_local_regs;
2542 /* If there is a frame pointer, then we can't use loc79, because
2543 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2544 reg_name switching code in ia64_expand_prologue. */
2545 while (regno < (80 - frame_pointer_needed))
2546 if (! is_emitted (LOC_REG (regno++)))
2548 current_frame_info.n_local_regs = regno;
2549 return LOC_REG (regno - 1);
2553 /* Failed to find a general register to spill to. Must use stack. */
2554 return 0;
2557 /* In order to make for nice schedules, we try to allocate every temporary
2558 to a different register. We must of course stay away from call-saved,
2559 fixed, and global registers. We must also stay away from registers
2560 allocated in current_frame_info.gr_used_mask, since those include regs
2561 used all through the prologue.
2563 Any register allocated here must be used immediately. The idea is to
2564 aid scheduling, not to solve data flow problems. */
2566 static int last_scratch_gr_reg;
2568 static int
2569 next_scratch_gr_reg (void)
2571 int i, regno;
2573 for (i = 0; i < 32; ++i)
2575 regno = (last_scratch_gr_reg + i + 1) & 31;
2576 if (call_used_regs[regno]
2577 && ! fixed_regs[regno]
2578 && ! global_regs[regno]
2579 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2581 last_scratch_gr_reg = regno;
2582 return regno;
2586 /* There must be _something_ available. */
2587 gcc_unreachable ();
2590 /* Helper function for ia64_compute_frame_size, called through
2591 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2593 static void
2594 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2596 unsigned int regno = REGNO (reg);
2597 if (regno < 32)
2599 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2600 for (i = 0; i < n; ++i)
2601 current_frame_info.gr_used_mask |= 1 << (regno + i);
2606 /* Compute the size and layout of the stack frame for the current function
2607 and record the results in current_frame_info. SIZE is the number of bytes
2608 of space needed for local variables. */
2610 static void
2611 ia64_compute_frame_size (HOST_WIDE_INT size)
2613 HOST_WIDE_INT total_size;
2614 HOST_WIDE_INT spill_size = 0;
2615 HOST_WIDE_INT extra_spill_size = 0;
2616 HOST_WIDE_INT pretend_args_size;
2617 HARD_REG_SET mask;
2618 int n_spilled = 0;
2619 int spilled_gr_p = 0;
2620 int spilled_fr_p = 0;
2621 unsigned int regno;
2622 int min_regno;
2623 int max_regno;
2624 int i;
2626 if (current_frame_info.initialized)
2627 return;
2629 memset (&current_frame_info, 0, sizeof current_frame_info);
2630 CLEAR_HARD_REG_SET (mask);
2632 /* Don't allocate scratches to the return register. */
2633 diddle_return_value (mark_reg_gr_used_mask, NULL);
2635 /* Don't allocate scratches to the EH scratch registers. */
2636 if (cfun->machine->ia64_eh_epilogue_sp)
2637 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2638 if (cfun->machine->ia64_eh_epilogue_bsp)
2639 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2641 /* Find the size of the register stack frame. We have only 80 local
2642 registers, because we reserve 8 for the inputs and 8 for the
2643 outputs. */
2645 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2646 since we'll be adjusting that down later. */
2647 regno = LOC_REG (78) + ! frame_pointer_needed;
2648 for (; regno >= LOC_REG (0); regno--)
2649 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2650 break;
2651 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2653 /* For functions marked with the syscall_linkage attribute, we must mark
2654 all eight input registers as in use, so that locals aren't visible to
2655 the caller. */
2657 if (cfun->machine->n_varargs > 0
2658 || lookup_attribute ("syscall_linkage",
2659 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2660 current_frame_info.n_input_regs = 8;
2661 else
2663 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2664 if (df_regs_ever_live_p (regno))
2665 break;
2666 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2669 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2670 if (df_regs_ever_live_p (regno))
2671 break;
2672 i = regno - OUT_REG (0) + 1;
2674 #ifndef PROFILE_HOOK
2675 /* When -p profiling, we need one output register for the mcount argument.
2676 Likewise for -a profiling for the bb_init_func argument. For -ax
2677 profiling, we need two output registers for the two bb_init_trace_func
2678 arguments. */
2679 if (crtl->profile)
2680 i = MAX (i, 1);
2681 #endif
2682 current_frame_info.n_output_regs = i;
2684 /* ??? No rotating register support yet. */
2685 current_frame_info.n_rotate_regs = 0;
2687 /* Discover which registers need spilling, and how much room that
2688 will take. Begin with floating point and general registers,
2689 which will always wind up on the stack. */
2691 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2692 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2694 SET_HARD_REG_BIT (mask, regno);
2695 spill_size += 16;
2696 n_spilled += 1;
2697 spilled_fr_p = 1;
2700 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2701 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2703 SET_HARD_REG_BIT (mask, regno);
2704 spill_size += 8;
2705 n_spilled += 1;
2706 spilled_gr_p = 1;
2709 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2710 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2712 SET_HARD_REG_BIT (mask, regno);
2713 spill_size += 8;
2714 n_spilled += 1;
2717 /* Now come all special registers that might get saved in other
2718 general registers. */
2720 if (frame_pointer_needed)
2722 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2723 /* If we did not get a register, then we take LOC79. This is guaranteed
2724 to be free, even if regs_ever_live is already set, because this is
2725 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2726 as we don't count loc79 above. */
2727 if (current_frame_info.r[reg_fp] == 0)
2729 current_frame_info.r[reg_fp] = LOC_REG (79);
2730 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2734 if (! current_function_is_leaf)
2736 /* Emit a save of BR0 if we call other functions. Do this even
2737 if this function doesn't return, as EH depends on this to be
2738 able to unwind the stack. */
2739 SET_HARD_REG_BIT (mask, BR_REG (0));
2741 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2742 if (current_frame_info.r[reg_save_b0] == 0)
2744 extra_spill_size += 8;
2745 n_spilled += 1;
2748 /* Similarly for ar.pfs. */
2749 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2750 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2751 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2753 extra_spill_size += 8;
2754 n_spilled += 1;
2757 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2758 registers are clobbered, so we fall back to the stack. */
2759 current_frame_info.r[reg_save_gp]
2760 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2761 if (current_frame_info.r[reg_save_gp] == 0)
2763 SET_HARD_REG_BIT (mask, GR_REG (1));
2764 spill_size += 8;
2765 n_spilled += 1;
2768 else
2770 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2772 SET_HARD_REG_BIT (mask, BR_REG (0));
2773 extra_spill_size += 8;
2774 n_spilled += 1;
2777 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2779 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2780 current_frame_info.r[reg_save_ar_pfs]
2781 = find_gr_spill (reg_save_ar_pfs, 1);
2782 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2784 extra_spill_size += 8;
2785 n_spilled += 1;
2790 /* Unwind descriptor hackery: things are most efficient if we allocate
2791 consecutive GR save registers for RP, PFS, FP in that order. However,
2792 it is absolutely critical that FP get the only hard register that's
2793 guaranteed to be free, so we allocated it first. If all three did
2794 happen to be allocated hard regs, and are consecutive, rearrange them
2795 into the preferred order now.
2797 If we have already emitted code for any of those registers,
2798 then it's already too late to change. */
2799 min_regno = MIN (current_frame_info.r[reg_fp],
2800 MIN (current_frame_info.r[reg_save_b0],
2801 current_frame_info.r[reg_save_ar_pfs]));
2802 max_regno = MAX (current_frame_info.r[reg_fp],
2803 MAX (current_frame_info.r[reg_save_b0],
2804 current_frame_info.r[reg_save_ar_pfs]));
2805 if (min_regno > 0
2806 && min_regno + 2 == max_regno
2807 && (current_frame_info.r[reg_fp] == min_regno + 1
2808 || current_frame_info.r[reg_save_b0] == min_regno + 1
2809 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2810 && (emitted_frame_related_regs[reg_save_b0] == 0
2811 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2812 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2813 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2814 && (emitted_frame_related_regs[reg_fp] == 0
2815 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2817 current_frame_info.r[reg_save_b0] = min_regno;
2818 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2819 current_frame_info.r[reg_fp] = min_regno + 2;
2822 /* See if we need to store the predicate register block. */
2823 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2824 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2825 break;
2826 if (regno <= PR_REG (63))
2828 SET_HARD_REG_BIT (mask, PR_REG (0));
2829 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2830 if (current_frame_info.r[reg_save_pr] == 0)
2832 extra_spill_size += 8;
2833 n_spilled += 1;
2836 /* ??? Mark them all as used so that register renaming and such
2837 are free to use them. */
2838 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2839 df_set_regs_ever_live (regno, true);
2842 /* If we're forced to use st8.spill, we're forced to save and restore
2843 ar.unat as well. The check for existing liveness allows inline asm
2844 to touch ar.unat. */
2845 if (spilled_gr_p || cfun->machine->n_varargs
2846 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2848 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2849 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2850 current_frame_info.r[reg_save_ar_unat]
2851 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2852 if (current_frame_info.r[reg_save_ar_unat] == 0)
2854 extra_spill_size += 8;
2855 n_spilled += 1;
2859 if (df_regs_ever_live_p (AR_LC_REGNUM))
2861 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2862 current_frame_info.r[reg_save_ar_lc]
2863 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2864 if (current_frame_info.r[reg_save_ar_lc] == 0)
2866 extra_spill_size += 8;
2867 n_spilled += 1;
2871 /* If we have an odd number of words of pretend arguments written to
2872 the stack, then the FR save area will be unaligned. We round the
2873 size of this area up to keep things 16 byte aligned. */
2874 if (spilled_fr_p)
2875 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2876 else
2877 pretend_args_size = crtl->args.pretend_args_size;
2879 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2880 + crtl->outgoing_args_size);
2881 total_size = IA64_STACK_ALIGN (total_size);
2883 /* We always use the 16-byte scratch area provided by the caller, but
2884 if we are a leaf function, there's no one to which we need to provide
2885 a scratch area. */
2886 if (current_function_is_leaf)
2887 total_size = MAX (0, total_size - 16);
2889 current_frame_info.total_size = total_size;
2890 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2891 current_frame_info.spill_size = spill_size;
2892 current_frame_info.extra_spill_size = extra_spill_size;
2893 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2894 current_frame_info.n_spilled = n_spilled;
2895 current_frame_info.initialized = reload_completed;
2898 /* Worker function for TARGET_CAN_ELIMINATE. */
2900 bool
2901 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2903 return (to == BR_REG (0) ? current_function_is_leaf : true);
2906 /* Compute the initial difference between the specified pair of registers. */
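/* For illustration of the formulas below: in a non-leaf function with
   total_size == 160, no pretend args and no outgoing args, eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM gives 160 + 16 - 0 == 176,
   while FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM gives 16 + 0 == 16.  */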
2908 HOST_WIDE_INT
2909 ia64_initial_elimination_offset (int from, int to)
2911 HOST_WIDE_INT offset;
2913 ia64_compute_frame_size (get_frame_size ());
2914 switch (from)
2916 case FRAME_POINTER_REGNUM:
2917 switch (to)
2919 case HARD_FRAME_POINTER_REGNUM:
2920 if (current_function_is_leaf)
2921 offset = -current_frame_info.total_size;
2922 else
2923 offset = -(current_frame_info.total_size
2924 - crtl->outgoing_args_size - 16);
2925 break;
2927 case STACK_POINTER_REGNUM:
2928 if (current_function_is_leaf)
2929 offset = 0;
2930 else
2931 offset = 16 + crtl->outgoing_args_size;
2932 break;
2934 default:
2935 gcc_unreachable ();
2937 break;
2939 case ARG_POINTER_REGNUM:
2940 /* Arguments start above the 16 byte save area, unless stdarg,
2941 in which case we store through the 16 byte save area. */
2942 switch (to)
2944 case HARD_FRAME_POINTER_REGNUM:
2945 offset = 16 - crtl->args.pretend_args_size;
2946 break;
2948 case STACK_POINTER_REGNUM:
2949 offset = (current_frame_info.total_size
2950 + 16 - crtl->args.pretend_args_size);
2951 break;
2953 default:
2954 gcc_unreachable ();
2956 break;
2958 default:
2959 gcc_unreachable ();
2962 return offset;
2965 /* If there are more than a trivial number of register spills, we use
2966 two interleaved iterators so that we can get two memory references
2967 per insn group.
2969 In order to simplify things in the prologue and epilogue expanders,
2970 we use helper functions to fix up the memory references after the
2971 fact with the appropriate offsets to a POST_MODIFY memory mode.
2972 The following data structure tracks the state of the two iterators
2973 while insns are being emitted. */
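/* Concretely: when more than two spills are emitted, two scratch GRs serve
   as base pointers and successive spill_restore_mem calls alternate between
   them (next_iter), so adjacent st8.spill/ld8.fill insns use different base
   registers and can issue in the same group.  Once the following offset is
   known, the previous memory reference is rewritten into POST_MODIFY form
   through prev_addr/prev_insn.  */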
2975 struct spill_fill_data
2977 rtx init_after; /* point at which to emit initializations */
2978 rtx init_reg[2]; /* initial base register */
2979 rtx iter_reg[2]; /* the iterator registers */
2980 rtx *prev_addr[2]; /* address of last memory use */
2981 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2982 HOST_WIDE_INT prev_off[2]; /* last offset */
2983 int n_iter; /* number of iterators in use */
2984 int next_iter; /* next iterator to use */
2985 unsigned int save_gr_used_mask;
2988 static struct spill_fill_data spill_fill_data;
2990 static void
2991 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2993 int i;
2995 spill_fill_data.init_after = get_last_insn ();
2996 spill_fill_data.init_reg[0] = init_reg;
2997 spill_fill_data.init_reg[1] = init_reg;
2998 spill_fill_data.prev_addr[0] = NULL;
2999 spill_fill_data.prev_addr[1] = NULL;
3000 spill_fill_data.prev_insn[0] = NULL;
3001 spill_fill_data.prev_insn[1] = NULL;
3002 spill_fill_data.prev_off[0] = cfa_off;
3003 spill_fill_data.prev_off[1] = cfa_off;
3004 spill_fill_data.next_iter = 0;
3005 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3007 spill_fill_data.n_iter = 1 + (n_spills > 2);
3008 for (i = 0; i < spill_fill_data.n_iter; ++i)
3010 int regno = next_scratch_gr_reg ();
3011 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3012 current_frame_info.gr_used_mask |= 1 << regno;
3016 static void
3017 finish_spill_pointers (void)
3019 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3022 static rtx
3023 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3025 int iter = spill_fill_data.next_iter;
3026 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3027 rtx disp_rtx = GEN_INT (disp);
3028 rtx mem;
3030 if (spill_fill_data.prev_addr[iter])
3032 if (satisfies_constraint_N (disp_rtx))
3034 *spill_fill_data.prev_addr[iter]
3035 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3036 gen_rtx_PLUS (DImode,
3037 spill_fill_data.iter_reg[iter],
3038 disp_rtx));
3039 add_reg_note (spill_fill_data.prev_insn[iter],
3040 REG_INC, spill_fill_data.iter_reg[iter]);
3042 else
3044 /* ??? Could use register post_modify for loads. */
3045 if (!satisfies_constraint_I (disp_rtx))
3047 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3048 emit_move_insn (tmp, disp_rtx);
3049 disp_rtx = tmp;
3051 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3052 spill_fill_data.iter_reg[iter], disp_rtx));
3055 /* Micro-optimization: if we've created a frame pointer, it's at
3056 CFA 0, which may allow the real iterator to be initialized lower,
3057 slightly increasing parallelism. Also, if there are few saves
3058 it may eliminate the iterator entirely. */
3059 else if (disp == 0
3060 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3061 && frame_pointer_needed)
3063 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3064 set_mem_alias_set (mem, get_varargs_alias_set ());
3065 return mem;
3067 else
3069 rtx seq, insn;
3071 if (disp == 0)
3072 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3073 spill_fill_data.init_reg[iter]);
3074 else
3076 start_sequence ();
3078 if (!satisfies_constraint_I (disp_rtx))
3080 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3081 emit_move_insn (tmp, disp_rtx);
3082 disp_rtx = tmp;
3085 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3086 spill_fill_data.init_reg[iter],
3087 disp_rtx));
3089 seq = get_insns ();
3090 end_sequence ();
3093 /* Be careful in case this would be the very first insn emitted. */
3094 if (spill_fill_data.init_after)
3095 insn = emit_insn_after (seq, spill_fill_data.init_after);
3096 else
3098 rtx first = get_insns ();
3099 if (first)
3100 insn = emit_insn_before (seq, first);
3101 else
3102 insn = emit_insn (seq);
3104 spill_fill_data.init_after = insn;
3107 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3109 /* ??? Not all of the spills are for varargs, but some of them are.
3110 The rest of the spills belong in an alias set of their own. But
3111 it doesn't actually hurt to include them here. */
3112 set_mem_alias_set (mem, get_varargs_alias_set ());
3114 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3115 spill_fill_data.prev_off[iter] = cfa_off;
3117 if (++iter >= spill_fill_data.n_iter)
3118 iter = 0;
3119 spill_fill_data.next_iter = iter;
3121 return mem;
3124 static void
3125 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3126 rtx frame_reg)
3128 int iter = spill_fill_data.next_iter;
3129 rtx mem, insn;
3131 mem = spill_restore_mem (reg, cfa_off);
3132 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3133 spill_fill_data.prev_insn[iter] = insn;
3135 if (frame_reg)
3137 rtx base;
3138 HOST_WIDE_INT off;
3140 RTX_FRAME_RELATED_P (insn) = 1;
3142 /* Don't even pretend that the unwind code can intuit its way
3143 through a pair of interleaved post_modify iterators. Just
3144 provide the correct answer. */
3146 if (frame_pointer_needed)
3148 base = hard_frame_pointer_rtx;
3149 off = - cfa_off;
3151 else
3153 base = stack_pointer_rtx;
3154 off = current_frame_info.total_size - cfa_off;
3157 add_reg_note (insn, REG_CFA_OFFSET,
3158 gen_rtx_SET (VOIDmode,
3159 gen_rtx_MEM (GET_MODE (reg),
3160 plus_constant (base, off)),
3161 frame_reg));
3165 static void
3166 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3168 int iter = spill_fill_data.next_iter;
3169 rtx insn;
3171 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3172 GEN_INT (cfa_off)));
3173 spill_fill_data.prev_insn[iter] = insn;
3176 /* Wrapper functions that discard the CONST_INT spill offset. These
3177 exist so that we can give gr_spill/gr_fill the offset they need and
3178 use a consistent function interface. */
3180 static rtx
3181 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3183 return gen_movdi (dest, src);
3186 static rtx
3187 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3189 return gen_fr_spill (dest, src);
3192 static rtx
3193 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3195 return gen_fr_restore (dest, src);
3198 /* Called after register allocation to add any instructions needed for the
3199 prologue. Using a prologue insn is favored compared to putting all of the
3200 instructions in output_function_prologue(), since it allows the scheduler
3201 to intermix instructions with the saves of the call-saved registers. In
3202 some cases, it might be necessary to emit a barrier instruction as the last
3203 insn to prevent such scheduling.
3205 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3206 so that the debug info generation code can handle them properly.
3208 The register save area is laid out like so:
3209 cfa+16
3210 [ varargs spill area ]
3211 [ fr register spill area ]
3212 [ br register spill area ]
3213 [ ar register spill area ]
3214 [ pr register spill area ]
3215 [ gr register spill area ] */
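/* The running CFA_OFF in the expanders below starts at
   spill_cfa_off + spill_size + extra_spill_size and is decremented by 8 for
   each GR/BR/AR/PR slot and by 16 for each FR slot; the asserts check that
   it passes through spill_cfa_off + spill_size at the base of the gr/br/fr
   area and ends exactly at spill_cfa_off.  */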
3217 /* ??? We get inefficient code when the frame size is larger than can fit
3218 in an adds instruction's immediate. */
3220 void
3221 ia64_expand_prologue (void)
3223 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3224 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3225 rtx reg, alt_reg;
3227 ia64_compute_frame_size (get_frame_size ());
3228 last_scratch_gr_reg = 15;
3230 if (flag_stack_usage)
3231 current_function_static_stack_size = current_frame_info.total_size;
3233 if (dump_file)
3235 fprintf (dump_file, "ia64 frame related registers "
3236 "recorded in current_frame_info.r[]:\n");
3237 #define PRINTREG(a) if (current_frame_info.r[a]) \
3238 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3239 PRINTREG(reg_fp);
3240 PRINTREG(reg_save_b0);
3241 PRINTREG(reg_save_pr);
3242 PRINTREG(reg_save_ar_pfs);
3243 PRINTREG(reg_save_ar_unat);
3244 PRINTREG(reg_save_ar_lc);
3245 PRINTREG(reg_save_gp);
3246 #undef PRINTREG
3249 /* If there is no epilogue, then we don't need some prologue insns.
3250 We need to avoid emitting the dead prologue insns, because flow
3251 will complain about them. */
3252 if (optimize)
3254 edge e;
3255 edge_iterator ei;
3257 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3258 if ((e->flags & EDGE_FAKE) == 0
3259 && (e->flags & EDGE_FALLTHRU) != 0)
3260 break;
3261 epilogue_p = (e != NULL);
3263 else
3264 epilogue_p = 1;
3266 /* Set the local, input, and output register names. We need to do this
3267 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3268 half. If we use in/loc/out register names, then we get assembler errors
3269 in crtn.S because there is no alloc insn or regstk directive in there. */
3270 if (! TARGET_REG_NAMES)
3272 int inputs = current_frame_info.n_input_regs;
3273 int locals = current_frame_info.n_local_regs;
3274 int outputs = current_frame_info.n_output_regs;
3276 for (i = 0; i < inputs; i++)
3277 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3278 for (i = 0; i < locals; i++)
3279 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3280 for (i = 0; i < outputs; i++)
3281 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3284 /* Set the frame pointer register name. The regnum is logically loc79,
3285 but of course we'll not have allocated that many locals. Rather than
3286 worrying about renumbering the existing rtxs, we adjust the name. */
3287 /* ??? This code means that we can never use one local register when
3288 there is a frame pointer. loc79 gets wasted in this case, as it is
3289 renamed to a register that will never be used. See also the try_locals
3290 code in find_gr_spill. */
3291 if (current_frame_info.r[reg_fp])
3293 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3294 reg_names[HARD_FRAME_POINTER_REGNUM]
3295 = reg_names[current_frame_info.r[reg_fp]];
3296 reg_names[current_frame_info.r[reg_fp]] = tmp;
3299 /* We don't need an alloc instruction if we've used no outputs or locals. */
3300 if (current_frame_info.n_local_regs == 0
3301 && current_frame_info.n_output_regs == 0
3302 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3303 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3305 /* If there is no alloc, but there are input registers used, then we
3306 need a .regstk directive. */
3307 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3308 ar_pfs_save_reg = NULL_RTX;
3310 else
3312 current_frame_info.need_regstk = 0;
3314 if (current_frame_info.r[reg_save_ar_pfs])
3316 regno = current_frame_info.r[reg_save_ar_pfs];
3317 reg_emitted (reg_save_ar_pfs);
3319 else
3320 regno = next_scratch_gr_reg ();
3321 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3323 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3324 GEN_INT (current_frame_info.n_input_regs),
3325 GEN_INT (current_frame_info.n_local_regs),
3326 GEN_INT (current_frame_info.n_output_regs),
3327 GEN_INT (current_frame_info.n_rotate_regs)));
3328 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3331 /* Set up frame pointer, stack pointer, and spill iterators. */
3333 n_varargs = cfun->machine->n_varargs;
3334 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3335 stack_pointer_rtx, 0);
3337 if (frame_pointer_needed)
3339 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3340 RTX_FRAME_RELATED_P (insn) = 1;
3342 /* Force the unwind info to recognize this as defining a new CFA,
3343 rather than some temp register setup. */
3344 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3347 if (current_frame_info.total_size != 0)
3349 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3350 rtx offset;
3352 if (satisfies_constraint_I (frame_size_rtx))
3353 offset = frame_size_rtx;
3354 else
3356 regno = next_scratch_gr_reg ();
3357 offset = gen_rtx_REG (DImode, regno);
3358 emit_move_insn (offset, frame_size_rtx);
3361 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3362 stack_pointer_rtx, offset));
3364 if (! frame_pointer_needed)
3366 RTX_FRAME_RELATED_P (insn) = 1;
3367 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3368 gen_rtx_SET (VOIDmode,
3369 stack_pointer_rtx,
3370 gen_rtx_PLUS (DImode,
3371 stack_pointer_rtx,
3372 frame_size_rtx)));
3375 /* ??? At this point we must generate a magic insn that appears to
3376 modify the stack pointer, the frame pointer, and all spill
3377 iterators. This would allow the most scheduling freedom. For
3378 now, just hard stop. */
3379 emit_insn (gen_blockage ());
3382 /* Must copy out ar.unat before doing any integer spills. */
3383 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3385 if (current_frame_info.r[reg_save_ar_unat])
3387 ar_unat_save_reg
3388 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3389 reg_emitted (reg_save_ar_unat);
3391 else
3393 alt_regno = next_scratch_gr_reg ();
3394 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3395 current_frame_info.gr_used_mask |= 1 << alt_regno;
3398 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3399 insn = emit_move_insn (ar_unat_save_reg, reg);
3400 if (current_frame_info.r[reg_save_ar_unat])
3402 RTX_FRAME_RELATED_P (insn) = 1;
3403 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3406 /* Even if we're not going to generate an epilogue, we still
3407 need to save the register so that EH works. */
3408 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3409 emit_insn (gen_prologue_use (ar_unat_save_reg));
3411 else
3412 ar_unat_save_reg = NULL_RTX;
3414 /* Spill all varargs registers. Do this before spilling any GR registers,
3415 since we want the UNAT bits for the GR registers to override the UNAT
3416 bits from varargs, which we don't care about. */
3418 cfa_off = -16;
3419 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3421 reg = gen_rtx_REG (DImode, regno);
3422 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3425 /* Locate the bottom of the register save area. */
3426 cfa_off = (current_frame_info.spill_cfa_off
3427 + current_frame_info.spill_size
3428 + current_frame_info.extra_spill_size);
3430 /* Save the predicate register block either in a register or in memory. */
3431 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3433 reg = gen_rtx_REG (DImode, PR_REG (0));
3434 if (current_frame_info.r[reg_save_pr] != 0)
3436 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3437 reg_emitted (reg_save_pr);
3438 insn = emit_move_insn (alt_reg, reg);
3440 /* ??? Denote pr spill/fill by a DImode move that modifies all
3441 64 hard registers. */
3442 RTX_FRAME_RELATED_P (insn) = 1;
3443 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3445 /* Even if we're not going to generate an epilogue, we still
3446 need to save the register so that EH works. */
3447 if (! epilogue_p)
3448 emit_insn (gen_prologue_use (alt_reg));
3450 else
3452 alt_regno = next_scratch_gr_reg ();
3453 alt_reg = gen_rtx_REG (DImode, alt_regno);
3454 insn = emit_move_insn (alt_reg, reg);
3455 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3456 cfa_off -= 8;
3460 /* Handle AR regs in numerical order. All of them get special handling. */
3461 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3462 && current_frame_info.r[reg_save_ar_unat] == 0)
3464 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3465 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3466 cfa_off -= 8;
3469 /* The alloc insn already copied ar.pfs into a general register. The
3470 only thing we have to do now is copy that register to a stack slot
3471 if we'd not allocated a local register for the job. */
3472 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3473 && current_frame_info.r[reg_save_ar_pfs] == 0)
3475 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3476 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3477 cfa_off -= 8;
3480 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3482 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3483 if (current_frame_info.r[reg_save_ar_lc] != 0)
3485 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3486 reg_emitted (reg_save_ar_lc);
3487 insn = emit_move_insn (alt_reg, reg);
3488 RTX_FRAME_RELATED_P (insn) = 1;
3489 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3491 /* Even if we're not going to generate an epilogue, we still
3492 need to save the register so that EH works. */
3493 if (! epilogue_p)
3494 emit_insn (gen_prologue_use (alt_reg));
3496 else
3498 alt_regno = next_scratch_gr_reg ();
3499 alt_reg = gen_rtx_REG (DImode, alt_regno);
3500 emit_move_insn (alt_reg, reg);
3501 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3502 cfa_off -= 8;
3506 /* Save the return pointer. */
3507 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3509 reg = gen_rtx_REG (DImode, BR_REG (0));
3510 if (current_frame_info.r[reg_save_b0] != 0)
3512 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3513 reg_emitted (reg_save_b0);
3514 insn = emit_move_insn (alt_reg, reg);
3515 RTX_FRAME_RELATED_P (insn) = 1;
3516 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3518 /* Even if we're not going to generate an epilogue, we still
3519 need to save the register so that EH works. */
3520 if (! epilogue_p)
3521 emit_insn (gen_prologue_use (alt_reg));
3523 else
3525 alt_regno = next_scratch_gr_reg ();
3526 alt_reg = gen_rtx_REG (DImode, alt_regno);
3527 emit_move_insn (alt_reg, reg);
3528 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3529 cfa_off -= 8;
3533 if (current_frame_info.r[reg_save_gp])
3535 reg_emitted (reg_save_gp);
3536 insn = emit_move_insn (gen_rtx_REG (DImode,
3537 current_frame_info.r[reg_save_gp]),
3538 pic_offset_table_rtx);
3541 /* We should now be at the base of the gr/br/fr spill area. */
3542 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3543 + current_frame_info.spill_size));
3545 /* Spill all general registers. */
3546 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3547 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3549 reg = gen_rtx_REG (DImode, regno);
3550 do_spill (gen_gr_spill, reg, cfa_off, reg);
3551 cfa_off -= 8;
3554 /* Spill the rest of the BR registers. */
3555 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3556 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3558 alt_regno = next_scratch_gr_reg ();
3559 alt_reg = gen_rtx_REG (DImode, alt_regno);
3560 reg = gen_rtx_REG (DImode, regno);
3561 emit_move_insn (alt_reg, reg);
3562 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3563 cfa_off -= 8;
3566 /* Align the frame and spill all FR registers. */
3567 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3568 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3570 gcc_assert (!(cfa_off & 15));
3571 reg = gen_rtx_REG (XFmode, regno);
3572 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3573 cfa_off -= 16;
3576 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3578 finish_spill_pointers ();
3581 /* Output the textual info surrounding the prologue. */
3583 void
3584 ia64_start_function (FILE *file, const char *fnname,
3585 tree decl ATTRIBUTE_UNUSED)
3587 #if VMS_DEBUGGING_INFO
3588 if (vms_debug_main
3589 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3591 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3592 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3593 dwarf2out_vms_debug_main_pointer ();
3594 vms_debug_main = 0;
3596 #endif
3598 fputs ("\t.proc ", file);
3599 assemble_name (file, fnname);
3600 fputc ('\n', file);
3601 ASM_OUTPUT_LABEL (file, fnname);
3604 /* Called after register allocation to add any instructions needed for the
3605 epilogue. Using an epilogue insn is favored compared to putting all of the
3606 instructions in output_function_epilogue(), since it allows the scheduler
3607 to intermix instructions with the restores of the call-saved registers. In
3608 some cases, it might be necessary to emit a barrier instruction as the last
3609 insn to prevent such scheduling. */
3611 void
3612 ia64_expand_epilogue (int sibcall_p)
3614 rtx insn, reg, alt_reg, ar_unat_save_reg;
3615 int regno, alt_regno, cfa_off;
3617 ia64_compute_frame_size (get_frame_size ());
3619 /* If there is a frame pointer, then we use it instead of the stack
3620 pointer, so that the stack pointer does not need to be valid when
3621 the epilogue starts. See EXIT_IGNORE_STACK. */
3622 if (frame_pointer_needed)
3623 setup_spill_pointers (current_frame_info.n_spilled,
3624 hard_frame_pointer_rtx, 0);
3625 else
3626 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3627 current_frame_info.total_size);
3629 if (current_frame_info.total_size != 0)
3631 /* ??? At this point we must generate a magic insn that appears to
3632 modify the spill iterators and the frame pointer. This would
3633 allow the most scheduling freedom. For now, just hard stop. */
3634 emit_insn (gen_blockage ());
3637 /* Locate the bottom of the register save area. */
3638 cfa_off = (current_frame_info.spill_cfa_off
3639 + current_frame_info.spill_size
3640 + current_frame_info.extra_spill_size);
3642 /* Restore the predicate registers. */
3643 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3645 if (current_frame_info.r[reg_save_pr] != 0)
3647 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3648 reg_emitted (reg_save_pr);
3650 else
3652 alt_regno = next_scratch_gr_reg ();
3653 alt_reg = gen_rtx_REG (DImode, alt_regno);
3654 do_restore (gen_movdi_x, alt_reg, cfa_off);
3655 cfa_off -= 8;
3657 reg = gen_rtx_REG (DImode, PR_REG (0));
3658 emit_move_insn (reg, alt_reg);
3661 /* Restore the application registers. */
3663 /* Load the saved unat from the stack, but do not restore it until
3664 after the GRs have been restored. */
3665 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3667 if (current_frame_info.r[reg_save_ar_unat] != 0)
3669 ar_unat_save_reg
3670 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3671 reg_emitted (reg_save_ar_unat);
3673 else
3675 alt_regno = next_scratch_gr_reg ();
3676 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3677 current_frame_info.gr_used_mask |= 1 << alt_regno;
3678 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3679 cfa_off -= 8;
3682 else
3683 ar_unat_save_reg = NULL_RTX;
3685 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3687 reg_emitted (reg_save_ar_pfs);
3688 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3689 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3690 emit_move_insn (reg, alt_reg);
3692 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3694 alt_regno = next_scratch_gr_reg ();
3695 alt_reg = gen_rtx_REG (DImode, alt_regno);
3696 do_restore (gen_movdi_x, alt_reg, cfa_off);
3697 cfa_off -= 8;
3698 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3699 emit_move_insn (reg, alt_reg);
3702 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3704 if (current_frame_info.r[reg_save_ar_lc] != 0)
3706 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3707 reg_emitted (reg_save_ar_lc);
3709 else
3711 alt_regno = next_scratch_gr_reg ();
3712 alt_reg = gen_rtx_REG (DImode, alt_regno);
3713 do_restore (gen_movdi_x, alt_reg, cfa_off);
3714 cfa_off -= 8;
3716 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3717 emit_move_insn (reg, alt_reg);
3720 /* Restore the return pointer. */
3721 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3723 if (current_frame_info.r[reg_save_b0] != 0)
3725 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3726 reg_emitted (reg_save_b0);
3728 else
3730 alt_regno = next_scratch_gr_reg ();
3731 alt_reg = gen_rtx_REG (DImode, alt_regno);
3732 do_restore (gen_movdi_x, alt_reg, cfa_off);
3733 cfa_off -= 8;
3735 reg = gen_rtx_REG (DImode, BR_REG (0));
3736 emit_move_insn (reg, alt_reg);
3739 /* We should now be at the base of the gr/br/fr spill area. */
3740 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3741 + current_frame_info.spill_size));
3743 /* The GP may be stored on the stack in the prologue, but it's
3744 never restored in the epilogue. Skip the stack slot. */
3745 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3746 cfa_off -= 8;
3748 /* Restore all general registers. */
3749 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3750 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3752 reg = gen_rtx_REG (DImode, regno);
3753 do_restore (gen_gr_restore, reg, cfa_off);
3754 cfa_off -= 8;
3757 /* Restore the branch registers. */
3758 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3759 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3761 alt_regno = next_scratch_gr_reg ();
3762 alt_reg = gen_rtx_REG (DImode, alt_regno);
3763 do_restore (gen_movdi_x, alt_reg, cfa_off);
3764 cfa_off -= 8;
3765 reg = gen_rtx_REG (DImode, regno);
3766 emit_move_insn (reg, alt_reg);
3769 /* Restore floating point registers. */
3770 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3771 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3773 gcc_assert (!(cfa_off & 15));
3774 reg = gen_rtx_REG (XFmode, regno);
3775 do_restore (gen_fr_restore_x, reg, cfa_off);
3776 cfa_off -= 16;
3779 /* Restore ar.unat for real. */
3780 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3782 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3783 emit_move_insn (reg, ar_unat_save_reg);
3786 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3788 finish_spill_pointers ();
3790 if (current_frame_info.total_size
3791 || cfun->machine->ia64_eh_epilogue_sp
3792 || frame_pointer_needed)
3794 /* ??? At this point we must generate a magic insn that appears to
3795 modify the spill iterators, the stack pointer, and the frame
3796 pointer. This would allow the most scheduling freedom. For now,
3797 just hard stop. */
3798 emit_insn (gen_blockage ());
3801 if (cfun->machine->ia64_eh_epilogue_sp)
3802 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3803 else if (frame_pointer_needed)
3805 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3806 RTX_FRAME_RELATED_P (insn) = 1;
3807 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3809 else if (current_frame_info.total_size)
3811 rtx offset, frame_size_rtx;
3813 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3814 if (satisfies_constraint_I (frame_size_rtx))
3815 offset = frame_size_rtx;
3816 else
3818 regno = next_scratch_gr_reg ();
3819 offset = gen_rtx_REG (DImode, regno);
3820 emit_move_insn (offset, frame_size_rtx);
3823 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3824 offset));
3826 RTX_FRAME_RELATED_P (insn) = 1;
3827 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3828 gen_rtx_SET (VOIDmode,
3829 stack_pointer_rtx,
3830 gen_rtx_PLUS (DImode,
3831 stack_pointer_rtx,
3832 frame_size_rtx)));
3835 if (cfun->machine->ia64_eh_epilogue_bsp)
3836 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3838 if (! sibcall_p)
3839 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3840 else
3842 int fp = GR_REG (2);
3843 /* We need a throwaway register here; r0 and r1 are reserved,
3844 so r2 is the first available call-clobbered register. If
3845 there was a frame_pointer register, we may have swapped the
3846 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3847 sure we're using the string "r2" when emitting the register
3848 name for the assembler. */
3849 if (current_frame_info.r[reg_fp]
3850 && current_frame_info.r[reg_fp] == GR_REG (2))
3851 fp = HARD_FRAME_POINTER_REGNUM;
3853 /* We must emit an alloc to force the input registers to become output
3854 registers. Otherwise, if the callee tries to pass its parameters
3855 through to another call without an intervening alloc, then these
3856 values get lost. */
3857 /* ??? We don't need to preserve all input registers. We only need to
3858 preserve those input registers used as arguments to the sibling call.
3859 It is unclear how to compute that number here. */
3860 if (current_frame_info.n_input_regs != 0)
3862 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3863 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3864 const0_rtx, const0_rtx,
3865 n_inputs, const0_rtx));
3866 RTX_FRAME_RELATED_P (insn) = 1;
3871 /* Return 1 if br.ret can do all the work required to return from a
3872 function. */
3874 int
3875 ia64_direct_return (void)
3877 if (reload_completed && ! frame_pointer_needed)
3879 ia64_compute_frame_size (get_frame_size ());
3881 return (current_frame_info.total_size == 0
3882 && current_frame_info.n_spilled == 0
3883 && current_frame_info.r[reg_save_b0] == 0
3884 && current_frame_info.r[reg_save_pr] == 0
3885 && current_frame_info.r[reg_save_ar_pfs] == 0
3886 && current_frame_info.r[reg_save_ar_unat] == 0
3887 && current_frame_info.r[reg_save_ar_lc] == 0);
3889 return 0;
3892 /* Return the magic cookie that we use to hold the return address
3893 during early compilation. */
3895 rtx
3896 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3898 if (count != 0)
3899 return NULL;
3900 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3903 /* Split this value after reload, now that we know where the return
3904 address is saved. */
3906 void
3907 ia64_split_return_addr_rtx (rtx dest)
3909 rtx src;
3911 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3913 if (current_frame_info.r[reg_save_b0] != 0)
3915 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3916 reg_emitted (reg_save_b0);
3918 else
3920 HOST_WIDE_INT off;
3921 unsigned int regno;
3922 rtx off_r;
3924 /* Compute offset from CFA for BR0. */
3925 /* ??? Must be kept in sync with ia64_expand_prologue. */
3926 off = (current_frame_info.spill_cfa_off
3927 + current_frame_info.spill_size);
3928 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3929 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3930 off -= 8;
3932 /* Convert CFA offset to a register based offset. */
3933 if (frame_pointer_needed)
3934 src = hard_frame_pointer_rtx;
3935 else
3937 src = stack_pointer_rtx;
3938 off += current_frame_info.total_size;
3941 /* Load address into scratch register. */
3942 off_r = GEN_INT (off);
3943 if (satisfies_constraint_I (off_r))
3944 emit_insn (gen_adddi3 (dest, src, off_r));
3945 else
3947 emit_move_insn (dest, off_r);
3948 emit_insn (gen_adddi3 (dest, src, dest));
3951 src = gen_rtx_MEM (Pmode, dest);
3954 else
3955 src = gen_rtx_REG (DImode, BR_REG (0));
3957 emit_move_insn (dest, src);
3960 int
3961 ia64_hard_regno_rename_ok (int from, int to)
3963 /* Don't clobber any of the registers we reserved for the prologue. */
3964 unsigned int r;
3966 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3967 if (to == current_frame_info.r[r]
3968 || from == current_frame_info.r[r]
3969 || to == emitted_frame_related_regs[r]
3970 || from == emitted_frame_related_regs[r])
3971 return 0;
3973 /* Don't use output registers outside the register frame. */
3974 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3975 return 0;
3977 /* Retain even/oddness on predicate register pairs. */
3978 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3979 return (from & 1) == (to & 1);
3981 return 1;
3984 /* Target hook for assembling integer objects. Handle word-sized
3985 aligned objects and detect the cases when @fptr is needed. */
3987 static bool
3988 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3990 if (size == POINTER_SIZE / BITS_PER_UNIT
3991 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3992 && GET_CODE (x) == SYMBOL_REF
3993 && SYMBOL_REF_FUNCTION_P (x))
3995 static const char * const directive[2][2] = {
3996 /* 64-bit pointer */ /* 32-bit pointer */
3997 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3998 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4000 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4001 output_addr_const (asm_out_file, x);
4002 fputs (")\n", asm_out_file);
4003 return true;
4005 return default_assemble_integer (x, size, aligned_p);
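/* Editor's illustration of the @fptr case above (a sketch, not emitted
   verbatim by this function): for an aligned 64-bit pointer to a function
   `foo' the directive table selects

       data8   @fptr(foo)

   while an unaligned 32-bit pointer would use "data4.ua @fptr(foo)".  */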
4008 /* Emit the function prologue. */
4010 static void
4011 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4013 int mask, grsave, grsave_prev;
4015 if (current_frame_info.need_regstk)
4016 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4017 current_frame_info.n_input_regs,
4018 current_frame_info.n_local_regs,
4019 current_frame_info.n_output_regs,
4020 current_frame_info.n_rotate_regs);
4022 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4023 return;
4025 /* Emit the .prologue directive. */
4027 mask = 0;
4028 grsave = grsave_prev = 0;
4029 if (current_frame_info.r[reg_save_b0] != 0)
4031 mask |= 8;
4032 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4034 if (current_frame_info.r[reg_save_ar_pfs] != 0
4035 && (grsave_prev == 0
4036 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4038 mask |= 4;
4039 if (grsave_prev == 0)
4040 grsave = current_frame_info.r[reg_save_ar_pfs];
4041 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4043 if (current_frame_info.r[reg_fp] != 0
4044 && (grsave_prev == 0
4045 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4047 mask |= 2;
4048 if (grsave_prev == 0)
4049 grsave = HARD_FRAME_POINTER_REGNUM;
4050 grsave_prev = current_frame_info.r[reg_fp];
4052 if (current_frame_info.r[reg_save_pr] != 0
4053 && (grsave_prev == 0
4054 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4056 mask |= 1;
4057 if (grsave_prev == 0)
4058 grsave = current_frame_info.r[reg_save_pr];
4061 if (mask && TARGET_GNU_AS)
4062 fprintf (file, "\t.prologue %d, %d\n", mask,
4063 ia64_dbx_register_number (grsave));
4064 else
4065 fputs ("\t.prologue\n", file);
4067 /* Emit a .spill directive, if necessary, to relocate the base of
4068 the register spill area. */
4069 if (current_frame_info.spill_cfa_off != -16)
4070 fprintf (file, "\t.spill %ld\n",
4071 (long) (current_frame_info.spill_cfa_off
4072 + current_frame_info.spill_size));
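/* Editor's note on the mask computed above: bit 3 (8) records that b0 was
   saved in a general register, bit 2 (4) ar.pfs, bit 1 (2) the frame
   pointer and bit 0 (1) the predicates, with GRSAVE naming the first GR of
   that consecutive group.  Saving only b0 and ar.pfs in adjacent registers
   would therefore emit something like

       .prologue 12, 32

   where 12 == 8|4 and the second operand is whatever
   ia64_dbx_register_number returns for the first save register.  */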
4075 /* Emit the .body directive at the scheduled end of the prologue. */
4077 static void
4078 ia64_output_function_end_prologue (FILE *file)
4080 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4081 return;
4083 fputs ("\t.body\n", file);
4086 /* Emit the function epilogue. */
4088 static void
4089 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4090 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4092 int i;
4094 if (current_frame_info.r[reg_fp])
4096 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4097 reg_names[HARD_FRAME_POINTER_REGNUM]
4098 = reg_names[current_frame_info.r[reg_fp]];
4099 reg_names[current_frame_info.r[reg_fp]] = tmp;
4100 reg_emitted (reg_fp);
4102 if (! TARGET_REG_NAMES)
4104 for (i = 0; i < current_frame_info.n_input_regs; i++)
4105 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4106 for (i = 0; i < current_frame_info.n_local_regs; i++)
4107 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4108 for (i = 0; i < current_frame_info.n_output_regs; i++)
4109 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4112 current_frame_info.initialized = 0;
4115 int
4116 ia64_dbx_register_number (int regno)
4118 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4119 from its home at loc79 to something inside the register frame. We
4120 must perform the same renumbering here for the debug info. */
4121 if (current_frame_info.r[reg_fp])
4123 if (regno == HARD_FRAME_POINTER_REGNUM)
4124 regno = current_frame_info.r[reg_fp];
4125 else if (regno == current_frame_info.r[reg_fp])
4126 regno = HARD_FRAME_POINTER_REGNUM;
4129 if (IN_REGNO_P (regno))
4130 return 32 + regno - IN_REG (0);
4131 else if (LOC_REGNO_P (regno))
4132 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4133 else if (OUT_REGNO_P (regno))
4134 return (32 + current_frame_info.n_input_regs
4135 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4136 else
4137 return regno;
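/* Editor's example of the renumbering above: with 2 input, 3 local and 1
   output register, in0/in1 get debug numbers 32/33, loc0..loc2 get 34..36
   and out0 gets 37; all other registers keep their hard register number.  */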
4140 /* Implement TARGET_TRAMPOLINE_INIT.
4142 The trampoline should set the static chain pointer to value placed
4143 into the trampoline and should branch to the specified routine.
4144 To make the normal indirect-subroutine calling convention work,
4145 the trampoline must look like a function descriptor; the first
4146 word being the target address and the second being the target's
4147 global pointer.
4149 We abuse the concept of a global pointer by arranging for it
4150 to point to the data we need to load. The complete trampoline
4151 has the following form:
4153 +-------------------+ \
4154 TRAMP: | __ia64_trampoline | |
4155 +-------------------+ > fake function descriptor
4156 | TRAMP+16 | |
4157 +-------------------+ /
4158 | target descriptor |
4159 +-------------------+
4160 | static link |
4161 +-------------------+
4164 static void
4165 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4167 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4168 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4170 /* The Intel assembler requires that the global __ia64_trampoline symbol
4171 be declared explicitly. */
4172 if (!TARGET_GNU_AS)
4174 static bool declared_ia64_trampoline = false;
4176 if (!declared_ia64_trampoline)
4178 declared_ia64_trampoline = true;
4179 (*targetm.asm_out.globalize_label) (asm_out_file,
4180 "__ia64_trampoline");
4184 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4185 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4186 fnaddr = convert_memory_address (Pmode, fnaddr);
4187 static_chain = convert_memory_address (Pmode, static_chain);
4189 /* Load up our iterator. */
4190 addr_reg = copy_to_reg (addr);
4191 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4193 /* The first two words are the fake descriptor:
4194 __ia64_trampoline, ADDR+16. */
4195 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4196 if (TARGET_ABI_OPEN_VMS)
4198 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4199 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4200 relocation against function symbols to make it identical to the
4201 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4202 strict ELF and dereference to get the bare code address. */
4203 rtx reg = gen_reg_rtx (Pmode);
4204 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4205 emit_move_insn (reg, tramp);
4206 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4207 tramp = reg;
4209 emit_move_insn (m_tramp, tramp);
4210 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4211 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4213 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4214 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4215 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4217 /* The third word is the target descriptor. */
4218 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4219 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4220 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4222 /* The fourth word is the static chain. */
4223 emit_move_insn (m_tramp, static_chain);
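/* Editor's sketch of the 32-byte trampoline filled in above, assuming
   Pmode == DImode (byte offsets from TRAMP):

      0:  &__ia64_trampoline    -- code word of the fake descriptor
      8:  TRAMP + 16            -- fake gp, pointing at the data below
     16:  target descriptor (FNADDR)
     24:  static chain                                                      */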
4226 /* Do any needed setup for a variadic function. CUM has not been updated
4227 for the last named argument which has type TYPE and mode MODE.
4229 We generate the actual spill instructions during prologue generation. */
4231 static void
4232 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4233 tree type, int * pretend_size,
4234 int second_time ATTRIBUTE_UNUSED)
4236 CUMULATIVE_ARGS next_cum = *cum;
4238 /* Skip the current argument. */
4239 ia64_function_arg_advance (&next_cum, mode, type, 1);
4241 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4243 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4244 *pretend_size = n * UNITS_PER_WORD;
4245 cfun->machine->n_varargs = n;
4249 /* Check whether TYPE is a homogeneous floating point aggregate. If
4250 it is, return the mode of the floating point type that appears
4251 in all leaves. If it is not, return VOIDmode.
4253 An aggregate is a homogeneous floating point aggregate if all
4254 fields/elements in it have the same floating point type (e.g.,
4255 SFmode). 128-bit quad-precision floats are excluded.
4257 Variable sized aggregates should never arrive here, since we should
4258 have already decided to pass them by reference. Top-level zero-sized
4259 aggregates are excluded because our parallels crash the middle-end. */
4261 static enum machine_mode
4262 hfa_element_mode (const_tree type, bool nested)
4264 enum machine_mode element_mode = VOIDmode;
4265 enum machine_mode mode;
4266 enum tree_code code = TREE_CODE (type);
4267 int know_element_mode = 0;
4268 tree t;
4270 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4271 return VOIDmode;
4273 switch (code)
4275 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4276 case BOOLEAN_TYPE: case POINTER_TYPE:
4277 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4278 case LANG_TYPE: case FUNCTION_TYPE:
4279 return VOIDmode;
4281 /* Fortran complex types are supposed to be HFAs, so we need to handle
4282 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4283 types though. */
4284 case COMPLEX_TYPE:
4285 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4286 && TYPE_MODE (type) != TCmode)
4287 return GET_MODE_INNER (TYPE_MODE (type));
4288 else
4289 return VOIDmode;
4291 case REAL_TYPE:
4292 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4293 mode if this is contained within an aggregate. */
4294 if (nested && TYPE_MODE (type) != TFmode)
4295 return TYPE_MODE (type);
4296 else
4297 return VOIDmode;
4299 case ARRAY_TYPE:
4300 return hfa_element_mode (TREE_TYPE (type), 1);
4302 case RECORD_TYPE:
4303 case UNION_TYPE:
4304 case QUAL_UNION_TYPE:
4305 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4307 if (TREE_CODE (t) != FIELD_DECL)
4308 continue;
4310 mode = hfa_element_mode (TREE_TYPE (t), 1);
4311 if (know_element_mode)
4313 if (mode != element_mode)
4314 return VOIDmode;
4316 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4317 return VOIDmode;
4318 else
4320 know_element_mode = 1;
4321 element_mode = mode;
4324 return element_mode;
4326 default:
4327 /* If we reach here, we probably have some front-end specific type
4328 that the backend doesn't know about. This can happen via the
4329 aggregate_value_p call in init_function_start. All we can do is
4330 ignore unknown tree types. */
4331 return VOIDmode;
4334 return VOIDmode;
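/* Editor's illustration of what the function above accepts as an HFA
   (sketch only; these types are not used by the port):

     struct rgb { float r, g, b; };        HFA, element mode SFmode
     struct pt  { double x, y; };          HFA, element mode DFmode
     struct mix { float x; double y; };    mixed leaf modes  -> VOIDmode
     _Complex double                       complex float     -> DFmode      */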
4337 /* Return the number of words required to hold a quantity of TYPE and MODE
4338 when passed as an argument. */
4339 static int
4340 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4342 int words;
4344 if (mode == BLKmode)
4345 words = int_size_in_bytes (type);
4346 else
4347 words = GET_MODE_SIZE (mode);
4349 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
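/* Editor's worked example of the rounding above, assuming the usual 8-byte
   UNITS_PER_WORD:

     DImode scalar          ->  8 bytes -> ( 8 + 7) / 8 = 1 argument word
     TFmode (quad float)    -> 16 bytes -> (16 + 7) / 8 = 2 argument words
     20-byte BLKmode struct -> 20 bytes -> (20 + 7) / 8 = 3 argument words  */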
4352 /* Return the number of registers that should be skipped so the current
4353 argument (described by TYPE and WORDS) will be properly aligned.
4355 Integer and float arguments larger than 8 bytes start at the next
4356 even boundary. Aggregates larger than 8 bytes start at the next
4357 even boundary if the aggregate has 16 byte alignment. Note that
4358 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4359 but are still to be aligned in registers.
4361 ??? The ABI does not specify how to handle aggregates with
4362 alignment from 9 to 15 bytes, or greater than 16. We handle them
4363 all as if they had 16 byte alignment. Such aggregates can occur
4364 only if gcc extensions are used. */
4365 static int
4366 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4367 const_tree type, int words)
4369 /* No registers are skipped on VMS. */
4370 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4371 return 0;
4373 if (type
4374 && TREE_CODE (type) != INTEGER_TYPE
4375 && TREE_CODE (type) != REAL_TYPE)
4376 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4377 else
4378 return words > 1;
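/* Editor's sketch of the skip rule above: a slot is skipped only when the
   preceding arguments occupy an odd number of slots and the target is not
   VMS.  With cum->words == 3, for instance:

     int                      words == 1  -> no skip, uses slot 3
     TFmode / __int128        words == 2  -> skip one, starts in slot 4
     16-byte aligned struct               -> skip one, starts in slot 4     */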
4381 /* Return rtx for register where argument is passed, or zero if it is passed
4382 on the stack. */
4383 /* ??? 128-bit quad-precision floats are always passed in general
4384 registers. */
4386 static rtx
4387 ia64_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4388 const_tree type, bool named, bool incoming)
4390 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4391 int words = ia64_function_arg_words (type, mode);
4392 int offset = ia64_function_arg_offset (cum, type, words);
4393 enum machine_mode hfa_mode = VOIDmode;
4395 /* For OPEN VMS, emit the instruction setting up the argument register here,
4396 when we know it will be grouped with the other argument-setup insns.
4397 This is not the conceptually best place to do this, but it is
4398 the easiest as we have convenient access to cumulative args info. */
4400 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4401 && named == 1)
4403 unsigned HOST_WIDE_INT regval = cum->words;
4404 int i;
4406 for (i = 0; i < 8; i++)
4407 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4409 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4410 GEN_INT (regval));
4413 /* If all argument slots are used, then it must go on the stack. */
4414 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4415 return 0;
4417 /* Check for and handle homogeneous FP aggregates. */
4418 if (type)
4419 hfa_mode = hfa_element_mode (type, 0);
4421 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4422 and unprototyped hfas are passed specially. */
4423 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4425 rtx loc[16];
4426 int i = 0;
4427 int fp_regs = cum->fp_regs;
4428 int int_regs = cum->words + offset;
4429 int hfa_size = GET_MODE_SIZE (hfa_mode);
4430 int byte_size;
4431 int args_byte_size;
4433 /* If prototyped, pass it in FR regs then GR regs.
4434 If not prototyped, pass it in both FR and GR regs.
4436 If this is an SFmode aggregate, then it is possible to run out of
4437 FR regs while GR regs are still left. In that case, we pass the
4438 remaining part in the GR regs. */
4440 /* Fill the FP regs. We do this always. We stop if we reach the end
4441 of the argument, the last FP register, or the last argument slot. */
4443 byte_size = ((mode == BLKmode)
4444 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4445 args_byte_size = int_regs * UNITS_PER_WORD;
4446 offset = 0;
4447 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4448 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4450 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4451 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4452 + fp_regs)),
4453 GEN_INT (offset));
4454 offset += hfa_size;
4455 args_byte_size += hfa_size;
4456 fp_regs++;
4459 /* If no prototype, then the whole thing must go in GR regs. */
4460 if (! cum->prototype)
4461 offset = 0;
4462 /* If this is an SFmode aggregate, then we might have some left over
4463 that needs to go in GR regs. */
4464 else if (byte_size != offset)
4465 int_regs += offset / UNITS_PER_WORD;
4467 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4469 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4471 enum machine_mode gr_mode = DImode;
4472 unsigned int gr_size;
4474 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4475 then this goes in a GR reg left adjusted/little endian, right
4476 adjusted/big endian. */
4477 /* ??? Currently this is handled wrong, because 4-byte hunks are
4478 always right adjusted/little endian. */
4479 if (offset & 0x4)
4480 gr_mode = SImode;
4481 /* If we have an even 4 byte hunk because the aggregate is a
4482 multiple of 4 bytes in size, then this goes in a GR reg right
4483 adjusted/little endian. */
4484 else if (byte_size - offset == 4)
4485 gr_mode = SImode;
4487 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4488 gen_rtx_REG (gr_mode, (basereg
4489 + int_regs)),
4490 GEN_INT (offset));
4492 gr_size = GET_MODE_SIZE (gr_mode);
4493 offset += gr_size;
4494 if (gr_size == UNITS_PER_WORD
4495 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4496 int_regs++;
4497 else if (gr_size > UNITS_PER_WORD)
4498 int_regs += gr_size / UNITS_PER_WORD;
4500 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4503 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4504 else if (TARGET_ABI_OPEN_VMS && named == 0)
4506 if (FLOAT_MODE_P (mode))
4507 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4508 else
4509 return gen_rtx_REG (mode, basereg + cum->words);
4512 /* Integral and aggregates go in general registers. If we have run out of
4513 FR registers, then FP values must also go in general registers. This can
4514 happen when we have a SFmode HFA. */
4515 else if (mode == TFmode || mode == TCmode
4516 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4518 int byte_size = ((mode == BLKmode)
4519 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4520 if (BYTES_BIG_ENDIAN
4521 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4522 && byte_size < UNITS_PER_WORD
4523 && byte_size > 0)
4525 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4526 gen_rtx_REG (DImode,
4527 (basereg + cum->words
4528 + offset)),
4529 const0_rtx);
4530 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4532 else
4533 return gen_rtx_REG (mode, basereg + cum->words + offset);
4537 /* If there is a prototype, then FP values go in a FR register when
4538 named, and in a GR register when unnamed. */
4539 else if (cum->prototype)
4541 if (named)
4542 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4543 /* In big-endian mode, an anonymous SFmode value must be represented
4544 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4545 the value into the high half of the general register. */
4546 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4547 return gen_rtx_PARALLEL (mode,
4548 gen_rtvec (1,
4549 gen_rtx_EXPR_LIST (VOIDmode,
4550 gen_rtx_REG (DImode, basereg + cum->words + offset),
4551 const0_rtx)));
4552 else
4553 return gen_rtx_REG (mode, basereg + cum->words + offset);
4555 /* If there is no prototype, then FP values go in both FR and GR
4556 registers. */
4557 else
4559 /* See comment above. */
4560 enum machine_mode inner_mode =
4561 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4563 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4564 gen_rtx_REG (mode, (FR_ARG_FIRST
4565 + cum->fp_regs)),
4566 const0_rtx);
4567 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4568 gen_rtx_REG (inner_mode,
4569 (basereg + cum->words
4570 + offset)),
4571 const0_rtx);
4573 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
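/* Editor's example of the HFA path above: a named, prototyped argument of
   type struct { float a, b, c; } with no slots used yet is returned as a
   PARALLEL of three SFmode FP argument registers at byte offsets 0, 4, 8,
   roughly

     (parallel [(expr_list (reg:SF f8)  (const_int 0))
                (expr_list (reg:SF f9)  (const_int 4))
                (expr_list (reg:SF f10) (const_int 8))])

   assuming FR_ARG_FIRST is f8 as in the standard ia64 calling convention.  */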
4577 /* Implement TARGET_FUNCTION_ARG target hook. */
4579 static rtx
4580 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4581 const_tree type, bool named)
4583 return ia64_function_arg_1 (cum, mode, type, named, false);
4586 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4588 static rtx
4589 ia64_function_incoming_arg (CUMULATIVE_ARGS *cum,
4590 enum machine_mode mode,
4591 const_tree type, bool named)
4593 return ia64_function_arg_1 (cum, mode, type, named, true);
4596 /* Return number of bytes, at the beginning of the argument, that must be
4597 put in registers. 0 if the argument is entirely in registers or entirely
4598 in memory. */
4600 static int
4601 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4602 tree type, bool named ATTRIBUTE_UNUSED)
4604 int words = ia64_function_arg_words (type, mode);
4605 int offset = ia64_function_arg_offset (cum, type, words);
4607 /* If all argument slots are used, then it must go on the stack. */
4608 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4609 return 0;
4611 /* It doesn't matter whether the argument goes in FR or GR regs. If
4612 it fits within the 8 argument slots, then it goes entirely in
4613 registers. If it extends past the last argument slot, then the rest
4614 goes on the stack. */
4616 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4617 return 0;
4619 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
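/* Editor's arithmetic example: with the 8 argument slots of this ABI, if
   cum->words == 6 (no alignment skip) and the next argument needs 4 words,
   only (8 - 6) * 8 = 16 bytes are passed in registers and the remaining
   16 bytes go on the stack.  */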
4622 /* Return ivms_arg_type based on machine_mode. */
4624 static enum ivms_arg_type
4625 ia64_arg_type (enum machine_mode mode)
4627 switch (mode)
4629 case SFmode:
4630 return FS;
4631 case DFmode:
4632 return FT;
4633 default:
4634 return I64;
4638 /* Update CUM to point after this argument. This is patterned after
4639 ia64_function_arg. */
4641 static void
4642 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4643 const_tree type, bool named)
4645 int words = ia64_function_arg_words (type, mode);
4646 int offset = ia64_function_arg_offset (cum, type, words);
4647 enum machine_mode hfa_mode = VOIDmode;
4649 /* If all arg slots are already full, then there is nothing to do. */
4650 if (cum->words >= MAX_ARGUMENT_SLOTS)
4652 cum->words += words + offset;
4653 return;
4656 cum->atypes[cum->words] = ia64_arg_type (mode);
4657 cum->words += words + offset;
4659 /* Check for and handle homogeneous FP aggregates. */
4660 if (type)
4661 hfa_mode = hfa_element_mode (type, 0);
4663 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4664 and unprototyped hfas are passed specially. */
4665 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4667 int fp_regs = cum->fp_regs;
4668 /* This is the original value of cum->words + offset. */
4669 int int_regs = cum->words - words;
4670 int hfa_size = GET_MODE_SIZE (hfa_mode);
4671 int byte_size;
4672 int args_byte_size;
4674 /* If prototyped, pass it in FR regs then GR regs.
4675 If not prototyped, pass it in both FR and GR regs.
4677 If this is an SFmode aggregate, then it is possible to run out of
4678 FR regs while GR regs are still left. In that case, we pass the
4679 remaining part in the GR regs. */
4681 /* Fill the FP regs. We do this always. We stop if we reach the end
4682 of the argument, the last FP register, or the last argument slot. */
4684 byte_size = ((mode == BLKmode)
4685 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4686 args_byte_size = int_regs * UNITS_PER_WORD;
4687 offset = 0;
4688 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4689 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4691 offset += hfa_size;
4692 args_byte_size += hfa_size;
4693 fp_regs++;
4696 cum->fp_regs = fp_regs;
4699 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4700 else if (TARGET_ABI_OPEN_VMS && named == 0)
4702 cum->int_regs = cum->words;
4703 cum->fp_regs = cum->words;
4706 /* Integral and aggregates go in general registers. So do TFmode FP values.
4707 If we have run out of FR registers, then other FP values must also go in
4708 general registers. This can happen when we have a SFmode HFA. */
4709 else if (mode == TFmode || mode == TCmode
4710 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4711 cum->int_regs = cum->words;
4713 /* If there is a prototype, then FP values go in a FR register when
4714 named, and in a GR register when unnamed. */
4715 else if (cum->prototype)
4717 if (! named)
4718 cum->int_regs = cum->words;
4719 else
4720 /* ??? Complex types should not reach here. */
4721 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4723 /* If there is no prototype, then FP values go in both FR and GR
4724 registers. */
4725 else
4727 /* ??? Complex types should not reach here. */
4728 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4729 cum->int_regs = cum->words;
4733 /* Arguments with alignment larger than 8 bytes start at the next even
4734 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4735 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4737 static unsigned int
4738 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4740 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4741 return PARM_BOUNDARY * 2;
4743 if (type)
4745 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4746 return PARM_BOUNDARY * 2;
4747 else
4748 return PARM_BOUNDARY;
4751 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4752 return PARM_BOUNDARY * 2;
4753 else
4754 return PARM_BOUNDARY;
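/* Editor's note, assuming the usual 64-bit PARM_BOUNDARY: int and double
   arguments keep the 64-bit boundary, while a 16-byte aligned aggregate, a
   TImode value, or TFmode on ILP32 HP-UX is bumped to a 128-bit boundary.  */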
4757 /* True if it is OK to do sibling call optimization for the specified
4758 call expression EXP. DECL will be the called function, or NULL if
4759 this is an indirect call. */
4760 static bool
4761 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4763 /* We can't perform a sibcall if the current function has the syscall_linkage
4764 attribute. */
4765 if (lookup_attribute ("syscall_linkage",
4766 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4767 return false;
4769 /* We must always return with our current GP. This means we can
4770 only sibcall to functions defined in the current module unless
4771 TARGET_CONST_GP is set to true. */
4772 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4776 /* Implement va_arg. */
4778 static tree
4779 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4780 gimple_seq *post_p)
4782 /* Variable sized types are passed by reference. */
4783 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4785 tree ptrtype = build_pointer_type (type);
4786 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4787 return build_va_arg_indirect_ref (addr);
4790 /* Aggregate arguments with alignment larger than 8 bytes start at
4791 the next even boundary. Integer and floating point arguments
4792 do so if they are larger than 8 bytes, whether or not they are
4793 also aligned larger than 8 bytes. */
4794 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4795 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4797 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4798 size_int (2 * UNITS_PER_WORD - 1));
4799 t = fold_convert (sizetype, t);
4800 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4801 size_int (-2 * UNITS_PER_WORD));
4802 t = fold_convert (TREE_TYPE (valist), t);
4803 gimplify_assign (unshare_expr (valist), t, pre_p);
4806 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
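/* Editor's sketch of the fix-up above, with UNITS_PER_WORD == 8: the two
   build2 calls compute  valist = (valist + 15) & -16,  i.e. they round the
   argument pointer up to the next 16-byte (even slot) boundary before the
   standard va_arg expansion runs.  */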
4809 /* Return true if the function return value is returned in memory, false
4810 if it is in a register. */
4812 static bool
4813 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4815 enum machine_mode mode;
4816 enum machine_mode hfa_mode;
4817 HOST_WIDE_INT byte_size;
4819 mode = TYPE_MODE (valtype);
4820 byte_size = GET_MODE_SIZE (mode);
4821 if (mode == BLKmode)
4823 byte_size = int_size_in_bytes (valtype);
4824 if (byte_size < 0)
4825 return true;
4828 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4830 hfa_mode = hfa_element_mode (valtype, 0);
4831 if (hfa_mode != VOIDmode)
4833 int hfa_size = GET_MODE_SIZE (hfa_mode);
4835 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4836 return true;
4837 else
4838 return false;
4840 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4841 return true;
4842 else
4843 return false;
4846 /* Return rtx for register that holds the function return value. */
4848 static rtx
4849 ia64_function_value (const_tree valtype,
4850 const_tree fn_decl_or_type,
4851 bool outgoing ATTRIBUTE_UNUSED)
4853 enum machine_mode mode;
4854 enum machine_mode hfa_mode;
4855 int unsignedp;
4856 const_tree func = fn_decl_or_type;
4858 if (fn_decl_or_type
4859 && !DECL_P (fn_decl_or_type))
4860 func = NULL;
4862 mode = TYPE_MODE (valtype);
4863 hfa_mode = hfa_element_mode (valtype, 0);
4865 if (hfa_mode != VOIDmode)
4867 rtx loc[8];
4868 int i;
4869 int hfa_size;
4870 int byte_size;
4871 int offset;
4873 hfa_size = GET_MODE_SIZE (hfa_mode);
4874 byte_size = ((mode == BLKmode)
4875 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4876 offset = 0;
4877 for (i = 0; offset < byte_size; i++)
4879 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4880 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4881 GEN_INT (offset));
4882 offset += hfa_size;
4884 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4886 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4887 return gen_rtx_REG (mode, FR_ARG_FIRST);
4888 else
4890 bool need_parallel = false;
4892 /* In big-endian mode, we need to manage the layout of aggregates
4893 in the registers so that we get the bits properly aligned in
4894 the highpart of the registers. */
4895 if (BYTES_BIG_ENDIAN
4896 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4897 need_parallel = true;
4899 /* Something like struct S { long double x; char a[0] } is not an
4900 HFA structure, and therefore doesn't go in fp registers. But
4901 the middle-end will give it XFmode anyway, and XFmode values
4902 don't normally fit in integer registers. So we need to smuggle
4903 the value inside a parallel. */
4904 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4905 need_parallel = true;
4907 if (need_parallel)
4909 rtx loc[8];
4910 int offset;
4911 int bytesize;
4912 int i;
4914 offset = 0;
4915 bytesize = int_size_in_bytes (valtype);
4916 /* An empty PARALLEL is invalid here, but the return value
4917 doesn't matter for empty structs. */
4918 if (bytesize == 0)
4919 return gen_rtx_REG (mode, GR_RET_FIRST);
4920 for (i = 0; offset < bytesize; i++)
4922 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4923 gen_rtx_REG (DImode,
4924 GR_RET_FIRST + i),
4925 GEN_INT (offset));
4926 offset += UNITS_PER_WORD;
4928 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4931 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4932 func ? TREE_TYPE (func) : NULL_TREE,
4933 true);
4935 return gen_rtx_REG (mode, GR_RET_FIRST);
4939 /* Worker function for TARGET_LIBCALL_VALUE. */
4941 static rtx
4942 ia64_libcall_value (enum machine_mode mode,
4943 const_rtx fun ATTRIBUTE_UNUSED)
4945 return gen_rtx_REG (mode,
4946 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4947 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4948 && (mode) != TFmode)
4949 ? FR_RET_FIRST : GR_RET_FIRST));
4952 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4954 static bool
4955 ia64_function_value_regno_p (const unsigned int regno)
4957 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4958 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4961 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4962 We need to emit DTP-relative relocations. */
4964 static void
4965 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4967 gcc_assert (size == 4 || size == 8);
4968 if (size == 4)
4969 fputs ("\tdata4.ua\t@dtprel(", file);
4970 else
4971 fputs ("\tdata8.ua\t@dtprel(", file);
4972 output_addr_const (file, x);
4973 fputs (")", file);
4976 /* Print a memory address as an operand to reference that memory location. */
4978 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4979 also call this from ia64_print_operand for memory addresses. */
4981 void
4982 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4983 rtx address ATTRIBUTE_UNUSED)
4987 /* Print an operand to an assembler instruction.
4988 C Swap and print a comparison operator.
4989 D Print an FP comparison operator.
4990 E Print 32 - constant, for SImode shifts as extract.
4991 e Print 64 - constant, for DImode rotates.
4992 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4993 a floating point register emitted normally.
4994 G A floating point constant.
4995 I Invert a predicate register by adding 1.
4996 J Select the proper predicate register for a condition.
4997 j Select the inverse predicate register for a condition.
4998 O Append .acq for volatile load.
4999 P Postincrement of a MEM.
5000 Q Append .rel for volatile store.
5001 R Print .s .d or nothing for a single, double or no truncation.
5002 S Shift amount for shladd instruction.
5003 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5004 for Intel assembler.
5005 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5006 for Intel assembler.
5007 X A pair of floating point registers.
5008 r Print register name, or constant 0 as r0. HP compatibility for
5009 Linux kernel.
5010 v Print vector constant value as an 8-byte integer value. */
5012 void
5013 ia64_print_operand (FILE * file, rtx x, int code)
5015 const char *str;
5017 switch (code)
5019 case 0:
5020 /* Handled below. */
5021 break;
5023 case 'C':
5025 enum rtx_code c = swap_condition (GET_CODE (x));
5026 fputs (GET_RTX_NAME (c), file);
5027 return;
5030 case 'D':
5031 switch (GET_CODE (x))
5033 case NE:
5034 str = "neq";
5035 break;
5036 case UNORDERED:
5037 str = "unord";
5038 break;
5039 case ORDERED:
5040 str = "ord";
5041 break;
5042 case UNLT:
5043 str = "nge";
5044 break;
5045 case UNLE:
5046 str = "ngt";
5047 break;
5048 case UNGT:
5049 str = "nle";
5050 break;
5051 case UNGE:
5052 str = "nlt";
5053 break;
5054 default:
5055 str = GET_RTX_NAME (GET_CODE (x));
5056 break;
5058 fputs (str, file);
5059 return;
5061 case 'E':
5062 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5063 return;
5065 case 'e':
5066 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5067 return;
5069 case 'F':
5070 if (x == CONST0_RTX (GET_MODE (x)))
5071 str = reg_names [FR_REG (0)];
5072 else if (x == CONST1_RTX (GET_MODE (x)))
5073 str = reg_names [FR_REG (1)];
5074 else
5076 gcc_assert (GET_CODE (x) == REG);
5077 str = reg_names [REGNO (x)];
5079 fputs (str, file);
5080 return;
5082 case 'G':
5084 long val[4];
5085 REAL_VALUE_TYPE rv;
5086 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5087 real_to_target (val, &rv, GET_MODE (x));
5088 if (GET_MODE (x) == SFmode)
5089 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5090 else if (GET_MODE (x) == DFmode)
5091 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5092 & 0xffffffff,
5093 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5094 & 0xffffffff);
5095 else
5096 output_operand_lossage ("invalid %%G mode");
5098 return;
5100 case 'I':
5101 fputs (reg_names [REGNO (x) + 1], file);
5102 return;
5104 case 'J':
5105 case 'j':
5107 unsigned int regno = REGNO (XEXP (x, 0));
5108 if (GET_CODE (x) == EQ)
5109 regno += 1;
5110 if (code == 'j')
5111 regno ^= 1;
5112 fputs (reg_names [regno], file);
5114 return;
5116 case 'O':
5117 if (MEM_VOLATILE_P (x))
5118 fputs(".acq", file);
5119 return;
5121 case 'P':
5123 HOST_WIDE_INT value;
5125 switch (GET_CODE (XEXP (x, 0)))
5127 default:
5128 return;
5130 case POST_MODIFY:
5131 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5132 if (GET_CODE (x) == CONST_INT)
5133 value = INTVAL (x);
5134 else
5136 gcc_assert (GET_CODE (x) == REG);
5137 fprintf (file, ", %s", reg_names[REGNO (x)]);
5138 return;
5140 break;
5142 case POST_INC:
5143 value = GET_MODE_SIZE (GET_MODE (x));
5144 break;
5146 case POST_DEC:
5147 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5148 break;
5151 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5152 return;
5155 case 'Q':
5156 if (MEM_VOLATILE_P (x))
5157 fputs(".rel", file);
5158 return;
5160 case 'R':
5161 if (x == CONST0_RTX (GET_MODE (x)))
5162 fputs(".s", file);
5163 else if (x == CONST1_RTX (GET_MODE (x)))
5164 fputs(".d", file);
5165 else if (x == CONST2_RTX (GET_MODE (x)))
5167 else
5168 output_operand_lossage ("invalid %%R value");
5169 return;
5171 case 'S':
5172 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5173 return;
5175 case 'T':
5176 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5178 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5179 return;
5181 break;
5183 case 'U':
5184 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5186 const char *prefix = "0x";
5187 if (INTVAL (x) & 0x80000000)
5189 fprintf (file, "0xffffffff");
5190 prefix = "";
5192 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5193 return;
5195 break;
5197 case 'X':
5199 unsigned int regno = REGNO (x);
5200 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5202 return;
5204 case 'r':
5205 /* If this operand is the constant zero, write it as register zero.
5206 Any register, zero, or CONST_INT value is OK here. */
5207 if (GET_CODE (x) == REG)
5208 fputs (reg_names[REGNO (x)], file);
5209 else if (x == CONST0_RTX (GET_MODE (x)))
5210 fputs ("r0", file);
5211 else if (GET_CODE (x) == CONST_INT)
5212 output_addr_const (file, x);
5213 else
5214 output_operand_lossage ("invalid %%r value");
5215 return;
5217 case 'v':
5218 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5219 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5220 break;
5222 case '+':
5224 const char *which;
5226 /* For conditional branches, returns or calls, substitute
5227 sptk, dptk, dpnt, or spnt for %s. */
5228 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5229 if (x)
5231 int pred_val = INTVAL (XEXP (x, 0));
5233 /* Guess top and bottom 2% statically predicted. */
5234 if (pred_val < REG_BR_PROB_BASE / 50
5235 && br_prob_note_reliable_p (x))
5236 which = ".spnt";
5237 else if (pred_val < REG_BR_PROB_BASE / 2)
5238 which = ".dpnt";
5239 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5240 || !br_prob_note_reliable_p (x))
5241 which = ".dptk";
5242 else
5243 which = ".sptk";
5245 else if (GET_CODE (current_output_insn) == CALL_INSN)
5246 which = ".sptk";
5247 else
5248 which = ".dptk";
5250 fputs (which, file);
5251 return;
5254 case ',':
5255 x = current_insn_predicate;
5256 if (x)
5258 unsigned int regno = REGNO (XEXP (x, 0));
5259 if (GET_CODE (x) == EQ)
5260 regno += 1;
5261 fprintf (file, "(%s) ", reg_names [regno]);
5263 return;
5265 default:
5266 output_operand_lossage ("ia64_print_operand: unknown code");
5267 return;
5270 switch (GET_CODE (x))
5272 /* This happens for the spill/restore instructions. */
5273 case POST_INC:
5274 case POST_DEC:
5275 case POST_MODIFY:
5276 x = XEXP (x, 0);
5277 /* ... fall through ... */
5279 case REG:
5280 fputs (reg_names [REGNO (x)], file);
5281 break;
5283 case MEM:
5285 rtx addr = XEXP (x, 0);
5286 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5287 addr = XEXP (addr, 0);
5288 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5289 break;
5292 default:
5293 output_addr_const (file, x);
5294 break;
5297 return;
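/* Editor's examples for a few of the codes above (illustrative only):

     %,  with predicate p6 active prints     "(p6) "
     %P  on a POST_INC DImode MEM prints     ", 8"
     %O / %Q on a volatile MEM append        ".acq" / ".rel"
     %r  on (const_int 0) prints             "r0"                           */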
5300 /* Compute a (partial) cost for rtx X. Return true if the complete
5301 cost has been computed, and false if subexpressions should be
5302 scanned. In either case, *TOTAL contains the cost result. */
5303 /* ??? This is incomplete. */
5305 static bool
5306 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
5307 bool speed ATTRIBUTE_UNUSED)
5309 switch (code)
5311 case CONST_INT:
5312 switch (outer_code)
5314 case SET:
5315 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5316 return true;
5317 case PLUS:
5318 if (satisfies_constraint_I (x))
5319 *total = 0;
5320 else if (satisfies_constraint_J (x))
5321 *total = 1;
5322 else
5323 *total = COSTS_N_INSNS (1);
5324 return true;
5325 default:
5326 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5327 *total = 0;
5328 else
5329 *total = COSTS_N_INSNS (1);
5330 return true;
5333 case CONST_DOUBLE:
5334 *total = COSTS_N_INSNS (1);
5335 return true;
5337 case CONST:
5338 case SYMBOL_REF:
5339 case LABEL_REF:
5340 *total = COSTS_N_INSNS (3);
5341 return true;
5343 case FMA:
5344 *total = COSTS_N_INSNS (4);
5345 return true;
5347 case MULT:
5348 /* For multiplies wider than HImode, we have to go to the FPU,
5349 which normally involves copies. Plus there's the latency
5350 of the multiply itself, and the latency of the instructions to
5351 transfer integer regs to FP regs. */
5352 if (FLOAT_MODE_P (GET_MODE (x)))
5353 *total = COSTS_N_INSNS (4);
5354 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5355 *total = COSTS_N_INSNS (10);
5356 else
5357 *total = COSTS_N_INSNS (2);
5358 return true;
5360 case PLUS:
5361 case MINUS:
5362 if (FLOAT_MODE_P (GET_MODE (x)))
5364 *total = COSTS_N_INSNS (4);
5365 return true;
5367 /* FALLTHRU */
5369 case ASHIFT:
5370 case ASHIFTRT:
5371 case LSHIFTRT:
5372 *total = COSTS_N_INSNS (1);
5373 return true;
5375 case DIV:
5376 case UDIV:
5377 case MOD:
5378 case UMOD:
5379 /* We make divide expensive, so that divide-by-constant will be
5380 optimized to a multiply. */
5381 *total = COSTS_N_INSNS (60);
5382 return true;
5384 default:
5385 return false;
5389 /* Calculate the cost of moving data from a register in class FROM to
5390 one in class TO, using MODE. */
5392 static int
5393 ia64_register_move_cost (enum machine_mode mode, reg_class_t from_i,
5394 reg_class_t to_i)
5396 enum reg_class from = (enum reg_class) from_i;
5397 enum reg_class to = (enum reg_class) to_i;
5399 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5400 if (to == ADDL_REGS)
5401 to = GR_REGS;
5402 if (from == ADDL_REGS)
5403 from = GR_REGS;
5405 /* All costs are symmetric, so reduce cases by putting the
5406 lower number class as the destination. */
5407 if (from < to)
5409 enum reg_class tmp = to;
5410 to = from, from = tmp;
5413 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5414 so that we get secondary memory reloads. Between FR_REGS,
5415 we have to make this at least as expensive as memory_move_cost
5416 to avoid spectacularly poor register class preferencing. */
5417 if (mode == XFmode || mode == RFmode)
5419 if (to != GR_REGS || from != GR_REGS)
5420 return memory_move_cost (mode, to, false);
5421 else
5422 return 3;
5425 switch (to)
5427 case PR_REGS:
5428 /* Moving between PR registers takes two insns. */
5429 if (from == PR_REGS)
5430 return 3;
5431 /* Moving between PR and anything but GR is impossible. */
5432 if (from != GR_REGS)
5433 return memory_move_cost (mode, to, false);
5434 break;
5436 case BR_REGS:
5437 /* Moving between BR and anything but GR is impossible. */
5438 if (from != GR_REGS && from != GR_AND_BR_REGS)
5439 return memory_move_cost (mode, to, false);
5440 break;
5442 case AR_I_REGS:
5443 case AR_M_REGS:
5444 /* Moving between AR and anything but GR is impossible. */
5445 if (from != GR_REGS)
5446 return memory_move_cost (mode, to, false);
5447 break;
5449 case GR_REGS:
5450 case FR_REGS:
5451 case FP_REGS:
5452 case GR_AND_FR_REGS:
5453 case GR_AND_BR_REGS:
5454 case ALL_REGS:
5455 break;
5457 default:
5458 gcc_unreachable ();
5461 return 2;
5464 /* Calculate the cost of moving data of MODE from a register to or from
5465 memory. */
5467 static int
5468 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5469 reg_class_t rclass,
5470 bool in ATTRIBUTE_UNUSED)
5472 if (rclass == GENERAL_REGS
5473 || rclass == FR_REGS
5474 || rclass == FP_REGS
5475 || rclass == GR_AND_FR_REGS)
5476 return 4;
5477 else
5478 return 10;
5481 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5482 on RCLASS to use when copying X into that class. */
5484 static reg_class_t
5485 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5487 switch (rclass)
5489 case FR_REGS:
5490 case FP_REGS:
5491 /* Don't allow volatile mem reloads into floating point registers.
5492 This is defined to force reload to choose the r/m case instead
5493 of the f/f case when reloading (set (reg fX) (mem/v)). */
5494 if (MEM_P (x) && MEM_VOLATILE_P (x))
5495 return NO_REGS;
5497 /* Force all unrecognized constants into the constant pool. */
5498 if (CONSTANT_P (x))
5499 return NO_REGS;
5500 break;
5502 case AR_M_REGS:
5503 case AR_I_REGS:
5504 if (!OBJECT_P (x))
5505 return NO_REGS;
5506 break;
5508 default:
5509 break;
5512 return rclass;
5515 /* This function returns the register class required for a secondary
5516 register when copying between one of the registers in RCLASS, and X,
5517 using MODE. A return value of NO_REGS means that no secondary register
5518 is required. */
5520 enum reg_class
5521 ia64_secondary_reload_class (enum reg_class rclass,
5522 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5524 int regno = -1;
5526 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5527 regno = true_regnum (x);
5529 switch (rclass)
5531 case BR_REGS:
5532 case AR_M_REGS:
5533 case AR_I_REGS:
5534 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5535 interaction. We end up with two pseudos with overlapping lifetimes
5536 both of which are equiv to the same constant, and both which need
5537 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5538 changes depending on the path length, which means the qty_first_reg
5539 check in make_regs_eqv can give different answers at different times.
5540 At some point I'll probably need a reload_indi pattern to handle
5541 this.
5543 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5544 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5545 non-general registers for good measure. */
5546 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5547 return GR_REGS;
5549 /* This is needed if a pseudo used as a call_operand gets spilled to a
5550 stack slot. */
5551 if (GET_CODE (x) == MEM)
5552 return GR_REGS;
5553 break;
5555 case FR_REGS:
5556 case FP_REGS:
5557 /* Need to go through general registers to get to other class regs. */
5558 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5559 return GR_REGS;
5561 /* This can happen when a paradoxical subreg is an operand to the
5562 muldi3 pattern. */
5563 /* ??? This shouldn't be necessary after instruction scheduling is
5564 enabled, because paradoxical subregs are not accepted by
5565 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5566 stop the paradoxical subreg stupidity in the *_operand functions
5567 in recog.c. */
5568 if (GET_CODE (x) == MEM
5569 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5570 || GET_MODE (x) == QImode))
5571 return GR_REGS;
5573 /* This can happen because of the ior/and/etc patterns that accept FP
5574 registers as operands. If the third operand is a constant, then it
5575 needs to be reloaded into a FP register. */
5576 if (GET_CODE (x) == CONST_INT)
5577 return GR_REGS;
5579 /* This can happen because of register elimination in a muldi3 insn.
5580 E.g. `26107 * (unsigned long)&u'. */
5581 if (GET_CODE (x) == PLUS)
5582 return GR_REGS;
5583 break;
5585 case PR_REGS:
5586 /* ??? This happens if we cse/gcse a BImode value across a call,
5587 and the function has a nonlocal goto. This is because global
5588 does not allocate call crossing pseudos to hard registers when
5589 crtl->has_nonlocal_goto is true. This is relatively
5590 common for C++ programs that use exceptions. To reproduce,
5591 return NO_REGS and compile libstdc++. */
5592 if (GET_CODE (x) == MEM)
5593 return GR_REGS;
5595 /* This can happen when we take a BImode subreg of a DImode value,
5596 and that DImode value winds up in some non-GR register. */
5597 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5598 return GR_REGS;
5599 break;
5601 default:
5602 break;
5605 return NO_REGS;
5609 /* Implement targetm.unspec_may_trap_p hook. */
5610 static int
5611 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5613 if (GET_CODE (x) == UNSPEC)
5615 switch (XINT (x, 1))
5617 case UNSPEC_LDA:
5618 case UNSPEC_LDS:
5619 case UNSPEC_LDSA:
5620 case UNSPEC_LDCCLR:
5621 case UNSPEC_CHKACLR:
5622 case UNSPEC_CHKS:
5623 /* These unspecs are just wrappers. */
5624 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5628 return default_unspec_may_trap_p (x, flags);
5632 /* Parse the -mfixed-range= option string. */
5634 static void
5635 fix_range (const char *const_str)
5637 int i, first, last;
5638 char *str, *dash, *comma;
5640 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5641 REG2 are either register names or register numbers. The effect
5642 of this option is to mark the registers in the range from REG1 to
5643 REG2 as ``fixed'' so they won't be used by the compiler. This is
5644 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
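/* For example, -mfixed-range=f32-f127 reserves the upper floating-point
   registers, and several ranges can be given separated by commas, as in
   -mfixed-range=f32-f127,f12-f15 (the second range here is purely
   illustrative).  */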
5646 i = strlen (const_str);
5647 str = (char *) alloca (i + 1);
5648 memcpy (str, const_str, i + 1);
5650 while (1)
5652 dash = strchr (str, '-');
5653 if (!dash)
5655 warning (0, "value of -mfixed-range must have form REG1-REG2");
5656 return;
5658 *dash = '\0';
5660 comma = strchr (dash + 1, ',');
5661 if (comma)
5662 *comma = '\0';
5664 first = decode_reg_name (str);
5665 if (first < 0)
5667 warning (0, "unknown register name: %s", str);
5668 return;
5671 last = decode_reg_name (dash + 1);
5672 if (last < 0)
5674 warning (0, "unknown register name: %s", dash + 1);
5675 return;
5678 *dash = '-';
5680 if (first > last)
5682 warning (0, "%s-%s is an empty range", str, dash + 1);
5683 return;
5686 for (i = first; i <= last; ++i)
5687 fixed_regs[i] = call_used_regs[i] = 1;
5689 if (!comma)
5690 break;
5692 *comma = ',';
5693 str = comma + 1;
5697 /* Implement TARGET_HANDLE_OPTION. */
5699 static bool
5700 ia64_handle_option (size_t code, const char *arg, int value)
5702 switch (code)
5704 case OPT_mfixed_range_:
5705 fix_range (arg);
5706 return true;
5708 case OPT_mtls_size_:
5709 if (value != 14 && value != 22 && value != 64)
5710 error ("bad value %<%s%> for -mtls-size= switch", arg);
5711 return true;
5713 case OPT_mtune_:
5715 static struct pta
5717 const char *name; /* processor name or nickname. */
5718 enum processor_type processor;
5720 const processor_alias_table[] =
5722 {"itanium2", PROCESSOR_ITANIUM2},
5723 {"mckinley", PROCESSOR_ITANIUM2},
5725 int const pta_size = ARRAY_SIZE (processor_alias_table);
5726 int i;
5728 for (i = 0; i < pta_size; i++)
5729 if (!strcmp (arg, processor_alias_table[i].name))
5731 ia64_tune = processor_alias_table[i].processor;
5732 break;
5734 if (i == pta_size)
5735 error ("bad value %<%s%> for -mtune= switch", arg);
5736 return true;
5739 default:
5740 return true;
5744 /* Implement TARGET_OPTION_OVERRIDE. */
5746 static void
5747 ia64_option_override (void)
5749 if (TARGET_AUTO_PIC)
5750 target_flags |= MASK_CONST_GP;
5752 /* Numerous experiments show that IRA-based loop pressure
5753 calculation works better for RTL loop invariant motion on targets
5754 with enough (>= 32) registers. It is an expensive optimization,
5755 so it is enabled only when optimizing for peak performance. */
5756 if (optimize >= 3)
5757 flag_ira_loop_pressure = 1;
5760 ia64_section_threshold = (global_options_set.x_g_switch_value
5761 ? g_switch_value
5762 : IA64_DEFAULT_GVALUE);
5764 init_machine_status = ia64_init_machine_status;
5766 if (align_functions <= 0)
5767 align_functions = 64;
5768 if (align_loops <= 0)
5769 align_loops = 32;
5770 if (TARGET_ABI_OPEN_VMS)
5771 flag_no_common = 1;
5773 ia64_override_options_after_change();
5776 /* Implement targetm.override_options_after_change. */
5778 static void
5779 ia64_override_options_after_change (void)
5781 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5782 flag_schedule_insns_after_reload = 0;
5784 if (optimize >= 3
5785 && !global_options_set.x_flag_selective_scheduling
5786 && !global_options_set.x_flag_selective_scheduling2)
5788 flag_selective_scheduling2 = 1;
5789 flag_sel_sched_pipelining = 1;
5791 if (mflag_sched_control_spec == 2)
5793 /* Control speculation is on by default for the selective scheduler,
5794 but not for the Haifa scheduler. */
5795 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5797 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5799 /* FIXME: remove this once breaking auto-inc insns apart is
5800 implemented as a transformation. */
5801 flag_auto_inc_dec = 0;
5805 /* Initialize the record of emitted frame related registers. */
5807 void ia64_init_expanders (void)
5809 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5812 static struct machine_function *
5813 ia64_init_machine_status (void)
5815 return ggc_alloc_cleared_machine_function ();
5818 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5819 static enum attr_type ia64_safe_type (rtx);
5821 static enum attr_itanium_class
5822 ia64_safe_itanium_class (rtx insn)
5824 if (recog_memoized (insn) >= 0)
5825 return get_attr_itanium_class (insn);
5826 else if (DEBUG_INSN_P (insn))
5827 return ITANIUM_CLASS_IGNORE;
5828 else
5829 return ITANIUM_CLASS_UNKNOWN;
5832 static enum attr_type
5833 ia64_safe_type (rtx insn)
5835 if (recog_memoized (insn) >= 0)
5836 return get_attr_type (insn);
5837 else
5838 return TYPE_UNKNOWN;
5841 /* The following collection of routines emits instruction group stop bits as
5842 necessary to avoid dependencies. */
5844 /* Need to track some additional registers as far as serialization is
5845 concerned so we can properly handle br.call and br.ret. We could
5846 make these registers visible to gcc, but since these registers are
5847 never explicitly used in gcc generated code, it seems wasteful to
5848 do so (plus it would make the call and return patterns needlessly
5849 complex). */
5850 #define REG_RP (BR_REG (0))
5851 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5852 /* This is used for volatile asms which may require a stop bit immediately
5853 before and after them. */
5854 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5855 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5856 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
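/* Each of the 64 bits of ar.unat is tracked as its own pseudo register
   (AR_UNAT_BIT_0 .. AR_UNAT_BIT_0 + 63), since gr spills and restores
   only touch the single UNAT bit selected by the spill address.  */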
5858 /* For each register, we keep track of how it has been written in the
5859 current instruction group.
5861 If a register is written unconditionally (no qualifying predicate),
5862 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5864 If a register is written if its qualifying predicate P is true, we
5865 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5866 may be written again by the complement of P (P^1) and when this happens,
5867 WRITE_COUNT gets set to 2.
5869 The result of this is that whenever an insn attempts to write a register
5870 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5872 If a predicate register is written by a floating-point insn, we set
5873 WRITTEN_BY_FP to true.
5875 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5876 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
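/* A purely illustrative example:

       mov r14 = 1      // unconditional write: WRITE_COUNT (r14) becomes 2
       mov r14 = 2      // r14 already has WRITE_COUNT == 2, so a stop bit
                        // (;;) must be emitted before this insn

   i.e. a second write to a register already written unconditionally in
   the current instruction group forces an insn group barrier.  */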
5878 #if GCC_VERSION >= 4000
5879 #define RWS_FIELD_TYPE __extension__ unsigned short
5880 #else
5881 #define RWS_FIELD_TYPE unsigned int
5882 #endif
5883 struct reg_write_state
5885 RWS_FIELD_TYPE write_count : 2;
5886 RWS_FIELD_TYPE first_pred : 10;
5887 RWS_FIELD_TYPE written_by_fp : 1;
5888 RWS_FIELD_TYPE written_by_and : 1;
5889 RWS_FIELD_TYPE written_by_or : 1;
5892 /* Cumulative info for the current instruction group. */
5893 struct reg_write_state rws_sum[NUM_REGS];
5894 #ifdef ENABLE_CHECKING
5895 /* Bitmap whether a register has been written in the current insn. */
5896 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5897 / HOST_BITS_PER_WIDEST_FAST_INT];
5899 static inline void
5900 rws_insn_set (int regno)
5902 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5903 SET_HARD_REG_BIT (rws_insn, regno);
5906 static inline int
5907 rws_insn_test (int regno)
5909 return TEST_HARD_REG_BIT (rws_insn, regno);
5911 #else
5912 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5913 unsigned char rws_insn[2];
5915 static inline void
5916 rws_insn_set (int regno)
5918 if (regno == REG_AR_CFM)
5919 rws_insn[0] = 1;
5920 else if (regno == REG_VOLATILE)
5921 rws_insn[1] = 1;
5924 static inline int
5925 rws_insn_test (int regno)
5927 if (regno == REG_AR_CFM)
5928 return rws_insn[0];
5929 if (regno == REG_VOLATILE)
5930 return rws_insn[1];
5931 return 0;
5933 #endif
5935 /* Indicates whether this is the first instruction after a stop bit,
5936 in which case we don't need another stop bit. Without this,
5937 ia64_variable_issue will die when scheduling an alloc. */
5938 static int first_instruction;
5940 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5941 RTL for one instruction. */
5942 struct reg_flags
5944 unsigned int is_write : 1; /* Is register being written? */
5945 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5946 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5947 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5948 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5949 unsigned int is_sibcall : 1; /* Is this a sibling call (vs. a normal call)? */
5952 static void rws_update (int, struct reg_flags, int);
5953 static int rws_access_regno (int, struct reg_flags, int);
5954 static int rws_access_reg (rtx, struct reg_flags, int);
5955 static void update_set_flags (rtx, struct reg_flags *);
5956 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5957 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5958 static void init_insn_group_barriers (void);
5959 static int group_barrier_needed (rtx);
5960 static int safe_group_barrier_needed (rtx);
5961 static int in_safe_group_barrier;
5963 /* Update *RWS for REGNO, which is being written by the current instruction,
5964 with predicate PRED, and associated register flags in FLAGS. */
5966 static void
5967 rws_update (int regno, struct reg_flags flags, int pred)
5969 if (pred)
5970 rws_sum[regno].write_count++;
5971 else
5972 rws_sum[regno].write_count = 2;
5973 rws_sum[regno].written_by_fp |= flags.is_fp;
5974 /* ??? Not tracking and/or across differing predicates. */
5975 rws_sum[regno].written_by_and = flags.is_and;
5976 rws_sum[regno].written_by_or = flags.is_or;
5977 rws_sum[regno].first_pred = pred;
5980 /* Handle an access to register REGNO of type FLAGS using predicate register
5981 PRED. Update rws_sum array. Return 1 if this access creates
5982 a dependency with an earlier instruction in the same group. */
5984 static int
5985 rws_access_regno (int regno, struct reg_flags flags, int pred)
5987 int need_barrier = 0;
5989 gcc_assert (regno < NUM_REGS);
5991 if (! PR_REGNO_P (regno))
5992 flags.is_and = flags.is_or = 0;
5994 if (flags.is_write)
5996 int write_count;
5998 rws_insn_set (regno);
5999 write_count = rws_sum[regno].write_count;
6001 switch (write_count)
6003 case 0:
6004 /* The register has not been written yet. */
6005 if (!in_safe_group_barrier)
6006 rws_update (regno, flags, pred);
6007 break;
6009 case 1:
6010 /* The register has been written via a predicate. Treat
6011 it like an unconditional write and do not try to check
6012 for a complementary predicate register in an earlier write. */
6013 if (flags.is_and && rws_sum[regno].written_by_and)
6015 else if (flags.is_or && rws_sum[regno].written_by_or)
6017 else
6018 need_barrier = 1;
6019 if (!in_safe_group_barrier)
6020 rws_update (regno, flags, pred);
6021 break;
6023 case 2:
6024 /* The register has been unconditionally written already. We
6025 need a barrier. */
6026 if (flags.is_and && rws_sum[regno].written_by_and)
6028 else if (flags.is_or && rws_sum[regno].written_by_or)
6030 else
6031 need_barrier = 1;
6032 if (!in_safe_group_barrier)
6034 rws_sum[regno].written_by_and = flags.is_and;
6035 rws_sum[regno].written_by_or = flags.is_or;
6037 break;
6039 default:
6040 gcc_unreachable ();
6043 else
6045 if (flags.is_branch)
6047 /* Branches have several RAW exceptions that allow us to avoid
6048 barriers. */
6050 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6051 /* RAW dependencies on branch regs are permissible as long
6052 as the writer is a non-branch instruction. Since we
6053 never generate code that uses a branch register written
6054 by a branch instruction, handling this case is
6055 easy. */
6056 return 0;
6058 if (REGNO_REG_CLASS (regno) == PR_REGS
6059 && ! rws_sum[regno].written_by_fp)
6060 /* The predicates of a branch are available within the
6061 same insn group as long as the predicate was written by
6062 something other than a floating-point instruction. */
6063 return 0;
6066 if (flags.is_and && rws_sum[regno].written_by_and)
6067 return 0;
6068 if (flags.is_or && rws_sum[regno].written_by_or)
6069 return 0;
6071 switch (rws_sum[regno].write_count)
6073 case 0:
6074 /* The register has not been written yet. */
6075 break;
6077 case 1:
6078 /* The register has been written via a predicate, assume we
6079 need a barrier (don't check for complementary regs). */
6080 need_barrier = 1;
6081 break;
6083 case 2:
6084 /* The register has been unconditionally written already. We
6085 need a barrier. */
6086 need_barrier = 1;
6087 break;
6089 default:
6090 gcc_unreachable ();
6094 return need_barrier;
6097 static int
6098 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6100 int regno = REGNO (reg);
6101 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6103 if (n == 1)
6104 return rws_access_regno (regno, flags, pred);
6105 else
6107 int need_barrier = 0;
6108 while (--n >= 0)
6109 need_barrier |= rws_access_regno (regno + n, flags, pred);
6110 return need_barrier;
6114 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6115 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6117 static void
6118 update_set_flags (rtx x, struct reg_flags *pflags)
6120 rtx src = SET_SRC (x);
6122 switch (GET_CODE (src))
6124 case CALL:
6125 return;
6127 case IF_THEN_ELSE:
6128 /* There are four cases here:
6129 (1) The destination is (pc), in which case this is a branch,
6130 nothing here applies.
6131 (2) The destination is ar.lc, in which case this is a
6132 doloop_end_internal.
6133 (3) The destination is an fp register, in which case this is
6134 an fselect instruction.
6135 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6136 this is a check load.
6137 In all cases, nothing we do in this function applies. */
6138 return;
6140 default:
6141 if (COMPARISON_P (src)
6142 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6143 /* Set pflags->is_fp to 1 so that we know we're dealing
6144 with a floating point comparison when processing the
6145 destination of the SET. */
6146 pflags->is_fp = 1;
6148 /* Discover if this is a parallel comparison. We only handle
6149 and.orcm and or.andcm at present, since we must retain a
6150 strict inverse on the predicate pair. */
6151 else if (GET_CODE (src) == AND)
6152 pflags->is_and = 1;
6153 else if (GET_CODE (src) == IOR)
6154 pflags->is_or = 1;
6156 break;
6160 /* Subroutine of rtx_needs_barrier; this function determines whether the
6161 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6162 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6163 for this insn. */
6165 static int
6166 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6168 int need_barrier = 0;
6169 rtx dst;
6170 rtx src = SET_SRC (x);
6172 if (GET_CODE (src) == CALL)
6173 /* We don't need to worry about the result registers that
6174 get written by subroutine call. */
6175 return rtx_needs_barrier (src, flags, pred);
6176 else if (SET_DEST (x) == pc_rtx)
6178 /* X is a conditional branch. */
6179 /* ??? This seems redundant, as the caller sets this bit for
6180 all JUMP_INSNs. */
6181 if (!ia64_spec_check_src_p (src))
6182 flags.is_branch = 1;
6183 return rtx_needs_barrier (src, flags, pred);
6186 if (ia64_spec_check_src_p (src))
6187 /* Avoid checking one register twice (in condition
6188 and in 'then' section) for ldc pattern. */
6190 gcc_assert (REG_P (XEXP (src, 2)));
6191 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6193 /* We process MEM below. */
6194 src = XEXP (src, 1);
6197 need_barrier |= rtx_needs_barrier (src, flags, pred);
6199 dst = SET_DEST (x);
6200 if (GET_CODE (dst) == ZERO_EXTRACT)
6202 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6203 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6205 return need_barrier;
6208 /* Handle an access to rtx X of type FLAGS using predicate register
6209 PRED. Return 1 if this access creates a dependency with an earlier
6210 instruction in the same group. */
6212 static int
6213 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6215 int i, j;
6216 int is_complemented = 0;
6217 int need_barrier = 0;
6218 const char *format_ptr;
6219 struct reg_flags new_flags;
6220 rtx cond;
6222 if (! x)
6223 return 0;
6225 new_flags = flags;
6227 switch (GET_CODE (x))
6229 case SET:
6230 update_set_flags (x, &new_flags);
6231 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6232 if (GET_CODE (SET_SRC (x)) != CALL)
6234 new_flags.is_write = 1;
6235 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6237 break;
6239 case CALL:
6240 new_flags.is_write = 0;
6241 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6243 /* Avoid multiple register writes, in case this is a pattern with
6244 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6245 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6247 new_flags.is_write = 1;
6248 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6249 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6250 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6252 break;
6254 case COND_EXEC:
6255 /* X is a predicated instruction. */
6257 cond = COND_EXEC_TEST (x);
6258 gcc_assert (!pred);
6259 need_barrier = rtx_needs_barrier (cond, flags, 0);
6261 if (GET_CODE (cond) == EQ)
6262 is_complemented = 1;
6263 cond = XEXP (cond, 0);
6264 gcc_assert (GET_CODE (cond) == REG
6265 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6266 pred = REGNO (cond);
6267 if (is_complemented)
6268 ++pred;
6270 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6271 return need_barrier;
6273 case CLOBBER:
6274 case USE:
6275 /* Clobber & use are for earlier compiler-phases only. */
6276 break;
6278 case ASM_OPERANDS:
6279 case ASM_INPUT:
6280 /* We always emit stop bits for traditional asms. We emit stop bits
6281 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6282 if (GET_CODE (x) != ASM_OPERANDS
6283 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6285 /* Avoid writing the register multiple times if we have multiple
6286 asm outputs. This avoids a failure in rws_access_reg. */
6287 if (! rws_insn_test (REG_VOLATILE))
6289 new_flags.is_write = 1;
6290 rws_access_regno (REG_VOLATILE, new_flags, pred);
6292 return 1;
6295 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6296 We cannot just fall through here since then we would be confused
6297 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
6298 a traditional asm, unlike its normal usage. */
6300 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6301 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6302 need_barrier = 1;
6303 break;
6305 case PARALLEL:
6306 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6308 rtx pat = XVECEXP (x, 0, i);
6309 switch (GET_CODE (pat))
6311 case SET:
6312 update_set_flags (pat, &new_flags);
6313 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6314 break;
6316 case USE:
6317 case CALL:
6318 case ASM_OPERANDS:
6319 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6320 break;
6322 case CLOBBER:
6323 if (REG_P (XEXP (pat, 0))
6324 && extract_asm_operands (x) != NULL_RTX
6325 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6327 new_flags.is_write = 1;
6328 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6329 new_flags, pred);
6330 new_flags = flags;
6332 break;
6334 case RETURN:
6335 break;
6337 default:
6338 gcc_unreachable ();
6341 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6343 rtx pat = XVECEXP (x, 0, i);
6344 if (GET_CODE (pat) == SET)
6346 if (GET_CODE (SET_SRC (pat)) != CALL)
6348 new_flags.is_write = 1;
6349 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6350 pred);
6353 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6354 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6356 break;
6358 case SUBREG:
6359 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6360 break;
6361 case REG:
6362 if (REGNO (x) == AR_UNAT_REGNUM)
6364 for (i = 0; i < 64; ++i)
6365 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6367 else
6368 need_barrier = rws_access_reg (x, flags, pred);
6369 break;
6371 case MEM:
6372 /* Find the regs used in memory address computation. */
6373 new_flags.is_write = 0;
6374 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6375 break;
6377 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6378 case SYMBOL_REF: case LABEL_REF: case CONST:
6379 break;
6381 /* Operators with side-effects. */
6382 case POST_INC: case POST_DEC:
6383 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6385 new_flags.is_write = 0;
6386 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6387 new_flags.is_write = 1;
6388 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6389 break;
6391 case POST_MODIFY:
6392 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6394 new_flags.is_write = 0;
6395 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6396 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6397 new_flags.is_write = 1;
6398 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6399 break;
6401 /* Handle common unary and binary ops for efficiency. */
6402 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6403 case MOD: case UDIV: case UMOD: case AND: case IOR:
6404 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6405 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6406 case NE: case EQ: case GE: case GT: case LE:
6407 case LT: case GEU: case GTU: case LEU: case LTU:
6408 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6409 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6410 break;
6412 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6413 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6414 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6415 case SQRT: case FFS: case POPCOUNT:
6416 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6417 break;
6419 case VEC_SELECT:
6420 /* VEC_SELECT's second argument is a PARALLEL with integers that
6421 describe the elements selected. On ia64, those integers are
6422 always constants. Avoid walking the PARALLEL so that we don't
6423 get confused with "normal" parallels and then die. */
6424 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6425 break;
6427 case UNSPEC:
6428 switch (XINT (x, 1))
6430 case UNSPEC_LTOFF_DTPMOD:
6431 case UNSPEC_LTOFF_DTPREL:
6432 case UNSPEC_DTPREL:
6433 case UNSPEC_LTOFF_TPREL:
6434 case UNSPEC_TPREL:
6435 case UNSPEC_PRED_REL_MUTEX:
6436 case UNSPEC_PIC_CALL:
6437 case UNSPEC_MF:
6438 case UNSPEC_FETCHADD_ACQ:
6439 case UNSPEC_BSP_VALUE:
6440 case UNSPEC_FLUSHRS:
6441 case UNSPEC_BUNDLE_SELECTOR:
6442 break;
6444 case UNSPEC_GR_SPILL:
6445 case UNSPEC_GR_RESTORE:
6447 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6448 HOST_WIDE_INT bit = (offset >> 3) & 63;
6450 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6451 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6452 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6453 new_flags, pred);
6454 break;
6457 case UNSPEC_FR_SPILL:
6458 case UNSPEC_FR_RESTORE:
6459 case UNSPEC_GETF_EXP:
6460 case UNSPEC_SETF_EXP:
6461 case UNSPEC_ADDP4:
6462 case UNSPEC_FR_SQRT_RECIP_APPROX:
6463 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6464 case UNSPEC_LDA:
6465 case UNSPEC_LDS:
6466 case UNSPEC_LDS_A:
6467 case UNSPEC_LDSA:
6468 case UNSPEC_CHKACLR:
6469 case UNSPEC_CHKS:
6470 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6471 break;
6473 case UNSPEC_FR_RECIP_APPROX:
6474 case UNSPEC_SHRP:
6475 case UNSPEC_COPYSIGN:
6476 case UNSPEC_FR_RECIP_APPROX_RES:
6477 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6478 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6479 break;
6481 case UNSPEC_CMPXCHG_ACQ:
6482 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6483 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6484 break;
6486 default:
6487 gcc_unreachable ();
6489 break;
6491 case UNSPEC_VOLATILE:
6492 switch (XINT (x, 1))
6494 case UNSPECV_ALLOC:
6495 /* Alloc must always be the first instruction of a group.
6496 We force this by always returning true. */
6497 /* ??? We might get better scheduling if we explicitly check for
6498 input/local/output register dependencies, and modify the
6499 scheduler so that alloc is always reordered to the start of
6500 the current group. We could then eliminate all of the
6501 first_instruction code. */
6502 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6504 new_flags.is_write = 1;
6505 rws_access_regno (REG_AR_CFM, new_flags, pred);
6506 return 1;
6508 case UNSPECV_SET_BSP:
6509 need_barrier = 1;
6510 break;
6512 case UNSPECV_BLOCKAGE:
6513 case UNSPECV_INSN_GROUP_BARRIER:
6514 case UNSPECV_BREAK:
6515 case UNSPECV_PSAC_ALL:
6516 case UNSPECV_PSAC_NORMAL:
6517 return 0;
6519 default:
6520 gcc_unreachable ();
6522 break;
6524 case RETURN:
6525 new_flags.is_write = 0;
6526 need_barrier = rws_access_regno (REG_RP, flags, pred);
6527 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6529 new_flags.is_write = 1;
6530 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6531 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6532 break;
6534 default:
6535 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6536 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6537 switch (format_ptr[i])
6539 case '0': /* unused field */
6540 case 'i': /* integer */
6541 case 'n': /* note */
6542 case 'w': /* wide integer */
6543 case 's': /* pointer to string */
6544 case 'S': /* optional pointer to string */
6545 break;
6547 case 'e':
6548 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6549 need_barrier = 1;
6550 break;
6552 case 'E':
6553 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6554 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6555 need_barrier = 1;
6556 break;
6558 default:
6559 gcc_unreachable ();
6561 break;
6563 return need_barrier;
6566 /* Clear out the state for group_barrier_needed at the start of a
6567 sequence of insns. */
6569 static void
6570 init_insn_group_barriers (void)
6572 memset (rws_sum, 0, sizeof (rws_sum));
6573 first_instruction = 1;
6576 /* Given the current state, determine whether a group barrier (a stop bit) is
6577 necessary before INSN. Return nonzero if so. This modifies the state to
6578 include the effects of INSN as a side-effect. */
6580 static int
6581 group_barrier_needed (rtx insn)
6583 rtx pat;
6584 int need_barrier = 0;
6585 struct reg_flags flags;
6587 memset (&flags, 0, sizeof (flags));
6588 switch (GET_CODE (insn))
6590 case NOTE:
6591 case DEBUG_INSN:
6592 break;
6594 case BARRIER:
6595 /* A barrier doesn't imply an instruction group boundary. */
6596 break;
6598 case CODE_LABEL:
6599 memset (rws_insn, 0, sizeof (rws_insn));
6600 return 1;
6602 case CALL_INSN:
6603 flags.is_branch = 1;
6604 flags.is_sibcall = SIBLING_CALL_P (insn);
6605 memset (rws_insn, 0, sizeof (rws_insn));
6607 /* Don't bundle a call following another call. */
6608 if ((pat = prev_active_insn (insn))
6609 && GET_CODE (pat) == CALL_INSN)
6611 need_barrier = 1;
6612 break;
6615 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6616 break;
6618 case JUMP_INSN:
6619 if (!ia64_spec_check_p (insn))
6620 flags.is_branch = 1;
6622 /* Don't bundle a jump following a call. */
6623 if ((pat = prev_active_insn (insn))
6624 && GET_CODE (pat) == CALL_INSN)
6626 need_barrier = 1;
6627 break;
6629 /* FALLTHRU */
6631 case INSN:
6632 if (GET_CODE (PATTERN (insn)) == USE
6633 || GET_CODE (PATTERN (insn)) == CLOBBER)
6634 /* Don't care about USE and CLOBBER "insns"---those are used to
6635 indicate to the optimizer that it shouldn't get rid of
6636 certain operations. */
6637 break;
6639 pat = PATTERN (insn);
6641 /* Ug. Hack hacks hacked elsewhere. */
6642 switch (recog_memoized (insn))
6644 /* We play dependency tricks with the epilogue in order
6645 to get proper schedules. Undo this for dv analysis. */
6646 case CODE_FOR_epilogue_deallocate_stack:
6647 case CODE_FOR_prologue_allocate_stack:
6648 pat = XVECEXP (pat, 0, 0);
6649 break;
6651 /* The pattern we use for br.cloop confuses the code above.
6652 The second element of the vector is representative. */
6653 case CODE_FOR_doloop_end_internal:
6654 pat = XVECEXP (pat, 0, 1);
6655 break;
6657 /* Doesn't generate code. */
6658 case CODE_FOR_pred_rel_mutex:
6659 case CODE_FOR_prologue_use:
6660 return 0;
6662 default:
6663 break;
6666 memset (rws_insn, 0, sizeof (rws_insn));
6667 need_barrier = rtx_needs_barrier (pat, flags, 0);
6669 /* Check to see if the previous instruction was a volatile
6670 asm. */
6671 if (! need_barrier)
6672 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6674 break;
6676 default:
6677 gcc_unreachable ();
6680 if (first_instruction && INSN_P (insn)
6681 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6682 && GET_CODE (PATTERN (insn)) != USE
6683 && GET_CODE (PATTERN (insn)) != CLOBBER)
6685 need_barrier = 0;
6686 first_instruction = 0;
6689 return need_barrier;
6692 /* Like group_barrier_needed, but do not clobber the current state. */
6694 static int
6695 safe_group_barrier_needed (rtx insn)
6697 int saved_first_instruction;
6698 int t;
6700 saved_first_instruction = first_instruction;
6701 in_safe_group_barrier = 1;
6703 t = group_barrier_needed (insn);
6705 first_instruction = saved_first_instruction;
6706 in_safe_group_barrier = 0;
6708 return t;
6711 /* Scan the current function and insert stop bits as necessary to
6712 eliminate dependencies. This function assumes that a final
6713 instruction scheduling pass has been run which has already
6714 inserted most of the necessary stop bits. This function only
6715 inserts new ones at basic block boundaries, since these are
6716 invisible to the scheduler. */
6718 static void
6719 emit_insn_group_barriers (FILE *dump)
6721 rtx insn;
6722 rtx last_label = 0;
6723 int insns_since_last_label = 0;
6725 init_insn_group_barriers ();
6727 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6729 if (GET_CODE (insn) == CODE_LABEL)
6731 if (insns_since_last_label)
6732 last_label = insn;
6733 insns_since_last_label = 0;
6735 else if (GET_CODE (insn) == NOTE
6736 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6738 if (insns_since_last_label)
6739 last_label = insn;
6740 insns_since_last_label = 0;
6742 else if (GET_CODE (insn) == INSN
6743 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6744 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6746 init_insn_group_barriers ();
6747 last_label = 0;
6749 else if (NONDEBUG_INSN_P (insn))
6751 insns_since_last_label = 1;
6753 if (group_barrier_needed (insn))
6755 if (last_label)
6757 if (dump)
6758 fprintf (dump, "Emitting stop before label %d\n",
6759 INSN_UID (last_label));
6760 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6761 insn = last_label;
6763 init_insn_group_barriers ();
6764 last_label = 0;
6771 /* Like emit_insn_group_barriers, but used when no final scheduling pass was run.
6772 This function has to emit all necessary group barriers. */
6774 static void
6775 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6777 rtx insn;
6779 init_insn_group_barriers ();
6781 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6783 if (GET_CODE (insn) == BARRIER)
6785 rtx last = prev_active_insn (insn);
6787 if (! last)
6788 continue;
6789 if (GET_CODE (last) == JUMP_INSN
6790 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6791 last = prev_active_insn (last);
6792 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6793 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6795 init_insn_group_barriers ();
6797 else if (NONDEBUG_INSN_P (insn))
6799 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6800 init_insn_group_barriers ();
6801 else if (group_barrier_needed (insn))
6803 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6804 init_insn_group_barriers ();
6805 group_barrier_needed (insn);
6813 /* Instruction scheduling support. */
6815 #define NR_BUNDLES 10
6817 /* A list of names of all available bundles. */
6819 static const char *bundle_name [NR_BUNDLES] =
6821 ".mii",
6822 ".mmi",
6823 ".mfi",
6824 ".mmf",
6825 #if NR_BUNDLES == 10
6826 ".bbb",
6827 ".mbb",
6828 #endif
6829 ".mib",
6830 ".mmb",
6831 ".mfb",
6832 ".mlx"
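/* In each bundle name, 'm' stands for a memory slot, 'i' for an integer
   slot, 'f' for a floating-point slot and 'b' for a branch slot; ".mlx"
   pairs a memory slot with a long-immediate (L+X) slot.  */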
6835 /* Nonzero if we should insert stop bits into the schedule. */
6837 int ia64_final_schedule = 0;
6839 /* Codes of the corresponding queried units: */
6841 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6842 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6844 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6845 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6847 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6849 /* The following variable value is an insn group barrier. */
6851 static rtx dfa_stop_insn;
6853 /* The following variable value is the last issued insn. */
6855 static rtx last_scheduled_insn;
6857 /* The following variable value is a pointer to a DFA state used as
6858 a temporary variable. */
6860 static state_t temp_dfa_state = NULL;
6862 /* The following variable value is the DFA state after issuing the last
6863 insn. */
6865 static state_t prev_cycle_state = NULL;
6867 /* The following array element values are TRUE if the corresponding
6868 insn requires a stop bit to be added before it. */
6870 static char *stops_p = NULL;
6872 /* The following variable is used to set up the array mentioned above. */
6874 static int stop_before_p = 0;
6876 /* The following variable value is the length of the arrays `clocks' and
6877 `add_cycles'. */
6879 static int clocks_length;
6881 /* The following variable value is the number of data speculations in progress. */
6882 static int pending_data_specs = 0;
6884 /* Number of memory references on the current and three following processor cycles. */
6885 static char mem_ops_in_group[4];
6887 /* The current processor cycle number (from the scheduler's point of view). */
6888 static int current_cycle;
6890 static rtx ia64_single_set (rtx);
6891 static void ia64_emit_insn_before (rtx, rtx);
6893 /* Map a bundle number to its pseudo-op. */
6895 const char *
6896 get_bundle_name (int b)
6898 return bundle_name[b];
6902 /* Return the maximum number of instructions a cpu can issue. */
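/* Six corresponds to the two three-instruction bundles an Itanium 2 core
   can disperse per clock.  */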
6904 static int
6905 ia64_issue_rate (void)
6907 return 6;
6910 /* Helper function - like single_set, but look inside COND_EXEC. */
6912 static rtx
6913 ia64_single_set (rtx insn)
6915 rtx x = PATTERN (insn), ret;
6916 if (GET_CODE (x) == COND_EXEC)
6917 x = COND_EXEC_CODE (x);
6918 if (GET_CODE (x) == SET)
6919 return x;
6921 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6922 Although they are not classical single sets, the second set is there just
6923 to keep the insn from being moved past FP-relative stack accesses. */
6924 switch (recog_memoized (insn))
6926 case CODE_FOR_prologue_allocate_stack:
6927 case CODE_FOR_epilogue_deallocate_stack:
6928 ret = XVECEXP (x, 0, 0);
6929 break;
6931 default:
6932 ret = single_set_2 (insn, x);
6933 break;
6936 return ret;
6939 /* Adjust the cost of a scheduling dependency.
6940 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
6941 COST is the current cost, DW is dependency weakness. */
6942 static int
6943 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6945 enum reg_note dep_type = (enum reg_note) dep_type1;
6946 enum attr_itanium_class dep_class;
6947 enum attr_itanium_class insn_class;
6949 insn_class = ia64_safe_itanium_class (insn);
6950 dep_class = ia64_safe_itanium_class (dep_insn);
6952 /* Treat true memory dependencies separately. Ignore apparent true
6953 dependence between a store and a call (the call wraps a SYMBOL_REF in a MEM). */
6954 if (dep_type == REG_DEP_TRUE
6955 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6956 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6957 return 0;
6959 if (dw == MIN_DEP_WEAK)
6960 /* Store and load are likely to alias, use higher cost to avoid stall. */
6961 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6962 else if (dw > MIN_DEP_WEAK)
6964 /* Store and load are less likely to alias. */
6965 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6966 /* Assume there will be no cache conflict for floating-point data.
6967 For integer data, L1 conflict penalty is huge (17 cycles), so we
6968 never assume it will not cause a conflict. */
6969 return 0;
6970 else
6971 return cost;
6974 if (dep_type != REG_DEP_OUTPUT)
6975 return cost;
6977 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6978 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6979 return 0;
6981 return cost;
6984 /* Like emit_insn_before, but skip cycle_display notes.
6985 ??? When cycle display notes are implemented, update this. */
6987 static void
6988 ia64_emit_insn_before (rtx insn, rtx before)
6990 emit_insn_before (insn, before);
6993 /* The following function marks insns that produce addresses for load
6994 and store insns. Such insns will be placed into M slots because that
6995 decreases latency for Itanium 1 (see function
6996 `ia64_produce_address_p' and the DFA descriptions). */
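/* Note that the hook reuses the insn's 'call' bit as a scratch flag: it is
   set when the insn produces an address that feeds a later load or store
   through one of the *_address_bypass_p checks below.  */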
6998 static void
6999 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
7001 rtx insn, next, next_tail;
7003 /* Before reload, which_alternative is not set, which means that
7004 ia64_safe_itanium_class will produce wrong results for (at least)
7005 move instructions. */
7006 if (!reload_completed)
7007 return;
7009 next_tail = NEXT_INSN (tail);
7010 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7011 if (INSN_P (insn))
7012 insn->call = 0;
7013 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7014 if (INSN_P (insn)
7015 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7017 sd_iterator_def sd_it;
7018 dep_t dep;
7019 bool has_mem_op_consumer_p = false;
7021 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7023 enum attr_itanium_class c;
7025 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7026 continue;
7028 next = DEP_CON (dep);
7029 c = ia64_safe_itanium_class (next);
7030 if ((c == ITANIUM_CLASS_ST
7031 || c == ITANIUM_CLASS_STF)
7032 && ia64_st_address_bypass_p (insn, next))
7034 has_mem_op_consumer_p = true;
7035 break;
7037 else if ((c == ITANIUM_CLASS_LD
7038 || c == ITANIUM_CLASS_FLD
7039 || c == ITANIUM_CLASS_FLDP)
7040 && ia64_ld_address_bypass_p (insn, next))
7042 has_mem_op_consumer_p = true;
7043 break;
7047 insn->call = has_mem_op_consumer_p;
7051 /* We're beginning a new block. Initialize data structures as necessary. */
7053 static void
7054 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7055 int sched_verbose ATTRIBUTE_UNUSED,
7056 int max_ready ATTRIBUTE_UNUSED)
7058 #ifdef ENABLE_CHECKING
7059 rtx insn;
7061 if (!sel_sched_p () && reload_completed)
7062 for (insn = NEXT_INSN (current_sched_info->prev_head);
7063 insn != current_sched_info->next_tail;
7064 insn = NEXT_INSN (insn))
7065 gcc_assert (!SCHED_GROUP_P (insn));
7066 #endif
7067 last_scheduled_insn = NULL_RTX;
7068 init_insn_group_barriers ();
7070 current_cycle = 0;
7071 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7074 /* We're beginning a scheduling pass. Check assertion. */
7076 static void
7077 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7078 int sched_verbose ATTRIBUTE_UNUSED,
7079 int max_ready ATTRIBUTE_UNUSED)
7081 gcc_assert (pending_data_specs == 0);
7084 /* Scheduling pass is now finished. Free/reset static variable. */
7085 static void
7086 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7087 int sched_verbose ATTRIBUTE_UNUSED)
7089 gcc_assert (pending_data_specs == 0);
7092 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7093 speculation check), FALSE otherwise. */
7094 static bool
7095 is_load_p (rtx insn)
7097 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7099 return
7100 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7101 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7104 /* If INSN is a memory reference, record it in the MEM_OPS_IN_GROUP global
7105 array, taking into account the 3-cycle cache-reference postponing for
7106 stores (Intel Itanium 2 Reference Manual for Software Development and
7107 Optimization, section 6.7.3.1). */
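/* Concretely, a load issued on cycle N is counted in
   mem_ops_in_group[N % 4], while a store issued on cycle N is counted in
   mem_ops_in_group[(N + 3) % 4], the cycle on which its cache reference
   is assumed to occur.  */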
7108 static void
7109 record_memory_reference (rtx insn)
7111 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7113 switch (insn_class) {
7114 case ITANIUM_CLASS_FLD:
7115 case ITANIUM_CLASS_LD:
7116 mem_ops_in_group[current_cycle % 4]++;
7117 break;
7118 case ITANIUM_CLASS_STF:
7119 case ITANIUM_CLASS_ST:
7120 mem_ops_in_group[(current_cycle + 3) % 4]++;
7121 break;
7122 default:;
7126 /* We are about to begin issuing insns for this clock cycle.
7127 Override the default sort algorithm to better slot instructions. */
7129 static int
7130 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7131 int *pn_ready, int clock_var,
7132 int reorder_type)
7134 int n_asms;
7135 int n_ready = *pn_ready;
7136 rtx *e_ready = ready + n_ready;
7137 rtx *insnp;
7139 if (sched_verbose)
7140 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7142 if (reorder_type == 0)
7144 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7145 n_asms = 0;
7146 for (insnp = ready; insnp < e_ready; insnp++)
7147 if (insnp < e_ready)
7149 rtx insn = *insnp;
7150 enum attr_type t = ia64_safe_type (insn);
7151 if (t == TYPE_UNKNOWN)
7153 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7154 || asm_noperands (PATTERN (insn)) >= 0)
7156 rtx lowest = ready[n_asms];
7157 ready[n_asms] = insn;
7158 *insnp = lowest;
7159 n_asms++;
7161 else
7163 rtx highest = ready[n_ready - 1];
7164 ready[n_ready - 1] = insn;
7165 *insnp = highest;
7166 return 1;
7171 if (n_asms < n_ready)
7173 /* Some normal insns to process. Skip the asms. */
7174 ready += n_asms;
7175 n_ready -= n_asms;
7177 else if (n_ready > 0)
7178 return 1;
7181 if (ia64_final_schedule)
7183 int deleted = 0;
7184 int nr_need_stop = 0;
7186 for (insnp = ready; insnp < e_ready; insnp++)
7187 if (safe_group_barrier_needed (*insnp))
7188 nr_need_stop++;
7190 if (reorder_type == 1 && n_ready == nr_need_stop)
7191 return 0;
7192 if (reorder_type == 0)
7193 return 1;
7194 insnp = e_ready;
7195 /* Move down everything that needs a stop bit, preserving
7196 relative order. */
7197 while (insnp-- > ready + deleted)
7198 while (insnp >= ready + deleted)
7200 rtx insn = *insnp;
7201 if (! safe_group_barrier_needed (insn))
7202 break;
7203 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7204 *ready = insn;
7205 deleted++;
7207 n_ready -= deleted;
7208 ready += deleted;
7211 current_cycle = clock_var;
7212 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7214 int moved = 0;
7216 insnp = e_ready;
7217 /* Move down loads/stores, preserving relative order. */
7218 while (insnp-- > ready + moved)
7219 while (insnp >= ready + moved)
7221 rtx insn = *insnp;
7222 if (! is_load_p (insn))
7223 break;
7224 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7225 *ready = insn;
7226 moved++;
7228 n_ready -= moved;
7229 ready += moved;
7232 return 1;
7235 /* We are about to begin issuing insns for this clock cycle. Override
7236 the default sort algorithm to better slot instructions. */
7238 static int
7239 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7240 int clock_var)
7242 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7243 pn_ready, clock_var, 0);
7246 /* Like ia64_sched_reorder, but called after issuing each insn.
7247 Override the default sort algorithm to better slot instructions. */
7249 static int
7250 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7251 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7252 int *pn_ready, int clock_var)
7254 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7255 clock_var, 1);
7258 /* We are about to issue INSN. Return the number of insns left on the
7259 ready queue that can be issued this cycle. */
7261 static int
7262 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7263 int sched_verbose ATTRIBUTE_UNUSED,
7264 rtx insn ATTRIBUTE_UNUSED,
7265 int can_issue_more ATTRIBUTE_UNUSED)
7267 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7268 /* Modulo scheduling does not extend h_i_d when emitting
7269 new instructions. Don't use h_i_d if we don't have to. */
7271 if (DONE_SPEC (insn) & BEGIN_DATA)
7272 pending_data_specs++;
7273 if (CHECK_SPEC (insn) & BEGIN_DATA)
7274 pending_data_specs--;
7277 if (DEBUG_INSN_P (insn))
7278 return 1;
7280 last_scheduled_insn = insn;
7281 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7282 if (reload_completed)
7284 int needed = group_barrier_needed (insn);
7286 gcc_assert (!needed);
7287 if (GET_CODE (insn) == CALL_INSN)
7288 init_insn_group_barriers ();
7289 stops_p [INSN_UID (insn)] = stop_before_p;
7290 stop_before_p = 0;
7292 record_memory_reference (insn);
7294 return 1;
7297 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7298 can be chosen. */
7300 static int
7301 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7303 gcc_assert (insn && INSN_P (insn));
7304 return ((!reload_completed
7305 || !safe_group_barrier_needed (insn))
7306 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7307 && (!mflag_sched_mem_insns_hard_limit
7308 || !is_load_p (insn)
7309 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7312 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7313 can be chosen. */
7315 static bool
7316 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7318 gcc_assert (insn && INSN_P (insn));
7319 /* The size of the ALAT is 32. Since we perform conservative data
7320 speculation, we keep the ALAT half empty. */
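/* In other words, at most 16 data-speculative loads (half of the 32 ALAT
   entries) may be in flight at once, which is what the
   pending_data_specs < 16 test below enforces.  */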
7321 return (pending_data_specs < 16
7322 || !(TODO_SPEC (insn) & BEGIN_DATA));
7325 /* The following variable value is a pseudo-insn used by the DFA insn
7326 scheduler to change the DFA state when the simulated clock is
7327 increased. */
7329 static rtx dfa_pre_cycle_insn;
7331 /* Returns 1 when a meaningful insn was scheduled between the last group
7332 barrier and LAST. */
7333 static int
7334 scheduled_good_insn (rtx last)
7336 if (last && recog_memoized (last) >= 0)
7337 return 1;
7339 for ( ;
7340 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7341 && !stops_p[INSN_UID (last)];
7342 last = PREV_INSN (last))
7343 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7344 the ebb we're scheduling. */
7345 if (INSN_P (last) && recog_memoized (last) >= 0)
7346 return 1;
7348 return 0;
7351 /* We are about to begin issuing INSN. Return nonzero if we cannot
7352 issue it on the given cycle CLOCK and return zero if we should not sort
7353 the ready queue on the next clock start. */
7355 static int
7356 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7357 int clock, int *sort_p)
7359 gcc_assert (insn && INSN_P (insn));
7361 if (DEBUG_INSN_P (insn))
7362 return 0;
7364 /* When a group barrier is needed for insn, last_scheduled_insn
7365 should be set. */
7366 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7367 || last_scheduled_insn);
7369 if ((reload_completed
7370 && (safe_group_barrier_needed (insn)
7371 || (mflag_sched_stop_bits_after_every_cycle
7372 && last_clock != clock
7373 && last_scheduled_insn
7374 && scheduled_good_insn (last_scheduled_insn))))
7375 || (last_scheduled_insn
7376 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7377 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7378 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7380 init_insn_group_barriers ();
7382 if (verbose && dump)
7383 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7384 last_clock == clock ? " + cycle advance" : "");
7386 stop_before_p = 1;
7387 current_cycle = clock;
7388 mem_ops_in_group[current_cycle % 4] = 0;
7390 if (last_clock == clock)
7392 state_transition (curr_state, dfa_stop_insn);
7393 if (TARGET_EARLY_STOP_BITS)
7394 *sort_p = (last_scheduled_insn == NULL_RTX
7395 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7396 else
7397 *sort_p = 0;
7398 return 1;
7401 if (last_scheduled_insn)
7403 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7404 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7405 state_reset (curr_state);
7406 else
7408 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7409 state_transition (curr_state, dfa_stop_insn);
7410 state_transition (curr_state, dfa_pre_cycle_insn);
7411 state_transition (curr_state, NULL);
7415 return 0;
7418 /* Implement targetm.sched.h_i_d_extended hook.
7419 Extend internal data structures. */
7420 static void
7421 ia64_h_i_d_extended (void)
7423 if (stops_p != NULL)
7425 int new_clocks_length = get_max_uid () * 3 / 2;
7426 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7427 clocks_length = new_clocks_length;
7432 /* This structure describes the data used by the backend to guide scheduling.
7433 When the current scheduling point is switched, this data should be saved
7434 and restored later, if the scheduler returns to this point. */
7435 struct _ia64_sched_context
7437 state_t prev_cycle_state;
7438 rtx last_scheduled_insn;
7439 struct reg_write_state rws_sum[NUM_REGS];
7440 struct reg_write_state rws_insn[NUM_REGS];
7441 int first_instruction;
7442 int pending_data_specs;
7443 int current_cycle;
7444 char mem_ops_in_group[4];
7446 typedef struct _ia64_sched_context *ia64_sched_context_t;
7448 /* Allocates a scheduling context. */
7449 static void *
7450 ia64_alloc_sched_context (void)
7452 return xmalloc (sizeof (struct _ia64_sched_context));
7455 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7456 the global context otherwise. */
7457 static void
7458 ia64_init_sched_context (void *_sc, bool clean_p)
7460 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7462 sc->prev_cycle_state = xmalloc (dfa_state_size);
7463 if (clean_p)
7465 state_reset (sc->prev_cycle_state);
7466 sc->last_scheduled_insn = NULL_RTX;
7467 memset (sc->rws_sum, 0, sizeof (rws_sum));
7468 memset (sc->rws_insn, 0, sizeof (rws_insn));
7469 sc->first_instruction = 1;
7470 sc->pending_data_specs = 0;
7471 sc->current_cycle = 0;
7472 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7474 else
7476 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7477 sc->last_scheduled_insn = last_scheduled_insn;
7478 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7479 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7480 sc->first_instruction = first_instruction;
7481 sc->pending_data_specs = pending_data_specs;
7482 sc->current_cycle = current_cycle;
7483 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7487 /* Sets the global scheduling context to the one pointed to by _SC. */
7488 static void
7489 ia64_set_sched_context (void *_sc)
7491 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7493 gcc_assert (sc != NULL);
7495 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7496 last_scheduled_insn = sc->last_scheduled_insn;
7497 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7498 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7499 first_instruction = sc->first_instruction;
7500 pending_data_specs = sc->pending_data_specs;
7501 current_cycle = sc->current_cycle;
7502 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7505 /* Clears the data in the _SC scheduling context. */
7506 static void
7507 ia64_clear_sched_context (void *_sc)
7509 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7511 free (sc->prev_cycle_state);
7512 sc->prev_cycle_state = NULL;
7515 /* Frees the _SC scheduling context. */
7516 static void
7517 ia64_free_sched_context (void *_sc)
7519 gcc_assert (_sc != NULL);
7521 free (_sc);
7524 typedef rtx (* gen_func_t) (rtx, rtx);
7526 /* Return a function that will generate a load of mode MODE_NO
7527 with speculation types TS. */
7528 static gen_func_t
7529 get_spec_load_gen_function (ds_t ts, int mode_no)
7531 static gen_func_t gen_ld_[] = {
7532 gen_movbi,
7533 gen_movqi_internal,
7534 gen_movhi_internal,
7535 gen_movsi_internal,
7536 gen_movdi_internal,
7537 gen_movsf_internal,
7538 gen_movdf_internal,
7539 gen_movxf_internal,
7540 gen_movti_internal,
7541 gen_zero_extendqidi2,
7542 gen_zero_extendhidi2,
7543 gen_zero_extendsidi2,
7546 static gen_func_t gen_ld_a[] = {
7547 gen_movbi_advanced,
7548 gen_movqi_advanced,
7549 gen_movhi_advanced,
7550 gen_movsi_advanced,
7551 gen_movdi_advanced,
7552 gen_movsf_advanced,
7553 gen_movdf_advanced,
7554 gen_movxf_advanced,
7555 gen_movti_advanced,
7556 gen_zero_extendqidi2_advanced,
7557 gen_zero_extendhidi2_advanced,
7558 gen_zero_extendsidi2_advanced,
7560 static gen_func_t gen_ld_s[] = {
7561 gen_movbi_speculative,
7562 gen_movqi_speculative,
7563 gen_movhi_speculative,
7564 gen_movsi_speculative,
7565 gen_movdi_speculative,
7566 gen_movsf_speculative,
7567 gen_movdf_speculative,
7568 gen_movxf_speculative,
7569 gen_movti_speculative,
7570 gen_zero_extendqidi2_speculative,
7571 gen_zero_extendhidi2_speculative,
7572 gen_zero_extendsidi2_speculative,
7574 static gen_func_t gen_ld_sa[] = {
7575 gen_movbi_speculative_advanced,
7576 gen_movqi_speculative_advanced,
7577 gen_movhi_speculative_advanced,
7578 gen_movsi_speculative_advanced,
7579 gen_movdi_speculative_advanced,
7580 gen_movsf_speculative_advanced,
7581 gen_movdf_speculative_advanced,
7582 gen_movxf_speculative_advanced,
7583 gen_movti_speculative_advanced,
7584 gen_zero_extendqidi2_speculative_advanced,
7585 gen_zero_extendhidi2_speculative_advanced,
7586 gen_zero_extendsidi2_speculative_advanced,
7588 static gen_func_t gen_ld_s_a[] = {
7589 gen_movbi_speculative_a,
7590 gen_movqi_speculative_a,
7591 gen_movhi_speculative_a,
7592 gen_movsi_speculative_a,
7593 gen_movdi_speculative_a,
7594 gen_movsf_speculative_a,
7595 gen_movdf_speculative_a,
7596 gen_movxf_speculative_a,
7597 gen_movti_speculative_a,
7598 gen_zero_extendqidi2_speculative_a,
7599 gen_zero_extendhidi2_speculative_a,
7600 gen_zero_extendsidi2_speculative_a,
7603 gen_func_t *gen_ld;
7605 if (ts & BEGIN_DATA)
7607 if (ts & BEGIN_CONTROL)
7608 gen_ld = gen_ld_sa;
7609 else
7610 gen_ld = gen_ld_a;
7612 else if (ts & BEGIN_CONTROL)
7614 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7615 || ia64_needs_block_p (ts))
7616 gen_ld = gen_ld_s;
7617 else
7618 gen_ld = gen_ld_s_a;
7620 else if (ts == 0)
7621 gen_ld = gen_ld_;
7622 else
7623 gcc_unreachable ();
7625 return gen_ld[mode_no];
7628 /* Constants that help map 'enum machine_mode' to int. */
7629 enum SPEC_MODES
7631 SPEC_MODE_INVALID = -1,
7632 SPEC_MODE_FIRST = 0,
7633 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7634 SPEC_MODE_FOR_EXTEND_LAST = 3,
7635 SPEC_MODE_LAST = 8
7638 enum
7640 /* Offset to reach ZERO_EXTEND patterns. */
7641 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7644 /* Return the index of MODE. */
7645 static int
7646 ia64_mode_to_int (enum machine_mode mode)
7648 switch (mode)
7650 case BImode: return 0; /* SPEC_MODE_FIRST */
7651 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7652 case HImode: return 2;
7653 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7654 case DImode: return 4;
7655 case SFmode: return 5;
7656 case DFmode: return 6;
7657 case XFmode: return 7;
7658 case TImode:
7659 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7660 mentioned in itanium[12].md. Predicate fp_register_operand also
7661 needs to be defined. Bottom line: better disable for now. */
7662 return SPEC_MODE_INVALID;
7663 default: return SPEC_MODE_INVALID;
7667 /* Provide information about speculation capabilities. */
7668 static void
7669 ia64_set_sched_flags (spec_info_t spec_info)
7671 unsigned int *flags = &(current_sched_info->flags);
7673 if (*flags & SCHED_RGN
7674 || *flags & SCHED_EBB
7675 || *flags & SEL_SCHED)
7677 int mask = 0;
7679 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7680 || (mflag_sched_ar_data_spec && reload_completed))
7682 mask |= BEGIN_DATA;
7684 if (!sel_sched_p ()
7685 && ((mflag_sched_br_in_data_spec && !reload_completed)
7686 || (mflag_sched_ar_in_data_spec && reload_completed)))
7687 mask |= BE_IN_DATA;
7690 if (mflag_sched_control_spec
7691 && (!sel_sched_p ()
7692 || reload_completed))
7694 mask |= BEGIN_CONTROL;
7696 if (!sel_sched_p () && mflag_sched_in_control_spec)
7697 mask |= BE_IN_CONTROL;
7700 spec_info->mask = mask;
7702 if (mask)
7704 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7706 if (mask & BE_IN_SPEC)
7707 *flags |= NEW_BBS;
7709 spec_info->flags = 0;
7711 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7712 spec_info->flags |= PREFER_NON_DATA_SPEC;
7714 if (mask & CONTROL_SPEC)
7716 if (mflag_sched_prefer_non_control_spec_insns)
7717 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7719 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7720 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7723 if (sched_verbose >= 1)
7724 spec_info->dump = sched_dump;
7725 else
7726 spec_info->dump = 0;
7728 if (mflag_sched_count_spec_in_critical_path)
7729 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7732 else
7733 spec_info->mask = 0;
7736 /* If INSN is an appropriate load, return its mode index
7737 (see ia64_mode_to_int).  Return -1 otherwise. */
7738 static int
7739 get_mode_no_for_insn (rtx insn)
7741 rtx reg, mem, mode_rtx;
7742 int mode_no;
7743 bool extend_p;
7745 extract_insn_cached (insn);
7747 /* We use WHICH_ALTERNATIVE only after reload. This will
7748 guarantee that reload won't touch a speculative insn. */
7750 if (recog_data.n_operands != 2)
7751 return -1;
7753 reg = recog_data.operand[0];
7754 mem = recog_data.operand[1];
7756 /* We should use MEM's mode since REG's mode in the presence of
7757 ZERO_EXTEND will always be DImode. */
7758 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7759 /* Process non-speculative ld. */
7761 if (!reload_completed)
7763 /* Do not speculate into regs like ar.lc. */
7764 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7765 return -1;
7767 if (!MEM_P (mem))
7768 return -1;
7771 rtx mem_reg = XEXP (mem, 0);
7773 if (!REG_P (mem_reg))
7774 return -1;
7777 mode_rtx = mem;
7779 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7781 gcc_assert (REG_P (reg) && MEM_P (mem));
7782 mode_rtx = mem;
7784 else
7785 return -1;
7787 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7788 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7789 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7790 /* Process speculative ld or ld.c. */
7792 gcc_assert (REG_P (reg) && MEM_P (mem));
7793 mode_rtx = mem;
7795 else
7797 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7799 if (attr_class == ITANIUM_CLASS_CHK_A
7800 || attr_class == ITANIUM_CLASS_CHK_S_I
7801 || attr_class == ITANIUM_CLASS_CHK_S_F)
7802 /* Process chk. */
7803 mode_rtx = reg;
7804 else
7805 return -1;
7808 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7810 if (mode_no == SPEC_MODE_INVALID)
7811 return -1;
7813 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7815 if (extend_p)
7817 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7818 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7819 return -1;
7821 mode_no += SPEC_GEN_EXTEND_OFFSET;
7824 return mode_no;
7827 /* If X is an unspec part of a speculative load, return its code.
7828 Return -1 otherwise. */
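/* UNSPEC_LDA marks a data-speculative (advanced) load, UNSPEC_LDS a
   control-speculative load, UNSPEC_LDSA a load that is both, and
   UNSPEC_LDS_A a control-speculative load whose check also covers data
   speculation (see ia64_get_insn_checked_ds below).  */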
7829 static int
7830 get_spec_unspec_code (const_rtx x)
7832 if (GET_CODE (x) != UNSPEC)
7833 return -1;
7836 int code;
7838 code = XINT (x, 1);
7840 switch (code)
7842 case UNSPEC_LDA:
7843 case UNSPEC_LDS:
7844 case UNSPEC_LDS_A:
7845 case UNSPEC_LDSA:
7846 return code;
7848 default:
7849 return -1;
7854 /* Implement skip_rtx_p hook. */
7855 static bool
7856 ia64_skip_rtx_p (const_rtx x)
7858 return get_spec_unspec_code (x) != -1;
7861 /* If INSN is a speculative load, return its UNSPEC code.
7862 Return -1 otherwise. */
7863 static int
7864 get_insn_spec_code (const_rtx insn)
7866 rtx pat, reg, mem;
7868 pat = PATTERN (insn);
7870 if (GET_CODE (pat) == COND_EXEC)
7871 pat = COND_EXEC_CODE (pat);
7873 if (GET_CODE (pat) != SET)
7874 return -1;
7876 reg = SET_DEST (pat);
7877 if (!REG_P (reg))
7878 return -1;
7880 mem = SET_SRC (pat);
7881 if (GET_CODE (mem) == ZERO_EXTEND)
7882 mem = XEXP (mem, 0);
7884 return get_spec_unspec_code (mem);
7887 /* If INSN is a speculative load, return a ds with the speculation types.
7888 Otherwise [if INSN is a normal instruction] return 0. */
7889 static ds_t
7890 ia64_get_insn_spec_ds (rtx insn)
7892 int code = get_insn_spec_code (insn);
7894 switch (code)
7896 case UNSPEC_LDA:
7897 return BEGIN_DATA;
7899 case UNSPEC_LDS:
7900 case UNSPEC_LDS_A:
7901 return BEGIN_CONTROL;
7903 case UNSPEC_LDSA:
7904 return BEGIN_DATA | BEGIN_CONTROL;
7906 default:
7907 return 0;
7911 /* If INSN is a speculative load return a ds with the speculation types that
7912 will be checked.
7913 Otherwise [if INSN is a normal instruction] return 0. */
7914 static ds_t
7915 ia64_get_insn_checked_ds (rtx insn)
7917 int code = get_insn_spec_code (insn);
7919 switch (code)
7921 case UNSPEC_LDA:
7922 return BEGIN_DATA | BEGIN_CONTROL;
7924 case UNSPEC_LDS:
7925 return BEGIN_CONTROL;
7927 case UNSPEC_LDS_A:
7928 case UNSPEC_LDSA:
7929 return BEGIN_DATA | BEGIN_CONTROL;
7931 default:
7932 return 0;
7936 /* Return a speculative load pattern for INSN with speculation types TS
7937 and machine mode index MODE_NO.  The new pattern reuses INSN's operands
7938 and preserves INSN's COND_EXEC predicate, if any. */
7940 static rtx
7941 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7943 rtx pat, new_pat;
7944 gen_func_t gen_load;
7946 gen_load = get_spec_load_gen_function (ts, mode_no);
7948 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7949 copy_rtx (recog_data.operand[1]));
7951 pat = PATTERN (insn);
7952 if (GET_CODE (pat) == COND_EXEC)
7953 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7954 new_pat);
7956 return new_pat;
7959 static bool
7960 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7961 ds_t ds ATTRIBUTE_UNUSED)
7963 return false;
7966 /* Implement targetm.sched.speculate_insn hook.
7967 Check if INSN can be TS speculative.
7968 If not, return -1.
7969 If so, generate the speculative pattern in *NEW_PAT and return 1.
7970 If the current pattern of INSN already provides TS speculation,
7971 return 0. */
7972 static int
7973 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7975 int mode_no;
7976 int res;
7978 gcc_assert (!(ts & ~SPECULATIVE));
7980 if (ia64_spec_check_p (insn))
7981 return -1;
7983 if ((ts & BE_IN_SPEC)
7984 && !insn_can_be_in_speculative_p (insn, ts))
7985 return -1;
7987 mode_no = get_mode_no_for_insn (insn);
7989 if (mode_no != SPEC_MODE_INVALID)
7991 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7992 res = 0;
7993 else
7995 res = 1;
7996 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7999 else
8000 res = -1;
8002 return res;
8005 /* Return a function that will generate a check for speculation TS with mode
8006 index MODE_NO.
8007 If a simple check is needed, pass true for SIMPLE_CHECK_P.
8008 If a clearing check is needed, pass true for CLEARING_CHECK_P. */
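/* The tables below parallel the load generator tables above: ld.c checks
   (clearing and non-clearing), chk.a checks (clearing and non-clearing)
   and chk.s checks, all indexed by the same mode numbers.  */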
8009 static gen_func_t
8010 get_spec_check_gen_function (ds_t ts, int mode_no,
8011 bool simple_check_p, bool clearing_check_p)
8013 static gen_func_t gen_ld_c_clr[] = {
8014 gen_movbi_clr,
8015 gen_movqi_clr,
8016 gen_movhi_clr,
8017 gen_movsi_clr,
8018 gen_movdi_clr,
8019 gen_movsf_clr,
8020 gen_movdf_clr,
8021 gen_movxf_clr,
8022 gen_movti_clr,
8023 gen_zero_extendqidi2_clr,
8024 gen_zero_extendhidi2_clr,
8025 gen_zero_extendsidi2_clr,
8027 static gen_func_t gen_ld_c_nc[] = {
8028 gen_movbi_nc,
8029 gen_movqi_nc,
8030 gen_movhi_nc,
8031 gen_movsi_nc,
8032 gen_movdi_nc,
8033 gen_movsf_nc,
8034 gen_movdf_nc,
8035 gen_movxf_nc,
8036 gen_movti_nc,
8037 gen_zero_extendqidi2_nc,
8038 gen_zero_extendhidi2_nc,
8039 gen_zero_extendsidi2_nc,
8041 static gen_func_t gen_chk_a_clr[] = {
8042 gen_advanced_load_check_clr_bi,
8043 gen_advanced_load_check_clr_qi,
8044 gen_advanced_load_check_clr_hi,
8045 gen_advanced_load_check_clr_si,
8046 gen_advanced_load_check_clr_di,
8047 gen_advanced_load_check_clr_sf,
8048 gen_advanced_load_check_clr_df,
8049 gen_advanced_load_check_clr_xf,
8050 gen_advanced_load_check_clr_ti,
8051 gen_advanced_load_check_clr_di,
8052 gen_advanced_load_check_clr_di,
8053 gen_advanced_load_check_clr_di,
8055 static gen_func_t gen_chk_a_nc[] = {
8056 gen_advanced_load_check_nc_bi,
8057 gen_advanced_load_check_nc_qi,
8058 gen_advanced_load_check_nc_hi,
8059 gen_advanced_load_check_nc_si,
8060 gen_advanced_load_check_nc_di,
8061 gen_advanced_load_check_nc_sf,
8062 gen_advanced_load_check_nc_df,
8063 gen_advanced_load_check_nc_xf,
8064 gen_advanced_load_check_nc_ti,
8065 gen_advanced_load_check_nc_di,
8066 gen_advanced_load_check_nc_di,
8067 gen_advanced_load_check_nc_di,
8069 static gen_func_t gen_chk_s[] = {
8070 gen_speculation_check_bi,
8071 gen_speculation_check_qi,
8072 gen_speculation_check_hi,
8073 gen_speculation_check_si,
8074 gen_speculation_check_di,
8075 gen_speculation_check_sf,
8076 gen_speculation_check_df,
8077 gen_speculation_check_xf,
8078 gen_speculation_check_ti,
8079 gen_speculation_check_di,
8080 gen_speculation_check_di,
8081 gen_speculation_check_di,
8084 gen_func_t *gen_check;
8086 if (ts & BEGIN_DATA)
8088 /* We don't need recovery because even if this is ld.sa,
8089 an ALAT entry will be allocated only if the NAT bit is set to zero.
8090 So it is enough to use ld.c here. */
8092 if (simple_check_p)
8094 gcc_assert (mflag_sched_spec_ldc);
8096 if (clearing_check_p)
8097 gen_check = gen_ld_c_clr;
8098 else
8099 gen_check = gen_ld_c_nc;
8101 else
8103 if (clearing_check_p)
8104 gen_check = gen_chk_a_clr;
8105 else
8106 gen_check = gen_chk_a_nc;
8109 else if (ts & BEGIN_CONTROL)
8111 if (simple_check_p)
8112 /* We might want to use ld.sa -> ld.c instead of
8113 ld.s -> chk.s. */
8115 gcc_assert (!ia64_needs_block_p (ts));
8117 if (clearing_check_p)
8118 gen_check = gen_ld_c_clr;
8119 else
8120 gen_check = gen_ld_c_nc;
8122 else
8124 gen_check = gen_chk_s;
8127 else
8128 gcc_unreachable ();
8130 gcc_assert (mode_no >= 0);
8131 return gen_check[mode_no];
8134 /* Return nonzero if speculation TS needs a branchy recovery check. */
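/* In other words, a branchy check is avoided only when the corresponding
   ld.c-based check is enabled (mflag_sched_spec_ldc, and additionally
   mflag_sched_spec_control_ldc for control speculation).  */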
8135 static bool
8136 ia64_needs_block_p (ds_t ts)
8138 if (ts & BEGIN_DATA)
8139 return !mflag_sched_spec_ldc;
8141 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8143 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8146 /* Generate a recovery check for INSN.
8147 If LABEL is nonnull, generate a branchy recovery check that branches
8148 to LABEL.  Otherwise, generate a simple in-line check. */
8149 static rtx
8150 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8152 rtx op1, pat, check_pat;
8153 gen_func_t gen_check;
8154 int mode_no;
8156 mode_no = get_mode_no_for_insn (insn);
8157 gcc_assert (mode_no >= 0);
8159 if (label)
8160 op1 = label;
8161 else
8163 gcc_assert (!ia64_needs_block_p (ds));
8164 op1 = copy_rtx (recog_data.operand[1]);
8167 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8168 true);
8170 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8172 pat = PATTERN (insn);
8173 if (GET_CODE (pat) == COND_EXEC)
8174 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8175 check_pat);
8177 return check_pat;
8180 /* Return nonzero if X is a speculation recovery check. */
8181 static int
8182 ia64_spec_check_p (rtx x)
8184 x = PATTERN (x);
8185 if (GET_CODE (x) == COND_EXEC)
8186 x = COND_EXEC_CODE (x);
8187 if (GET_CODE (x) == SET)
8188 return ia64_spec_check_src_p (SET_SRC (x));
8189 return 0;
8192 /* Return nonzero if SRC belongs to a speculation recovery check. */
8193 static int
8194 ia64_spec_check_src_p (rtx src)
8196 if (GET_CODE (src) == IF_THEN_ELSE)
8198 rtx t;
8200 t = XEXP (src, 0);
8201 if (GET_CODE (t) == NE)
8203 t = XEXP (t, 0);
8205 if (GET_CODE (t) == UNSPEC)
8207 int code;
8209 code = XINT (t, 1);
8211 if (code == UNSPEC_LDCCLR
8212 || code == UNSPEC_LDCNC
8213 || code == UNSPEC_CHKACLR
8214 || code == UNSPEC_CHKANC
8215 || code == UNSPEC_CHKS)
8217 gcc_assert (code != 0);
8218 return code;
8223 return 0;
8227 /* The following page contains abstract data `bundle states' which are
8228 used for bundling insns (inserting nops and template generation). */
8230 /* The following describes state of insn bundling. */
8232 struct bundle_state
8234 /* Unique bundle state number to identify them in the debugging
8235 output */
8236 int unique_num;
8237 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8238 /* number of nops before and after the insn */
8239 short before_nops_num, after_nops_num;
8240 int insn_num; /* insn number (0 - for the initial state, 1 - for the 1st
8241 insn) */
8242 int cost; /* cost of the state in cycles */
8243 int accumulated_insns_num; /* number of all previous insns including
8244 nops.  An L insn is counted as 2 insns */
8245 int branch_deviation; /* deviation of previous branches from 3rd slots */
8246 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8247 struct bundle_state *next; /* next state with the same insn_num */
8248 struct bundle_state *originator; /* originator (previous insn state) */
8249 /* All bundle states are in the following chain. */
8250 struct bundle_state *allocated_states_chain;
8251 /* The DFA State after issuing the insn and the nops. */
8252 state_t dfa_state;
8255 /* The following maps an insn number to the corresponding bundle state. */
8257 static struct bundle_state **index_to_bundle_states;
8259 /* The unique number of the next bundle state. */
8261 static int bundle_states_num;
8263 /* All allocated bundle states are in the following chain. */
8265 static struct bundle_state *allocated_bundle_states_chain;
8267 /* All allocated but not used bundle states are in the following
8268 chain. */
8270 static struct bundle_state *free_bundle_state_chain;
8273 /* The following function returns a free bundle state. */
8275 static struct bundle_state *
8276 get_free_bundle_state (void)
8278 struct bundle_state *result;
8280 if (free_bundle_state_chain != NULL)
8282 result = free_bundle_state_chain;
8283 free_bundle_state_chain = result->next;
8285 else
8287 result = XNEW (struct bundle_state);
8288 result->dfa_state = xmalloc (dfa_state_size);
8289 result->allocated_states_chain = allocated_bundle_states_chain;
8290 allocated_bundle_states_chain = result;
8292 result->unique_num = bundle_states_num++;
8293 return result;
8297 /* The following function frees the given bundle state. */
8299 static void
8300 free_bundle_state (struct bundle_state *state)
8302 state->next = free_bundle_state_chain;
8303 free_bundle_state_chain = state;
8306 /* Start work with abstract data `bundle states'. */
8308 static void
8309 initiate_bundle_states (void)
8311 bundle_states_num = 0;
8312 free_bundle_state_chain = NULL;
8313 allocated_bundle_states_chain = NULL;
8316 /* Finish work with abstract data `bundle states'. */
8318 static void
8319 finish_bundle_states (void)
8321 struct bundle_state *curr_state, *next_state;
8323 for (curr_state = allocated_bundle_states_chain;
8324 curr_state != NULL;
8325 curr_state = next_state)
8327 next_state = curr_state->allocated_states_chain;
8328 free (curr_state->dfa_state);
8329 free (curr_state);
8333 /* Hash table of the bundle states. The key is dfa_state and insn_num
8334 of the bundle states. */
8336 static htab_t bundle_state_table;
8338 /* The function returns hash of BUNDLE_STATE. */
8340 static unsigned
8341 bundle_state_hash (const void *bundle_state)
8343 const struct bundle_state *const state
8344 = (const struct bundle_state *) bundle_state;
8345 unsigned result, i;
8347 for (result = i = 0; i < dfa_state_size; i++)
8348 result += (((unsigned char *) state->dfa_state) [i]
8349 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8350 return result + state->insn_num;
8353 /* The function returns nonzero if the bundle state keys are equal. */
8355 static int
8356 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8358 const struct bundle_state *const state1
8359 = (const struct bundle_state *) bundle_state_1;
8360 const struct bundle_state *const state2
8361 = (const struct bundle_state *) bundle_state_2;
8363 return (state1->insn_num == state2->insn_num
8364 && memcmp (state1->dfa_state, state2->dfa_state,
8365 dfa_state_size) == 0);
8368 /* The function inserts the BUNDLE_STATE into the hash table. The
8369 function returns nonzero if the bundle has been inserted into the
8370 table. The table contains the best bundle state with given key. */
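/* States with the same key are compared lexicographically: lower cost
   first, then fewer accumulated insns (i.e. fewer nops), then smaller
   branch deviation, then fewer stop bits in the middle of bundles.  */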
8372 static int
8373 insert_bundle_state (struct bundle_state *bundle_state)
8375 void **entry_ptr;
8377 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8378 if (*entry_ptr == NULL)
8380 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8381 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8382 *entry_ptr = (void *) bundle_state;
8383 return TRUE;
8385 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8386 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8387 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8388 > bundle_state->accumulated_insns_num
8389 || (((struct bundle_state *)
8390 *entry_ptr)->accumulated_insns_num
8391 == bundle_state->accumulated_insns_num
8392 && (((struct bundle_state *)
8393 *entry_ptr)->branch_deviation
8394 > bundle_state->branch_deviation
8395 || (((struct bundle_state *)
8396 *entry_ptr)->branch_deviation
8397 == bundle_state->branch_deviation
8398 && ((struct bundle_state *)
8399 *entry_ptr)->middle_bundle_stops
8400 > bundle_state->middle_bundle_stops))))))
8403 struct bundle_state temp;
8405 temp = *(struct bundle_state *) *entry_ptr;
8406 *(struct bundle_state *) *entry_ptr = *bundle_state;
8407 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8408 *bundle_state = temp;
8410 return FALSE;
8413 /* Start work with the hash table. */
8415 static void
8416 initiate_bundle_state_table (void)
8418 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8419 (htab_del) 0);
8422 /* Finish work with the hash table. */
8424 static void
8425 finish_bundle_state_table (void)
8427 htab_delete (bundle_state_table);
8432 /* The following variable is an insn `nop' used to check bundle states
8433 with different numbers of inserted nops. */
8435 static rtx ia64_nop;
8437 /* The following function tries to issue NOPS_NUM nops for the current
8438 state without advancing processor cycle. If it failed, the
8439 function returns FALSE and frees the current state. */
8441 static int
8442 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8444 int i;
8446 for (i = 0; i < nops_num; i++)
8447 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8449 free_bundle_state (curr_state);
8450 return FALSE;
8452 return TRUE;
8455 /* The following function tries to issue INSN for the current
8456 state without advancing processor cycle. If it failed, the
8457 function returns FALSE and frees the current state. */
8459 static int
8460 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8462 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8464 free_bundle_state (curr_state);
8465 return FALSE;
8467 return TRUE;
8470 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8471 starting from ORIGINATOR without advancing the processor cycle.  If
8472 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8473 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8474 If successful, the function creates a new bundle state and inserts
8475 it into the hash table and into `index_to_bundle_states'. */
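/* Three cases are handled below: an insn group barrier (which occupies no
   slot and must not start a new cycle), an ordinary insn that continues
   the current cycle (no TImode), and an insn that starts a new cycle
   (TImode), for which the DFA is advanced to the next cycle first.  */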
8477 static void
8478 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8479 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8481 struct bundle_state *curr_state;
8483 curr_state = get_free_bundle_state ();
8484 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8485 curr_state->insn = insn;
8486 curr_state->insn_num = originator->insn_num + 1;
8487 curr_state->cost = originator->cost;
8488 curr_state->originator = originator;
8489 curr_state->before_nops_num = before_nops_num;
8490 curr_state->after_nops_num = 0;
8491 curr_state->accumulated_insns_num
8492 = originator->accumulated_insns_num + before_nops_num;
8493 curr_state->branch_deviation = originator->branch_deviation;
8494 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8495 gcc_assert (insn);
8496 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8498 gcc_assert (GET_MODE (insn) != TImode);
8499 if (!try_issue_nops (curr_state, before_nops_num))
8500 return;
8501 if (!try_issue_insn (curr_state, insn))
8502 return;
8503 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8504 if (curr_state->accumulated_insns_num % 3 != 0)
8505 curr_state->middle_bundle_stops++;
8506 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8507 && curr_state->accumulated_insns_num % 3 != 0)
8509 free_bundle_state (curr_state);
8510 return;
8513 else if (GET_MODE (insn) != TImode)
8515 if (!try_issue_nops (curr_state, before_nops_num))
8516 return;
8517 if (!try_issue_insn (curr_state, insn))
8518 return;
8519 curr_state->accumulated_insns_num++;
8520 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8521 && asm_noperands (PATTERN (insn)) < 0);
8523 if (ia64_safe_type (insn) == TYPE_L)
8524 curr_state->accumulated_insns_num++;
8526 else
8528 /* If this is an insn that must be first in a group, then don't allow
8529 nops to be emitted before it. Currently, alloc is the only such
8530 supported instruction. */
8531 /* ??? The bundling automatons should handle this for us, but they do
8532 not yet have support for the first_insn attribute. */
8533 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8535 free_bundle_state (curr_state);
8536 return;
8539 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8540 state_transition (curr_state->dfa_state, NULL);
8541 curr_state->cost++;
8542 if (!try_issue_nops (curr_state, before_nops_num))
8543 return;
8544 if (!try_issue_insn (curr_state, insn))
8545 return;
8546 curr_state->accumulated_insns_num++;
8547 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8548 || asm_noperands (PATTERN (insn)) >= 0)
8550 /* Finish bundle containing asm insn. */
8551 curr_state->after_nops_num
8552 = 3 - curr_state->accumulated_insns_num % 3;
8553 curr_state->accumulated_insns_num
8554 += 3 - curr_state->accumulated_insns_num % 3;
8556 else if (ia64_safe_type (insn) == TYPE_L)
8557 curr_state->accumulated_insns_num++;
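/* Record how far this branch lands from the 3rd slot of its bundle;
   the best-state selection below prefers a smaller total deviation.  */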
8559 if (ia64_safe_type (insn) == TYPE_B)
8560 curr_state->branch_deviation
8561 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8562 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8564 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8566 state_t dfa_state;
8567 struct bundle_state *curr_state1;
8568 struct bundle_state *allocated_states_chain;
8570 curr_state1 = get_free_bundle_state ();
8571 dfa_state = curr_state1->dfa_state;
8572 allocated_states_chain = curr_state1->allocated_states_chain;
8573 *curr_state1 = *curr_state;
8574 curr_state1->dfa_state = dfa_state;
8575 curr_state1->allocated_states_chain = allocated_states_chain;
8576 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8577 dfa_state_size);
8578 curr_state = curr_state1;
8580 if (!try_issue_nops (curr_state,
8581 3 - curr_state->accumulated_insns_num % 3))
8582 return;
8583 curr_state->after_nops_num
8584 = 3 - curr_state->accumulated_insns_num % 3;
8585 curr_state->accumulated_insns_num
8586 += 3 - curr_state->accumulated_insns_num % 3;
8588 if (!insert_bundle_state (curr_state))
8589 free_bundle_state (curr_state);
8590 return;
8593 /* The following function returns the position in the two-bundle window
8594 for the given STATE. */
8596 static int
8597 get_max_pos (state_t state)
8599 if (cpu_unit_reservation_p (state, pos_6))
8600 return 6;
8601 else if (cpu_unit_reservation_p (state, pos_5))
8602 return 5;
8603 else if (cpu_unit_reservation_p (state, pos_4))
8604 return 4;
8605 else if (cpu_unit_reservation_p (state, pos_3))
8606 return 3;
8607 else if (cpu_unit_reservation_p (state, pos_2))
8608 return 2;
8609 else if (cpu_unit_reservation_p (state, pos_1))
8610 return 1;
8611 else
8612 return 0;
8615 /* The function returns the code of a possible template for the given
8616 position and state.  The function should be called only with two
8617 values of position: 3 or 6.  We avoid generating F NOPs by putting
8618 templates containing F insns at the end of the template search,
8619 because of an undocumented anomaly in McKinley-derived cores which can
8620 cause stalls if an F-unit insn (including a NOP) is issued within a
8621 six-cycle window after reading certain application registers (such
8622 as ar.bsp).  Furthermore, power considerations also argue against
8623 the use of F-unit instructions unless they're really needed. */
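/* The returned number is the template code passed to gen_bundle_selector;
   judging from the unit names checked below, 0 through 9 correspond to
   .mii, .mmi, .mfi, .mmf, .bbb, .mbb, .mib, .mmb, .mfb and .mlx.  */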
8625 static int
8626 get_template (state_t state, int pos)
8628 switch (pos)
8630 case 3:
8631 if (cpu_unit_reservation_p (state, _0mmi_))
8632 return 1;
8633 else if (cpu_unit_reservation_p (state, _0mii_))
8634 return 0;
8635 else if (cpu_unit_reservation_p (state, _0mmb_))
8636 return 7;
8637 else if (cpu_unit_reservation_p (state, _0mib_))
8638 return 6;
8639 else if (cpu_unit_reservation_p (state, _0mbb_))
8640 return 5;
8641 else if (cpu_unit_reservation_p (state, _0bbb_))
8642 return 4;
8643 else if (cpu_unit_reservation_p (state, _0mmf_))
8644 return 3;
8645 else if (cpu_unit_reservation_p (state, _0mfi_))
8646 return 2;
8647 else if (cpu_unit_reservation_p (state, _0mfb_))
8648 return 8;
8649 else if (cpu_unit_reservation_p (state, _0mlx_))
8650 return 9;
8651 else
8652 gcc_unreachable ();
8653 case 6:
8654 if (cpu_unit_reservation_p (state, _1mmi_))
8655 return 1;
8656 else if (cpu_unit_reservation_p (state, _1mii_))
8657 return 0;
8658 else if (cpu_unit_reservation_p (state, _1mmb_))
8659 return 7;
8660 else if (cpu_unit_reservation_p (state, _1mib_))
8661 return 6;
8662 else if (cpu_unit_reservation_p (state, _1mbb_))
8663 return 5;
8664 else if (cpu_unit_reservation_p (state, _1bbb_))
8665 return 4;
8666 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8667 return 3;
8668 else if (cpu_unit_reservation_p (state, _1mfi_))
8669 return 2;
8670 else if (cpu_unit_reservation_p (state, _1mfb_))
8671 return 8;
8672 else if (cpu_unit_reservation_p (state, _1mlx_))
8673 return 9;
8674 else
8675 gcc_unreachable ();
8676 default:
8677 gcc_unreachable ();
8681 /* True when INSN is important for bundling. */
8682 static bool
8683 important_for_bundling_p (rtx insn)
8685 return (INSN_P (insn)
8686 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8687 && GET_CODE (PATTERN (insn)) != USE
8688 && GET_CODE (PATTERN (insn)) != CLOBBER);
8691 /* The following function returns the first insn important for insn
8692 bundling starting at INSN and before TAIL, or NULL_RTX if none. */
8694 static rtx
8695 get_next_important_insn (rtx insn, rtx tail)
8697 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8698 if (important_for_bundling_p (insn))
8699 return insn;
8700 return NULL_RTX;
8703 /* Add a bundle selector TEMPLATE0 before INSN. */
8705 static void
8706 ia64_add_bundle_selector_before (int template0, rtx insn)
8708 rtx b = gen_bundle_selector (GEN_INT (template0));
8710 ia64_emit_insn_before (b, insn);
8711 #if NR_BUNDLES == 10
8712 if ((template0 == 4 || template0 == 5)
8713 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8715 int i;
8716 rtx note = NULL_RTX;
8718 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8719 first or second slot.  If it is and has a REG_EH_REGION note, copy
8720 the note to the following nops, as br.call sets rp to the address
8721 of the following bundle and therefore an EH region end must be on
8722 a bundle boundary. */
8723 insn = PREV_INSN (insn);
8724 for (i = 0; i < 3; i++)
8727 insn = next_active_insn (insn);
8728 while (GET_CODE (insn) == INSN
8729 && get_attr_empty (insn) == EMPTY_YES);
8730 if (GET_CODE (insn) == CALL_INSN)
8731 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8732 else if (note)
8734 int code;
8736 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8737 || code == CODE_FOR_nop_b);
8738 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8739 note = NULL_RTX;
8740 else
8741 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8745 #endif
8748 /* The following function does insn bundling.  Bundling means
8749 inserting templates and nop insns to fit insn groups into permitted
8750 templates.  Instruction scheduling uses an NDFA (non-deterministic
8751 finite automaton) encoding information about the templates and the
8752 inserted nops.  Nondeterminism of the automaton permits following
8753 all possible insn sequences very quickly.
8755 Unfortunately it is not possible to get information about the inserted
8756 nop insns and the used templates from the automaton states.  The
8757 automaton only says that we can issue an insn, possibly inserting
8758 some nops before it and using some template.  Therefore insn
8759 bundling in this function is implemented by using a DFA
8760 (deterministic finite automaton).  We follow all possible insn
8761 sequences by inserting 0-2 nops (that is what the NDFA describes for
8762 insn scheduling) before/after each insn being bundled.  We know the
8763 start of a simulated processor cycle from insn scheduling (an insn
8764 starting a new cycle has TImode).
8766 A simple implementation of insn bundling would create an enormous
8767 number of possible insn sequences satisfying the information about new
8768 cycle ticks taken from insn scheduling.  To make the algorithm
8769 practical we use dynamic programming.  Each decision (about
8770 inserting nops, and implicitly about previous decisions) is described
8771 by the structure bundle_state (see above).  If we generate the same
8772 bundle state (the key is the automaton state after issuing the insns
8773 and nops for it), we reuse the already generated one.  As a
8774 consequence we reject some decisions which cannot improve the
8775 solution and reduce the memory used by the algorithm.
8777 When we reach the end of an EBB (extended basic block), we choose the
8778 best sequence and then, moving back through the EBB, insert templates
8779 for the best alternative.  The templates are found by querying the
8780 automaton state for each insn in the chosen bundle states.
8782 So the algorithm makes two (forward and backward) passes through the
8783 EBB. */
8785 static void
8786 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8788 struct bundle_state *curr_state, *next_state, *best_state;
8789 rtx insn, next_insn;
8790 int insn_num;
8791 int i, bundle_end_p, only_bundle_end_p, asm_p;
8792 int pos = 0, max_pos, template0, template1;
8793 rtx b;
8794 rtx nop;
8795 enum attr_type type;
8797 insn_num = 0;
8798 /* Count insns in the EBB. */
8799 for (insn = NEXT_INSN (prev_head_insn);
8800 insn && insn != tail;
8801 insn = NEXT_INSN (insn))
8802 if (INSN_P (insn))
8803 insn_num++;
8804 if (insn_num == 0)
8805 return;
8806 bundling_p = 1;
8807 dfa_clean_insn_cache ();
8808 initiate_bundle_state_table ();
8809 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8810 /* First (forward) pass -- generation of bundle states. */
8811 curr_state = get_free_bundle_state ();
8812 curr_state->insn = NULL;
8813 curr_state->before_nops_num = 0;
8814 curr_state->after_nops_num = 0;
8815 curr_state->insn_num = 0;
8816 curr_state->cost = 0;
8817 curr_state->accumulated_insns_num = 0;
8818 curr_state->branch_deviation = 0;
8819 curr_state->middle_bundle_stops = 0;
8820 curr_state->next = NULL;
8821 curr_state->originator = NULL;
8822 state_reset (curr_state->dfa_state);
8823 index_to_bundle_states [0] = curr_state;
8824 insn_num = 0;
8825 /* Shift the cycle mark if it is put on an insn which could be ignored. */
8826 for (insn = NEXT_INSN (prev_head_insn);
8827 insn != tail;
8828 insn = NEXT_INSN (insn))
8829 if (INSN_P (insn)
8830 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8831 || GET_CODE (PATTERN (insn)) == USE
8832 || GET_CODE (PATTERN (insn)) == CLOBBER)
8833 && GET_MODE (insn) == TImode)
8835 PUT_MODE (insn, VOIDmode);
8836 for (next_insn = NEXT_INSN (insn);
8837 next_insn != tail;
8838 next_insn = NEXT_INSN (next_insn))
8839 if (INSN_P (next_insn)
8840 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8841 && GET_CODE (PATTERN (next_insn)) != USE
8842 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8843 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8845 PUT_MODE (next_insn, TImode);
8846 break;
8849 /* Forward pass: generation of bundle states. */
8850 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8851 insn != NULL_RTX;
8852 insn = next_insn)
8854 gcc_assert (INSN_P (insn)
8855 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8856 && GET_CODE (PATTERN (insn)) != USE
8857 && GET_CODE (PATTERN (insn)) != CLOBBER);
8858 type = ia64_safe_type (insn);
8859 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8860 insn_num++;
8861 index_to_bundle_states [insn_num] = NULL;
8862 for (curr_state = index_to_bundle_states [insn_num - 1];
8863 curr_state != NULL;
8864 curr_state = next_state)
8866 pos = curr_state->accumulated_insns_num % 3;
8867 next_state = curr_state->next;
8868 /* We must fill up the current bundle in order to start a
8869 subsequent asm insn in a new bundle.  An asm insn is always
8870 placed in a separate bundle. */
8871 only_bundle_end_p
8872 = (next_insn != NULL_RTX
8873 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8874 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8875 /* We may fill up the current bundle if it is the cycle end
8876 without a group barrier. */
8877 bundle_end_p
8878 = (only_bundle_end_p || next_insn == NULL_RTX
8879 || (GET_MODE (next_insn) == TImode
8880 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8881 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8882 || type == TYPE_S)
8883 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8884 only_bundle_end_p);
8885 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8886 only_bundle_end_p);
8887 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8888 only_bundle_end_p);
8890 gcc_assert (index_to_bundle_states [insn_num]);
8891 for (curr_state = index_to_bundle_states [insn_num];
8892 curr_state != NULL;
8893 curr_state = curr_state->next)
8894 if (verbose >= 2 && dump)
8896 /* This structure is taken from the generated code of the
8897 pipeline hazard recognizer (see file insn-attrtab.c).
8898 Please don't forget to change the structure if a new
8899 automaton is added to the .md file. */
8900 struct DFA_chip
8902 unsigned short one_automaton_state;
8903 unsigned short oneb_automaton_state;
8904 unsigned short two_automaton_state;
8905 unsigned short twob_automaton_state;
8908 fprintf
8909 (dump,
8910 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8911 curr_state->unique_num,
8912 (curr_state->originator == NULL
8913 ? -1 : curr_state->originator->unique_num),
8914 curr_state->cost,
8915 curr_state->before_nops_num, curr_state->after_nops_num,
8916 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8917 curr_state->middle_bundle_stops,
8918 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8919 INSN_UID (insn));
8923 /* We should find a solution because the 2nd insn scheduling has
8924 found one. */
8925 gcc_assert (index_to_bundle_states [insn_num]);
8926 /* Find a state corresponding to the best insn sequence. */
8927 best_state = NULL;
8928 for (curr_state = index_to_bundle_states [insn_num];
8929 curr_state != NULL;
8930 curr_state = curr_state->next)
8931 /* We only look at states whose last bundle is completely filled.
8932 Among those, we prefer, in order: minimal cost, the fewest inserted
8933 nops, branch insns placed closest to the 3rd slots, and the fewest
8934 stop bits in the middle of bundles. */
8935 if (curr_state->accumulated_insns_num % 3 == 0
8936 && (best_state == NULL || best_state->cost > curr_state->cost
8937 || (best_state->cost == curr_state->cost
8938 && (curr_state->accumulated_insns_num
8939 < best_state->accumulated_insns_num
8940 || (curr_state->accumulated_insns_num
8941 == best_state->accumulated_insns_num
8942 && (curr_state->branch_deviation
8943 < best_state->branch_deviation
8944 || (curr_state->branch_deviation
8945 == best_state->branch_deviation
8946 && curr_state->middle_bundle_stops
8947 < best_state->middle_bundle_stops)))))))
8948 best_state = curr_state;
8949 /* Second (backward) pass: adding nops and templates. */
8950 gcc_assert (best_state);
8951 insn_num = best_state->before_nops_num;
8952 template0 = template1 = -1;
8953 for (curr_state = best_state;
8954 curr_state->originator != NULL;
8955 curr_state = curr_state->originator)
8957 insn = curr_state->insn;
8958 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8959 || asm_noperands (PATTERN (insn)) >= 0);
8960 insn_num++;
8961 if (verbose >= 2 && dump)
8963 struct DFA_chip
8965 unsigned short one_automaton_state;
8966 unsigned short oneb_automaton_state;
8967 unsigned short two_automaton_state;
8968 unsigned short twob_automaton_state;
8971 fprintf
8972 (dump,
8973 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8974 curr_state->unique_num,
8975 (curr_state->originator == NULL
8976 ? -1 : curr_state->originator->unique_num),
8977 curr_state->cost,
8978 curr_state->before_nops_num, curr_state->after_nops_num,
8979 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8980 curr_state->middle_bundle_stops,
8981 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8982 INSN_UID (insn));
8984 /* Find the position in the current bundle window.  The window can
8985 contain at most two bundles.  A two-bundle window means that
8986 the processor will make two bundle rotations. */
8987 max_pos = get_max_pos (curr_state->dfa_state);
8988 if (max_pos == 6
8989 /* The following (negative template number) means that the
8990 processor did one bundle rotation. */
8991 || (max_pos == 3 && template0 < 0))
8993 /* We are at the end of the window -- find template(s) for
8994 its bundle(s). */
8995 pos = max_pos;
8996 if (max_pos == 3)
8997 template0 = get_template (curr_state->dfa_state, 3);
8998 else
9000 template1 = get_template (curr_state->dfa_state, 3);
9001 template0 = get_template (curr_state->dfa_state, 6);
9004 if (max_pos > 3 && template1 < 0)
9005 /* This may happen when we have a stop inside a bundle. */
9007 gcc_assert (pos <= 3);
9008 template1 = get_template (curr_state->dfa_state, 3);
9009 pos += 3;
9011 if (!asm_p)
9012 /* Emit nops after the current insn. */
9013 for (i = 0; i < curr_state->after_nops_num; i++)
9015 nop = gen_nop ();
9016 emit_insn_after (nop, insn);
9017 pos--;
9018 gcc_assert (pos >= 0);
9019 if (pos % 3 == 0)
9021 /* We are at the start of a bundle: emit the template
9022 (it should be defined). */
9023 gcc_assert (template0 >= 0);
9024 ia64_add_bundle_selector_before (template0, nop);
9025 /* If we have a two-bundle window, we make one bundle
9026 rotation.  Otherwise template0 will be undefined
9027 (a negative value). */
9028 template0 = template1;
9029 template1 = -1;
9032 /* Move the position backward in the window.  A group barrier has
9033 no slot.  An asm insn takes a whole bundle. */
9034 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9035 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9036 && asm_noperands (PATTERN (insn)) < 0)
9037 pos--;
9038 /* A long insn takes 2 slots. */
9039 if (ia64_safe_type (insn) == TYPE_L)
9040 pos--;
9041 gcc_assert (pos >= 0);
9042 if (pos % 3 == 0
9043 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9044 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9045 && asm_noperands (PATTERN (insn)) < 0)
9047 /* The current insn is at the bundle start: emit the
9048 template. */
9049 gcc_assert (template0 >= 0);
9050 ia64_add_bundle_selector_before (template0, insn);
9051 b = PREV_INSN (insn);
9052 insn = b;
9053 /* See comment above in analogous place for emitting nops
9054 after the insn. */
9055 template0 = template1;
9056 template1 = -1;
9058 /* Emit nops before the current insn. */
9059 for (i = 0; i < curr_state->before_nops_num; i++)
9061 nop = gen_nop ();
9062 ia64_emit_insn_before (nop, insn);
9063 nop = PREV_INSN (insn);
9064 insn = nop;
9065 pos--;
9066 gcc_assert (pos >= 0);
9067 if (pos % 3 == 0)
9069 /* See comment above in analogous place for emitting nops
9070 after the insn. */
9071 gcc_assert (template0 >= 0);
9072 ia64_add_bundle_selector_before (template0, insn);
9073 b = PREV_INSN (insn);
9074 insn = b;
9075 template0 = template1;
9076 template1 = -1;
9081 #ifdef ENABLE_CHECKING
9083 /* Assert that middle_bundle_stops was calculated correctly. */
9084 int num = best_state->middle_bundle_stops;
9085 bool start_bundle = true, end_bundle = false;
9087 for (insn = NEXT_INSN (prev_head_insn);
9088 insn && insn != tail;
9089 insn = NEXT_INSN (insn))
9091 if (!INSN_P (insn))
9092 continue;
9093 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9094 start_bundle = true;
9095 else
9097 rtx next_insn;
9099 for (next_insn = NEXT_INSN (insn);
9100 next_insn && next_insn != tail;
9101 next_insn = NEXT_INSN (next_insn))
9102 if (INSN_P (next_insn)
9103 && (ia64_safe_itanium_class (next_insn)
9104 != ITANIUM_CLASS_IGNORE
9105 || recog_memoized (next_insn)
9106 == CODE_FOR_bundle_selector)
9107 && GET_CODE (PATTERN (next_insn)) != USE
9108 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9109 break;
9111 end_bundle = next_insn == NULL_RTX
9112 || next_insn == tail
9113 || (INSN_P (next_insn)
9114 && recog_memoized (next_insn)
9115 == CODE_FOR_bundle_selector);
9116 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9117 && !start_bundle && !end_bundle
9118 && next_insn
9119 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9120 && asm_noperands (PATTERN (next_insn)) < 0)
9121 num--;
9123 start_bundle = false;
9127 gcc_assert (num == 0);
9129 #endif
9131 free (index_to_bundle_states);
9132 finish_bundle_state_table ();
9133 bundling_p = 0;
9134 dfa_clean_insn_cache ();
9137 /* The following function is called at the end of scheduling BB or
9138 EBB. After reload, it inserts stop bits and does insn bundling. */
9140 static void
9141 ia64_sched_finish (FILE *dump, int sched_verbose)
9143 if (sched_verbose)
9144 fprintf (dump, "// Finishing schedule.\n");
9145 if (!reload_completed)
9146 return;
9147 if (reload_completed)
9149 final_emit_insn_group_barriers (dump);
9150 bundling (dump, sched_verbose, current_sched_info->prev_head,
9151 current_sched_info->next_tail);
9152 if (sched_verbose && dump)
9153 fprintf (dump, "// finishing %d-%d\n",
9154 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9155 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9157 return;
9161 /* The following function inserts stop bits in scheduled BB or EBB. */
9163 static void
9164 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9166 rtx insn;
9167 int need_barrier_p = 0;
9168 int seen_good_insn = 0;
9170 init_insn_group_barriers ();
9172 for (insn = NEXT_INSN (current_sched_info->prev_head);
9173 insn != current_sched_info->next_tail;
9174 insn = NEXT_INSN (insn))
9176 if (GET_CODE (insn) == BARRIER)
9178 rtx last = prev_active_insn (insn);
9180 if (! last)
9181 continue;
9182 if (GET_CODE (last) == JUMP_INSN
9183 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9184 last = prev_active_insn (last);
9185 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9186 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9188 init_insn_group_barriers ();
9189 seen_good_insn = 0;
9190 need_barrier_p = 0;
9192 else if (NONDEBUG_INSN_P (insn))
9194 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9196 init_insn_group_barriers ();
9197 seen_good_insn = 0;
9198 need_barrier_p = 0;
9200 else if (need_barrier_p || group_barrier_needed (insn)
9201 || (mflag_sched_stop_bits_after_every_cycle
9202 && GET_MODE (insn) == TImode
9203 && seen_good_insn))
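/* A stop bit is required here.  With TARGET_EARLY_STOP_BITS we try to
   hoist it back to the start of the current cycle instead of placing it
   immediately before INSN.  */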
9205 if (TARGET_EARLY_STOP_BITS)
9207 rtx last;
9209 for (last = insn;
9210 last != current_sched_info->prev_head;
9211 last = PREV_INSN (last))
9212 if (INSN_P (last) && GET_MODE (last) == TImode
9213 && stops_p [INSN_UID (last)])
9214 break;
9215 if (last == current_sched_info->prev_head)
9216 last = insn;
9217 last = prev_active_insn (last);
9218 if (last
9219 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9220 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9221 last);
9222 init_insn_group_barriers ();
9223 for (last = NEXT_INSN (last);
9224 last != insn;
9225 last = NEXT_INSN (last))
9226 if (INSN_P (last))
9228 group_barrier_needed (last);
9229 if (recog_memoized (last) >= 0
9230 && important_for_bundling_p (last))
9231 seen_good_insn = 1;
9234 else
9236 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9237 insn);
9238 init_insn_group_barriers ();
9239 seen_good_insn = 0;
9241 group_barrier_needed (insn);
9242 if (recog_memoized (insn) >= 0
9243 && important_for_bundling_p (insn))
9244 seen_good_insn = 1;
9246 else if (recog_memoized (insn) >= 0
9247 && important_for_bundling_p (insn))
9248 seen_good_insn = 1;
9249 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9250 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9251 || asm_noperands (PATTERN (insn)) >= 0);
9258 /* The following function returns the lookahead depth used by the
9259 first-cycle multipass DFA insn scheduler. */
9261 static int
9262 ia64_first_cycle_multipass_dfa_lookahead (void)
9264 return (reload_completed ? 6 : 4);
9267 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9269 static void
9270 ia64_init_dfa_pre_cycle_insn (void)
9272 if (temp_dfa_state == NULL)
9274 dfa_state_size = state_size ();
9275 temp_dfa_state = xmalloc (dfa_state_size);
9276 prev_cycle_state = xmalloc (dfa_state_size);
9278 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9279 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9280 recog_memoized (dfa_pre_cycle_insn);
9281 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9282 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9283 recog_memoized (dfa_stop_insn);
9286 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9287 used by the DFA insn scheduler. */
9289 static rtx
9290 ia64_dfa_pre_cycle_insn (void)
9292 return dfa_pre_cycle_insn;
9295 /* The following function returns TRUE if PRODUCER (of type ilog or
9296 ld) produces an address for CONSUMER (of type st or stf). */
9299 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9301 rtx dest, reg, mem;
9303 gcc_assert (producer && consumer);
9304 dest = ia64_single_set (producer);
9305 gcc_assert (dest);
9306 reg = SET_DEST (dest);
9307 gcc_assert (reg);
9308 if (GET_CODE (reg) == SUBREG)
9309 reg = SUBREG_REG (reg);
9310 gcc_assert (GET_CODE (reg) == REG);
9312 dest = ia64_single_set (consumer);
9313 gcc_assert (dest);
9314 mem = SET_DEST (dest);
9315 gcc_assert (mem && GET_CODE (mem) == MEM);
9316 return reg_mentioned_p (reg, mem);
9319 /* The following function returns TRUE if PRODUCER (of type ilog or
9320 ld) produces an address for CONSUMER (of type ld or fld). */
9323 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9325 rtx dest, src, reg, mem;
9327 gcc_assert (producer && consumer);
9328 dest = ia64_single_set (producer);
9329 gcc_assert (dest);
9330 reg = SET_DEST (dest);
9331 gcc_assert (reg);
9332 if (GET_CODE (reg) == SUBREG)
9333 reg = SUBREG_REG (reg);
9334 gcc_assert (GET_CODE (reg) == REG);
9336 src = ia64_single_set (consumer);
9337 gcc_assert (src);
9338 mem = SET_SRC (src);
9339 gcc_assert (mem);
9341 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9342 mem = XVECEXP (mem, 0, 0);
9343 else if (GET_CODE (mem) == IF_THEN_ELSE)
9344 /* ??? Is this bypass necessary for ld.c? */
9346 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9347 mem = XEXP (mem, 1);
9350 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9351 mem = XEXP (mem, 0);
9353 if (GET_CODE (mem) == UNSPEC)
9355 int c = XINT (mem, 1);
9357 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9358 || c == UNSPEC_LDSA);
9359 mem = XVECEXP (mem, 0, 0);
9362 /* Note that LO_SUM is used for GOT loads. */
9363 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9365 return reg_mentioned_p (reg, mem);
9368 /* The following function returns TRUE if INSN produces an address for a
9369 load/store insn.  We place such insns into an M slot because that
9370 decreases their latency. */
9373 ia64_produce_address_p (rtx insn)
9375 return insn->call;
9379 /* Emit pseudo-ops for the assembler to describe predicate relations.
9380 At present this assumes that we only consider predicate pairs to
9381 be mutex, and that the assembler can deduce proper values from
9382 straight-line code. */
9384 static void
9385 emit_predicate_relation_info (void)
9387 basic_block bb;
9389 FOR_EACH_BB_REVERSE (bb)
9391 int r;
9392 rtx head = BB_HEAD (bb);
9394 /* We only need such notes at code labels. */
9395 if (GET_CODE (head) != CODE_LABEL)
9396 continue;
9397 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9398 head = NEXT_INSN (head);
9400 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9401 grabbing the entire block of predicate registers. */
9402 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9403 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9405 rtx p = gen_rtx_REG (BImode, r);
9406 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9407 if (head == BB_END (bb))
9408 BB_END (bb) = n;
9409 head = n;
9413 /* Look for conditional calls that do not return, and protect predicate
9414 relations around them. Otherwise the assembler will assume the call
9415 returns, and complain about uses of call-clobbered predicates after
9416 the call. */
9417 FOR_EACH_BB_REVERSE (bb)
9419 rtx insn = BB_HEAD (bb);
9421 while (1)
9423 if (GET_CODE (insn) == CALL_INSN
9424 && GET_CODE (PATTERN (insn)) == COND_EXEC
9425 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9427 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9428 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9429 if (BB_HEAD (bb) == insn)
9430 BB_HEAD (bb) = b;
9431 if (BB_END (bb) == insn)
9432 BB_END (bb) = a;
9435 if (insn == BB_END (bb))
9436 break;
9437 insn = NEXT_INSN (insn);
9442 /* Perform machine dependent operations on the rtl chain INSNS. */
9444 static void
9445 ia64_reorg (void)
9447 /* We are freeing block_for_insn in the toplev to keep compatibility
9448 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9449 compute_bb_for_insn ();
9451 /* If optimizing, we'll have split before scheduling. */
9452 if (optimize == 0)
9453 split_all_insns ();
9455 if (optimize && ia64_flag_schedule_insns2
9456 && dbg_cnt (ia64_sched2))
9458 timevar_push (TV_SCHED2);
9459 ia64_final_schedule = 1;
9461 initiate_bundle_states ();
9462 ia64_nop = make_insn_raw (gen_nop ());
9463 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9464 recog_memoized (ia64_nop);
9465 clocks_length = get_max_uid () + 1;
9466 stops_p = XCNEWVEC (char, clocks_length);
9468 if (ia64_tune == PROCESSOR_ITANIUM2)
9470 pos_1 = get_cpu_unit_code ("2_1");
9471 pos_2 = get_cpu_unit_code ("2_2");
9472 pos_3 = get_cpu_unit_code ("2_3");
9473 pos_4 = get_cpu_unit_code ("2_4");
9474 pos_5 = get_cpu_unit_code ("2_5");
9475 pos_6 = get_cpu_unit_code ("2_6");
9476 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9477 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9478 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9479 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9480 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9481 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9482 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9483 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9484 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9485 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9486 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9487 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9488 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9489 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9490 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9491 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9492 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9493 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9494 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9495 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9497 else
9499 pos_1 = get_cpu_unit_code ("1_1");
9500 pos_2 = get_cpu_unit_code ("1_2");
9501 pos_3 = get_cpu_unit_code ("1_3");
9502 pos_4 = get_cpu_unit_code ("1_4");
9503 pos_5 = get_cpu_unit_code ("1_5");
9504 pos_6 = get_cpu_unit_code ("1_6");
9505 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9506 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9507 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9508 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9509 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9510 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9511 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9512 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9513 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9514 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9515 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9516 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9517 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9518 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9519 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9520 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9521 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9522 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9523 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9524 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9527 if (flag_selective_scheduling2
9528 && !maybe_skip_selective_scheduling ())
9529 run_selective_scheduling ();
9530 else
9531 schedule_ebbs ();
9533 /* Redo the alignment computation, as it might have gone wrong. */
9534 compute_alignments ();
9536 /* We cannot reuse this one because it has been corrupted by the
9537 evil glat. */
9538 finish_bundle_states ();
9539 free (stops_p);
9540 stops_p = NULL;
9541 emit_insn_group_barriers (dump_file);
9543 ia64_final_schedule = 0;
9544 timevar_pop (TV_SCHED2);
9546 else
9547 emit_all_insn_group_barriers (dump_file);
9549 df_analyze ();
9551 /* A call must not be the last instruction in a function, so that the
9552 return address is still within the function, so that unwinding works
9553 properly. Note that IA-64 differs from dwarf2 on this point. */
9554 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9556 rtx insn;
9557 int saw_stop = 0;
9559 insn = get_last_insn ();
9560 if (! INSN_P (insn))
9561 insn = prev_active_insn (insn);
9562 if (insn)
9564 /* Skip over insns that expand to nothing. */
9565 while (GET_CODE (insn) == INSN
9566 && get_attr_empty (insn) == EMPTY_YES)
9568 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9569 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9570 saw_stop = 1;
9571 insn = prev_active_insn (insn);
9573 if (GET_CODE (insn) == CALL_INSN)
9575 if (! saw_stop)
9576 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9577 emit_insn (gen_break_f ());
9578 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9583 emit_predicate_relation_info ();
9585 if (ia64_flag_var_tracking)
9587 timevar_push (TV_VAR_TRACKING);
9588 variable_tracking_main ();
9589 timevar_pop (TV_VAR_TRACKING);
9591 df_finish_pass (false);
9594 /* Return true if REGNO is used by the epilogue. */
9597 ia64_epilogue_uses (int regno)
9599 switch (regno)
9601 case R_GR (1):
9602 /* With a call to a function in another module, we will write a new
9603 value to "gp". After returning from such a call, we need to make
9604 sure the function restores the original gp-value, even if the
9605 function itself does not use the gp anymore. */
9606 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9608 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9609 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9610 /* For functions defined with the syscall_linkage attribute, all
9611 input registers are marked as live at all function exits. This
9612 prevents the register allocator from using the input registers,
9613 which in turn makes it possible to restart a system call after
9614 an interrupt without having to save/restore the input registers.
9615 This also prevents kernel data from leaking to application code. */
9616 return lookup_attribute ("syscall_linkage",
9617 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
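/* Hypothetical declaration using the attribute tested above (illustrative
   only, not part of this file):

     extern long my_syscall_stub (long a0, long a1)
       __attribute__ ((syscall_linkage));

   For such a function, in0-in7 are reported as used by the epilogue and
   therefore stay live at every function exit. */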
9619 case R_BR (0):
9620 /* Conditional return patterns can't represent the use of `b0' as
9621 the return address, so we force the value live this way. */
9622 return 1;
9624 case AR_PFS_REGNUM:
9625 /* Likewise for ar.pfs, which is used by br.ret. */
9626 return 1;
9628 default:
9629 return 0;
9633 /* Return true if REGNO is used by the frame unwinder. */
9636 ia64_eh_uses (int regno)
9638 unsigned int r;
9640 if (! reload_completed)
9641 return 0;
9643 if (regno == 0)
9644 return 0;
9646 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9647 if (regno == current_frame_info.r[r]
9648 || regno == emitted_frame_related_regs[r])
9649 return 1;
9651 return 0;
9654 /* Return true if this goes in small data/bss. */
9656 /* ??? We could also support our own long data here, generating movl/add/ld8
9657 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9658 code faster because there is one less load. This would also include
9659 incomplete types, which can't go in sdata/sbss. */
9661 static bool
9662 ia64_in_small_data_p (const_tree exp)
9664 if (TARGET_NO_SDATA)
9665 return false;
9667 /* We want to merge strings, so we never consider them small data. */
9668 if (TREE_CODE (exp) == STRING_CST)
9669 return false;
9671 /* Functions are never small data. */
9672 if (TREE_CODE (exp) == FUNCTION_DECL)
9673 return false;
9675 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9677 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9679 if (strcmp (section, ".sdata") == 0
9680 || strncmp (section, ".sdata.", 7) == 0
9681 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9682 || strcmp (section, ".sbss") == 0
9683 || strncmp (section, ".sbss.", 6) == 0
9684 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9685 return true;
9687 else
9689 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9691 /* If this is an incomplete type with size 0, then we can't put it
9692 in sdata because it might be too big when completed. */
9693 if (size > 0 && size <= ia64_section_threshold)
9694 return true;
9697 return false;
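/* Illustrative sketch, not part of the original source: a small global
   such as

     static int counter;

   (4 bytes, at or below ia64_section_threshold) is treated as small data
   and placed in .sdata/.sbss, while an explicit
   __attribute__ ((section (".sdata"))) placement is recognized by section
   name in the branch above. */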
9700 /* Output assembly directives for prologue regions. */
9702 /* The current basic block number. */
9704 static bool last_block;
9706 /* True if we need a copy_state command at the start of the next block. */
9708 static bool need_copy_state;
9710 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9711 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9712 #endif
9714 /* Emit a debugging label after a call-frame-related insn. We'd
9715 rather output the label right away, but we'd have to output it
9716 after, not before, the instruction, and the instruction has not
9717 been output yet. So we emit the label after the insn, delete it to
9718 avoid introducing basic blocks, and mark it as preserved, such that
9719 it is still output, given that it is referenced in debug info. */
9721 static const char *
9722 ia64_emit_deleted_label_after_insn (rtx insn)
9724 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9725 rtx lb = gen_label_rtx ();
9726 rtx label_insn = emit_label_after (lb, insn);
9728 LABEL_PRESERVE_P (lb) = 1;
9730 delete_insn (label_insn);
9732 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9734 return xstrdup (label);
9737 /* Define the CFA after INSN with the steady-state definition. */
9739 static void
9740 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9742 rtx fp = frame_pointer_needed
9743 ? hard_frame_pointer_rtx
9744 : stack_pointer_rtx;
9745 const char *label = ia64_emit_deleted_label_after_insn (insn);
9747 if (!frame)
9748 return;
9750 dwarf2out_def_cfa
9751 (label, REGNO (fp),
9752 ia64_initial_elimination_offset
9753 (REGNO (arg_pointer_rtx), REGNO (fp))
9754 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9757 /* All we need to do here is avoid a crash in the generic dwarf2
9758 processing. The real CFA definition is set up above. */
9760 static void
9761 ia64_dwarf_handle_frame_unspec (const char * ARG_UNUSED (label),
9762 rtx ARG_UNUSED (pattern),
9763 int index)
9765 gcc_assert (index == UNSPECV_ALLOC);
9768 /* The generic dwarf2 frame debug info generator does not define a
9769 separate region for the very end of the epilogue, so refrain from
9770 doing so in the IA64-specific code as well. */
9772 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9774 /* The function emits unwind directives for the start of an epilogue. */
9776 static void
9777 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9779 /* If this isn't the last block of the function, then we need to label the
9780 current state, and copy it back in at the start of the next block. */
9782 if (!last_block)
9784 if (unwind)
9785 fprintf (asm_out_file, "\t.label_state %d\n",
9786 ++cfun->machine->state_num);
9787 need_copy_state = true;
9790 if (unwind)
9791 fprintf (asm_out_file, "\t.restore sp\n");
9792 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9793 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9794 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
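/* For illustration (a sketch of the assembler output, not emitted verbatim
   here): an epilogue in a non-final block produces something like

	.label_state 1
	.restore sp

   and ia64_asm_unwind_emit later pairs it with ".copy_state 1" at the
   start of the following block. */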
9797 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9799 static void
9800 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9801 bool unwind, bool frame)
9803 rtx dest = SET_DEST (pat);
9804 rtx src = SET_SRC (pat);
9806 if (dest == stack_pointer_rtx)
9808 if (GET_CODE (src) == PLUS)
9810 rtx op0 = XEXP (src, 0);
9811 rtx op1 = XEXP (src, 1);
9813 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9815 if (INTVAL (op1) < 0)
9817 gcc_assert (!frame_pointer_needed);
9818 if (unwind)
9819 fprintf (asm_out_file,
9820 "\t.fframe " HOST_WIDE_INT_PRINT_DEC "\n",
9821 -INTVAL (op1));
9822 ia64_dwarf2out_def_steady_cfa (insn, frame);
9824 else
9825 process_epilogue (asm_out_file, insn, unwind, frame);
9827 else
9829 gcc_assert (src == hard_frame_pointer_rtx);
9830 process_epilogue (asm_out_file, insn, unwind, frame);
9833 else if (dest == hard_frame_pointer_rtx)
9835 gcc_assert (src == stack_pointer_rtx);
9836 gcc_assert (frame_pointer_needed);
9838 if (unwind)
9839 fprintf (asm_out_file, "\t.vframe r%d\n",
9840 ia64_dbx_register_number (REGNO (dest)));
9841 ia64_dwarf2out_def_steady_cfa (insn, frame);
9843 else
9844 gcc_unreachable ();
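/* Illustration (sketch of the directives emitted above): a prologue
   adjustment of the form "sp = sp + (-160)" yields ".fframe 160"; copying
   sp into the hard frame pointer yields ".vframe r<n>"; a positive sp
   adjustment or a copy back from the hard frame pointer is handled as an
   epilogue by process_epilogue. */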
9847 /* This function processes a SET pattern for REG_CFA_REGISTER. */
9849 static void
9850 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9852 rtx dest = SET_DEST (pat);
9853 rtx src = SET_SRC (pat);
9855 int dest_regno = REGNO (dest);
9856 int src_regno = REGNO (src);
9858 switch (src_regno)
9860 case BR_REG (0):
9861 /* Saving return address pointer. */
9862 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9863 if (unwind)
9864 fprintf (asm_out_file, "\t.save rp, r%d\n",
9865 ia64_dbx_register_number (dest_regno));
9866 break;
9868 case PR_REG (0):
9869 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9870 if (unwind)
9871 fprintf (asm_out_file, "\t.save pr, r%d\n",
9872 ia64_dbx_register_number (dest_regno));
9873 break;
9875 case AR_UNAT_REGNUM:
9876 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9877 if (unwind)
9878 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9879 ia64_dbx_register_number (dest_regno));
9880 break;
9882 case AR_LC_REGNUM:
9883 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9884 if (unwind)
9885 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9886 ia64_dbx_register_number (dest_regno));
9887 break;
9889 default:
9890 /* Everything else should indicate being stored to memory. */
9891 gcc_unreachable ();
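/* Illustration (sketch): saving b0 into, say, r42 produces
   ".save rp, r42"; likewise pr, ar.unat and ar.lc produce
   ".save pr, r<n>", ".save ar.unat, r<n>" and ".save ar.lc, r<n>". */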
9895 /* This function processes a SET pattern for REG_CFA_OFFSET. */
9897 static void
9898 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9900 rtx dest = SET_DEST (pat);
9901 rtx src = SET_SRC (pat);
9902 int src_regno = REGNO (src);
9903 const char *saveop;
9904 HOST_WIDE_INT off;
9905 rtx base;
9907 gcc_assert (MEM_P (dest));
9908 if (GET_CODE (XEXP (dest, 0)) == REG)
9910 base = XEXP (dest, 0);
9911 off = 0;
9913 else
9915 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9916 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9917 base = XEXP (XEXP (dest, 0), 0);
9918 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9921 if (base == hard_frame_pointer_rtx)
9923 saveop = ".savepsp";
9924 off = - off;
9926 else
9928 gcc_assert (base == stack_pointer_rtx);
9929 saveop = ".savesp";
9932 src_regno = REGNO (src);
9933 switch (src_regno)
9935 case BR_REG (0):
9936 gcc_assert (!current_frame_info.r[reg_save_b0]);
9937 if (unwind)
9938 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9939 saveop, off);
9940 break;
9942 case PR_REG (0):
9943 gcc_assert (!current_frame_info.r[reg_save_pr]);
9944 if (unwind)
9945 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9946 saveop, off);
9947 break;
9949 case AR_LC_REGNUM:
9950 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9951 if (unwind)
9952 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9953 saveop, off);
9954 break;
9956 case AR_PFS_REGNUM:
9957 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9958 if (unwind)
9959 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9960 saveop, off);
9961 break;
9963 case AR_UNAT_REGNUM:
9964 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9965 if (unwind)
9966 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9967 saveop, off);
9968 break;
9970 case GR_REG (4):
9971 case GR_REG (5):
9972 case GR_REG (6):
9973 case GR_REG (7):
9974 if (unwind)
9975 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9976 1 << (src_regno - GR_REG (4)));
9977 break;
9979 case BR_REG (1):
9980 case BR_REG (2):
9981 case BR_REG (3):
9982 case BR_REG (4):
9983 case BR_REG (5):
9984 if (unwind)
9985 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9986 1 << (src_regno - BR_REG (1)));
9987 break;
9989 case FR_REG (2):
9990 case FR_REG (3):
9991 case FR_REG (4):
9992 case FR_REG (5):
9993 if (unwind)
9994 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9995 1 << (src_regno - FR_REG (2)));
9996 break;
9998 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9999 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10000 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10001 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10002 if (unwind)
10003 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10004 1 << (src_regno - FR_REG (12)));
10005 break;
10007 default:
10008 /* ??? For some reason we mark other general registers, even those
10009 we can't represent in the unwind info. Ignore them. */
10010 break;
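/* Illustration (sketch): a save of b0 to the stack slot at sp+16 is
   announced as ".savesp rp, 16"; when the slot is addressed from the hard
   frame pointer, ".savepsp" is used with the negated offset instead. */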
10014 /* This function looks at a single insn and emits any directives
10015 required to unwind this insn. */
10017 static void
10018 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
10020 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10021 bool frame = dwarf2out_do_frame ();
10022 rtx note, pat;
10023 bool handled_one;
10025 if (!unwind && !frame)
10026 return;
10028 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10030 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
10032 /* Restore unwind state from immediately before the epilogue. */
10033 if (need_copy_state)
10035 if (unwind)
10037 fprintf (asm_out_file, "\t.body\n");
10038 fprintf (asm_out_file, "\t.copy_state %d\n",
10039 cfun->machine->state_num);
10041 if (IA64_CHANGE_CFA_IN_EPILOGUE)
10042 ia64_dwarf2out_def_steady_cfa (insn, frame);
10043 need_copy_state = false;
10047 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
10048 return;
10050 /* Look for the ALLOC insn. */
10051 if (INSN_CODE (insn) == CODE_FOR_alloc)
10053 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10054 int dest_regno = REGNO (dest);
10056 /* If this is the final destination for ar.pfs, then this must
10057 be the alloc in the prologue. */
10058 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10060 if (unwind)
10061 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10062 ia64_dbx_register_number (dest_regno));
10064 else
10066 /* This must be an alloc before a sibcall. We must drop the
10067 old frame info. The easiest way to drop the old frame
10068 info is to ensure we had a ".restore sp" directive
10069 followed by a new prologue. If the procedure doesn't
10070 have a memory-stack frame, we'll issue a dummy ".restore
10071 sp" now. */
10072 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10073 /* If we haven't done process_epilogue () yet, do it now. */
10074 process_epilogue (asm_out_file, insn, unwind, frame);
10075 if (unwind)
10076 fprintf (asm_out_file, "\t.prologue\n");
10078 return;
10081 handled_one = false;
10082 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10083 switch (REG_NOTE_KIND (note))
10085 case REG_CFA_ADJUST_CFA:
10086 pat = XEXP (note, 0);
10087 if (pat == NULL)
10088 pat = PATTERN (insn);
10089 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10090 handled_one = true;
10091 break;
10093 case REG_CFA_OFFSET:
10094 pat = XEXP (note, 0);
10095 if (pat == NULL)
10096 pat = PATTERN (insn);
10097 process_cfa_offset (asm_out_file, pat, unwind);
10098 handled_one = true;
10099 break;
10101 case REG_CFA_REGISTER:
10102 pat = XEXP (note, 0);
10103 if (pat == NULL)
10104 pat = PATTERN (insn);
10105 process_cfa_register (asm_out_file, pat, unwind);
10106 handled_one = true;
10107 break;
10109 case REG_FRAME_RELATED_EXPR:
10110 case REG_CFA_DEF_CFA:
10111 case REG_CFA_EXPRESSION:
10112 case REG_CFA_RESTORE:
10113 case REG_CFA_SET_VDRAP:
10114 /* Not used in the ia64 port. */
10115 gcc_unreachable ();
10117 default:
10118 /* Not a frame-related note. */
10119 break;
10122 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10123 explicit action to take. No guessing required. */
10124 gcc_assert (handled_one);
10127 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10129 static void
10130 ia64_asm_emit_except_personality (rtx personality)
10132 fputs ("\t.personality\t", asm_out_file);
10133 output_addr_const (asm_out_file, personality);
10134 fputc ('\n', asm_out_file);
10137 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10139 static void
10140 ia64_asm_init_sections (void)
10142 exception_section = get_unnamed_section (0, output_section_asm_op,
10143 "\t.handlerdata");
10146 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10148 static enum unwind_info_type
10149 ia64_debug_unwind_info (void)
10151 return UI_TARGET;
10154 /* Implement TARGET_EXCEPT_UNWIND_INFO. */
10156 static enum unwind_info_type
10157 ia64_except_unwind_info (struct gcc_options *opts)
10159 /* Honor the --enable-sjlj-exceptions configure switch. */
10160 #ifdef CONFIG_UNWIND_EXCEPTIONS
10161 if (CONFIG_UNWIND_EXCEPTIONS)
10162 return UI_SJLJ;
10163 #endif
10165 /* For simplicity elsewhere in this file, indicate that all unwind
10166 info is disabled if we're not emitting unwind tables. */
10167 if (!opts->x_flag_exceptions && !opts->x_flag_unwind_tables)
10168 return UI_NONE;
10170 return UI_TARGET;
10173 enum ia64_builtins
10175 IA64_BUILTIN_BSP,
10176 IA64_BUILTIN_COPYSIGNQ,
10177 IA64_BUILTIN_FABSQ,
10178 IA64_BUILTIN_FLUSHRS,
10179 IA64_BUILTIN_INFQ,
10180 IA64_BUILTIN_HUGE_VALQ,
10181 IA64_BUILTIN_max
10184 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10186 void
10187 ia64_init_builtins (void)
10189 tree fpreg_type;
10190 tree float80_type;
10191 tree decl;
10193 /* The __fpreg type. */
10194 fpreg_type = make_node (REAL_TYPE);
10195 TYPE_PRECISION (fpreg_type) = 82;
10196 layout_type (fpreg_type);
10197 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10199 /* The __float80 type. */
10200 float80_type = make_node (REAL_TYPE);
10201 TYPE_PRECISION (float80_type) = 80;
10202 layout_type (float80_type);
10203 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10205 /* The __float128 type. */
10206 if (!TARGET_HPUX)
10208 tree ftype;
10209 tree float128_type = make_node (REAL_TYPE);
10211 TYPE_PRECISION (float128_type) = 128;
10212 layout_type (float128_type);
10213 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10215 /* TFmode support builtins. */
10216 ftype = build_function_type (float128_type, void_list_node);
10217 decl = add_builtin_function ("__builtin_infq", ftype,
10218 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10219 NULL, NULL_TREE);
10220 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10222 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10223 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10224 NULL, NULL_TREE);
10225 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10227 ftype = build_function_type_list (float128_type,
10228 float128_type,
10229 NULL_TREE);
10230 decl = add_builtin_function ("__builtin_fabsq", ftype,
10231 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10232 "__fabstf2", NULL_TREE);
10233 TREE_READONLY (decl) = 1;
10234 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10236 ftype = build_function_type_list (float128_type,
10237 float128_type,
10238 float128_type,
10239 NULL_TREE);
10240 decl = add_builtin_function ("__builtin_copysignq", ftype,
10241 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10242 "__copysigntf3", NULL_TREE);
10243 TREE_READONLY (decl) = 1;
10244 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10246 else
10247 /* Under HPUX, this is a synonym for "long double". */
10248 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10249 "__float128");
10251 /* Fwrite on VMS is non-standard. */
10252 if (TARGET_ABI_OPEN_VMS)
10254 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
10255 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
10258 #define def_builtin(name, type, code) \
10259 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10260 NULL, NULL_TREE)
10262 decl = def_builtin ("__builtin_ia64_bsp",
10263 build_function_type (ptr_type_node, void_list_node),
10264 IA64_BUILTIN_BSP);
10265 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10267 decl = def_builtin ("__builtin_ia64_flushrs",
10268 build_function_type (void_type_node, void_list_node),
10269 IA64_BUILTIN_FLUSHRS);
10270 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10272 #undef def_builtin
10274 if (TARGET_HPUX)
10276 if (built_in_decls [BUILT_IN_FINITE])
10277 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
10278 "_Isfinite");
10279 if (built_in_decls [BUILT_IN_FINITEF])
10280 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
10281 "_Isfinitef");
10282 if (built_in_decls [BUILT_IN_FINITEL])
10283 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
10284 "_Isfinitef128");
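/* Illustrative user-level usage of the builtins registered above
   (hypothetical code, assuming a non-HP-UX target for the __float128 ones):

     void *backing_store_top = __builtin_ia64_bsp ();
     __builtin_ia64_flushrs ();
     __float128 inf = __builtin_infq ();
     __float128 mag = __builtin_fabsq (-inf);
     __float128 neg = __builtin_copysignq (mag, -1.0);  */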
10289 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10290 enum machine_mode mode ATTRIBUTE_UNUSED,
10291 int ignore ATTRIBUTE_UNUSED)
10293 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10294 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10296 switch (fcode)
10298 case IA64_BUILTIN_BSP:
10299 if (! target || ! register_operand (target, DImode))
10300 target = gen_reg_rtx (DImode);
10301 emit_insn (gen_bsp_value (target));
10302 #ifdef POINTERS_EXTEND_UNSIGNED
10303 target = convert_memory_address (ptr_mode, target);
10304 #endif
10305 return target;
10307 case IA64_BUILTIN_FLUSHRS:
10308 emit_insn (gen_flushrs ());
10309 return const0_rtx;
10311 case IA64_BUILTIN_INFQ:
10312 case IA64_BUILTIN_HUGE_VALQ:
10314 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10315 REAL_VALUE_TYPE inf;
10316 rtx tmp;
10318 real_inf (&inf);
10319 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10321 tmp = validize_mem (force_const_mem (target_mode, tmp));
10323 if (target == 0)
10324 target = gen_reg_rtx (target_mode);
10326 emit_move_insn (target, tmp);
10327 return target;
10330 case IA64_BUILTIN_FABSQ:
10331 case IA64_BUILTIN_COPYSIGNQ:
10332 return expand_call (exp, target, ignore);
10334 default:
10335 gcc_unreachable ();
10338 return NULL_RTX;
10341 /* Return the ia64 builtin for CODE. */
10343 static tree
10344 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10346 if (code >= IA64_BUILTIN_max)
10347 return error_mark_node;
10349 return ia64_builtins[code];
10352 /* On HP-UX IA64, aggregate parameters are passed in the most
10353 significant bits of the stack slot. */
10355 enum direction
10356 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10358 /* Exception to normal case for structures/unions/etc. */
10360 if (type && AGGREGATE_TYPE_P (type)
10361 && int_size_in_bytes (type) < UNITS_PER_WORD)
10362 return upward;
10364 /* Fall back to the default. */
10365 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
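/* For example (hypothetical type, not from this file):

     struct rgb { unsigned char r, g, b; };

   has size 3 < UNITS_PER_WORD, so it is padded upward, i.e. stored in the
   most significant bits of its slot per the comment above; scalars fall
   through to the default padding rule. */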
10368 /* Emit text to declare externally defined variables and functions, because
10369 the Intel assembler does not support undefined externals. */
10371 void
10372 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10374 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10375 set in order to avoid putting out names that are never really
10376 used. */
10377 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10379 /* maybe_assemble_visibility will return 1 if the assembler
10380 visibility directive is output. */
10381 int need_visibility = ((*targetm.binds_local_p) (decl)
10382 && maybe_assemble_visibility (decl));
10384 #ifdef DO_CRTL_NAMES
10385 DO_CRTL_NAMES;
10386 #endif
10388 /* GNU as does not need anything here, but the HP linker does
10389 need something for external functions. */
10390 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10391 && TREE_CODE (decl) == FUNCTION_DECL)
10392 (*targetm.asm_out.globalize_decl_name) (file, decl);
10393 else if (need_visibility && !TARGET_GNU_AS)
10394 (*targetm.asm_out.globalize_label) (file, name);
10398 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
10399 modes of word_mode and larger. Rename the TFmode libfuncs using the
10400 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10401 backward compatibility. */
10403 static void
10404 ia64_init_libfuncs (void)
10406 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10407 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10408 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10409 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10411 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10412 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10413 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10414 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10415 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10417 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10418 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10419 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10420 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10421 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10422 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10424 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10425 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10426 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10427 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10428 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10430 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10431 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10432 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10433 /* HP-UX 11.23 libc does not have a function for unsigned
10434 SImode-to-TFmode conversion. */
10435 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
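/* Illustration (sketch): with these names in place, a TFmode addition is
   emitted as a call to _U_Qfadd, and a TFmode-to-SImode conversion calls
   _U_Qfcnvfxt_quad_to_sgl, matching the HP-UX quad-float conventions. */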
10438 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10440 static void
10441 ia64_hpux_init_libfuncs (void)
10443 ia64_init_libfuncs ();
10445 /* The HP SI millicode division and mod functions expect DI arguments.
10446 By turning them off completely we avoid using both libgcc and the
10447 non-standard millicode routines and use the HP DI millicode routines
10448 instead. */
10450 set_optab_libfunc (sdiv_optab, SImode, 0);
10451 set_optab_libfunc (udiv_optab, SImode, 0);
10452 set_optab_libfunc (smod_optab, SImode, 0);
10453 set_optab_libfunc (umod_optab, SImode, 0);
10455 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10456 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10457 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10458 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10460 /* HP-UX libc has TF min/max/abs routines in it. */
10461 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10462 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10463 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10465 /* ia64_expand_compare uses this. */
10466 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10468 /* These should never be used. */
10469 set_optab_libfunc (eq_optab, TFmode, 0);
10470 set_optab_libfunc (ne_optab, TFmode, 0);
10471 set_optab_libfunc (gt_optab, TFmode, 0);
10472 set_optab_libfunc (ge_optab, TFmode, 0);
10473 set_optab_libfunc (lt_optab, TFmode, 0);
10474 set_optab_libfunc (le_optab, TFmode, 0);
10477 /* Rename the division and modulus functions in VMS. */
10479 static void
10480 ia64_vms_init_libfuncs (void)
10482 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10483 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10484 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10485 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10486 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10487 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10488 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10489 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10490 abort_libfunc = init_one_libfunc ("decc$abort");
10491 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10492 #ifdef MEM_LIBFUNCS_INIT
10493 MEM_LIBFUNCS_INIT;
10494 #endif
10497 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10498 the HPUX conventions. */
10500 static void
10501 ia64_sysv4_init_libfuncs (void)
10503 ia64_init_libfuncs ();
10505 /* These functions are not part of the HPUX TFmode interface. We
10506 use them instead of _U_Qfcmp, which doesn't work the way we
10507 expect. */
10508 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10509 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10510 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10511 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10512 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10513 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10515 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10516 glibc doesn't have them. */
10519 /* Use soft-fp. */
10521 static void
10522 ia64_soft_fp_init_libfuncs (void)
10526 static bool
10527 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10529 return (mode == SImode || mode == DImode);
10532 /* For HPUX, it is illegal to have relocations in shared segments. */
10534 static int
10535 ia64_hpux_reloc_rw_mask (void)
10537 return 3;
10540 /* For others, relax this so that relocations to local data go in
10541 read-only segments, but we still cannot allow global relocations
10542 in read-only segments. */
10544 static int
10545 ia64_reloc_rw_mask (void)
10547 return flag_pic ? 3 : 2;
10550 /* Return the section to use for X. The only special thing we do here
10551 is to honor small data. */
10553 static section *
10554 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10555 unsigned HOST_WIDE_INT align)
10557 if (GET_MODE_SIZE (mode) > 0
10558 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10559 && !TARGET_NO_SDATA)
10560 return sdata_section;
10561 else
10562 return default_elf_select_rtx_section (mode, x, align);
10565 static unsigned int
10566 ia64_section_type_flags (tree decl, const char *name, int reloc)
10568 unsigned int flags = 0;
10570 if (strcmp (name, ".sdata") == 0
10571 || strncmp (name, ".sdata.", 7) == 0
10572 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10573 || strncmp (name, ".sdata2.", 8) == 0
10574 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10575 || strcmp (name, ".sbss") == 0
10576 || strncmp (name, ".sbss.", 6) == 0
10577 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10578 flags = SECTION_SMALL;
10580 #if TARGET_ABI_OPEN_VMS
10581 if (decl && DECL_ATTRIBUTES (decl)
10582 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10583 flags |= SECTION_VMS_OVERLAY;
10584 #endif
10586 flags |= default_section_type_flags (decl, name, reloc);
10587 return flags;
10590 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10591 structure type and that the address of that type should be passed
10592 in out0, rather than in r8. */
10594 static bool
10595 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10597 tree ret_type = TREE_TYPE (fntype);
10599 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10600 as the structure return address parameter, if the return value
10601 type has a non-trivial copy constructor or destructor. It is not
10602 clear if this same convention should be used for other
10603 programming languages. Until G++ 3.4, we incorrectly used r8 for
10604 these return values. */
10605 return (abi_version_at_least (2)
10606 && ret_type
10607 && TYPE_MODE (ret_type) == BLKmode
10608 && TREE_ADDRESSABLE (ret_type)
10609 && strcmp (lang_hooks.name, "GNU C++") == 0);
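/* Hypothetical C++ example of a return type for which this returns true
   (assuming G++ with ABI version 2 or later):

     struct S { S (const S &); ~S (); char buf[16]; };
     S f ();

   S has a non-trivial copy constructor and destructor, so the address of
   the return slot for f is expected to be passed in out0 rather than r8,
   and ia64_output_mi_thunk below shifts "this" to the second parameter. */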
10612 /* Output the assembler code for a thunk function. THUNK_DECL is the
10613 declaration for the thunk function itself, FUNCTION is the decl for
10614 the target function. DELTA is an immediate constant offset to be
10615 added to THIS. If VCALL_OFFSET is nonzero, the word at
10616 *(*this + vcall_offset) should be added to THIS. */
10618 static void
10619 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10620 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10621 tree function)
10623 rtx this_rtx, insn, funexp;
10624 unsigned int this_parmno;
10625 unsigned int this_regno;
10626 rtx delta_rtx;
10628 reload_completed = 1;
10629 epilogue_completed = 1;
10631 /* Set things up as ia64_expand_prologue might. */
10632 last_scratch_gr_reg = 15;
10634 memset (&current_frame_info, 0, sizeof (current_frame_info));
10635 current_frame_info.spill_cfa_off = -16;
10636 current_frame_info.n_input_regs = 1;
10637 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10639 /* Mark the end of the (empty) prologue. */
10640 emit_note (NOTE_INSN_PROLOGUE_END);
10642 /* Figure out whether "this" will be the first parameter (the
10643 typical case) or the second parameter (as happens when the
10644 virtual function returns certain class objects). */
10645 this_parmno
10646 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10647 ? 1 : 0);
10648 this_regno = IN_REG (this_parmno);
10649 if (!TARGET_REG_NAMES)
10650 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10652 this_rtx = gen_rtx_REG (Pmode, this_regno);
10654 /* Apply the constant offset, if required. */
10655 delta_rtx = GEN_INT (delta);
10656 if (TARGET_ILP32)
10658 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10659 REG_POINTER (tmp) = 1;
10660 if (delta && satisfies_constraint_I (delta_rtx))
10662 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10663 delta = 0;
10665 else
10666 emit_insn (gen_ptr_extend (this_rtx, tmp));
10668 if (delta)
10670 if (!satisfies_constraint_I (delta_rtx))
10672 rtx tmp = gen_rtx_REG (Pmode, 2);
10673 emit_move_insn (tmp, delta_rtx);
10674 delta_rtx = tmp;
10676 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10679 /* Apply the offset from the vtable, if required. */
10680 if (vcall_offset)
10682 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10683 rtx tmp = gen_rtx_REG (Pmode, 2);
10685 if (TARGET_ILP32)
10687 rtx t = gen_rtx_REG (ptr_mode, 2);
10688 REG_POINTER (t) = 1;
10689 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10690 if (satisfies_constraint_I (vcall_offset_rtx))
10692 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10693 vcall_offset = 0;
10695 else
10696 emit_insn (gen_ptr_extend (tmp, t));
10698 else
10699 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10701 if (vcall_offset)
10703 if (!satisfies_constraint_J (vcall_offset_rtx))
10705 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10706 emit_move_insn (tmp2, vcall_offset_rtx);
10707 vcall_offset_rtx = tmp2;
10709 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10712 if (TARGET_ILP32)
10713 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10714 else
10715 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10717 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10720 /* Generate a tail call to the target function. */
10721 if (! TREE_USED (function))
10723 assemble_external (function);
10724 TREE_USED (function) = 1;
10726 funexp = XEXP (DECL_RTL (function), 0);
10727 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10728 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10729 insn = get_last_insn ();
10730 SIBLING_CALL_P (insn) = 1;
10732 /* Code generation for calls relies on splitting. */
10733 reload_completed = 1;
10734 epilogue_completed = 1;
10735 try_split (PATTERN (insn), insn, 0);
10737 emit_barrier ();
10739 /* Run just enough of rest_of_compilation to get the insns emitted.
10740 There's not really enough bulk here to make other passes such as
10741 instruction scheduling worth while. Note that use_thunk calls
10742 assemble_start_function and assemble_end_function. */
10744 insn_locators_alloc ();
10745 emit_all_insn_group_barriers (NULL);
10746 insn = get_insns ();
10747 shorten_branches (insn);
10748 final_start_function (insn, file, 1);
10749 final (insn, file, 1);
10750 final_end_function ();
10752 reload_completed = 0;
10753 epilogue_completed = 0;
10756 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10758 static rtx
10759 ia64_struct_value_rtx (tree fntype,
10760 int incoming ATTRIBUTE_UNUSED)
10762 if (TARGET_ABI_OPEN_VMS ||
10763 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10764 return NULL_RTX;
10765 return gen_rtx_REG (Pmode, GR_REG (8));
10768 static bool
10769 ia64_scalar_mode_supported_p (enum machine_mode mode)
10771 switch (mode)
10773 case QImode:
10774 case HImode:
10775 case SImode:
10776 case DImode:
10777 case TImode:
10778 return true;
10780 case SFmode:
10781 case DFmode:
10782 case XFmode:
10783 case RFmode:
10784 return true;
10786 case TFmode:
10787 return true;
10789 default:
10790 return false;
10794 static bool
10795 ia64_vector_mode_supported_p (enum machine_mode mode)
10797 switch (mode)
10799 case V8QImode:
10800 case V4HImode:
10801 case V2SImode:
10802 return true;
10804 case V2SFmode:
10805 return true;
10807 default:
10808 return false;
10812 /* Implement the FUNCTION_PROFILER macro. */
10814 void
10815 ia64_output_function_profiler (FILE *file, int labelno)
10817 bool indirect_call;
10819 /* If the function needs a static chain and the static chain
10820 register is r15, we use an indirect call so as to bypass
10821 the PLT stub in case the executable is dynamically linked,
10822 because the stub clobbers r15 as per 5.3.6 of the psABI.
10823 We don't need to do that in non-canonical PIC mode. */
10825 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10827 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10828 indirect_call = true;
10830 else
10831 indirect_call = false;
10833 if (TARGET_GNU_AS)
10834 fputs ("\t.prologue 4, r40\n", file);
10835 else
10836 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10837 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10839 if (NO_PROFILE_COUNTERS)
10840 fputs ("\tmov out3 = r0\n", file);
10841 else
10843 char buf[20];
10844 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10846 if (TARGET_AUTO_PIC)
10847 fputs ("\tmovl out3 = @gprel(", file);
10848 else
10849 fputs ("\taddl out3 = @ltoff(", file);
10850 assemble_name (file, buf);
10851 if (TARGET_AUTO_PIC)
10852 fputs (")\n", file);
10853 else
10854 fputs ("), r1\n", file);
10857 if (indirect_call)
10858 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10859 fputs ("\t;;\n", file);
10861 fputs ("\t.save rp, r42\n", file);
10862 fputs ("\tmov out2 = b0\n", file);
10863 if (indirect_call)
10864 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10865 fputs ("\t.body\n", file);
10866 fputs ("\tmov out1 = r1\n", file);
10867 if (indirect_call)
10869 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10870 fputs ("\tmov b6 = r16\n", file);
10871 fputs ("\tld8 r1 = [r14]\n", file);
10872 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10874 else
10875 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10878 static GTY(()) rtx mcount_func_rtx;
10879 static rtx
10880 gen_mcount_func_rtx (void)
10882 if (!mcount_func_rtx)
10883 mcount_func_rtx = init_one_libfunc ("_mcount");
10884 return mcount_func_rtx;
10887 void
10888 ia64_profile_hook (int labelno)
10890 rtx label, ip;
10892 if (NO_PROFILE_COUNTERS)
10893 label = const0_rtx;
10894 else
10896 char buf[30];
10897 const char *label_name;
10898 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10899 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10900 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10901 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10903 ip = gen_reg_rtx (Pmode);
10904 emit_insn (gen_ip_value (ip));
10905 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10906 VOIDmode, 3,
10907 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10908 ip, Pmode,
10909 label, Pmode);
10912 /* Return the mangling of TYPE if it is an extended fundamental type. */
10914 static const char *
10915 ia64_mangle_type (const_tree type)
10917 type = TYPE_MAIN_VARIANT (type);
10919 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10920 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10921 return NULL;
10923 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10924 mangled as "e". */
10925 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10926 return "g";
10927 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10928 an extended mangling. Elsewhere, "e" is available since long
10929 double is 80 bits. */
10930 if (TYPE_MODE (type) == XFmode)
10931 return TARGET_HPUX ? "u9__float80" : "e";
10932 if (TYPE_MODE (type) == RFmode)
10933 return "u7__fpreg";
10934 return NULL;
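/* Illustrative manglings implied by the returns above (hypothetical
   declarations): a parameter of type __fpreg mangles as "u7__fpreg";
   __float80 mangles as "u9__float80" on HP-UX and as "e" elsewhere; on
   non-HP-UX targets __float128 (TFmode) mangles as "g". */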
10937 /* Return the diagnostic message string if conversion from FROMTYPE to
10938 TOTYPE is not allowed, NULL otherwise. */
10939 static const char *
10940 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10942 /* Reject nontrivial conversion to or from __fpreg. */
10943 if (TYPE_MODE (fromtype) == RFmode
10944 && TYPE_MODE (totype) != RFmode
10945 && TYPE_MODE (totype) != VOIDmode)
10946 return N_("invalid conversion from %<__fpreg%>");
10947 if (TYPE_MODE (totype) == RFmode
10948 && TYPE_MODE (fromtype) != RFmode)
10949 return N_("invalid conversion to %<__fpreg%>");
10950 return NULL;
10953 /* Return the diagnostic message string if the unary operation OP is
10954 not permitted on TYPE, NULL otherwise. */
10955 static const char *
10956 ia64_invalid_unary_op (int op, const_tree type)
10958 /* Reject operations on __fpreg other than unary + or &. */
10959 if (TYPE_MODE (type) == RFmode
10960 && op != CONVERT_EXPR
10961 && op != ADDR_EXPR)
10962 return N_("invalid operation on %<__fpreg%>");
10963 return NULL;
10966 /* Return the diagnostic message string if the binary operation OP is
10967 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10968 static const char *
10969 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10971 /* Reject operations on __fpreg. */
10972 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10973 return N_("invalid operation on %<__fpreg%>");
10974 return NULL;
10977 /* Implement TARGET_OPTION_DEFAULT_PARAMS. */
10978 static void
10979 ia64_option_default_params (void)
10981 /* Let the scheduler form additional regions. */
10982 set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
10984 /* Set the default values for cache-related parameters. */
10985 set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
10986 set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
10988 set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
10991 /* HP-UX version_id attribute.
10992 For object foo, if the version_id is set to 1234, put out an alias
10993 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
10994 other than an alias statement because it is an illegal symbol name. */
10996 static tree
10997 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10998 tree name ATTRIBUTE_UNUSED,
10999 tree args,
11000 int flags ATTRIBUTE_UNUSED,
11001 bool *no_add_attrs)
11003 tree arg = TREE_VALUE (args);
11005 if (TREE_CODE (arg) != STRING_CST)
11007 error("version attribute is not a string");
11008 *no_add_attrs = true;
11009 return NULL_TREE;
11011 return NULL_TREE;
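/* Hypothetical usage of the attribute handled above:

     extern int foo (void) __attribute__ ((version_id ("1234")));

   which is expected to result in an assembler alias along the lines of
   '.alias foo "foo{1234}"', as described in the comment before the
   handler. */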
11014 /* Target hook for c_mode_for_suffix. */
11016 static enum machine_mode
11017 ia64_c_mode_for_suffix (char suffix)
11019 if (suffix == 'q')
11020 return TFmode;
11021 if (suffix == 'w')
11022 return XFmode;
11024 return VOIDmode;
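/* Illustrative constants using these suffixes (hypothetical user code):

     __float128 q = 1.0q;     suffix 'q' -> TFmode
     __float80  w = 1.0w;     suffix 'w' -> XFmode   */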
11027 static enum machine_mode
11028 ia64_promote_function_mode (const_tree type,
11029 enum machine_mode mode,
11030 int *punsignedp,
11031 const_tree funtype,
11032 int for_return)
11034 /* Special processing required for OpenVMS ... */
11036 if (!TARGET_ABI_OPEN_VMS)
11037 return default_promote_function_mode(type, mode, punsignedp, funtype,
11038 for_return);
11040 /* HP OpenVMS Calling Standard dated June, 2004, that describes
11041 HP OpenVMS I64 Version 8.2EFT,
11042 chapter 4 "OpenVMS I64 Conventions"
11043 section 4.7 "Procedure Linkage"
11044 subsection 4.7.5.2, "Normal Register Parameters"
11046 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
11047 values passed in registers are zero-filled; signed integral values as
11048 well as unsigned 32-bit integral values are sign-extended to 64 bits.
11049 For all other types passed in the general registers, unused bits are
11050 undefined." */
11052 if (!AGGREGATE_TYPE_P (type)
11053 && GET_MODE_CLASS (mode) == MODE_INT
11054 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
11056 if (mode == SImode)
11057 *punsignedp = 0;
11058 return DImode;
11060 else
11061 return promote_mode (type, mode, punsignedp);
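/* Worked example for the OpenVMS rule above (sketch): an "unsigned int"
   argument has SImode, so *punsignedp is cleared and the value is promoted
   to DImode, i.e. sign-extended to 64 bits as the calling standard quoted
   above requires; non-VMS targets simply use
   default_promote_function_mode. */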
11064 static GTY(()) rtx ia64_dconst_0_5_rtx;
11067 ia64_dconst_0_5 (void)
11069 if (! ia64_dconst_0_5_rtx)
11071 REAL_VALUE_TYPE rv;
11072 real_from_string (&rv, "0.5");
11073 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11075 return ia64_dconst_0_5_rtx;
11078 static GTY(()) rtx ia64_dconst_0_375_rtx;
11081 ia64_dconst_0_375 (void)
11083 if (! ia64_dconst_0_375_rtx)
11085 REAL_VALUE_TYPE rv;
11086 real_from_string (&rv, "0.375");
11087 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11089 return ia64_dconst_0_375_rtx;
11092 static enum machine_mode
11093 ia64_get_reg_raw_mode (int regno)
11095 if (FR_REGNO_P (regno))
11096 return XFmode;
11097 return default_get_reg_raw_mode(regno);
11100 /* Always default to .text section until HP-UX linker is fixed. */
11102 ATTRIBUTE_UNUSED static section *
11103 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11104 enum node_frequency freq ATTRIBUTE_UNUSED,
11105 bool startup ATTRIBUTE_UNUSED,
11106 bool exit ATTRIBUTE_UNUSED)
11108 return NULL;
11111 #include "gt-ia64.h"