gcc/config/ia64/ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
13 any later version.
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "libfuncs.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "intl.h"
56 #include "df.h"
57 #include "debug.h"
58 #include "params.h"
59 #include "dbgcnt.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
62 #include "reload.h"
64 /* This is used for communication between ASM_OUTPUT_LABEL and
65 ASM_OUTPUT_LABELREF. */
66 int ia64_asm_output_label = 0;
68 /* Register names for ia64_expand_prologue. */
69 static const char * const ia64_reg_numbers[96] =
70 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
71 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
72 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
73 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
74 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
75 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
76 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
77 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
78 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
79 "r104","r105","r106","r107","r108","r109","r110","r111",
80 "r112","r113","r114","r115","r116","r117","r118","r119",
81 "r120","r121","r122","r123","r124","r125","r126","r127"};
83 /* ??? These strings could be shared with REGISTER_NAMES. */
84 static const char * const ia64_input_reg_names[8] =
85 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
87 /* ??? These strings could be shared with REGISTER_NAMES. */
88 static const char * const ia64_local_reg_names[80] =
89 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
90 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
91 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
92 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
93 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
94 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
95 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
96 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
97 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
98 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
100 /* ??? These strings could be shared with REGISTER_NAMES. */
101 static const char * const ia64_output_reg_names[8] =
102 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
104 /* Which cpu are we scheduling for. */
105 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
107 /* Determines whether we run our final scheduling pass or not. We always
108 avoid the normal second scheduling pass. */
109 static int ia64_flag_schedule_insns2;
111 /* Determines whether we run variable tracking in machine dependent
112 reorganization. */
113 static int ia64_flag_var_tracking;
115 /* Variables which are this size or smaller are put in the sdata/sbss
116 sections. */
118 unsigned int ia64_section_threshold;
120 /* The following variable is used by the DFA insn scheduler. The value is
121 TRUE if we do insn bundling instead of insn scheduling. */
122 int bundling_p = 0;
124 enum ia64_frame_regs
126 reg_fp,
127 reg_save_b0,
128 reg_save_pr,
129 reg_save_ar_pfs,
130 reg_save_ar_unat,
131 reg_save_ar_lc,
132 reg_save_gp,
133 number_of_ia64_frame_regs
136 /* Structure to be filled in by ia64_compute_frame_size with register
137 save masks and offsets for the current function. */
139 struct ia64_frame_info
141 HOST_WIDE_INT total_size; /* size of the stack frame, not including
142 the caller's scratch area. */
143 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
144 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
145 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
146 HARD_REG_SET mask; /* mask of saved registers. */
147 unsigned int gr_used_mask; /* mask of registers in use as gr spill
148 registers or long-term scratches. */
149 int n_spilled; /* number of spilled registers. */
150 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
151 int n_input_regs; /* number of input registers used. */
152 int n_local_regs; /* number of local registers used. */
153 int n_output_regs; /* number of output registers used. */
154 int n_rotate_regs; /* number of rotating registers used. */
156 char need_regstk; /* true if a .regstk directive needed. */
157 char initialized; /* true if the data is finalized. */
160 /* Current frame information calculated by ia64_compute_frame_size. */
161 static struct ia64_frame_info current_frame_info;
162 /* The actual registers that are emitted. */
163 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
165 static int ia64_first_cycle_multipass_dfa_lookahead (void);
166 static void ia64_dependencies_evaluation_hook (rtx, rtx);
167 static void ia64_init_dfa_pre_cycle_insn (void);
168 static rtx ia64_dfa_pre_cycle_insn (void);
169 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
170 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
171 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
172 static void ia64_h_i_d_extended (void);
173 static void * ia64_alloc_sched_context (void);
174 static void ia64_init_sched_context (void *, bool);
175 static void ia64_set_sched_context (void *);
176 static void ia64_clear_sched_context (void *);
177 static void ia64_free_sched_context (void *);
178 static int ia64_mode_to_int (enum machine_mode);
179 static void ia64_set_sched_flags (spec_info_t);
180 static ds_t ia64_get_insn_spec_ds (rtx);
181 static ds_t ia64_get_insn_checked_ds (rtx);
182 static bool ia64_skip_rtx_p (const_rtx);
183 static int ia64_speculate_insn (rtx, ds_t, rtx *);
184 static bool ia64_needs_block_p (int);
185 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
186 static int ia64_spec_check_p (rtx);
187 static int ia64_spec_check_src_p (rtx);
188 static rtx gen_tls_get_addr (void);
189 static rtx gen_thread_pointer (void);
190 static int find_gr_spill (enum ia64_frame_regs, int);
191 static int next_scratch_gr_reg (void);
192 static void mark_reg_gr_used_mask (rtx, void *);
193 static void ia64_compute_frame_size (HOST_WIDE_INT);
194 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
195 static void finish_spill_pointers (void);
196 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
197 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
198 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
199 static rtx gen_movdi_x (rtx, rtx, rtx);
200 static rtx gen_fr_spill_x (rtx, rtx, rtx);
201 static rtx gen_fr_restore_x (rtx, rtx, rtx);
203 static bool ia64_can_eliminate (const int, const int);
204 static enum machine_mode hfa_element_mode (const_tree, bool);
205 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
206 tree, int *, int);
207 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
208 tree, bool);
209 static bool ia64_function_ok_for_sibcall (tree, tree);
210 static bool ia64_return_in_memory (const_tree, const_tree);
211 static rtx ia64_function_value (const_tree, const_tree, bool);
212 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
213 static bool ia64_function_value_regno_p (const unsigned int);
214 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
215 reg_class_t);
216 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
217 bool);
218 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
219 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
220 static void fix_range (const char *);
221 static bool ia64_handle_option (size_t, const char *, int);
222 static struct machine_function * ia64_init_machine_status (void);
223 static void emit_insn_group_barriers (FILE *);
224 static void emit_all_insn_group_barriers (FILE *);
225 static void final_emit_insn_group_barriers (FILE *);
226 static void emit_predicate_relation_info (void);
227 static void ia64_reorg (void);
228 static bool ia64_in_small_data_p (const_tree);
229 static void process_epilogue (FILE *, rtx, bool, bool);
230 static int process_set (FILE *, rtx, rtx, bool, bool);
232 static bool ia64_assemble_integer (rtx, unsigned int, int);
233 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
234 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
235 static void ia64_output_function_end_prologue (FILE *);
237 static int ia64_issue_rate (void);
238 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
239 static void ia64_sched_init (FILE *, int, int);
240 static void ia64_sched_init_global (FILE *, int, int);
241 static void ia64_sched_finish_global (FILE *, int);
242 static void ia64_sched_finish (FILE *, int);
243 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
244 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
245 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
246 static int ia64_variable_issue (FILE *, int, rtx, int);
248 static struct bundle_state *get_free_bundle_state (void);
249 static void free_bundle_state (struct bundle_state *);
250 static void initiate_bundle_states (void);
251 static void finish_bundle_states (void);
252 static unsigned bundle_state_hash (const void *);
253 static int bundle_state_eq_p (const void *, const void *);
254 static int insert_bundle_state (struct bundle_state *);
255 static void initiate_bundle_state_table (void);
256 static void finish_bundle_state_table (void);
257 static int try_issue_nops (struct bundle_state *, int);
258 static int try_issue_insn (struct bundle_state *, rtx);
259 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
260 static int get_max_pos (state_t);
261 static int get_template (state_t, int);
263 static rtx get_next_important_insn (rtx, rtx);
264 static bool important_for_bundling_p (rtx);
265 static void bundling (FILE *, int, rtx, rtx);
267 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
268 HOST_WIDE_INT, tree);
269 static void ia64_file_start (void);
270 static void ia64_globalize_decl_name (FILE *, tree);
272 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
273 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
274 static section *ia64_select_rtx_section (enum machine_mode, rtx,
275 unsigned HOST_WIDE_INT);
276 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
277 ATTRIBUTE_UNUSED;
278 static unsigned int ia64_section_type_flags (tree, const char *, int);
279 static void ia64_init_libfuncs (void)
280 ATTRIBUTE_UNUSED;
281 static void ia64_hpux_init_libfuncs (void)
282 ATTRIBUTE_UNUSED;
283 static void ia64_sysv4_init_libfuncs (void)
284 ATTRIBUTE_UNUSED;
285 static void ia64_vms_init_libfuncs (void)
286 ATTRIBUTE_UNUSED;
287 static void ia64_soft_fp_init_libfuncs (void)
288 ATTRIBUTE_UNUSED;
289 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
290 ATTRIBUTE_UNUSED;
291 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
292 ATTRIBUTE_UNUSED;
294 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
295 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
296 static void ia64_encode_section_info (tree, rtx, int);
297 static rtx ia64_struct_value_rtx (tree, int);
298 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
299 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
300 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
301 static bool ia64_cannot_force_const_mem (rtx);
302 static const char *ia64_mangle_type (const_tree);
303 static const char *ia64_invalid_conversion (const_tree, const_tree);
304 static const char *ia64_invalid_unary_op (int, const_tree);
305 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
306 static enum machine_mode ia64_c_mode_for_suffix (char);
307 static enum machine_mode ia64_promote_function_mode (const_tree,
308 enum machine_mode,
309 int *,
310 const_tree,
311 int);
312 static void ia64_trampoline_init (rtx, tree, rtx);
313 static void ia64_override_options_after_change (void);
315 /* Table of valid machine attributes. */
316 static const struct attribute_spec ia64_attribute_table[] =
318 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
319 { "syscall_linkage", 0, 0, false, true, true, NULL },
320 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
321 #if TARGET_ABI_OPEN_VMS
322 { "common_object", 1, 1, true, false, false, ia64_vms_common_object_attribute},
323 #endif
324 { "version_id", 1, 1, true, false, false,
325 ia64_handle_version_id_attribute },
326 { NULL, 0, 0, false, false, false, NULL }
329 /* Initialize the GCC target structure. */
330 #undef TARGET_ATTRIBUTE_TABLE
331 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
333 #undef TARGET_INIT_BUILTINS
334 #define TARGET_INIT_BUILTINS ia64_init_builtins
336 #undef TARGET_EXPAND_BUILTIN
337 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
339 #undef TARGET_ASM_BYTE_OP
340 #define TARGET_ASM_BYTE_OP "\tdata1\t"
341 #undef TARGET_ASM_ALIGNED_HI_OP
342 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
343 #undef TARGET_ASM_ALIGNED_SI_OP
344 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
345 #undef TARGET_ASM_ALIGNED_DI_OP
346 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
347 #undef TARGET_ASM_UNALIGNED_HI_OP
348 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
349 #undef TARGET_ASM_UNALIGNED_SI_OP
350 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
351 #undef TARGET_ASM_UNALIGNED_DI_OP
352 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
353 #undef TARGET_ASM_INTEGER
354 #define TARGET_ASM_INTEGER ia64_assemble_integer
356 #undef TARGET_ASM_FUNCTION_PROLOGUE
357 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
358 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
359 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
360 #undef TARGET_ASM_FUNCTION_EPILOGUE
361 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
363 #undef TARGET_IN_SMALL_DATA_P
364 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
366 #undef TARGET_SCHED_ADJUST_COST_2
367 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
368 #undef TARGET_SCHED_ISSUE_RATE
369 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
370 #undef TARGET_SCHED_VARIABLE_ISSUE
371 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
372 #undef TARGET_SCHED_INIT
373 #define TARGET_SCHED_INIT ia64_sched_init
374 #undef TARGET_SCHED_FINISH
375 #define TARGET_SCHED_FINISH ia64_sched_finish
376 #undef TARGET_SCHED_INIT_GLOBAL
377 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
378 #undef TARGET_SCHED_FINISH_GLOBAL
379 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
380 #undef TARGET_SCHED_REORDER
381 #define TARGET_SCHED_REORDER ia64_sched_reorder
382 #undef TARGET_SCHED_REORDER2
383 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
385 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
386 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
388 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
389 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
391 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
392 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
393 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
394 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
396 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
397 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
398 ia64_first_cycle_multipass_dfa_lookahead_guard
400 #undef TARGET_SCHED_DFA_NEW_CYCLE
401 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
403 #undef TARGET_SCHED_H_I_D_EXTENDED
404 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
406 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
407 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
409 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
410 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
412 #undef TARGET_SCHED_SET_SCHED_CONTEXT
413 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
415 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
416 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
418 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
419 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
421 #undef TARGET_SCHED_SET_SCHED_FLAGS
422 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
424 #undef TARGET_SCHED_GET_INSN_SPEC_DS
425 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
427 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
428 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
430 #undef TARGET_SCHED_SPECULATE_INSN
431 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
433 #undef TARGET_SCHED_NEEDS_BLOCK_P
434 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
436 #undef TARGET_SCHED_GEN_SPEC_CHECK
437 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
439 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
440 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
441 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
443 #undef TARGET_SCHED_SKIP_RTX_P
444 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
446 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
447 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
448 #undef TARGET_ARG_PARTIAL_BYTES
449 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
451 #undef TARGET_ASM_OUTPUT_MI_THUNK
452 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
453 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
454 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
456 #undef TARGET_ASM_FILE_START
457 #define TARGET_ASM_FILE_START ia64_file_start
459 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
460 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
462 #undef TARGET_REGISTER_MOVE_COST
463 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
464 #undef TARGET_MEMORY_MOVE_COST
465 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
466 #undef TARGET_RTX_COSTS
467 #define TARGET_RTX_COSTS ia64_rtx_costs
468 #undef TARGET_ADDRESS_COST
469 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
471 #undef TARGET_UNSPEC_MAY_TRAP_P
472 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
474 #undef TARGET_MACHINE_DEPENDENT_REORG
475 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
477 #undef TARGET_ENCODE_SECTION_INFO
478 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
480 #undef TARGET_SECTION_TYPE_FLAGS
481 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
483 #ifdef HAVE_AS_TLS
484 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
485 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
486 #endif
488 #undef TARGET_PROMOTE_FUNCTION_MODE
489 #define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
491 /* ??? Investigate. */
492 #if 0
493 #undef TARGET_PROMOTE_PROTOTYPES
494 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
495 #endif
497 #undef TARGET_FUNCTION_VALUE
498 #define TARGET_FUNCTION_VALUE ia64_function_value
499 #undef TARGET_LIBCALL_VALUE
500 #define TARGET_LIBCALL_VALUE ia64_libcall_value
501 #undef TARGET_FUNCTION_VALUE_REGNO_P
502 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
504 #undef TARGET_STRUCT_VALUE_RTX
505 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
506 #undef TARGET_RETURN_IN_MEMORY
507 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
508 #undef TARGET_SETUP_INCOMING_VARARGS
509 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
510 #undef TARGET_STRICT_ARGUMENT_NAMING
511 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
512 #undef TARGET_MUST_PASS_IN_STACK
513 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
515 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
516 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
518 #undef TARGET_ASM_UNWIND_EMIT
519 #define TARGET_ASM_UNWIND_EMIT process_for_unwind_directive
521 #undef TARGET_SCALAR_MODE_SUPPORTED_P
522 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
523 #undef TARGET_VECTOR_MODE_SUPPORTED_P
524 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
526 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
527 in an order different from the specified program order. */
528 #undef TARGET_RELAXED_ORDERING
529 #define TARGET_RELAXED_ORDERING true
531 #undef TARGET_DEFAULT_TARGET_FLAGS
532 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
533 #undef TARGET_HANDLE_OPTION
534 #define TARGET_HANDLE_OPTION ia64_handle_option
536 #undef TARGET_CANNOT_FORCE_CONST_MEM
537 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
539 #undef TARGET_MANGLE_TYPE
540 #define TARGET_MANGLE_TYPE ia64_mangle_type
542 #undef TARGET_INVALID_CONVERSION
543 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
544 #undef TARGET_INVALID_UNARY_OP
545 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
546 #undef TARGET_INVALID_BINARY_OP
547 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
549 #undef TARGET_C_MODE_FOR_SUFFIX
550 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
552 #undef TARGET_CAN_ELIMINATE
553 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
555 #undef TARGET_TRAMPOLINE_INIT
556 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
558 #undef TARGET_INVALID_WITHIN_DOLOOP
559 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
561 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
562 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
564 struct gcc_target targetm = TARGET_INITIALIZER;
566 typedef enum
568 ADDR_AREA_NORMAL, /* normal address area */
569 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
571 ia64_addr_area;
573 static GTY(()) tree small_ident1;
574 static GTY(()) tree small_ident2;
576 static void
577 init_idents (void)
579 if (small_ident1 == 0)
581 small_ident1 = get_identifier ("small");
582 small_ident2 = get_identifier ("__small__");
586 /* Retrieve the address area that has been chosen for the given decl. */
588 static ia64_addr_area
589 ia64_get_addr_area (tree decl)
591 tree model_attr;
593 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
594 if (model_attr)
596 tree id;
598 init_idents ();
599 id = TREE_VALUE (TREE_VALUE (model_attr));
600 if (id == small_ident1 || id == small_ident2)
601 return ADDR_AREA_SMALL;
603 return ADDR_AREA_NORMAL;
606 static tree
607 ia64_handle_model_attribute (tree *node, tree name, tree args,
608 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
610 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
611 ia64_addr_area area;
612 tree arg, decl = *node;
614 init_idents ();
615 arg = TREE_VALUE (args);
616 if (arg == small_ident1 || arg == small_ident2)
618 addr_area = ADDR_AREA_SMALL;
620 else
622 warning (OPT_Wattributes, "invalid argument of %qE attribute",
623 name);
624 *no_add_attrs = true;
627 switch (TREE_CODE (decl))
629 case VAR_DECL:
630 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
631 == FUNCTION_DECL)
632 && !TREE_STATIC (decl))
634 error_at (DECL_SOURCE_LOCATION (decl),
635 "an address area attribute cannot be specified for "
636 "local variables");
637 *no_add_attrs = true;
639 area = ia64_get_addr_area (decl);
640 if (area != ADDR_AREA_NORMAL && addr_area != area)
642 error ("address area of %q+D conflicts with previous "
643 "declaration", decl);
644 *no_add_attrs = true;
646 break;
648 case FUNCTION_DECL:
649 error_at (DECL_SOURCE_LOCATION (decl),
650 "address area attribute cannot be specified for "
651 "functions");
652 *no_add_attrs = true;
653 break;
655 default:
656 warning (OPT_Wattributes, "%qE attribute ignored",
657 name);
658 *no_add_attrs = true;
659 break;
662 return NULL_TREE;
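/* Illustrative usage of the "model" attribute handled above (example added
   for exposition, not part of the original source):

       static int counter __attribute__ ((model ("small")));

   places COUNTER in the small address area, so its address can be formed
   with a single "addl" (the -2MB .. 2MB area noted above).  As the handler
   enforces, the attribute is rejected for non-static local variables and
   for functions.  */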
665 /* The section must have global and overlaid attributes. */
666 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
668 /* Part of the low level implementation of DEC Ada pragma Common_Object which
669 enables the shared use of variables stored in overlaid linker areas
670 corresponding to the use of Fortran COMMON. */
672 static tree
673 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
674 int flags ATTRIBUTE_UNUSED,
675 bool *no_add_attrs)
677 tree decl = *node;
678 tree id, val;
679 if (! DECL_P (decl))
680 abort ();
682 DECL_COMMON (decl) = 1;
683 id = TREE_VALUE (args);
684 if (TREE_CODE (id) == IDENTIFIER_NODE)
685 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
686 else if (TREE_CODE (id) == STRING_CST)
687 val = id;
688 else
690 warning (OPT_Wattributes,
691 "%qE attribute requires a string constant argument", name);
692 *no_add_attrs = true;
693 return NULL_TREE;
695 DECL_SECTION_NAME (decl) = val;
696 return NULL_TREE;
699 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
701 void
702 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
703 unsigned HOST_WIDE_INT size,
704 unsigned int align)
706 tree attr = DECL_ATTRIBUTES (decl);
 708 /* As the common_object attribute sets DECL_SECTION_NAME, check it before
 709 looking up the attribute.  */
710 if (DECL_SECTION_NAME (decl) && attr)
711 attr = lookup_attribute ("common_object", attr);
712 else
713 attr = NULL_TREE;
715 if (!attr)
717 /* Code from elfos.h. */
718 fprintf (file, "%s", COMMON_ASM_OP);
719 assemble_name (file, name);
720 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
721 size, align / BITS_PER_UNIT);
723 else
725 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
726 ASM_OUTPUT_LABEL (file, name);
727 ASM_OUTPUT_SKIP (file, size ? size : 1);
731 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
733 void
734 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
735 tree decl)
737 if (!(flags & SECTION_VMS_OVERLAY))
739 default_elf_asm_named_section (name, flags, decl);
740 return;
742 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
743 abort ();
745 if (flags & SECTION_DECLARED)
747 fprintf (asm_out_file, "\t.section\t%s\n", name);
748 return;
751 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
754 static void
755 ia64_encode_addr_area (tree decl, rtx symbol)
757 int flags;
759 flags = SYMBOL_REF_FLAGS (symbol);
760 switch (ia64_get_addr_area (decl))
762 case ADDR_AREA_NORMAL: break;
763 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
764 default: gcc_unreachable ();
766 SYMBOL_REF_FLAGS (symbol) = flags;
769 static void
770 ia64_encode_section_info (tree decl, rtx rtl, int first)
772 default_encode_section_info (decl, rtl, first);
774 /* Careful not to prod global register variables. */
775 if (TREE_CODE (decl) == VAR_DECL
776 && GET_CODE (DECL_RTL (decl)) == MEM
777 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
778 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
779 ia64_encode_addr_area (decl, XEXP (rtl, 0));
782 /* Return 1 if the operands of a move are ok. */
785 ia64_move_ok (rtx dst, rtx src)
787 /* If we're under init_recog_no_volatile, we'll not be able to use
788 memory_operand. So check the code directly and don't worry about
789 the validity of the underlying address, which should have been
790 checked elsewhere anyway. */
791 if (GET_CODE (dst) != MEM)
792 return 1;
793 if (GET_CODE (src) == MEM)
794 return 0;
795 if (register_operand (src, VOIDmode))
796 return 1;
 798 /* Otherwise, this must be a constant, and it must be either 0 or 0.0 or 1.0. */
799 if (INTEGRAL_MODE_P (GET_MODE (dst)))
800 return src == const0_rtx;
801 else
802 return satisfies_constraint_G (src);
805 /* Return 1 if the operands are ok for a floating point load pair. */
808 ia64_load_pair_ok (rtx dst, rtx src)
810 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
811 return 0;
812 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
813 return 0;
814 switch (GET_CODE (XEXP (src, 0)))
816 case REG:
817 case POST_INC:
818 break;
819 case POST_DEC:
820 return 0;
821 case POST_MODIFY:
823 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
825 if (GET_CODE (adjust) != CONST_INT
826 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
827 return 0;
829 break;
830 default:
831 abort ();
833 return 1;
837 addp4_optimize_ok (rtx op1, rtx op2)
839 return (basereg_operand (op1, GET_MODE(op1)) !=
840 basereg_operand (op2, GET_MODE(op2)));
843 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
844 Return the length of the field, or <= 0 on failure. */
847 ia64_depz_field_mask (rtx rop, rtx rshift)
849 unsigned HOST_WIDE_INT op = INTVAL (rop);
850 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
852 /* Get rid of the zero bits we're shifting in. */
853 op >>= shift;
855 /* We must now have a solid block of 1's at bit 0. */
856 return exact_log2 (op + 1);
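/* Worked example for the check above (added for exposition): with
   rop = 0xff00 and rshift = 8, op >>= shift leaves 0xff and
   exact_log2 (0xff + 1) = 8, so the deposit field is 8 bits wide.
   With rop = 0xf0f0 and rshift = 4, op >>= shift leaves 0xf0f, which is
   not a solid block of 1's at bit 0; exact_log2 (0xf10) is negative and
   the caller treats it as failure.  */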
859 /* Return the TLS model to use for ADDR. */
861 static enum tls_model
862 tls_symbolic_operand_type (rtx addr)
864 enum tls_model tls_kind = TLS_MODEL_NONE;
866 if (GET_CODE (addr) == CONST)
868 if (GET_CODE (XEXP (addr, 0)) == PLUS
869 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
870 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
872 else if (GET_CODE (addr) == SYMBOL_REF)
873 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
875 return tls_kind;
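/* For exposition: the two address shapes recognized above are

       (symbol_ref "foo")
       (const (plus (symbol_ref "foo") (const_int N)))

   and the TLS model recorded on the SYMBOL_REF is returned.  Any other
   shape, including a CONST that does not wrap a PLUS of a SYMBOL_REF,
   yields TLS_MODEL_NONE.  */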
878 /* Return true if X is a constant that is valid for some immediate
879 field in an instruction. */
881 bool
882 ia64_legitimate_constant_p (rtx x)
884 switch (GET_CODE (x))
886 case CONST_INT:
887 case LABEL_REF:
888 return true;
890 case CONST_DOUBLE:
891 if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
892 || GET_MODE (x) == DFmode)
893 return true;
894 return satisfies_constraint_G (x);
896 case CONST:
897 case SYMBOL_REF:
898 /* ??? Short term workaround for PR 28490. We must make the code here
899 match the code in ia64_expand_move and move_operand, even though they
900 are both technically wrong. */
901 if (tls_symbolic_operand_type (x) == 0)
903 HOST_WIDE_INT addend = 0;
904 rtx op = x;
906 if (GET_CODE (op) == CONST
907 && GET_CODE (XEXP (op, 0)) == PLUS
908 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
910 addend = INTVAL (XEXP (XEXP (op, 0), 1));
911 op = XEXP (XEXP (op, 0), 0);
914 if (any_offset_symbol_operand (op, GET_MODE (op))
915 || function_operand (op, GET_MODE (op)))
916 return true;
917 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
918 return (addend & 0x3fff) == 0;
919 return false;
921 return false;
923 case CONST_VECTOR:
925 enum machine_mode mode = GET_MODE (x);
927 if (mode == V2SFmode)
928 return satisfies_constraint_Y (x);
930 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
931 && GET_MODE_SIZE (mode) <= 8);
934 default:
935 return false;
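/* Worked example for the CONST/SYMBOL_REF case above (added for
   exposition): given (const (plus (symbol_ref "x") (const_int 0x8000)))
   where "x" is an aligned-offset symbol, 0x8000 & 0x3fff == 0 and the
   constant is accepted; an addend of 0x8004 leaves nonzero low bits and
   is rejected.  Only addends that are multiples of 0x4000 (16KB) pass
   this check.  */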
939 /* Don't allow TLS addresses to get spilled to memory. */
941 static bool
942 ia64_cannot_force_const_mem (rtx x)
944 if (GET_MODE (x) == RFmode)
945 return true;
946 return tls_symbolic_operand_type (x) != 0;
949 /* Expand a symbolic constant load. */
951 bool
952 ia64_expand_load_address (rtx dest, rtx src)
954 gcc_assert (GET_CODE (dest) == REG);
956 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
957 having to pointer-extend the value afterward. Other forms of address
958 computation below are also more natural to compute as 64-bit quantities.
959 If we've been given an SImode destination register, change it. */
960 if (GET_MODE (dest) != Pmode)
961 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
962 byte_lowpart_offset (Pmode, GET_MODE (dest)));
964 if (TARGET_NO_PIC)
965 return false;
966 if (small_addr_symbolic_operand (src, VOIDmode))
967 return false;
969 if (TARGET_AUTO_PIC)
970 emit_insn (gen_load_gprel64 (dest, src));
971 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
972 emit_insn (gen_load_fptr (dest, src));
973 else if (sdata_symbolic_operand (src, VOIDmode))
974 emit_insn (gen_load_gprel (dest, src));
975 else
977 HOST_WIDE_INT addend = 0;
978 rtx tmp;
980 /* We did split constant offsets in ia64_expand_move, and we did try
981 to keep them split in move_operand, but we also allowed reload to
982 rematerialize arbitrary constants rather than spill the value to
983 the stack and reload it. So we have to be prepared here to split
984 them apart again. */
985 if (GET_CODE (src) == CONST)
987 HOST_WIDE_INT hi, lo;
989 hi = INTVAL (XEXP (XEXP (src, 0), 1));
990 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
991 hi = hi - lo;
993 if (lo != 0)
995 addend = lo;
996 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1000 tmp = gen_rtx_HIGH (Pmode, src);
1001 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1002 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1004 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
1005 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1007 if (addend)
1009 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1010 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1014 return true;
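/* Worked example of the hi/lo split above (added for exposition): the
   expression ((hi & 0x3fff) ^ 0x2000) - 0x2000 sign-extends the low 14
   bits of the offset.  For an original offset of 0x12345:

       lo = sign-extend (0x2345) = -0x1cbb
       hi = 0x12345 - lo         =  0x14000

   so HI is a multiple of 0x4000 and is folded back into the symbolic
   address, while LO fits the signed 14-bit immediate of the trailing
   add emitted when ADDEND is nonzero.  */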
1017 static GTY(()) rtx gen_tls_tga;
1018 static rtx
1019 gen_tls_get_addr (void)
1021 if (!gen_tls_tga)
1022 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1023 return gen_tls_tga;
1026 static GTY(()) rtx thread_pointer_rtx;
1027 static rtx
1028 gen_thread_pointer (void)
1030 if (!thread_pointer_rtx)
1031 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1032 return thread_pointer_rtx;
1035 static rtx
1036 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1037 rtx orig_op1, HOST_WIDE_INT addend)
1039 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1040 rtx orig_op0 = op0;
1041 HOST_WIDE_INT addend_lo, addend_hi;
1043 switch (tls_kind)
1045 case TLS_MODEL_GLOBAL_DYNAMIC:
1046 start_sequence ();
1048 tga_op1 = gen_reg_rtx (Pmode);
1049 emit_insn (gen_load_dtpmod (tga_op1, op1));
1051 tga_op2 = gen_reg_rtx (Pmode);
1052 emit_insn (gen_load_dtprel (tga_op2, op1));
1054 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1055 LCT_CONST, Pmode, 2, tga_op1,
1056 Pmode, tga_op2, Pmode);
1058 insns = get_insns ();
1059 end_sequence ();
1061 if (GET_MODE (op0) != Pmode)
1062 op0 = tga_ret;
1063 emit_libcall_block (insns, op0, tga_ret, op1);
1064 break;
1066 case TLS_MODEL_LOCAL_DYNAMIC:
 1067 /* ??? This isn't the completely proper way to do local-dynamic.
1068 If the call to __tls_get_addr is used only by a single symbol,
1069 then we should (somehow) move the dtprel to the second arg
1070 to avoid the extra add. */
1071 start_sequence ();
1073 tga_op1 = gen_reg_rtx (Pmode);
1074 emit_insn (gen_load_dtpmod (tga_op1, op1));
1076 tga_op2 = const0_rtx;
1078 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1079 LCT_CONST, Pmode, 2, tga_op1,
1080 Pmode, tga_op2, Pmode);
1082 insns = get_insns ();
1083 end_sequence ();
1085 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1086 UNSPEC_LD_BASE);
1087 tmp = gen_reg_rtx (Pmode);
1088 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1090 if (!register_operand (op0, Pmode))
1091 op0 = gen_reg_rtx (Pmode);
1092 if (TARGET_TLS64)
1094 emit_insn (gen_load_dtprel (op0, op1));
1095 emit_insn (gen_adddi3 (op0, tmp, op0));
1097 else
1098 emit_insn (gen_add_dtprel (op0, op1, tmp));
1099 break;
1101 case TLS_MODEL_INITIAL_EXEC:
1102 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1103 addend_hi = addend - addend_lo;
1105 op1 = plus_constant (op1, addend_hi);
1106 addend = addend_lo;
1108 tmp = gen_reg_rtx (Pmode);
1109 emit_insn (gen_load_tprel (tmp, op1));
1111 if (!register_operand (op0, Pmode))
1112 op0 = gen_reg_rtx (Pmode);
1113 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1114 break;
1116 case TLS_MODEL_LOCAL_EXEC:
1117 if (!register_operand (op0, Pmode))
1118 op0 = gen_reg_rtx (Pmode);
1120 op1 = orig_op1;
1121 addend = 0;
1122 if (TARGET_TLS64)
1124 emit_insn (gen_load_tprel (op0, op1));
1125 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1127 else
1128 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1129 break;
1131 default:
1132 gcc_unreachable ();
1135 if (addend)
1136 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1137 orig_op0, 1, OPTAB_DIRECT);
1138 if (orig_op0 == op0)
1139 return NULL_RTX;
1140 if (GET_MODE (orig_op0) == Pmode)
1141 return op0;
1142 return gen_lowpart (GET_MODE (orig_op0), op0);
1146 ia64_expand_move (rtx op0, rtx op1)
1148 enum machine_mode mode = GET_MODE (op0);
1150 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1151 op1 = force_reg (mode, op1);
1153 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1155 HOST_WIDE_INT addend = 0;
1156 enum tls_model tls_kind;
1157 rtx sym = op1;
1159 if (GET_CODE (op1) == CONST
1160 && GET_CODE (XEXP (op1, 0)) == PLUS
1161 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1163 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1164 sym = XEXP (XEXP (op1, 0), 0);
1167 tls_kind = tls_symbolic_operand_type (sym);
1168 if (tls_kind)
1169 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1171 if (any_offset_symbol_operand (sym, mode))
1172 addend = 0;
1173 else if (aligned_offset_symbol_operand (sym, mode))
1175 HOST_WIDE_INT addend_lo, addend_hi;
1177 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1178 addend_hi = addend - addend_lo;
1180 if (addend_lo != 0)
1182 op1 = plus_constant (sym, addend_hi);
1183 addend = addend_lo;
1185 else
1186 addend = 0;
1188 else
1189 op1 = sym;
1191 if (reload_completed)
1193 /* We really should have taken care of this offset earlier. */
1194 gcc_assert (addend == 0);
1195 if (ia64_expand_load_address (op0, op1))
1196 return NULL_RTX;
1199 if (addend)
1201 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1203 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1205 op1 = expand_simple_binop (mode, PLUS, subtarget,
1206 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1207 if (op0 == op1)
1208 return NULL_RTX;
1212 return op1;
1215 /* Split a move from OP1 to OP0 conditional on COND. */
1217 void
1218 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1220 rtx insn, first = get_last_insn ();
1222 emit_move_insn (op0, op1);
1224 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1225 if (INSN_P (insn))
1226 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1227 PATTERN (insn));
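/* For exposition: after the loop above, a plain register move emitted
   under a condition such as (ne (reg:BI p6) (const_int 0)) ends up as

       (cond_exec (ne (reg:BI p6) (const_int 0))
                  (set (reg:DI r14) (reg:DI r15)))

   i.e. every insn produced by emit_move_insn is predicated on a copy of
   COND.  The register numbers here are only illustrative.  */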
1230 /* Split a post-reload TImode or TFmode reference into two DImode
1231 components. This is made extra difficult by the fact that we do
1232 not get any scratch registers to work with, because reload cannot
1233 be prevented from giving us a scratch that overlaps the register
1234 pair involved. So instead, when addressing memory, we tweak the
1235 pointer register up and back down with POST_INCs. Or up and not
1236 back down when we can get away with it.
1238 REVERSED is true when the loads must be done in reversed order
1239 (high word first) for correctness. DEAD is true when the pointer
1240 dies with the second insn we generate and therefore the second
1241 address must not carry a postmodify.
1243 May return an insn which is to be emitted after the moves. */
1245 static rtx
1246 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1248 rtx fixup = 0;
1250 switch (GET_CODE (in))
1252 case REG:
1253 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1254 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1255 break;
1257 case CONST_INT:
1258 case CONST_DOUBLE:
1259 /* Cannot occur reversed. */
1260 gcc_assert (!reversed);
1262 if (GET_MODE (in) != TFmode)
1263 split_double (in, &out[0], &out[1]);
1264 else
1265 /* split_double does not understand how to split a TFmode
1266 quantity into a pair of DImode constants. */
1268 REAL_VALUE_TYPE r;
1269 unsigned HOST_WIDE_INT p[2];
1270 long l[4]; /* TFmode is 128 bits */
1272 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1273 real_to_target (l, &r, TFmode);
1275 if (FLOAT_WORDS_BIG_ENDIAN)
1277 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1278 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1280 else
1282 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1283 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1285 out[0] = GEN_INT (p[0]);
1286 out[1] = GEN_INT (p[1]);
1288 break;
1290 case MEM:
1292 rtx base = XEXP (in, 0);
1293 rtx offset;
1295 switch (GET_CODE (base))
1297 case REG:
1298 if (!reversed)
1300 out[0] = adjust_automodify_address
1301 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1302 out[1] = adjust_automodify_address
1303 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1305 else
1307 /* Reversal requires a pre-increment, which can only
1308 be done as a separate insn. */
1309 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1310 out[0] = adjust_automodify_address
1311 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1312 out[1] = adjust_address (in, DImode, 0);
1314 break;
1316 case POST_INC:
1317 gcc_assert (!reversed && !dead);
1319 /* Just do the increment in two steps. */
1320 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1321 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1322 break;
1324 case POST_DEC:
1325 gcc_assert (!reversed && !dead);
1327 /* Add 8, subtract 24. */
1328 base = XEXP (base, 0);
1329 out[0] = adjust_automodify_address
1330 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1331 out[1] = adjust_automodify_address
1332 (in, DImode,
1333 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1335 break;
1337 case POST_MODIFY:
1338 gcc_assert (!reversed && !dead);
1340 /* Extract and adjust the modification. This case is
1341 trickier than the others, because we might have an
1342 index register, or we might have a combined offset that
1343 doesn't fit a signed 9-bit displacement field. We can
1344 assume the incoming expression is already legitimate. */
1345 offset = XEXP (base, 1);
1346 base = XEXP (base, 0);
1348 out[0] = adjust_automodify_address
1349 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1351 if (GET_CODE (XEXP (offset, 1)) == REG)
1353 /* Can't adjust the postmodify to match. Emit the
1354 original, then a separate addition insn. */
1355 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1356 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1358 else
1360 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1361 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1363 /* Again the postmodify cannot be made to match,
1364 but in this case it's more efficient to get rid
1365 of the postmodify entirely and fix up with an
1366 add insn. */
1367 out[1] = adjust_automodify_address (in, DImode, base, 8);
1368 fixup = gen_adddi3
1369 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1371 else
1373 /* Combined offset still fits in the displacement field.
1374 (We cannot overflow it at the high end.) */
1375 out[1] = adjust_automodify_address
1376 (in, DImode, gen_rtx_POST_MODIFY
1377 (Pmode, base, gen_rtx_PLUS
1378 (Pmode, base,
1379 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1383 break;
1385 default:
1386 gcc_unreachable ();
1388 break;
1391 default:
1392 gcc_unreachable ();
1395 return fixup;
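/* Worked example for the POST_DEC case above (added for exposition): a
   TImode access (mem:TI (post_dec (reg b))) must leave B decremented by
   16.  The split reads the low word through (post_inc b), adding 8, and
   the high word through (post_modify b, b - 24); the net adjustment is
   8 - 24 = -16, matching the original side effect and explaining the
   "Add 8, subtract 24" note.  */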
1398 /* Split a TImode or TFmode move instruction after reload.
1399 This is used by *movtf_internal and *movti_internal. */
1400 void
1401 ia64_split_tmode_move (rtx operands[])
1403 rtx in[2], out[2], insn;
1404 rtx fixup[2];
1405 bool dead = false;
1406 bool reversed = false;
1408 /* It is possible for reload to decide to overwrite a pointer with
1409 the value it points to. In that case we have to do the loads in
1410 the appropriate order so that the pointer is not destroyed too
1411 early. Also we must not generate a postmodify for that second
1412 load, or rws_access_regno will die. */
1413 if (GET_CODE (operands[1]) == MEM
1414 && reg_overlap_mentioned_p (operands[0], operands[1]))
1416 rtx base = XEXP (operands[1], 0);
1417 while (GET_CODE (base) != REG)
1418 base = XEXP (base, 0);
1420 if (REGNO (base) == REGNO (operands[0]))
1421 reversed = true;
1422 dead = true;
1424 /* Another reason to do the moves in reversed order is if the first
1425 element of the target register pair is also the second element of
1426 the source register pair. */
1427 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1428 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1429 reversed = true;
1431 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1432 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1434 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1435 if (GET_CODE (EXP) == MEM \
1436 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1437 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1438 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1439 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1441 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1442 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1443 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1445 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1446 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1447 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1449 if (fixup[0])
1450 emit_insn (fixup[0]);
1451 if (fixup[1])
1452 emit_insn (fixup[1]);
1454 #undef MAYBE_ADD_REG_INC_NOTE
1457 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1458 through memory plus an extra GR scratch register. Except that you can
1459 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1460 SECONDARY_RELOAD_CLASS, but not both.
1462 We got into problems in the first place by allowing a construct like
1463 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1464 This solution attempts to prevent this situation from occurring. When
1465 we see something like the above, we spill the inner register to memory. */
1467 static rtx
1468 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1470 if (GET_CODE (in) == SUBREG
1471 && GET_MODE (SUBREG_REG (in)) == TImode
1472 && GET_CODE (SUBREG_REG (in)) == REG)
1474 rtx memt = assign_stack_temp (TImode, 16, 0);
1475 emit_move_insn (memt, SUBREG_REG (in));
1476 return adjust_address (memt, mode, 0);
1478 else if (force && GET_CODE (in) == REG)
1480 rtx memx = assign_stack_temp (mode, 16, 0);
1481 emit_move_insn (memx, in);
1482 return memx;
1484 else
1485 return in;
1488 /* Expand the movxf or movrf pattern (MODE says which) with the given
1489 OPERANDS, returning true if the pattern should then invoke
1490 DONE. */
1492 bool
1493 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1495 rtx op0 = operands[0];
1497 if (GET_CODE (op0) == SUBREG)
1498 op0 = SUBREG_REG (op0);
1500 /* We must support XFmode loads into general registers for stdarg/vararg,
1501 unprototyped calls, and a rare case where a long double is passed as
1502 an argument after a float HFA fills the FP registers. We split them into
1503 DImode loads for convenience. We also need to support XFmode stores
1504 for the last case. This case does not happen for stdarg/vararg routines,
1505 because we do a block store to memory of unnamed arguments. */
1507 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1509 rtx out[2];
1511 /* We're hoping to transform everything that deals with XFmode
1512 quantities and GR registers early in the compiler. */
1513 gcc_assert (can_create_pseudo_p ());
1515 /* Struct to register can just use TImode instead. */
1516 if ((GET_CODE (operands[1]) == SUBREG
1517 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1518 || (GET_CODE (operands[1]) == REG
1519 && GR_REGNO_P (REGNO (operands[1]))))
1521 rtx op1 = operands[1];
1523 if (GET_CODE (op1) == SUBREG)
1524 op1 = SUBREG_REG (op1);
1525 else
1526 op1 = gen_rtx_REG (TImode, REGNO (op1));
1528 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1529 return true;
1532 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1534 /* Don't word-swap when reading in the constant. */
1535 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1536 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1537 0, mode));
1538 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1539 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1540 0, mode));
1541 return true;
1544 /* If the quantity is in a register not known to be GR, spill it. */
1545 if (register_operand (operands[1], mode))
1546 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1548 gcc_assert (GET_CODE (operands[1]) == MEM);
1550 /* Don't word-swap when reading in the value. */
1551 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1552 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1554 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1555 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1556 return true;
1559 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1561 /* We're hoping to transform everything that deals with XFmode
1562 quantities and GR registers early in the compiler. */
1563 gcc_assert (can_create_pseudo_p ());
1565 /* Op0 can't be a GR_REG here, as that case is handled above.
1566 If op0 is a register, then we spill op1, so that we now have a
1567 MEM operand. This requires creating an XFmode subreg of a TImode reg
1568 to force the spill. */
1569 if (register_operand (operands[0], mode))
1571 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1572 op1 = gen_rtx_SUBREG (mode, op1, 0);
1573 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1576 else
1578 rtx in[2];
1580 gcc_assert (GET_CODE (operands[0]) == MEM);
1582 /* Don't word-swap when writing out the value. */
1583 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1584 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1586 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1587 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1588 return true;
1592 if (!reload_in_progress && !reload_completed)
1594 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1596 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1598 rtx memt, memx, in = operands[1];
1599 if (CONSTANT_P (in))
1600 in = validize_mem (force_const_mem (mode, in));
1601 if (GET_CODE (in) == MEM)
1602 memt = adjust_address (in, TImode, 0);
1603 else
1605 memt = assign_stack_temp (TImode, 16, 0);
1606 memx = adjust_address (memt, mode, 0);
1607 emit_move_insn (memx, in);
1609 emit_move_insn (op0, memt);
1610 return true;
1613 if (!ia64_move_ok (operands[0], operands[1]))
1614 operands[1] = force_reg (mode, operands[1]);
1617 return false;
1620 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1621 with the expression that holds the compare result (in VOIDmode). */
1623 static GTY(()) rtx cmptf_libfunc;
1625 void
1626 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1628 enum rtx_code code = GET_CODE (*expr);
1629 rtx cmp;
1631 /* If we have a BImode input, then we already have a compare result, and
1632 do not need to emit another comparison. */
1633 if (GET_MODE (*op0) == BImode)
1635 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1636 cmp = *op0;
1638 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
 1639 magic number as its third argument that indicates what to do.
1640 The return value is an integer to be compared against zero. */
1641 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1643 enum qfcmp_magic {
1644 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1645 QCMP_UNORD = 2,
1646 QCMP_EQ = 4,
1647 QCMP_LT = 8,
1648 QCMP_GT = 16
1650 int magic;
1651 enum rtx_code ncode;
1652 rtx ret, insns;
1654 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1655 switch (code)
1657 /* 1 = equal, 0 = not equal. Equality operators do
1658 not raise FP_INVALID when given an SNaN operand. */
1659 case EQ: magic = QCMP_EQ; ncode = NE; break;
1660 case NE: magic = QCMP_EQ; ncode = EQ; break;
1661 /* isunordered() from C99. */
1662 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1663 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1664 /* Relational operators raise FP_INVALID when given
1665 an SNaN operand. */
1666 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1667 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1668 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1669 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1670 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
 1671 Expanders for buneq etc. would have to be added to ia64.md
1672 for this to be useful. */
1673 default: gcc_unreachable ();
1676 start_sequence ();
1678 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1679 *op0, TFmode, *op1, TFmode,
1680 GEN_INT (magic), DImode);
1681 cmp = gen_reg_rtx (BImode);
1682 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1683 gen_rtx_fmt_ee (ncode, BImode,
1684 ret, const0_rtx)));
1686 insns = get_insns ();
1687 end_sequence ();
1689 emit_libcall_block (insns, cmp, cmp,
1690 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1691 code = NE;
1693 else
1695 cmp = gen_reg_rtx (BImode);
1696 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1697 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1698 code = NE;
1701 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1702 *op0 = cmp;
1703 *op1 = const0_rtx;
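/* Worked example for the HP-UX TFmode path above (added for
   exposition): a GE comparison uses magic = QCMP_GT | QCMP_EQ | QCMP_INV
   = 16 + 4 + 1 = 21, so the generated sequence is effectively
   _U_Qfcmp (op0, op1, 21) followed by testing the integer result
   against zero with NE.  */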
1706 /* Generate an integral vector comparison. Return true if the condition has
1707 been reversed, and so the sense of the comparison should be inverted. */
1709 static bool
1710 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1711 rtx dest, rtx op0, rtx op1)
1713 bool negate = false;
1714 rtx x;
1716 /* Canonicalize the comparison to EQ, GT, GTU. */
1717 switch (code)
1719 case EQ:
1720 case GT:
1721 case GTU:
1722 break;
1724 case NE:
1725 case LE:
1726 case LEU:
1727 code = reverse_condition (code);
1728 negate = true;
1729 break;
1731 case GE:
1732 case GEU:
1733 code = reverse_condition (code);
1734 negate = true;
1735 /* FALLTHRU */
1737 case LT:
1738 case LTU:
1739 code = swap_condition (code);
1740 x = op0, op0 = op1, op1 = x;
1741 break;
1743 default:
1744 gcc_unreachable ();
1747 /* Unsigned parallel compare is not supported by the hardware. Play some
1748 tricks to turn this into a signed comparison against 0. */
1749 if (code == GTU)
1751 switch (mode)
1753 case V2SImode:
1755 rtx t1, t2, mask;
1757 /* Subtract (-(INT MAX) - 1) from both operands to make
1758 them signed. */
1759 mask = GEN_INT (0x80000000);
1760 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1761 mask = force_reg (mode, mask);
1762 t1 = gen_reg_rtx (mode);
1763 emit_insn (gen_subv2si3 (t1, op0, mask));
1764 t2 = gen_reg_rtx (mode);
1765 emit_insn (gen_subv2si3 (t2, op1, mask));
1766 op0 = t1;
1767 op1 = t2;
1768 code = GT;
1770 break;
1772 case V8QImode:
1773 case V4HImode:
1774 /* Perform a parallel unsigned saturating subtraction. */
1775 x = gen_reg_rtx (mode);
1776 emit_insn (gen_rtx_SET (VOIDmode, x,
1777 gen_rtx_US_MINUS (mode, op0, op1)));
1779 code = EQ;
1780 op0 = x;
1781 op1 = CONST0_RTX (mode);
1782 negate = !negate;
1783 break;
1785 default:
1786 gcc_unreachable ();
1790 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1791 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1793 return negate;
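/* Worked examples for the GTU lowering above (added for exposition).
   For V8QImode and V4HImode, a GTU b becomes (a -us b) != 0 via the
   saturating subtraction: with byte elements a = 200, b = 100 the
   saturating difference is 100 (nonzero, so true), while for a <= b it
   is 0 (false).  For V2SImode, subtracting 0x80000000 from both
   operands biases them so that the signed GT the hardware provides
   gives the unsigned answer.  */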
1796 /* Emit an integral vector conditional move. */
1798 void
1799 ia64_expand_vecint_cmov (rtx operands[])
1801 enum machine_mode mode = GET_MODE (operands[0]);
1802 enum rtx_code code = GET_CODE (operands[3]);
1803 bool negate;
1804 rtx cmp, x, ot, of;
1806 cmp = gen_reg_rtx (mode);
1807 negate = ia64_expand_vecint_compare (code, mode, cmp,
1808 operands[4], operands[5]);
1810 ot = operands[1+negate];
1811 of = operands[2-negate];
1813 if (ot == CONST0_RTX (mode))
1815 if (of == CONST0_RTX (mode))
1817 emit_move_insn (operands[0], ot);
1818 return;
1821 x = gen_rtx_NOT (mode, cmp);
1822 x = gen_rtx_AND (mode, x, of);
1823 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1825 else if (of == CONST0_RTX (mode))
1827 x = gen_rtx_AND (mode, cmp, ot);
1828 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1830 else
1832 rtx t, f;
1834 t = gen_reg_rtx (mode);
1835 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1836 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1838 f = gen_reg_rtx (mode);
1839 x = gen_rtx_NOT (mode, cmp);
1840 x = gen_rtx_AND (mode, x, operands[2-negate]);
1841 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1843 x = gen_rtx_IOR (mode, t, f);
1844 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1848 /* Emit an integral vector min or max operation. Return true if all done. */
1850 bool
1851 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1852 rtx operands[])
1854 rtx xops[6];
1856 /* These four combinations are supported directly. */
1857 if (mode == V8QImode && (code == UMIN || code == UMAX))
1858 return false;
1859 if (mode == V4HImode && (code == SMIN || code == SMAX))
1860 return false;
1862 /* This combination can be implemented with only saturating subtraction. */
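/* Illustrative identity behind the trick below (exposition only): for
   unsigned values, sat_sub (a, b) is a - b when a > b and 0 otherwise,
   so

     umax (a, b) == sat_sub (a, b) + b

   which costs just one parallel saturating subtract and one parallel
   add.  */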
1863 if (mode == V4HImode && code == UMAX)
1865 rtx x, tmp = gen_reg_rtx (mode);
1867 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1868 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1870 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1871 return true;
1874 /* Everything else implemented via vector comparisons. */
1875 xops[0] = operands[0];
1876 xops[4] = xops[1] = operands[1];
1877 xops[5] = xops[2] = operands[2];
1879 switch (code)
1881 case UMIN:
1882 code = LTU;
1883 break;
1884 case UMAX:
1885 code = GTU;
1886 break;
1887 case SMIN:
1888 code = LT;
1889 break;
1890 case SMAX:
1891 code = GT;
1892 break;
1893 default:
1894 gcc_unreachable ();
1896 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1898 ia64_expand_vecint_cmov (xops);
1899 return true;
1902 /* Emit an integral vector widening sum operation. */
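/* Illustrative scalar sketch (exposition only, not emitted code): each
   element of the wide result accumulates two widened elements of
   operands[1] on top of the corresponding element of operands[2],
   roughly

     operands[0][i] = widen (operands[1][j]) + widen (operands[1][k])
                      + operands[2][i];

   where the j/k pairing follows the unpack_l/unpack_h patterns and is
   irrelevant for the reductions this expander serves.  The widening is
   done by unpacking each element against zero (unsigned) or against its
   own sign mask (signed); the sign mask is computed below with a vector
   compare of operands[1] against zero.  */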
1904 void
1905 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1907 rtx l, h, x, s;
1908 enum machine_mode wmode, mode;
1909 rtx (*unpack_l) (rtx, rtx, rtx);
1910 rtx (*unpack_h) (rtx, rtx, rtx);
1911 rtx (*plus) (rtx, rtx, rtx);
1913 wmode = GET_MODE (operands[0]);
1914 mode = GET_MODE (operands[1]);
1916 switch (mode)
1918 case V8QImode:
1919 unpack_l = gen_unpack1_l;
1920 unpack_h = gen_unpack1_h;
1921 plus = gen_addv4hi3;
1922 break;
1923 case V4HImode:
1924 unpack_l = gen_unpack2_l;
1925 unpack_h = gen_unpack2_h;
1926 plus = gen_addv2si3;
1927 break;
1928 default:
1929 gcc_unreachable ();
1932 /* Fill in x with the sign extension of each element in op1. */
1933 if (unsignedp)
1934 x = CONST0_RTX (mode);
1935 else
1937 bool neg;
1939 x = gen_reg_rtx (mode);
1941 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1942 CONST0_RTX (mode));
1943 gcc_assert (!neg);
1946 l = gen_reg_rtx (wmode);
1947 h = gen_reg_rtx (wmode);
1948 s = gen_reg_rtx (wmode);
1950 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1951 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1952 emit_insn (plus (s, l, operands[2]));
1953 emit_insn (plus (operands[0], h, s));
1956 /* Emit a signed or unsigned V8QI dot product operation. */
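/* Illustrative scalar sketch (exposition only, not emitted code): the
   expansion widens both V8QI inputs to V4HI halves, forms four V2SI
   partial products with parallel multiplies, and folds everything into
   operands[3], so each SImode result element ends up as

     operands[0][i] = operands[3][i]
                      + sum of four products operands[1][j] * operands[2][j]

   with the grouping of the j's fixed by the unpack/pmpy patterns; for
   the dot-product reduction this implements, that grouping does not
   matter.  */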
1958 void
1959 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1961 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1963 /* Fill in x1 and x2 with the sign extension of each element. */
1964 if (unsignedp)
1965 x1 = x2 = CONST0_RTX (V8QImode);
1966 else
1968 bool neg;
1970 x1 = gen_reg_rtx (V8QImode);
1971 x2 = gen_reg_rtx (V8QImode);
1973 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1974 CONST0_RTX (V8QImode));
1975 gcc_assert (!neg);
1976 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1977 CONST0_RTX (V8QImode));
1978 gcc_assert (!neg);
1981 l1 = gen_reg_rtx (V4HImode);
1982 l2 = gen_reg_rtx (V4HImode);
1983 h1 = gen_reg_rtx (V4HImode);
1984 h2 = gen_reg_rtx (V4HImode);
1986 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1987 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1988 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1989 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1991 p1 = gen_reg_rtx (V2SImode);
1992 p2 = gen_reg_rtx (V2SImode);
1993 p3 = gen_reg_rtx (V2SImode);
1994 p4 = gen_reg_rtx (V2SImode);
1995 emit_insn (gen_pmpy2_r (p1, l1, l2));
1996 emit_insn (gen_pmpy2_l (p2, l1, l2));
1997 emit_insn (gen_pmpy2_r (p3, h1, h2));
1998 emit_insn (gen_pmpy2_l (p4, h1, h2));
2000 s1 = gen_reg_rtx (V2SImode);
2001 s2 = gen_reg_rtx (V2SImode);
2002 s3 = gen_reg_rtx (V2SImode);
2003 emit_insn (gen_addv2si3 (s1, p1, p2));
2004 emit_insn (gen_addv2si3 (s2, p3, p4));
2005 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2006 emit_insn (gen_addv2si3 (operands[0], s2, s3));
2009 /* Emit the appropriate sequence for a call. */
2011 void
2012 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2013 int sibcall_p)
2015 rtx insn, b0;
2017 addr = XEXP (addr, 0);
2018 addr = convert_memory_address (DImode, addr);
2019 b0 = gen_rtx_REG (DImode, R_BR (0));
2021 /* ??? Should do this for functions known to bind local too. */
2022 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2024 if (sibcall_p)
2025 insn = gen_sibcall_nogp (addr);
2026 else if (! retval)
2027 insn = gen_call_nogp (addr, b0);
2028 else
2029 insn = gen_call_value_nogp (retval, addr, b0);
2030 insn = emit_call_insn (insn);
2032 else
2034 if (sibcall_p)
2035 insn = gen_sibcall_gp (addr);
2036 else if (! retval)
2037 insn = gen_call_gp (addr, b0);
2038 else
2039 insn = gen_call_value_gp (retval, addr, b0);
2040 insn = emit_call_insn (insn);
2042 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2045 if (sibcall_p)
2046 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2048 if (TARGET_ABI_OPEN_VMS)
2049 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2050 gen_rtx_REG (DImode, GR_REG (25)));
2053 static void
2054 reg_emitted (enum ia64_frame_regs r)
2056 if (emitted_frame_related_regs[r] == 0)
2057 emitted_frame_related_regs[r] = current_frame_info.r[r];
2058 else
2059 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2062 static int
2063 get_reg (enum ia64_frame_regs r)
2065 reg_emitted (r);
2066 return current_frame_info.r[r];
2069 static bool
2070 is_emitted (int regno)
2072 unsigned int r;
2074 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2075 if (emitted_frame_related_regs[r] == regno)
2076 return true;
2077 return false;
2080 void
2081 ia64_reload_gp (void)
2083 rtx tmp;
2085 if (current_frame_info.r[reg_save_gp])
2087 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2089 else
2091 HOST_WIDE_INT offset;
2092 rtx offset_r;
2094 offset = (current_frame_info.spill_cfa_off
2095 + current_frame_info.spill_size);
2096 if (frame_pointer_needed)
2098 tmp = hard_frame_pointer_rtx;
2099 offset = -offset;
2101 else
2103 tmp = stack_pointer_rtx;
2104 offset = current_frame_info.total_size - offset;
2107 offset_r = GEN_INT (offset);
2108 if (satisfies_constraint_I (offset_r))
2109 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2110 else
2112 emit_move_insn (pic_offset_table_rtx, offset_r);
2113 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2114 pic_offset_table_rtx, tmp));
2117 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2120 emit_move_insn (pic_offset_table_rtx, tmp);
2123 void
2124 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2125 rtx scratch_b, int noreturn_p, int sibcall_p)
2127 rtx insn;
2128 bool is_desc = false;
2130 /* If we find we're calling through a register, then we're actually
2131 calling through a descriptor, so load up the values. */
2132 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2134 rtx tmp;
2135 bool addr_dead_p;
2137 /* ??? We are currently constrained to *not* use peep2, because
2138 we can legitimately change the global lifetime of the GP
2139 (in the form of killing where previously live). This is
2140 because a call through a descriptor doesn't use the previous
2141 value of the GP, while a direct call does, and we do not
2142 commit to either form until the split here.
2144 That said, this means that we lack precise life info for
2145 whether ADDR is dead after this call. This is not terribly
2146 important, since we can fix things up essentially for free
2147 with the POST_DEC below, but it's nice to not use it when we
2148 can immediately tell it's not necessary. */
2149 addr_dead_p = ((noreturn_p || sibcall_p
2150 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2151 REGNO (addr)))
2152 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2154 /* Load the code address into scratch_b. */
2155 tmp = gen_rtx_POST_INC (Pmode, addr);
2156 tmp = gen_rtx_MEM (Pmode, tmp);
2157 emit_move_insn (scratch_r, tmp);
2158 emit_move_insn (scratch_b, scratch_r);
2160 /* Load the GP address. If ADDR is not dead here, then we must
2161 revert the change made above via the POST_INCREMENT. */
2162 if (!addr_dead_p)
2163 tmp = gen_rtx_POST_DEC (Pmode, addr);
2164 else
2165 tmp = addr;
2166 tmp = gen_rtx_MEM (Pmode, tmp);
2167 emit_move_insn (pic_offset_table_rtx, tmp);
2169 is_desc = true;
2170 addr = scratch_b;
2173 if (sibcall_p)
2174 insn = gen_sibcall_nogp (addr);
2175 else if (retval)
2176 insn = gen_call_value_nogp (retval, addr, retaddr);
2177 else
2178 insn = gen_call_nogp (addr, retaddr);
2179 emit_call_insn (insn);
2181 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2182 ia64_reload_gp ();
2185 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2187 This differs from the generic code in that we know about the zero-extending
2188 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2189 also know that ld.acq+cmpxchg.rel equals a full barrier.
2191 The loop we want to generate looks like
2193 cmp_reg = mem;
2194 label:
2195 old_reg = cmp_reg;
2196 new_reg = cmp_reg op val;
2197 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2198 if (cmp_reg != old_reg)
2199 goto label;
2201 Note that we only do the plain load from memory once. Subsequent
2202 iterations use the value loaded by the compare-and-swap pattern. */
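/* For exposition only, the loop above behaves roughly like this C
   fragment written with the GCC __sync builtin (this is not what we
   emit; OP stands for the rtx CODE being expanded):

     tmp = *mem;
     do
       {
         old = tmp;
         tmp = __sync_val_compare_and_swap (mem, old, old OP val);
       }
     while (tmp != old);

   OLD_DST, when present, receives the pre-operation value and NEW_DST
   the post-operation value.  */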
2204 void
2205 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2206 rtx old_dst, rtx new_dst)
2208 enum machine_mode mode = GET_MODE (mem);
2209 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2210 enum insn_code icode;
2212 /* Special case for using fetchadd. */
2213 if ((mode == SImode || mode == DImode)
2214 && (code == PLUS || code == MINUS)
2215 && fetchadd_operand (val, mode))
2217 if (code == MINUS)
2218 val = GEN_INT (-INTVAL (val));
2220 if (!old_dst)
2221 old_dst = gen_reg_rtx (mode);
2223 emit_insn (gen_memory_barrier ());
2225 if (mode == SImode)
2226 icode = CODE_FOR_fetchadd_acq_si;
2227 else
2228 icode = CODE_FOR_fetchadd_acq_di;
2229 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2231 if (new_dst)
2233 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2234 true, OPTAB_WIDEN);
2235 if (new_reg != new_dst)
2236 emit_move_insn (new_dst, new_reg);
2238 return;
2241 /* Because of the volatile mem read, we get an ld.acq, which is the
2242 front half of the full barrier. The end half is the cmpxchg.rel. */
2243 gcc_assert (MEM_VOLATILE_P (mem));
2245 old_reg = gen_reg_rtx (DImode);
2246 cmp_reg = gen_reg_rtx (DImode);
2247 label = gen_label_rtx ();
2249 if (mode != DImode)
2251 val = simplify_gen_subreg (DImode, val, mode, 0);
2252 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2254 else
2255 emit_move_insn (cmp_reg, mem);
2257 emit_label (label);
2259 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2260 emit_move_insn (old_reg, cmp_reg);
2261 emit_move_insn (ar_ccv, cmp_reg);
2263 if (old_dst)
2264 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2266 new_reg = cmp_reg;
2267 if (code == NOT)
2269 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2270 true, OPTAB_DIRECT);
2271 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2273 else
2274 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2275 true, OPTAB_DIRECT);
2277 if (mode != DImode)
2278 new_reg = gen_lowpart (mode, new_reg);
2279 if (new_dst)
2280 emit_move_insn (new_dst, new_reg);
2282 switch (mode)
2284 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2285 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2286 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2287 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2288 default:
2289 gcc_unreachable ();
2292 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2294 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2297 /* Begin the assembly file. */
2299 static void
2300 ia64_file_start (void)
2302 /* Variable tracking should be run after all optimizations which change order
2303 of insns. It also needs a valid CFG. This can't be done in
2304 ia64_override_options, because flag_var_tracking is finalized after
2305 that. */
2306 ia64_flag_var_tracking = flag_var_tracking;
2307 flag_var_tracking = 0;
2309 default_file_start ();
2310 emit_safe_across_calls ();
2313 void
2314 emit_safe_across_calls (void)
2316 unsigned int rs, re;
2317 int out_state;
2319 rs = 1;
2320 out_state = 0;
2321 while (1)
2323 while (rs < 64 && call_used_regs[PR_REG (rs)])
2324 rs++;
2325 if (rs >= 64)
2326 break;
2327 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2328 continue;
2329 if (out_state == 0)
2331 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2332 out_state = 1;
2334 else
2335 fputc (',', asm_out_file);
2336 if (re == rs + 1)
2337 fprintf (asm_out_file, "p%u", rs);
2338 else
2339 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2340 rs = re + 1;
2342 if (out_state)
2343 fputc ('\n', asm_out_file);
2346 /* Globalize a declaration. */
2348 static void
2349 ia64_globalize_decl_name (FILE * stream, tree decl)
2351 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2352 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2353 if (version_attr)
2355 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2356 const char *p = TREE_STRING_POINTER (v);
2357 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2359 targetm.asm_out.globalize_label (stream, name);
2360 if (TREE_CODE (decl) == FUNCTION_DECL)
2361 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2364 /* Helper function for ia64_compute_frame_size: find an appropriate general
2365 register to spill some special register to. SPECIAL_SPILL_MASK contains
2366 bits in GR0 to GR31 that have already been allocated by this routine.
2367 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2369 static int
2370 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2372 int regno;
2374 if (emitted_frame_related_regs[r] != 0)
2376 regno = emitted_frame_related_regs[r];
2377 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2378 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2379 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2380 else if (current_function_is_leaf
2381 && regno >= GR_REG (1) && regno <= GR_REG (31))
2382 current_frame_info.gr_used_mask |= 1 << regno;
2384 return regno;
2387 /* If this is a leaf function, first try an otherwise unused
2388 call-clobbered register. */
2389 if (current_function_is_leaf)
2391 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2392 if (! df_regs_ever_live_p (regno)
2393 && call_used_regs[regno]
2394 && ! fixed_regs[regno]
2395 && ! global_regs[regno]
2396 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2397 && ! is_emitted (regno))
2399 current_frame_info.gr_used_mask |= 1 << regno;
2400 return regno;
2404 if (try_locals)
2406 regno = current_frame_info.n_local_regs;
2407 /* If there is a frame pointer, then we can't use loc79, because
2408 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2409 reg_name switching code in ia64_expand_prologue. */
2410 while (regno < (80 - frame_pointer_needed))
2411 if (! is_emitted (LOC_REG (regno++)))
2413 current_frame_info.n_local_regs = regno;
2414 return LOC_REG (regno - 1);
2418 /* Failed to find a general register to spill to. Must use stack. */
2419 return 0;
2422 /* In order to make for nice schedules, we try to allocate every temporary
2423 to a different register. We must of course stay away from call-saved,
2424 fixed, and global registers. We must also stay away from registers
2425 allocated in current_frame_info.gr_used_mask, since those include regs
2426 used all through the prologue.
2428 Any register allocated here must be used immediately. The idea is to
2429 aid scheduling, not to solve data flow problems. */
2431 static int last_scratch_gr_reg;
2433 static int
2434 next_scratch_gr_reg (void)
2436 int i, regno;
2438 for (i = 0; i < 32; ++i)
2440 regno = (last_scratch_gr_reg + i + 1) & 31;
2441 if (call_used_regs[regno]
2442 && ! fixed_regs[regno]
2443 && ! global_regs[regno]
2444 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2446 last_scratch_gr_reg = regno;
2447 return regno;
2451 /* There must be _something_ available. */
2452 gcc_unreachable ();
2455 /* Helper function for ia64_compute_frame_size, called through
2456 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2458 static void
2459 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2461 unsigned int regno = REGNO (reg);
2462 if (regno < 32)
2464 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2465 for (i = 0; i < n; ++i)
2466 current_frame_info.gr_used_mask |= 1 << (regno + i);
2471 /* Returns the number of bytes offset between the frame pointer and the stack
2472 pointer for the current function. SIZE is the number of bytes of space
2473 needed for local variables. */
2475 static void
2476 ia64_compute_frame_size (HOST_WIDE_INT size)
2478 HOST_WIDE_INT total_size;
2479 HOST_WIDE_INT spill_size = 0;
2480 HOST_WIDE_INT extra_spill_size = 0;
2481 HOST_WIDE_INT pretend_args_size;
2482 HARD_REG_SET mask;
2483 int n_spilled = 0;
2484 int spilled_gr_p = 0;
2485 int spilled_fr_p = 0;
2486 unsigned int regno;
2487 int min_regno;
2488 int max_regno;
2489 int i;
2491 if (current_frame_info.initialized)
2492 return;
2494 memset (&current_frame_info, 0, sizeof current_frame_info);
2495 CLEAR_HARD_REG_SET (mask);
2497 /* Don't allocate scratches to the return register. */
2498 diddle_return_value (mark_reg_gr_used_mask, NULL);
2500 /* Don't allocate scratches to the EH scratch registers. */
2501 if (cfun->machine->ia64_eh_epilogue_sp)
2502 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2503 if (cfun->machine->ia64_eh_epilogue_bsp)
2504 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2506 /* Find the size of the register stack frame. We have only 80 local
2507 registers, because we reserve 8 for the inputs and 8 for the
2508 outputs. */
2510 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2511 since we'll be adjusting that down later. */
2512 regno = LOC_REG (78) + ! frame_pointer_needed;
2513 for (; regno >= LOC_REG (0); regno--)
2514 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2515 break;
2516 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2518 /* For functions marked with the syscall_linkage attribute, we must mark
2519 all eight input registers as in use, so that locals aren't visible to
2520 the caller. */
2522 if (cfun->machine->n_varargs > 0
2523 || lookup_attribute ("syscall_linkage",
2524 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2525 current_frame_info.n_input_regs = 8;
2526 else
2528 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2529 if (df_regs_ever_live_p (regno))
2530 break;
2531 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2534 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2535 if (df_regs_ever_live_p (regno))
2536 break;
2537 i = regno - OUT_REG (0) + 1;
2539 #ifndef PROFILE_HOOK
2540 /* When -p profiling, we need one output register for the mcount argument.
2541 Likewise for -a profiling for the bb_init_func argument. For -ax
2542 profiling, we need two output registers for the two bb_init_trace_func
2543 arguments. */
2544 if (crtl->profile)
2545 i = MAX (i, 1);
2546 #endif
2547 current_frame_info.n_output_regs = i;
2549 /* ??? No rotating register support yet. */
2550 current_frame_info.n_rotate_regs = 0;
2552 /* Discover which registers need spilling, and how much room that
2553 will take. Begin with floating point and general registers,
2554 which will always wind up on the stack. */
2556 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2557 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2559 SET_HARD_REG_BIT (mask, regno);
2560 spill_size += 16;
2561 n_spilled += 1;
2562 spilled_fr_p = 1;
2565 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2566 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2568 SET_HARD_REG_BIT (mask, regno);
2569 spill_size += 8;
2570 n_spilled += 1;
2571 spilled_gr_p = 1;
2574 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2575 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2577 SET_HARD_REG_BIT (mask, regno);
2578 spill_size += 8;
2579 n_spilled += 1;
2582 /* Now come all special registers that might get saved in other
2583 general registers. */
2585 if (frame_pointer_needed)
2587 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2588 /* If we did not get a register, then we take LOC79. This is guaranteed
2589 to be free, even if regs_ever_live is already set, because this is
2590 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2591 as we don't count loc79 above. */
2592 if (current_frame_info.r[reg_fp] == 0)
2594 current_frame_info.r[reg_fp] = LOC_REG (79);
2595 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2599 if (! current_function_is_leaf)
2601 /* Emit a save of BR0 if we call other functions. Do this even
2602 if this function doesn't return, as EH depends on this to be
2603 able to unwind the stack. */
2604 SET_HARD_REG_BIT (mask, BR_REG (0));
2606 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2607 if (current_frame_info.r[reg_save_b0] == 0)
2609 extra_spill_size += 8;
2610 n_spilled += 1;
2613 /* Similarly for ar.pfs. */
2614 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2615 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2616 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2618 extra_spill_size += 8;
2619 n_spilled += 1;
2622 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2623 registers are clobbered, so we fall back to the stack. */
2624 current_frame_info.r[reg_save_gp]
2625 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2626 if (current_frame_info.r[reg_save_gp] == 0)
2628 SET_HARD_REG_BIT (mask, GR_REG (1));
2629 spill_size += 8;
2630 n_spilled += 1;
2633 else
2635 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2637 SET_HARD_REG_BIT (mask, BR_REG (0));
2638 extra_spill_size += 8;
2639 n_spilled += 1;
2642 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2644 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2645 current_frame_info.r[reg_save_ar_pfs]
2646 = find_gr_spill (reg_save_ar_pfs, 1);
2647 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2649 extra_spill_size += 8;
2650 n_spilled += 1;
2655 /* Unwind descriptor hackery: things are most efficient if we allocate
2656 consecutive GR save registers for RP, PFS, FP in that order. However,
2657 it is absolutely critical that FP get the only hard register that's
2658 guaranteed to be free, so we allocated it first. If all three did
2659 happen to be allocated hard regs, and are consecutive, rearrange them
2660 into the preferred order now.
2662 If we have already emitted code for any of those registers,
2663 then it's already too late to change. */
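/* Hypothetical example: if the allocations above yielded three
   consecutive hard regs N, N+1 and N+2 assigned as FP = N, B0 = N+1
   and AR.PFS = N+2 (and none of them emitted yet), the code below
   reshuffles them into the preferred B0 = N, AR.PFS = N+1, FP = N+2
   order.  */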
2664 min_regno = MIN (current_frame_info.r[reg_fp],
2665 MIN (current_frame_info.r[reg_save_b0],
2666 current_frame_info.r[reg_save_ar_pfs]));
2667 max_regno = MAX (current_frame_info.r[reg_fp],
2668 MAX (current_frame_info.r[reg_save_b0],
2669 current_frame_info.r[reg_save_ar_pfs]));
2670 if (min_regno > 0
2671 && min_regno + 2 == max_regno
2672 && (current_frame_info.r[reg_fp] == min_regno + 1
2673 || current_frame_info.r[reg_save_b0] == min_regno + 1
2674 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2675 && (emitted_frame_related_regs[reg_save_b0] == 0
2676 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2677 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2678 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2679 && (emitted_frame_related_regs[reg_fp] == 0
2680 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2682 current_frame_info.r[reg_save_b0] = min_regno;
2683 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2684 current_frame_info.r[reg_fp] = min_regno + 2;
2687 /* See if we need to store the predicate register block. */
2688 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2689 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2690 break;
2691 if (regno <= PR_REG (63))
2693 SET_HARD_REG_BIT (mask, PR_REG (0));
2694 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2695 if (current_frame_info.r[reg_save_pr] == 0)
2697 extra_spill_size += 8;
2698 n_spilled += 1;
2701 /* ??? Mark them all as used so that register renaming and such
2702 are free to use them. */
2703 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2704 df_set_regs_ever_live (regno, true);
2707 /* If we're forced to use st8.spill, we're forced to save and restore
2708 ar.unat as well. The check for existing liveness allows inline asm
2709 to touch ar.unat. */
2710 if (spilled_gr_p || cfun->machine->n_varargs
2711 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2713 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2714 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2715 current_frame_info.r[reg_save_ar_unat]
2716 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2717 if (current_frame_info.r[reg_save_ar_unat] == 0)
2719 extra_spill_size += 8;
2720 n_spilled += 1;
2724 if (df_regs_ever_live_p (AR_LC_REGNUM))
2726 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2727 current_frame_info.r[reg_save_ar_lc]
2728 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2729 if (current_frame_info.r[reg_save_ar_lc] == 0)
2731 extra_spill_size += 8;
2732 n_spilled += 1;
2736 /* If we have an odd number of words of pretend arguments written to
2737 the stack, then the FR save area will be unaligned. We round the
2738 size of this area up to keep things 16 byte aligned. */
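/* For example (illustrative numbers): with 8 bytes of pretend
   arguments and FR registers spilled, IA64_STACK_ALIGN rounds the 8
   up to 16 so that the 16-byte FR saves that follow stay aligned.  */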
2739 if (spilled_fr_p)
2740 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2741 else
2742 pretend_args_size = crtl->args.pretend_args_size;
2744 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2745 + crtl->outgoing_args_size);
2746 total_size = IA64_STACK_ALIGN (total_size);
2748 /* We always use the 16-byte scratch area provided by the caller, but
2749 if we are a leaf function, there's no one to which we need to provide
2750 a scratch area. */
2751 if (current_function_is_leaf)
2752 total_size = MAX (0, total_size - 16);
2754 current_frame_info.total_size = total_size;
2755 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2756 current_frame_info.spill_size = spill_size;
2757 current_frame_info.extra_spill_size = extra_spill_size;
2758 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2759 current_frame_info.n_spilled = n_spilled;
2760 current_frame_info.initialized = reload_completed;
2763 /* Worker function for TARGET_CAN_ELIMINATE. */
2765 bool
2766 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2768 return (to == BR_REG (0) ? current_function_is_leaf : true);
2771 /* Compute the initial difference between the specified pair of registers. */
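/* Worked example with made-up numbers: for a non-leaf function with
   total_size = 64, outgoing_args_size = 16 and no pretend args, the
   formulas below give FRAME_POINTER->HARD_FRAME_POINTER = -32,
   FRAME_POINTER->STACK_POINTER = 32, ARG_POINTER->HARD_FRAME_POINTER = 16
   and ARG_POINTER->STACK_POINTER = 80.  */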
2773 HOST_WIDE_INT
2774 ia64_initial_elimination_offset (int from, int to)
2776 HOST_WIDE_INT offset;
2778 ia64_compute_frame_size (get_frame_size ());
2779 switch (from)
2781 case FRAME_POINTER_REGNUM:
2782 switch (to)
2784 case HARD_FRAME_POINTER_REGNUM:
2785 if (current_function_is_leaf)
2786 offset = -current_frame_info.total_size;
2787 else
2788 offset = -(current_frame_info.total_size
2789 - crtl->outgoing_args_size - 16);
2790 break;
2792 case STACK_POINTER_REGNUM:
2793 if (current_function_is_leaf)
2794 offset = 0;
2795 else
2796 offset = 16 + crtl->outgoing_args_size;
2797 break;
2799 default:
2800 gcc_unreachable ();
2802 break;
2804 case ARG_POINTER_REGNUM:
2805 /* Arguments start above the 16 byte save area, unless stdarg,
2806 in which case we store through the 16 byte save area. */
2807 switch (to)
2809 case HARD_FRAME_POINTER_REGNUM:
2810 offset = 16 - crtl->args.pretend_args_size;
2811 break;
2813 case STACK_POINTER_REGNUM:
2814 offset = (current_frame_info.total_size
2815 + 16 - crtl->args.pretend_args_size);
2816 break;
2818 default:
2819 gcc_unreachable ();
2821 break;
2823 default:
2824 gcc_unreachable ();
2827 return offset;
2830 /* If there are more than a trivial number of register spills, we use
2831 two interleaved iterators so that we can get two memory references
2832 per insn group.
2834 In order to simplify things in the prologue and epilogue expanders,
2835 we use helper functions to fix up the memory references after the
2836 fact with the appropriate offsets to a POST_MODIFY memory mode.
2837 The following data structure tracks the state of the two iterators
2838 while insns are being emitted. */
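/* Illustrative sketch of the interleaving (pseudo-code, not emitted
   RTL): with two iterators the spill stores alternate between two base
   registers, and each base then advances past the slot the other
   iterator will use (16 bytes in the common case of 8-byte GR saves)
   via the POST_MODIFY fix-up:

     *iter0 = reg_a;  iter0 += 16;
     *iter1 = reg_b;  iter1 += 16;
     *iter0 = reg_c;  iter0 += 16;
     ...

   so consecutive spills go through distinct address registers and two
   memory references can issue in the same insn group.  */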
2840 struct spill_fill_data
2842 rtx init_after; /* point at which to emit initializations */
2843 rtx init_reg[2]; /* initial base register */
2844 rtx iter_reg[2]; /* the iterator registers */
2845 rtx *prev_addr[2]; /* address of last memory use */
2846 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2847 HOST_WIDE_INT prev_off[2]; /* last offset */
2848 int n_iter; /* number of iterators in use */
2849 int next_iter; /* next iterator to use */
2850 unsigned int save_gr_used_mask;
2853 static struct spill_fill_data spill_fill_data;
2855 static void
2856 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2858 int i;
2860 spill_fill_data.init_after = get_last_insn ();
2861 spill_fill_data.init_reg[0] = init_reg;
2862 spill_fill_data.init_reg[1] = init_reg;
2863 spill_fill_data.prev_addr[0] = NULL;
2864 spill_fill_data.prev_addr[1] = NULL;
2865 spill_fill_data.prev_insn[0] = NULL;
2866 spill_fill_data.prev_insn[1] = NULL;
2867 spill_fill_data.prev_off[0] = cfa_off;
2868 spill_fill_data.prev_off[1] = cfa_off;
2869 spill_fill_data.next_iter = 0;
2870 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2872 spill_fill_data.n_iter = 1 + (n_spills > 2);
2873 for (i = 0; i < spill_fill_data.n_iter; ++i)
2875 int regno = next_scratch_gr_reg ();
2876 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2877 current_frame_info.gr_used_mask |= 1 << regno;
2881 static void
2882 finish_spill_pointers (void)
2884 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2887 static rtx
2888 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2890 int iter = spill_fill_data.next_iter;
2891 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2892 rtx disp_rtx = GEN_INT (disp);
2893 rtx mem;
2895 if (spill_fill_data.prev_addr[iter])
2897 if (satisfies_constraint_N (disp_rtx))
2899 *spill_fill_data.prev_addr[iter]
2900 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2901 gen_rtx_PLUS (DImode,
2902 spill_fill_data.iter_reg[iter],
2903 disp_rtx));
2904 add_reg_note (spill_fill_data.prev_insn[iter],
2905 REG_INC, spill_fill_data.iter_reg[iter]);
2907 else
2909 /* ??? Could use register post_modify for loads. */
2910 if (!satisfies_constraint_I (disp_rtx))
2912 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2913 emit_move_insn (tmp, disp_rtx);
2914 disp_rtx = tmp;
2916 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2917 spill_fill_data.iter_reg[iter], disp_rtx));
2920 /* Micro-optimization: if we've created a frame pointer, it's at
2921 CFA 0, which may allow the real iterator to be initialized lower,
2922 slightly increasing parallelism. Also, if there are few saves
2923 it may eliminate the iterator entirely. */
2924 else if (disp == 0
2925 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2926 && frame_pointer_needed)
2928 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2929 set_mem_alias_set (mem, get_varargs_alias_set ());
2930 return mem;
2932 else
2934 rtx seq, insn;
2936 if (disp == 0)
2937 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2938 spill_fill_data.init_reg[iter]);
2939 else
2941 start_sequence ();
2943 if (!satisfies_constraint_I (disp_rtx))
2945 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2946 emit_move_insn (tmp, disp_rtx);
2947 disp_rtx = tmp;
2950 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2951 spill_fill_data.init_reg[iter],
2952 disp_rtx));
2954 seq = get_insns ();
2955 end_sequence ();
2958 /* Be careful if this is the first insn in a sequence. */
2959 if (spill_fill_data.init_after)
2960 insn = emit_insn_after (seq, spill_fill_data.init_after);
2961 else
2963 rtx first = get_insns ();
2964 if (first)
2965 insn = emit_insn_before (seq, first);
2966 else
2967 insn = emit_insn (seq);
2969 spill_fill_data.init_after = insn;
2972 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2974 /* ??? Not all of the spills are for varargs, but some of them are.
2975 The rest of the spills belong in an alias set of their own. But
2976 it doesn't actually hurt to include them here. */
2977 set_mem_alias_set (mem, get_varargs_alias_set ());
2979 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2980 spill_fill_data.prev_off[iter] = cfa_off;
2982 if (++iter >= spill_fill_data.n_iter)
2983 iter = 0;
2984 spill_fill_data.next_iter = iter;
2986 return mem;
2989 static void
2990 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2991 rtx frame_reg)
2993 int iter = spill_fill_data.next_iter;
2994 rtx mem, insn;
2996 mem = spill_restore_mem (reg, cfa_off);
2997 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2998 spill_fill_data.prev_insn[iter] = insn;
3000 if (frame_reg)
3002 rtx base;
3003 HOST_WIDE_INT off;
3005 RTX_FRAME_RELATED_P (insn) = 1;
3007 /* Don't even pretend that the unwind code can intuit its way
3008 through a pair of interleaved post_modify iterators. Just
3009 provide the correct answer. */
3011 if (frame_pointer_needed)
3013 base = hard_frame_pointer_rtx;
3014 off = - cfa_off;
3016 else
3018 base = stack_pointer_rtx;
3019 off = current_frame_info.total_size - cfa_off;
3022 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3023 gen_rtx_SET (VOIDmode,
3024 gen_rtx_MEM (GET_MODE (reg),
3025 plus_constant (base, off)),
3026 frame_reg));
3030 static void
3031 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3033 int iter = spill_fill_data.next_iter;
3034 rtx insn;
3036 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3037 GEN_INT (cfa_off)));
3038 spill_fill_data.prev_insn[iter] = insn;
3041 /* Wrapper functions that discard the CONST_INT spill offset. These
3042 exist so that we can give gr_spill/gr_fill the offset they need and
3043 use a consistent function interface. */
3045 static rtx
3046 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3048 return gen_movdi (dest, src);
3051 static rtx
3052 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3054 return gen_fr_spill (dest, src);
3057 static rtx
3058 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3060 return gen_fr_restore (dest, src);
3063 /* Called after register allocation to add any instructions needed for the
3064 prologue. Using a prologue insn is favored compared to putting all of the
3065 instructions in output_function_prologue(), since it allows the scheduler
3066 to intermix instructions with the saves of the caller saved registers. In
3067 some cases, it might be necessary to emit a barrier instruction as the last
3068 insn to prevent such scheduling.
3070 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3071 so that the debug info generation code can handle them properly.
3073 The register save area is laid out like so:
3074 cfa+16
3075 [ varargs spill area ]
3076 [ fr register spill area ]
3077 [ br register spill area ]
3078 [ ar register spill area ]
3079 [ pr register spill area ]
3080 [ gr register spill area ] */
3082 /* ??? Get inefficient code when the frame size is larger than can fit in an
3083 adds instruction. */
3085 void
3086 ia64_expand_prologue (void)
3088 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3089 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3090 rtx reg, alt_reg;
3092 ia64_compute_frame_size (get_frame_size ());
3093 last_scratch_gr_reg = 15;
3095 if (dump_file)
3097 fprintf (dump_file, "ia64 frame related registers "
3098 "recorded in current_frame_info.r[]:\n");
3099 #define PRINTREG(a) if (current_frame_info.r[a]) \
3100 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3101 PRINTREG(reg_fp);
3102 PRINTREG(reg_save_b0);
3103 PRINTREG(reg_save_pr);
3104 PRINTREG(reg_save_ar_pfs);
3105 PRINTREG(reg_save_ar_unat);
3106 PRINTREG(reg_save_ar_lc);
3107 PRINTREG(reg_save_gp);
3108 #undef PRINTREG
3111 /* If there is no epilogue, then we don't need some prologue insns.
3112 We need to avoid emitting the dead prologue insns, because flow
3113 will complain about them. */
3114 if (optimize)
3116 edge e;
3117 edge_iterator ei;
3119 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3120 if ((e->flags & EDGE_FAKE) == 0
3121 && (e->flags & EDGE_FALLTHRU) != 0)
3122 break;
3123 epilogue_p = (e != NULL);
3125 else
3126 epilogue_p = 1;
3128 /* Set the local, input, and output register names. We need to do this
3129 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3130 half. If we use in/loc/out register names, then we get assembler errors
3131 in crtn.S because there is no alloc insn or regstk directive in there. */
3132 if (! TARGET_REG_NAMES)
3134 int inputs = current_frame_info.n_input_regs;
3135 int locals = current_frame_info.n_local_regs;
3136 int outputs = current_frame_info.n_output_regs;
3138 for (i = 0; i < inputs; i++)
3139 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3140 for (i = 0; i < locals; i++)
3141 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3142 for (i = 0; i < outputs; i++)
3143 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3146 /* Set the frame pointer register name. The regnum is logically loc79,
3147 but of course we'll not have allocated that many locals. Rather than
3148 worrying about renumbering the existing rtxs, we adjust the name. */
3149 /* ??? This code means that we can never use one local register when
3150 there is a frame pointer. loc79 gets wasted in this case, as it is
3151 renamed to a register that will never be used. See also the try_locals
3152 code in find_gr_spill. */
3153 if (current_frame_info.r[reg_fp])
3155 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3156 reg_names[HARD_FRAME_POINTER_REGNUM]
3157 = reg_names[current_frame_info.r[reg_fp]];
3158 reg_names[current_frame_info.r[reg_fp]] = tmp;
3161 /* We don't need an alloc instruction if we've used no outputs or locals. */
3162 if (current_frame_info.n_local_regs == 0
3163 && current_frame_info.n_output_regs == 0
3164 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3165 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3167 /* If there is no alloc, but there are input registers used, then we
3168 need a .regstk directive. */
3169 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3170 ar_pfs_save_reg = NULL_RTX;
3172 else
3174 current_frame_info.need_regstk = 0;
3176 if (current_frame_info.r[reg_save_ar_pfs])
3178 regno = current_frame_info.r[reg_save_ar_pfs];
3179 reg_emitted (reg_save_ar_pfs);
3181 else
3182 regno = next_scratch_gr_reg ();
3183 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3185 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3186 GEN_INT (current_frame_info.n_input_regs),
3187 GEN_INT (current_frame_info.n_local_regs),
3188 GEN_INT (current_frame_info.n_output_regs),
3189 GEN_INT (current_frame_info.n_rotate_regs)));
3190 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3193 /* Set up frame pointer, stack pointer, and spill iterators. */
3195 n_varargs = cfun->machine->n_varargs;
3196 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3197 stack_pointer_rtx, 0);
3199 if (frame_pointer_needed)
3201 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3202 RTX_FRAME_RELATED_P (insn) = 1;
3205 if (current_frame_info.total_size != 0)
3207 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3208 rtx offset;
3210 if (satisfies_constraint_I (frame_size_rtx))
3211 offset = frame_size_rtx;
3212 else
3214 regno = next_scratch_gr_reg ();
3215 offset = gen_rtx_REG (DImode, regno);
3216 emit_move_insn (offset, frame_size_rtx);
3219 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3220 stack_pointer_rtx, offset));
3222 if (! frame_pointer_needed)
3224 RTX_FRAME_RELATED_P (insn) = 1;
3225 if (GET_CODE (offset) != CONST_INT)
3226 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3227 gen_rtx_SET (VOIDmode,
3228 stack_pointer_rtx,
3229 gen_rtx_PLUS (DImode,
3230 stack_pointer_rtx,
3231 frame_size_rtx)));
3234 /* ??? At this point we must generate a magic insn that appears to
3235 modify the stack pointer, the frame pointer, and all spill
3236 iterators. This would allow the most scheduling freedom. For
3237 now, just hard stop. */
3238 emit_insn (gen_blockage ());
3241 /* Must copy out ar.unat before doing any integer spills. */
3242 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3244 if (current_frame_info.r[reg_save_ar_unat])
3246 ar_unat_save_reg
3247 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3248 reg_emitted (reg_save_ar_unat);
3250 else
3252 alt_regno = next_scratch_gr_reg ();
3253 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3254 current_frame_info.gr_used_mask |= 1 << alt_regno;
3257 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3258 insn = emit_move_insn (ar_unat_save_reg, reg);
3259 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3261 /* Even if we're not going to generate an epilogue, we still
3262 need to save the register so that EH works. */
3263 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3264 emit_insn (gen_prologue_use (ar_unat_save_reg));
3266 else
3267 ar_unat_save_reg = NULL_RTX;
3269 /* Spill all varargs registers. Do this before spilling any GR registers,
3270 since we want the UNAT bits for the GR registers to override the UNAT
3271 bits from varargs, which we don't care about. */
3273 cfa_off = -16;
3274 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3276 reg = gen_rtx_REG (DImode, regno);
3277 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3280 /* Locate the bottom of the register save area. */
3281 cfa_off = (current_frame_info.spill_cfa_off
3282 + current_frame_info.spill_size
3283 + current_frame_info.extra_spill_size);
3285 /* Save the predicate register block either in a register or in memory. */
3286 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3288 reg = gen_rtx_REG (DImode, PR_REG (0));
3289 if (current_frame_info.r[reg_save_pr] != 0)
3291 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3292 reg_emitted (reg_save_pr);
3293 insn = emit_move_insn (alt_reg, reg);
3295 /* ??? Denote pr spill/fill by a DImode move that modifies all
3296 64 hard registers. */
3297 RTX_FRAME_RELATED_P (insn) = 1;
3298 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3299 gen_rtx_SET (VOIDmode, alt_reg, reg));
3301 /* Even if we're not going to generate an epilogue, we still
3302 need to save the register so that EH works. */
3303 if (! epilogue_p)
3304 emit_insn (gen_prologue_use (alt_reg));
3306 else
3308 alt_regno = next_scratch_gr_reg ();
3309 alt_reg = gen_rtx_REG (DImode, alt_regno);
3310 insn = emit_move_insn (alt_reg, reg);
3311 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3312 cfa_off -= 8;
3316 /* Handle AR regs in numerical order. All of them get special handling. */
3317 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3318 && current_frame_info.r[reg_save_ar_unat] == 0)
3320 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3321 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3322 cfa_off -= 8;
3325 /* The alloc insn already copied ar.pfs into a general register. The
3326 only thing we have to do now is copy that register to a stack slot
3327 if we'd not allocated a local register for the job. */
3328 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3329 && current_frame_info.r[reg_save_ar_pfs] == 0)
3331 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3332 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3333 cfa_off -= 8;
3336 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3338 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3339 if (current_frame_info.r[reg_save_ar_lc] != 0)
3341 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3342 reg_emitted (reg_save_ar_lc);
3343 insn = emit_move_insn (alt_reg, reg);
3344 RTX_FRAME_RELATED_P (insn) = 1;
3346 /* Even if we're not going to generate an epilogue, we still
3347 need to save the register so that EH works. */
3348 if (! epilogue_p)
3349 emit_insn (gen_prologue_use (alt_reg));
3351 else
3353 alt_regno = next_scratch_gr_reg ();
3354 alt_reg = gen_rtx_REG (DImode, alt_regno);
3355 emit_move_insn (alt_reg, reg);
3356 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3357 cfa_off -= 8;
3361 /* Save the return pointer. */
3362 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3364 reg = gen_rtx_REG (DImode, BR_REG (0));
3365 if (current_frame_info.r[reg_save_b0] != 0)
3367 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3368 reg_emitted (reg_save_b0);
3369 insn = emit_move_insn (alt_reg, reg);
3370 RTX_FRAME_RELATED_P (insn) = 1;
3372 /* Even if we're not going to generate an epilogue, we still
3373 need to save the register so that EH works. */
3374 if (! epilogue_p)
3375 emit_insn (gen_prologue_use (alt_reg));
3377 else
3379 alt_regno = next_scratch_gr_reg ();
3380 alt_reg = gen_rtx_REG (DImode, alt_regno);
3381 emit_move_insn (alt_reg, reg);
3382 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3383 cfa_off -= 8;
3387 if (current_frame_info.r[reg_save_gp])
3389 reg_emitted (reg_save_gp);
3390 insn = emit_move_insn (gen_rtx_REG (DImode,
3391 current_frame_info.r[reg_save_gp]),
3392 pic_offset_table_rtx);
3395 /* We should now be at the base of the gr/br/fr spill area. */
3396 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3397 + current_frame_info.spill_size));
3399 /* Spill all general registers. */
3400 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3401 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3403 reg = gen_rtx_REG (DImode, regno);
3404 do_spill (gen_gr_spill, reg, cfa_off, reg);
3405 cfa_off -= 8;
3408 /* Spill the rest of the BR registers. */
3409 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3410 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3412 alt_regno = next_scratch_gr_reg ();
3413 alt_reg = gen_rtx_REG (DImode, alt_regno);
3414 reg = gen_rtx_REG (DImode, regno);
3415 emit_move_insn (alt_reg, reg);
3416 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3417 cfa_off -= 8;
3420 /* Align the frame and spill all FR registers. */
3421 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3422 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3424 gcc_assert (!(cfa_off & 15));
3425 reg = gen_rtx_REG (XFmode, regno);
3426 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3427 cfa_off -= 16;
3430 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3432 finish_spill_pointers ();
3435 /* Output the textual info surrounding the prologue. */
3437 void
3438 ia64_start_function (FILE *file, const char *fnname,
3439 tree decl ATTRIBUTE_UNUSED)
3441 #if VMS_DEBUGGING_INFO
3442 if (vms_debug_main
3443 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3445 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3446 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3447 dwarf2out_vms_debug_main_pointer ();
3448 vms_debug_main = 0;
3450 #endif
3452 fputs ("\t.proc ", file);
3453 assemble_name (file, fnname);
3454 fputc ('\n', file);
3455 ASM_OUTPUT_LABEL (file, fnname);
3458 /* Called after register allocation to add any instructions needed for the
3459 epilogue. Using an epilogue insn is favored compared to putting all of the
3460 instructions in output_function_epilogue(), since it allows the scheduler
3461 to intermix instructions with the saves of the caller saved registers. In
3462 some cases, it might be necessary to emit a barrier instruction as the last
3463 insn to prevent such scheduling. */
3465 void
3466 ia64_expand_epilogue (int sibcall_p)
3468 rtx insn, reg, alt_reg, ar_unat_save_reg;
3469 int regno, alt_regno, cfa_off;
3471 ia64_compute_frame_size (get_frame_size ());
3473 /* If there is a frame pointer, then we use it instead of the stack
3474 pointer, so that the stack pointer does not need to be valid when
3475 the epilogue starts. See EXIT_IGNORE_STACK. */
3476 if (frame_pointer_needed)
3477 setup_spill_pointers (current_frame_info.n_spilled,
3478 hard_frame_pointer_rtx, 0);
3479 else
3480 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3481 current_frame_info.total_size);
3483 if (current_frame_info.total_size != 0)
3485 /* ??? At this point we must generate a magic insn that appears to
3486 modify the spill iterators and the frame pointer. This would
3487 allow the most scheduling freedom. For now, just hard stop. */
3488 emit_insn (gen_blockage ());
3491 /* Locate the bottom of the register save area. */
3492 cfa_off = (current_frame_info.spill_cfa_off
3493 + current_frame_info.spill_size
3494 + current_frame_info.extra_spill_size);
3496 /* Restore the predicate registers. */
3497 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3499 if (current_frame_info.r[reg_save_pr] != 0)
3501 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3502 reg_emitted (reg_save_pr);
3504 else
3506 alt_regno = next_scratch_gr_reg ();
3507 alt_reg = gen_rtx_REG (DImode, alt_regno);
3508 do_restore (gen_movdi_x, alt_reg, cfa_off);
3509 cfa_off -= 8;
3511 reg = gen_rtx_REG (DImode, PR_REG (0));
3512 emit_move_insn (reg, alt_reg);
3515 /* Restore the application registers. */
3517 /* Load the saved unat from the stack, but do not restore it until
3518 after the GRs have been restored. */
3519 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3521 if (current_frame_info.r[reg_save_ar_unat] != 0)
3523 ar_unat_save_reg
3524 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3525 reg_emitted (reg_save_ar_unat);
3527 else
3529 alt_regno = next_scratch_gr_reg ();
3530 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3531 current_frame_info.gr_used_mask |= 1 << alt_regno;
3532 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3533 cfa_off -= 8;
3536 else
3537 ar_unat_save_reg = NULL_RTX;
3539 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3541 reg_emitted (reg_save_ar_pfs);
3542 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3543 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3544 emit_move_insn (reg, alt_reg);
3546 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3548 alt_regno = next_scratch_gr_reg ();
3549 alt_reg = gen_rtx_REG (DImode, alt_regno);
3550 do_restore (gen_movdi_x, alt_reg, cfa_off);
3551 cfa_off -= 8;
3552 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3553 emit_move_insn (reg, alt_reg);
3556 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3558 if (current_frame_info.r[reg_save_ar_lc] != 0)
3560 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3561 reg_emitted (reg_save_ar_lc);
3563 else
3565 alt_regno = next_scratch_gr_reg ();
3566 alt_reg = gen_rtx_REG (DImode, alt_regno);
3567 do_restore (gen_movdi_x, alt_reg, cfa_off);
3568 cfa_off -= 8;
3570 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3571 emit_move_insn (reg, alt_reg);
3574 /* Restore the return pointer. */
3575 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3577 if (current_frame_info.r[reg_save_b0] != 0)
3579 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3580 reg_emitted (reg_save_b0);
3582 else
3584 alt_regno = next_scratch_gr_reg ();
3585 alt_reg = gen_rtx_REG (DImode, alt_regno);
3586 do_restore (gen_movdi_x, alt_reg, cfa_off);
3587 cfa_off -= 8;
3589 reg = gen_rtx_REG (DImode, BR_REG (0));
3590 emit_move_insn (reg, alt_reg);
3593 /* We should now be at the base of the gr/br/fr spill area. */
3594 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3595 + current_frame_info.spill_size));
3597 /* The GP may be stored on the stack in the prologue, but it's
3598 never restored in the epilogue. Skip the stack slot. */
3599 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3600 cfa_off -= 8;
3602 /* Restore all general registers. */
3603 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3604 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3606 reg = gen_rtx_REG (DImode, regno);
3607 do_restore (gen_gr_restore, reg, cfa_off);
3608 cfa_off -= 8;
3611 /* Restore the branch registers. */
3612 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3613 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3615 alt_regno = next_scratch_gr_reg ();
3616 alt_reg = gen_rtx_REG (DImode, alt_regno);
3617 do_restore (gen_movdi_x, alt_reg, cfa_off);
3618 cfa_off -= 8;
3619 reg = gen_rtx_REG (DImode, regno);
3620 emit_move_insn (reg, alt_reg);
3623 /* Restore floating point registers. */
3624 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3625 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3627 gcc_assert (!(cfa_off & 15));
3628 reg = gen_rtx_REG (XFmode, regno);
3629 do_restore (gen_fr_restore_x, reg, cfa_off);
3630 cfa_off -= 16;
3633 /* Restore ar.unat for real. */
3634 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3636 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3637 emit_move_insn (reg, ar_unat_save_reg);
3640 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3642 finish_spill_pointers ();
3644 if (current_frame_info.total_size
3645 || cfun->machine->ia64_eh_epilogue_sp
3646 || frame_pointer_needed)
3648 /* ??? At this point we must generate a magic insn that appears to
3649 modify the spill iterators, the stack pointer, and the frame
3650 pointer. This would allow the most scheduling freedom. For now,
3651 just hard stop. */
3652 emit_insn (gen_blockage ());
3655 if (cfun->machine->ia64_eh_epilogue_sp)
3656 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3657 else if (frame_pointer_needed)
3659 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3660 RTX_FRAME_RELATED_P (insn) = 1;
3662 else if (current_frame_info.total_size)
3664 rtx offset, frame_size_rtx;
3666 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3667 if (satisfies_constraint_I (frame_size_rtx))
3668 offset = frame_size_rtx;
3669 else
3671 regno = next_scratch_gr_reg ();
3672 offset = gen_rtx_REG (DImode, regno);
3673 emit_move_insn (offset, frame_size_rtx);
3676 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3677 offset));
3679 RTX_FRAME_RELATED_P (insn) = 1;
3680 if (GET_CODE (offset) != CONST_INT)
3681 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3682 gen_rtx_SET (VOIDmode,
3683 stack_pointer_rtx,
3684 gen_rtx_PLUS (DImode,
3685 stack_pointer_rtx,
3686 frame_size_rtx)));
3689 if (cfun->machine->ia64_eh_epilogue_bsp)
3690 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3692 if (! sibcall_p)
3693 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3694 else
3696 int fp = GR_REG (2);
3697 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3698 first available call-clobbered register. If there was a frame pointer
3699 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3700 so we have to make sure we're using the string "r2" when emitting
3701 the register name for the assembler. */
3702 if (current_frame_info.r[reg_fp]
3703 && current_frame_info.r[reg_fp] == GR_REG (2))
3704 fp = HARD_FRAME_POINTER_REGNUM;
3706 /* We must emit an alloc to force the input registers to become output
3707 registers. Otherwise, if the callee tries to pass its parameters
3708 through to another call without an intervening alloc, then these
3709 values get lost. */
3710 /* ??? We don't need to preserve all input registers. We only need to
3711 preserve those input registers used as arguments to the sibling call.
3712 It is unclear how to compute that number here. */
3713 if (current_frame_info.n_input_regs != 0)
3715 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3716 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3717 const0_rtx, const0_rtx,
3718 n_inputs, const0_rtx));
3719 RTX_FRAME_RELATED_P (insn) = 1;
3724 /* Return 1 if br.ret can do all the work required to return from a
3725 function. */
3728 ia64_direct_return (void)
3730 if (reload_completed && ! frame_pointer_needed)
3732 ia64_compute_frame_size (get_frame_size ());
3734 return (current_frame_info.total_size == 0
3735 && current_frame_info.n_spilled == 0
3736 && current_frame_info.r[reg_save_b0] == 0
3737 && current_frame_info.r[reg_save_pr] == 0
3738 && current_frame_info.r[reg_save_ar_pfs] == 0
3739 && current_frame_info.r[reg_save_ar_unat] == 0
3740 && current_frame_info.r[reg_save_ar_lc] == 0);
3742 return 0;
3745 /* Return the magic cookie that we use to hold the return address
3746 during early compilation. */
3749 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3751 if (count != 0)
3752 return NULL;
3753 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3756 /* Split this value after reload, now that we know where the return
3757 address is saved. */
3759 void
3760 ia64_split_return_addr_rtx (rtx dest)
3762 rtx src;
3764 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3766 if (current_frame_info.r[reg_save_b0] != 0)
3768 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3769 reg_emitted (reg_save_b0);
3771 else
3773 HOST_WIDE_INT off;
3774 unsigned int regno;
3775 rtx off_r;
3777 /* Compute offset from CFA for BR0. */
3778 /* ??? Must be kept in sync with ia64_expand_prologue. */
3779 off = (current_frame_info.spill_cfa_off
3780 + current_frame_info.spill_size);
3781 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3782 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3783 off -= 8;
3785 /* Convert CFA offset to a register based offset. */
3786 if (frame_pointer_needed)
3787 src = hard_frame_pointer_rtx;
3788 else
3790 src = stack_pointer_rtx;
3791 off += current_frame_info.total_size;
3794 /* Load address into scratch register. */
3795 off_r = GEN_INT (off);
3796 if (satisfies_constraint_I (off_r))
3797 emit_insn (gen_adddi3 (dest, src, off_r));
3798 else
3800 emit_move_insn (dest, off_r);
3801 emit_insn (gen_adddi3 (dest, src, dest));
3804 src = gen_rtx_MEM (Pmode, dest);
3807 else
3808 src = gen_rtx_REG (DImode, BR_REG (0));
3810 emit_move_insn (dest, src);
3814 ia64_hard_regno_rename_ok (int from, int to)
3816 /* Don't clobber any of the registers we reserved for the prologue. */
3817 unsigned int r;
3819 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3820 if (to == current_frame_info.r[r]
3821 || from == current_frame_info.r[r]
3822 || to == emitted_frame_related_regs[r]
3823 || from == emitted_frame_related_regs[r])
3824 return 0;
3826 /* Don't use output registers outside the register frame. */
3827 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3828 return 0;
3830 /* Retain even/oddness on predicate register pairs. */
3831 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3832 return (from & 1) == (to & 1);
3834 return 1;
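/* As a rough illustration of the parity check above: an even-numbered
   predicate such as p6 may be renamed to another even-numbered one such as
   p8, but not to p7, assuming the usual layout where PR_REG (0) is an even
   hard register number.  The earlier checks likewise reject any rename that
   touches a register reserved by the prologue code.  */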
3837 /* Target hook for assembling integer objects. Handle word-sized
3838 aligned objects and detect the cases when @fptr is needed. */
3840 static bool
3841 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3843 if (size == POINTER_SIZE / BITS_PER_UNIT
3844 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3845 && GET_CODE (x) == SYMBOL_REF
3846 && SYMBOL_REF_FUNCTION_P (x))
3848 static const char * const directive[2][2] = {
3849 /* 64-bit pointer */ /* 32-bit pointer */
3850 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3851 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3853 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3854 output_addr_const (asm_out_file, x);
3855 fputs (")\n", asm_out_file);
3856 return true;
3858 return default_assemble_integer (x, size, aligned_p);
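/* For instance, a word-sized pointer to a function symbol `foo' handled by
   the case above is emitted roughly as

       data8   @fptr(foo)

   (or data8.ua when unaligned, data4/data4.ua for 32-bit pointers), while
   everything else falls through to default_assemble_integer.  */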
3861 /* Emit the function prologue. */
3863 static void
3864 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3866 int mask, grsave, grsave_prev;
3868 if (current_frame_info.need_regstk)
3869 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3870 current_frame_info.n_input_regs,
3871 current_frame_info.n_local_regs,
3872 current_frame_info.n_output_regs,
3873 current_frame_info.n_rotate_regs);
3875 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3876 return;
3878 /* Emit the .prologue directive. */
3880 mask = 0;
3881 grsave = grsave_prev = 0;
3882 if (current_frame_info.r[reg_save_b0] != 0)
3884 mask |= 8;
3885 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3887 if (current_frame_info.r[reg_save_ar_pfs] != 0
3888 && (grsave_prev == 0
3889 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3891 mask |= 4;
3892 if (grsave_prev == 0)
3893 grsave = current_frame_info.r[reg_save_ar_pfs];
3894 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3896 if (current_frame_info.r[reg_fp] != 0
3897 && (grsave_prev == 0
3898 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3900 mask |= 2;
3901 if (grsave_prev == 0)
3902 grsave = HARD_FRAME_POINTER_REGNUM;
3903 grsave_prev = current_frame_info.r[reg_fp];
3905 if (current_frame_info.r[reg_save_pr] != 0
3906 && (grsave_prev == 0
3907 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3909 mask |= 1;
3910 if (grsave_prev == 0)
3911 grsave = current_frame_info.r[reg_save_pr];
3914 if (mask && TARGET_GNU_AS)
3915 fprintf (file, "\t.prologue %d, %d\n", mask,
3916 ia64_dbx_register_number (grsave));
3917 else
3918 fputs ("\t.prologue\n", file);
3920 /* Emit a .spill directive, if necessary, to relocate the base of
3921 the register spill area. */
3922 if (current_frame_info.spill_cfa_off != -16)
3923 fprintf (file, "\t.spill %ld\n",
3924 (long) (current_frame_info.spill_cfa_off
3925 + current_frame_info.spill_size));
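/* To sketch the directive produced above: the mask bits stand for b0 (8),
   ar.pfs (4), the frame pointer (2) and the predicates (1), so a function
   that saves b0 and ar.pfs in consecutive general registers gets roughly

       .prologue 12, <grsave>

   where <grsave> is the debug number of the first save register; a plain
   ".prologue" is used when nothing qualifies or GNU as is not in use.  */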
3928 /* Emit the .body directive at the scheduled end of the prologue. */
3930 static void
3931 ia64_output_function_end_prologue (FILE *file)
3933 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3934 return;
3936 fputs ("\t.body\n", file);
3939 /* Emit the function epilogue. */
3941 static void
3942 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3943 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3945 int i;
3947 if (current_frame_info.r[reg_fp])
3949 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3950 reg_names[HARD_FRAME_POINTER_REGNUM]
3951 = reg_names[current_frame_info.r[reg_fp]];
3952 reg_names[current_frame_info.r[reg_fp]] = tmp;
3953 reg_emitted (reg_fp);
3955 if (! TARGET_REG_NAMES)
3957 for (i = 0; i < current_frame_info.n_input_regs; i++)
3958 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3959 for (i = 0; i < current_frame_info.n_local_regs; i++)
3960 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3961 for (i = 0; i < current_frame_info.n_output_regs; i++)
3962 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3965 current_frame_info.initialized = 0;
3969 ia64_dbx_register_number (int regno)
3971 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3972 from its home at loc79 to something inside the register frame. We
3973 must perform the same renumbering here for the debug info. */
3974 if (current_frame_info.r[reg_fp])
3976 if (regno == HARD_FRAME_POINTER_REGNUM)
3977 regno = current_frame_info.r[reg_fp];
3978 else if (regno == current_frame_info.r[reg_fp])
3979 regno = HARD_FRAME_POINTER_REGNUM;
3982 if (IN_REGNO_P (regno))
3983 return 32 + regno - IN_REG (0);
3984 else if (LOC_REGNO_P (regno))
3985 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3986 else if (OUT_REGNO_P (regno))
3987 return (32 + current_frame_info.n_input_regs
3988 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3989 else
3990 return regno;
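/* For example, with 2 input and 3 local registers the mapping above gives
   in0/in1 -> 32/33, loc0..loc2 -> 34..36 and out0 -> 37, keeping the debug
   numbering contiguous no matter how the register frame was laid out.  */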
3993 /* Implement TARGET_TRAMPOLINE_INIT.
3995 The trampoline should set the static chain pointer to value placed
3996 into the trampoline and should branch to the specified routine.
3997 To make the normal indirect-subroutine calling convention work,
3998 the trampoline must look like a function descriptor; the first
3999 word being the target address and the second being the target's
4000 global pointer.
4002 We abuse the concept of a global pointer by arranging for it
4003 to point to the data we need to load. The complete trampoline
4004 has the following form:
4006 +-------------------+ \
4007 TRAMP: | __ia64_trampoline | |
4008 +-------------------+ > fake function descriptor
4009 | TRAMP+16 | |
4010 +-------------------+ /
4011 | target descriptor |
4012 +-------------------+
4013 | static link |
4014 +-------------------+
4017 static void
4018 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4020 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4021 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4023 /* The Intel assembler requires that the global __ia64_trampoline symbol
4024 be declared explicitly. */
4025 if (!TARGET_GNU_AS)
4027 static bool declared_ia64_trampoline = false;
4029 if (!declared_ia64_trampoline)
4031 declared_ia64_trampoline = true;
4032 (*targetm.asm_out.globalize_label) (asm_out_file,
4033 "__ia64_trampoline");
4037 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4038 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4039 fnaddr = convert_memory_address (Pmode, fnaddr);
4040 static_chain = convert_memory_address (Pmode, static_chain);
4042 /* Load up our iterator. */
4043 addr_reg = copy_to_reg (addr);
4044 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4046 /* The first two words are the fake descriptor:
4047 __ia64_trampoline, ADDR+16. */
4048 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4049 if (TARGET_ABI_OPEN_VMS)
4051 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4052 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4053 relocation against function symbols to make it identical to the
4054 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4055 strict ELF and dereference to get the bare code address. */
4056 rtx reg = gen_reg_rtx (Pmode);
4057 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4058 emit_move_insn (reg, tramp);
4059 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4060 tramp = reg;
4062 emit_move_insn (m_tramp, tramp);
4063 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4064 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4066 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4067 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4068 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4070 /* The third word is the target descriptor. */
4071 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4072 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4073 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4075 /* The fourth word is the static chain. */
4076 emit_move_insn (m_tramp, static_chain);
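/* Summarizing the stores above, the finished trampoline holds four 8-byte
   words, matching the diagram before this function:

       [TRAMP+0]   address of __ia64_trampoline
       [TRAMP+8]   TRAMP+16  (the fake gp, pointing at the data below)
       [TRAMP+16]  descriptor of the target function
       [TRAMP+24]  static chain value  */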
4079 /* Do any needed setup for a variadic function. CUM has not been updated
4080 for the last named argument which has type TYPE and mode MODE.
4082 We generate the actual spill instructions during prologue generation. */
4084 static void
4085 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4086 tree type, int * pretend_size,
4087 int second_time ATTRIBUTE_UNUSED)
4089 CUMULATIVE_ARGS next_cum = *cum;
4091 /* Skip the current argument. */
4092 ia64_function_arg_advance (&next_cum, mode, type, 1);
4094 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4096 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4097 *pretend_size = n * UNITS_PER_WORD;
4098 cfun->machine->n_varargs = n;
4102 /* Check whether TYPE is a homogeneous floating point aggregate. If
4103 it is, return the mode of the floating point type that appears
4104 in all leafs. If it is not, return VOIDmode.
4106 An aggregate is a homogeneous floating point aggregate if all
4107 fields/elements in it have the same floating point type (e.g.,
4108 SFmode). 128-bit quad-precision floats are excluded.
4110 Variable sized aggregates should never arrive here, since we should
4111 have already decided to pass them by reference. Top-level zero-sized
4112 aggregates are excluded because our parallels crash the middle-end. */
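/* As a quick illustration: struct { float x; float y[2]; } yields SFmode
   here, while struct { float x; double y; } yields VOIDmode because the
   leaf types differ, and TFmode (quad precision) leafs are rejected by the
   REAL_TYPE case below.  */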
4114 static enum machine_mode
4115 hfa_element_mode (const_tree type, bool nested)
4117 enum machine_mode element_mode = VOIDmode;
4118 enum machine_mode mode;
4119 enum tree_code code = TREE_CODE (type);
4120 int know_element_mode = 0;
4121 tree t;
4123 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4124 return VOIDmode;
4126 switch (code)
4128 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4129 case BOOLEAN_TYPE: case POINTER_TYPE:
4130 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4131 case LANG_TYPE: case FUNCTION_TYPE:
4132 return VOIDmode;
4134 /* Fortran complex types are supposed to be HFAs, so we need to handle
4135 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4136 types though. */
4137 case COMPLEX_TYPE:
4138 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4139 && TYPE_MODE (type) != TCmode)
4140 return GET_MODE_INNER (TYPE_MODE (type));
4141 else
4142 return VOIDmode;
4144 case REAL_TYPE:
4145 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4146 mode if this is contained within an aggregate. */
4147 if (nested && TYPE_MODE (type) != TFmode)
4148 return TYPE_MODE (type);
4149 else
4150 return VOIDmode;
4152 case ARRAY_TYPE:
4153 return hfa_element_mode (TREE_TYPE (type), 1);
4155 case RECORD_TYPE:
4156 case UNION_TYPE:
4157 case QUAL_UNION_TYPE:
4158 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
4160 if (TREE_CODE (t) != FIELD_DECL)
4161 continue;
4163 mode = hfa_element_mode (TREE_TYPE (t), 1);
4164 if (know_element_mode)
4166 if (mode != element_mode)
4167 return VOIDmode;
4169 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4170 return VOIDmode;
4171 else
4173 know_element_mode = 1;
4174 element_mode = mode;
4177 return element_mode;
4179 default:
4180 /* If we reach here, we probably have some front-end specific type
4181 that the backend doesn't know about. This can happen via the
4182 aggregate_value_p call in init_function_start. All we can do is
4183 ignore unknown tree types. */
4184 return VOIDmode;
4187 return VOIDmode;
4190 /* Return the number of words required to hold a quantity of TYPE and MODE
4191 when passed as an argument. */
4192 static int
4193 ia64_function_arg_words (tree type, enum machine_mode mode)
4195 int words;
4197 if (mode == BLKmode)
4198 words = int_size_in_bytes (type);
4199 else
4200 words = GET_MODE_SIZE (mode);
4202 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
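/* For instance, with 8-byte words a 12-byte BLKmode aggregate occupies
   (12 + 7) / 8 = 2 argument words, and an SImode scalar occupies 1.  */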
4205 /* Return the number of registers that should be skipped so the current
4206 argument (described by TYPE and WORDS) will be properly aligned.
4208 Integer and float arguments larger than 8 bytes start at the next
4209 even boundary. Aggregates larger than 8 bytes start at the next
4210 even boundary if the aggregate has 16 byte alignment. Note that
4211 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4212 but are still to be aligned in registers.
4214 ??? The ABI does not specify how to handle aggregates with
4215 alignment from 9 to 15 bytes, or greater than 16. We handle them
4216 all as if they had 16 byte alignment. Such aggregates can occur
4217 only if gcc extensions are used. */
4218 static int
4219 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4221 /* No registers are skipped on VMS. */
4222 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4223 return 0;
4225 if (type
4226 && TREE_CODE (type) != INTEGER_TYPE
4227 && TREE_CODE (type) != REAL_TYPE)
4228 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4229 else
4230 return words > 1;
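/* Roughly: when the next free slot is odd, a 16-byte-aligned aggregate or
   any integer/FP scalar wider than 8 bytes skips one slot so that it starts
   on an even slot; on VMS, or when the next slot is already even, nothing
   is skipped.  */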
4233 /* Return rtx for register where argument is passed, or zero if it is passed
4234 on the stack. */
4235 /* ??? 128-bit quad-precision floats are always passed in general
4236 registers. */
4239 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4240 int named, int incoming)
4242 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4243 int words = ia64_function_arg_words (type, mode);
4244 int offset = ia64_function_arg_offset (cum, type, words);
4245 enum machine_mode hfa_mode = VOIDmode;
4247 /* For OpenVMS, emit the instruction setting up the argument register here,
4248 so that it is grouped with the other argument-setup insns. This is not
4249 conceptually the best place to do this, but it is the easiest, as we have
4250 convenient access to the cumulative args info. */
4252 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4253 && named == 1)
4255 unsigned HOST_WIDE_INT regval = cum->words;
4256 int i;
4258 for (i = 0; i < 8; i++)
4259 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4261 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4262 GEN_INT (regval));
4265 /* If all argument slots are used, then it must go on the stack. */
4266 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4267 return 0;
4269 /* Check for and handle homogeneous FP aggregates. */
4270 if (type)
4271 hfa_mode = hfa_element_mode (type, 0);
4273 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4274 and unprototyped hfas are passed specially. */
4275 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4277 rtx loc[16];
4278 int i = 0;
4279 int fp_regs = cum->fp_regs;
4280 int int_regs = cum->words + offset;
4281 int hfa_size = GET_MODE_SIZE (hfa_mode);
4282 int byte_size;
4283 int args_byte_size;
4285 /* If prototyped, pass it in FR regs then GR regs.
4286 If not prototyped, pass it in both FR and GR regs.
4288 If this is an SFmode aggregate, then it is possible to run out of
4289 FR regs while GR regs are still left. In that case, we pass the
4290 remaining part in the GR regs. */
4292 /* Fill the FP regs. We do this always. We stop if we reach the end
4293 of the argument, the last FP register, or the last argument slot. */
4295 byte_size = ((mode == BLKmode)
4296 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4297 args_byte_size = int_regs * UNITS_PER_WORD;
4298 offset = 0;
4299 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4300 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4302 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4303 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4304 + fp_regs)),
4305 GEN_INT (offset));
4306 offset += hfa_size;
4307 args_byte_size += hfa_size;
4308 fp_regs++;
4311 /* If no prototype, then the whole thing must go in GR regs. */
4312 if (! cum->prototype)
4313 offset = 0;
4314 /* If this is an SFmode aggregate, then we might have some left over
4315 that needs to go in GR regs. */
4316 else if (byte_size != offset)
4317 int_regs += offset / UNITS_PER_WORD;
4319 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4321 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4323 enum machine_mode gr_mode = DImode;
4324 unsigned int gr_size;
4326 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4327 then this goes in a GR reg left adjusted/little endian, right
4328 adjusted/big endian. */
4329 /* ??? Currently this is handled wrong, because 4-byte hunks are
4330 always right adjusted/little endian. */
4331 if (offset & 0x4)
4332 gr_mode = SImode;
4333 /* If we have an even 4 byte hunk because the aggregate is a
4334 multiple of 4 bytes in size, then this goes in a GR reg right
4335 adjusted/little endian. */
4336 else if (byte_size - offset == 4)
4337 gr_mode = SImode;
4339 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4340 gen_rtx_REG (gr_mode, (basereg
4341 + int_regs)),
4342 GEN_INT (offset));
4344 gr_size = GET_MODE_SIZE (gr_mode);
4345 offset += gr_size;
4346 if (gr_size == UNITS_PER_WORD
4347 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4348 int_regs++;
4349 else if (gr_size > UNITS_PER_WORD)
4350 int_regs += gr_size / UNITS_PER_WORD;
4352 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4355 /* On OpenVMS a variable argument is passed either in Rn or in Fn. */
4356 else if (TARGET_ABI_OPEN_VMS && named == 0)
4358 if (FLOAT_MODE_P (mode))
4359 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4360 else
4361 return gen_rtx_REG (mode, basereg + cum->words);
4364 /* Integral values and aggregates go in general registers. If we have run
4365 out of FR registers, then FP values must also go in general registers.
4366 This can happen when we have an SFmode HFA. */
4367 else if (mode == TFmode || mode == TCmode
4368 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4370 int byte_size = ((mode == BLKmode)
4371 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4372 if (BYTES_BIG_ENDIAN
4373 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4374 && byte_size < UNITS_PER_WORD
4375 && byte_size > 0)
4377 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4378 gen_rtx_REG (DImode,
4379 (basereg + cum->words
4380 + offset)),
4381 const0_rtx);
4382 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4384 else
4385 return gen_rtx_REG (mode, basereg + cum->words + offset);
4389 /* If there is a prototype, then FP values go in a FR register when
4390 named, and in a GR register when unnamed. */
4391 else if (cum->prototype)
4393 if (named)
4394 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4395 /* In big-endian mode, an anonymous SFmode value must be represented
4396 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4397 the value into the high half of the general register. */
4398 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4399 return gen_rtx_PARALLEL (mode,
4400 gen_rtvec (1,
4401 gen_rtx_EXPR_LIST (VOIDmode,
4402 gen_rtx_REG (DImode, basereg + cum->words + offset),
4403 const0_rtx)));
4404 else
4405 return gen_rtx_REG (mode, basereg + cum->words + offset);
4407 /* If there is no prototype, then FP values go in both FR and GR
4408 registers. */
4409 else
4411 /* See comment above. */
4412 enum machine_mode inner_mode =
4413 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4415 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4416 gen_rtx_REG (mode, (FR_ARG_FIRST
4417 + cum->fp_regs)),
4418 const0_rtx);
4419 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4420 gen_rtx_REG (inner_mode,
4421 (basereg + cum->words
4422 + offset)),
4423 const0_rtx);
4425 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4429 /* Return the number of bytes, at the beginning of the argument, that must be
4430 put in registers. 0 means the argument is entirely in registers or entirely
4431 in memory. */
4433 static int
4434 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4435 tree type, bool named ATTRIBUTE_UNUSED)
4437 int words = ia64_function_arg_words (type, mode);
4438 int offset = ia64_function_arg_offset (cum, type, words);
4440 /* If all argument slots are used, then it must go on the stack. */
4441 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4442 return 0;
4444 /* It doesn't matter whether the argument goes in FR or GR regs. If
4445 it fits within the 8 argument slots, then it goes entirely in
4446 registers. If it extends past the last argument slot, then the rest
4447 goes on the stack. */
4449 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4450 return 0;
4452 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
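/* For example, with cum->words == 7 and a 16-byte argument needing no
   alignment padding, the first 8 bytes land in the last argument register
   and the remainder goes on the stack, so this returns
   (8 - 7 - 0) * UNITS_PER_WORD == 8.  */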
4455 /* Return ivms_arg_type based on machine_mode. */
4457 static enum ivms_arg_type
4458 ia64_arg_type (enum machine_mode mode)
4460 switch (mode)
4462 case SFmode:
4463 return FS;
4464 case DFmode:
4465 return FT;
4466 default:
4467 return I64;
4471 /* Update CUM to point after this argument. This is patterned after
4472 ia64_function_arg. */
4474 void
4475 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4476 tree type, int named)
4478 int words = ia64_function_arg_words (type, mode);
4479 int offset = ia64_function_arg_offset (cum, type, words);
4480 enum machine_mode hfa_mode = VOIDmode;
4482 /* If all arg slots are already full, then there is nothing to do. */
4483 if (cum->words >= MAX_ARGUMENT_SLOTS)
4485 cum->words += words + offset;
4486 return;
4489 cum->atypes[cum->words] = ia64_arg_type (mode);
4490 cum->words += words + offset;
4492 /* Check for and handle homogeneous FP aggregates. */
4493 if (type)
4494 hfa_mode = hfa_element_mode (type, 0);
4496 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4497 and unprototyped hfas are passed specially. */
4498 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4500 int fp_regs = cum->fp_regs;
4501 /* This is the original value of cum->words + offset. */
4502 int int_regs = cum->words - words;
4503 int hfa_size = GET_MODE_SIZE (hfa_mode);
4504 int byte_size;
4505 int args_byte_size;
4507 /* If prototyped, pass it in FR regs then GR regs.
4508 If not prototyped, pass it in both FR and GR regs.
4510 If this is an SFmode aggregate, then it is possible to run out of
4511 FR regs while GR regs are still left. In that case, we pass the
4512 remaining part in the GR regs. */
4514 /* Fill the FP regs. We do this always. We stop if we reach the end
4515 of the argument, the last FP register, or the last argument slot. */
4517 byte_size = ((mode == BLKmode)
4518 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4519 args_byte_size = int_regs * UNITS_PER_WORD;
4520 offset = 0;
4521 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4522 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4524 offset += hfa_size;
4525 args_byte_size += hfa_size;
4526 fp_regs++;
4529 cum->fp_regs = fp_regs;
4532 /* On OpenVMS a variable argument is passed either in Rn or in Fn. */
4533 else if (TARGET_ABI_OPEN_VMS && named == 0)
4535 cum->int_regs = cum->words;
4536 cum->fp_regs = cum->words;
4539 /* Integral values and aggregates go in general registers. So do TFmode
4540 FP values. If we have run out of FR registers, then other FP values must
4541 also go in general registers. This can happen when we have an SFmode HFA. */
4542 else if (mode == TFmode || mode == TCmode
4543 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4544 cum->int_regs = cum->words;
4546 /* If there is a prototype, then FP values go in a FR register when
4547 named, and in a GR register when unnamed. */
4548 else if (cum->prototype)
4550 if (! named)
4551 cum->int_regs = cum->words;
4552 else
4553 /* ??? Complex types should not reach here. */
4554 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4556 /* If there is no prototype, then FP values go in both FR and GR
4557 registers. */
4558 else
4560 /* ??? Complex types should not reach here. */
4561 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4562 cum->int_regs = cum->words;
4566 /* Arguments with alignment larger than 8 bytes start at the next even
4567 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4568 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4571 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4574 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4575 return PARM_BOUNDARY * 2;
4577 if (type)
4579 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4580 return PARM_BOUNDARY * 2;
4581 else
4582 return PARM_BOUNDARY;
4585 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4586 return PARM_BOUNDARY * 2;
4587 else
4588 return PARM_BOUNDARY;
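/* So, assuming the usual 64-bit PARM_BOUNDARY, a 16-byte-aligned aggregate
   (or a TFmode argument on ILP32 HP-UX) is placed on a 128-bit boundary,
   while an ordinary DImode argument keeps the 64-bit boundary.  */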
4591 /* True if it is OK to do sibling call optimization for the specified
4592 call expression EXP. DECL will be the called function, or NULL if
4593 this is an indirect call. */
4594 static bool
4595 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4597 /* We can't perform a sibcall if the current function has the syscall_linkage
4598 attribute. */
4599 if (lookup_attribute ("syscall_linkage",
4600 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4601 return false;
4603 /* We must always return with our current GP. This means we can
4604 only sibcall to functions defined in the current module unless
4605 TARGET_CONST_GP is set to true. */
4606 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4610 /* Implement va_arg. */
4612 static tree
4613 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4614 gimple_seq *post_p)
4616 /* Variable sized types are passed by reference. */
4617 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4619 tree ptrtype = build_pointer_type (type);
4620 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4621 return build_va_arg_indirect_ref (addr);
4624 /* Aggregate arguments with alignment larger than 8 bytes start at
4625 the next even boundary. Integer and floating point arguments
4626 do so if they are larger than 8 bytes, whether or not they are
4627 also aligned larger than 8 bytes. */
4628 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4629 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4631 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4632 size_int (2 * UNITS_PER_WORD - 1));
4633 t = fold_convert (sizetype, t);
4634 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4635 size_int (-2 * UNITS_PER_WORD));
4636 t = fold_convert (TREE_TYPE (valist), t);
4637 gimplify_assign (unshare_expr (valist), t, pre_p);
4640 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
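/* The alignment fixup above amounts, in effect, to

       valist = (valist + 2 * UNITS_PER_WORD - 1) & -(2 * UNITS_PER_WORD);

   i.e. rounding the va_list pointer up to a 16-byte boundary before the
   standard va_arg expansion takes over.  */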
4643 /* Return 1 if the function return value is returned in memory. Return 0 if
4644 it is in a register. */
4646 static bool
4647 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4649 enum machine_mode mode;
4650 enum machine_mode hfa_mode;
4651 HOST_WIDE_INT byte_size;
4653 mode = TYPE_MODE (valtype);
4654 byte_size = GET_MODE_SIZE (mode);
4655 if (mode == BLKmode)
4657 byte_size = int_size_in_bytes (valtype);
4658 if (byte_size < 0)
4659 return true;
4662 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4664 hfa_mode = hfa_element_mode (valtype, 0);
4665 if (hfa_mode != VOIDmode)
4667 int hfa_size = GET_MODE_SIZE (hfa_mode);
4669 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4670 return true;
4671 else
4672 return false;
4674 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4675 return true;
4676 else
4677 return false;
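/* For instance, an HFA of four doubles (4 elements of 8 bytes) fits the FP
   return registers and is not returned in memory, whereas a 40-byte non-HFA
   aggregate is larger than UNITS_PER_WORD * MAX_INT_RETURN_SLOTS (32 bytes
   with the usual four integer return slots) and is.  */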
4680 /* Return rtx for register that holds the function return value. */
4682 static rtx
4683 ia64_function_value (const_tree valtype,
4684 const_tree fn_decl_or_type,
4685 bool outgoing ATTRIBUTE_UNUSED)
4687 enum machine_mode mode;
4688 enum machine_mode hfa_mode;
4689 int unsignedp;
4690 const_tree func = fn_decl_or_type;
4692 if (fn_decl_or_type
4693 && !DECL_P (fn_decl_or_type))
4694 func = NULL;
4696 mode = TYPE_MODE (valtype);
4697 hfa_mode = hfa_element_mode (valtype, 0);
4699 if (hfa_mode != VOIDmode)
4701 rtx loc[8];
4702 int i;
4703 int hfa_size;
4704 int byte_size;
4705 int offset;
4707 hfa_size = GET_MODE_SIZE (hfa_mode);
4708 byte_size = ((mode == BLKmode)
4709 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4710 offset = 0;
4711 for (i = 0; offset < byte_size; i++)
4713 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4714 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4715 GEN_INT (offset));
4716 offset += hfa_size;
4718 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4720 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4721 return gen_rtx_REG (mode, FR_ARG_FIRST);
4722 else
4724 bool need_parallel = false;
4726 /* In big-endian mode, we need to manage the layout of aggregates
4727 in the registers so that we get the bits properly aligned in
4728 the highpart of the registers. */
4729 if (BYTES_BIG_ENDIAN
4730 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4731 need_parallel = true;
4733 /* Something like struct S { long double x; char a[0] } is not an
4734 HFA structure, and therefore doesn't go in fp registers. But
4735 the middle-end will give it XFmode anyway, and XFmode values
4736 don't normally fit in integer registers. So we need to smuggle
4737 the value inside a parallel. */
4738 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4739 need_parallel = true;
4741 if (need_parallel)
4743 rtx loc[8];
4744 int offset;
4745 int bytesize;
4746 int i;
4748 offset = 0;
4749 bytesize = int_size_in_bytes (valtype);
4750 /* An empty PARALLEL is invalid here, but the return value
4751 doesn't matter for empty structs. */
4752 if (bytesize == 0)
4753 return gen_rtx_REG (mode, GR_RET_FIRST);
4754 for (i = 0; offset < bytesize; i++)
4756 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4757 gen_rtx_REG (DImode,
4758 GR_RET_FIRST + i),
4759 GEN_INT (offset));
4760 offset += UNITS_PER_WORD;
4762 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4765 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4766 func ? TREE_TYPE (func) : NULL_TREE,
4767 true);
4769 return gen_rtx_REG (mode, GR_RET_FIRST);
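/* For example, an HFA of two floats comes back as a PARALLEL of
   (reg:SF f8) and (reg:SF f9) built by the loop above, a plain double in
   the first FP argument register (f8), and an integer or small non-HFA
   aggregate in r8.  */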
4773 /* Worker function for TARGET_LIBCALL_VALUE. */
4775 static rtx
4776 ia64_libcall_value (enum machine_mode mode,
4777 const_rtx fun ATTRIBUTE_UNUSED)
4779 return gen_rtx_REG (mode,
4780 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4781 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4782 && (mode) != TFmode)
4783 ? FR_RET_FIRST : GR_RET_FIRST));
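/* That is, an SFmode or DFmode libcall result is returned in the first FP
   return register, while integer and TFmode results use the first GR
   return register.  */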
4786 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4788 static bool
4789 ia64_function_value_regno_p (const unsigned int regno)
4791 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4792 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4795 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4796 We need to emit DTP-relative relocations. */
4798 static void
4799 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4801 gcc_assert (size == 4 || size == 8);
4802 if (size == 4)
4803 fputs ("\tdata4.ua\t@dtprel(", file);
4804 else
4805 fputs ("\tdata8.ua\t@dtprel(", file);
4806 output_addr_const (file, x);
4807 fputs (")", file);
4810 /* Print a memory address as an operand to reference that memory location. */
4812 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4813 also call this from ia64_print_operand for memory addresses. */
4815 void
4816 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4817 rtx address ATTRIBUTE_UNUSED)
4821 /* Print an operand to an assembler instruction.
4822 C Swap and print a comparison operator.
4823 D Print an FP comparison operator.
4824 E Print 32 - constant, for SImode shifts as extract.
4825 e Print 64 - constant, for DImode rotates.
4826 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4827 a floating point register emitted normally.
4828 G A floating point constant.
4829 I Invert a predicate register by adding 1.
4830 J Select the proper predicate register for a condition.
4831 j Select the inverse predicate register for a condition.
4832 O Append .acq for volatile load.
4833 P Postincrement of a MEM.
4834 Q Append .rel for volatile store.
4835 R Print .s, .d, or nothing for a single, double, or no truncation.
4836 S Shift amount for shladd instruction.
4837 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4838 for Intel assembler.
4839 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4840 for Intel assembler.
4841 X A pair of floating point registers.
4842 r Print register name, or constant 0 as r0. HP compatibility for
4843 Linux kernel.
4844 v Print vector constant value as an 8-byte integer value. */
4846 void
4847 ia64_print_operand (FILE * file, rtx x, int code)
4849 const char *str;
4851 switch (code)
4853 case 0:
4854 /* Handled below. */
4855 break;
4857 case 'C':
4859 enum rtx_code c = swap_condition (GET_CODE (x));
4860 fputs (GET_RTX_NAME (c), file);
4861 return;
4864 case 'D':
4865 switch (GET_CODE (x))
4867 case NE:
4868 str = "neq";
4869 break;
4870 case UNORDERED:
4871 str = "unord";
4872 break;
4873 case ORDERED:
4874 str = "ord";
4875 break;
4876 case UNLT:
4877 str = "nge";
4878 break;
4879 case UNLE:
4880 str = "ngt";
4881 break;
4882 case UNGT:
4883 str = "nle";
4884 break;
4885 case UNGE:
4886 str = "nlt";
4887 break;
4888 default:
4889 str = GET_RTX_NAME (GET_CODE (x));
4890 break;
4892 fputs (str, file);
4893 return;
4895 case 'E':
4896 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4897 return;
4899 case 'e':
4900 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4901 return;
4903 case 'F':
4904 if (x == CONST0_RTX (GET_MODE (x)))
4905 str = reg_names [FR_REG (0)];
4906 else if (x == CONST1_RTX (GET_MODE (x)))
4907 str = reg_names [FR_REG (1)];
4908 else
4910 gcc_assert (GET_CODE (x) == REG);
4911 str = reg_names [REGNO (x)];
4913 fputs (str, file);
4914 return;
4916 case 'G':
4918 long val[4];
4919 REAL_VALUE_TYPE rv;
4920 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
4921 real_to_target (val, &rv, GET_MODE (x));
4922 if (GET_MODE (x) == SFmode)
4923 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
4924 else if (GET_MODE (x) == DFmode)
4925 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
4926 & 0xffffffff,
4927 (WORDS_BIG_ENDIAN ? val[1] : val[0])
4928 & 0xffffffff);
4929 else
4930 output_operand_lossage ("invalid %%G mode");
4932 return;
4934 case 'I':
4935 fputs (reg_names [REGNO (x) + 1], file);
4936 return;
4938 case 'J':
4939 case 'j':
4941 unsigned int regno = REGNO (XEXP (x, 0));
4942 if (GET_CODE (x) == EQ)
4943 regno += 1;
4944 if (code == 'j')
4945 regno ^= 1;
4946 fputs (reg_names [regno], file);
4948 return;
4950 case 'O':
4951 if (MEM_VOLATILE_P (x))
4952 fputs(".acq", file);
4953 return;
4955 case 'P':
4957 HOST_WIDE_INT value;
4959 switch (GET_CODE (XEXP (x, 0)))
4961 default:
4962 return;
4964 case POST_MODIFY:
4965 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4966 if (GET_CODE (x) == CONST_INT)
4967 value = INTVAL (x);
4968 else
4970 gcc_assert (GET_CODE (x) == REG);
4971 fprintf (file, ", %s", reg_names[REGNO (x)]);
4972 return;
4974 break;
4976 case POST_INC:
4977 value = GET_MODE_SIZE (GET_MODE (x));
4978 break;
4980 case POST_DEC:
4981 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4982 break;
4985 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4986 return;
4989 case 'Q':
4990 if (MEM_VOLATILE_P (x))
4991 fputs(".rel", file);
4992 return;
4994 case 'R':
4995 if (x == CONST0_RTX (GET_MODE (x)))
4996 fputs(".s", file);
4997 else if (x == CONST1_RTX (GET_MODE (x)))
4998 fputs(".d", file);
4999 else if (x == CONST2_RTX (GET_MODE (x)))
5001 else
5002 output_operand_lossage ("invalid %%R value");
5003 return;
5005 case 'S':
5006 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5007 return;
5009 case 'T':
5010 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5012 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5013 return;
5015 break;
5017 case 'U':
5018 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5020 const char *prefix = "0x";
5021 if (INTVAL (x) & 0x80000000)
5023 fprintf (file, "0xffffffff");
5024 prefix = "";
5026 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5027 return;
5029 break;
5031 case 'X':
5033 unsigned int regno = REGNO (x);
5034 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5036 return;
5038 case 'r':
5039 /* If this operand is the constant zero, write it as register zero.
5040 Any register, zero, or CONST_INT value is OK here. */
5041 if (GET_CODE (x) == REG)
5042 fputs (reg_names[REGNO (x)], file);
5043 else if (x == CONST0_RTX (GET_MODE (x)))
5044 fputs ("r0", file);
5045 else if (GET_CODE (x) == CONST_INT)
5046 output_addr_const (file, x);
5047 else
5048 output_operand_lossage ("invalid %%r value");
5049 return;
5051 case 'v':
5052 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5053 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5054 break;
5056 case '+':
5058 const char *which;
5060 /* For conditional branches, returns or calls, substitute
5061 sptk, dptk, dpnt, or spnt for %s. */
5062 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5063 if (x)
5065 int pred_val = INTVAL (XEXP (x, 0));
5067 /* Guess top and bottom 10% statically predicted. */
5068 if (pred_val < REG_BR_PROB_BASE / 50
5069 && br_prob_note_reliable_p (x))
5070 which = ".spnt";
5071 else if (pred_val < REG_BR_PROB_BASE / 2)
5072 which = ".dpnt";
5073 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5074 || !br_prob_note_reliable_p (x))
5075 which = ".dptk";
5076 else
5077 which = ".sptk";
5079 else if (GET_CODE (current_output_insn) == CALL_INSN)
5080 which = ".sptk";
5081 else
5082 which = ".dptk";
5084 fputs (which, file);
5085 return;
5088 case ',':
5089 x = current_insn_predicate;
5090 if (x)
5092 unsigned int regno = REGNO (XEXP (x, 0));
5093 if (GET_CODE (x) == EQ)
5094 regno += 1;
5095 fprintf (file, "(%s) ", reg_names [regno]);
5097 return;
5099 default:
5100 output_operand_lossage ("ia64_print_operand: unknown code");
5101 return;
5104 switch (GET_CODE (x))
5106 /* This happens for the spill/restore instructions. */
5107 case POST_INC:
5108 case POST_DEC:
5109 case POST_MODIFY:
5110 x = XEXP (x, 0);
5111 /* ... fall through ... */
5113 case REG:
5114 fputs (reg_names [REGNO (x)], file);
5115 break;
5117 case MEM:
5119 rtx addr = XEXP (x, 0);
5120 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5121 addr = XEXP (addr, 0);
5122 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5123 break;
5126 default:
5127 output_addr_const (file, x);
5128 break;
5131 return;
5134 /* Compute a (partial) cost for rtx X. Return true if the complete
5135 cost has been computed, and false if subexpressions should be
5136 scanned. In either case, *TOTAL contains the cost result. */
5137 /* ??? This is incomplete. */
5139 static bool
5140 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
5141 bool speed ATTRIBUTE_UNUSED)
5143 switch (code)
5145 case CONST_INT:
5146 switch (outer_code)
5148 case SET:
5149 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5150 return true;
5151 case PLUS:
5152 if (satisfies_constraint_I (x))
5153 *total = 0;
5154 else if (satisfies_constraint_J (x))
5155 *total = 1;
5156 else
5157 *total = COSTS_N_INSNS (1);
5158 return true;
5159 default:
5160 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5161 *total = 0;
5162 else
5163 *total = COSTS_N_INSNS (1);
5164 return true;
5167 case CONST_DOUBLE:
5168 *total = COSTS_N_INSNS (1);
5169 return true;
5171 case CONST:
5172 case SYMBOL_REF:
5173 case LABEL_REF:
5174 *total = COSTS_N_INSNS (3);
5175 return true;
5177 case MULT:
5178 /* For multiplies wider than HImode, we have to go to the FPU,
5179 which normally involves copies. Plus there's the latency
5180 of the multiply itself, and the latency of the instructions to
5181 transfer integer regs to FP regs. */
5182 /* ??? Check for FP mode. */
5183 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5184 *total = COSTS_N_INSNS (10);
5185 else
5186 *total = COSTS_N_INSNS (2);
5187 return true;
5189 case PLUS:
5190 case MINUS:
5191 case ASHIFT:
5192 case ASHIFTRT:
5193 case LSHIFTRT:
5194 *total = COSTS_N_INSNS (1);
5195 return true;
5197 case DIV:
5198 case UDIV:
5199 case MOD:
5200 case UMOD:
5201 /* We make divide expensive, so that divide-by-constant will be
5202 optimized to a multiply. */
5203 *total = COSTS_N_INSNS (60);
5204 return true;
5206 default:
5207 return false;
5211 /* Calculate the cost of moving data from a register in class FROM to
5212 one in class TO, using MODE. */
5214 static int
5215 ia64_register_move_cost (enum machine_mode mode, reg_class_t from_i,
5216 reg_class_t to_i)
5218 enum reg_class from = (enum reg_class) from_i;
5219 enum reg_class to = (enum reg_class) to_i;
5221 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5222 if (to == ADDL_REGS)
5223 to = GR_REGS;
5224 if (from == ADDL_REGS)
5225 from = GR_REGS;
5227 /* All costs are symmetric, so reduce cases by putting the
5228 lower-numbered class as the destination. */
5229 if (from < to)
5231 enum reg_class tmp = to;
5232 to = from, from = tmp;
5235 /* Moving between FR and GR registers in XFmode must be more expensive
5236 than 2, so that we get secondary memory reloads. For moves within
5237 FR_REGS, we have to make this at least as expensive as memory_move_cost
5238 to avoid spectacularly poor register class preferencing. */
5239 if (mode == XFmode || mode == RFmode)
5241 if (to != GR_REGS || from != GR_REGS)
5242 return memory_move_cost (mode, to, false);
5243 else
5244 return 3;
5247 switch (to)
5249 case PR_REGS:
5250 /* Moving between PR registers takes two insns. */
5251 if (from == PR_REGS)
5252 return 3;
5253 /* Moving between PR and anything but GR is impossible. */
5254 if (from != GR_REGS)
5255 return memory_move_cost (mode, to, false);
5256 break;
5258 case BR_REGS:
5259 /* Moving between BR and anything but GR is impossible. */
5260 if (from != GR_REGS && from != GR_AND_BR_REGS)
5261 return memory_move_cost (mode, to, false);
5262 break;
5264 case AR_I_REGS:
5265 case AR_M_REGS:
5266 /* Moving between AR and anything but GR is impossible. */
5267 if (from != GR_REGS)
5268 return memory_move_cost (mode, to, false);
5269 break;
5271 case GR_REGS:
5272 case FR_REGS:
5273 case FP_REGS:
5274 case GR_AND_FR_REGS:
5275 case GR_AND_BR_REGS:
5276 case ALL_REGS:
5277 break;
5279 default:
5280 gcc_unreachable ();
5283 return 2;
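/* Putting the cases above together: GR <-> GR and GR <-> FR copies of
   ordinary integer modes cost 2, PR <-> PR copies cost 3, and copies that
   can only go through memory (e.g. BR or AR from anything but GR, or
   XFmode/RFmode copies other than GR <-> GR) are priced at
   memory_move_cost.  */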
5286 /* Calculate the cost of moving data of MODE from a register to or from
5287 memory. */
5289 static int
5290 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5291 reg_class_t rclass,
5292 bool in ATTRIBUTE_UNUSED)
5294 if (rclass == GENERAL_REGS
5295 || rclass == FR_REGS
5296 || rclass == FP_REGS
5297 || rclass == GR_AND_FR_REGS)
5298 return 4;
5299 else
5300 return 10;
5303 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on RCLASS
5304 to use when copying X into that class. */
5306 enum reg_class
5307 ia64_preferred_reload_class (rtx x, enum reg_class rclass)
5309 switch (rclass)
5311 case FR_REGS:
5312 case FP_REGS:
5313 /* Don't allow volatile mem reloads into floating point registers.
5314 This is defined to force reload to choose the r/m case instead
5315 of the f/f case when reloading (set (reg fX) (mem/v)). */
5316 if (MEM_P (x) && MEM_VOLATILE_P (x))
5317 return NO_REGS;
5319 /* Force all unrecognized constants into the constant pool. */
5320 if (CONSTANT_P (x))
5321 return NO_REGS;
5322 break;
5324 case AR_M_REGS:
5325 case AR_I_REGS:
5326 if (!OBJECT_P (x))
5327 return NO_REGS;
5328 break;
5330 default:
5331 break;
5334 return rclass;
5337 /* This function returns the register class required for a secondary
5338 register when copying between one of the registers in RCLASS, and X,
5339 using MODE. A return value of NO_REGS means that no secondary register
5340 is required. */
5342 enum reg_class
5343 ia64_secondary_reload_class (enum reg_class rclass,
5344 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5346 int regno = -1;
5348 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5349 regno = true_regnum (x);
5351 switch (rclass)
5353 case BR_REGS:
5354 case AR_M_REGS:
5355 case AR_I_REGS:
5356 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5357 interaction. We end up with two pseudos with overlapping lifetimes
5358 both of which are equiv to the same constant, and both which need
5359 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5360 changes depending on the path length, which means the qty_first_reg
5361 check in make_regs_eqv can give different answers at different times.
5362 At some point I'll probably need a reload_indi pattern to handle
5363 this.
5365 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5366 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5367 non-general registers for good measure. */
5368 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5369 return GR_REGS;
5371 /* This is needed if a pseudo used as a call_operand gets spilled to a
5372 stack slot. */
5373 if (GET_CODE (x) == MEM)
5374 return GR_REGS;
5375 break;
5377 case FR_REGS:
5378 case FP_REGS:
5379 /* Need to go through general registers to get to other class regs. */
5380 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5381 return GR_REGS;
5383 /* This can happen when a paradoxical subreg is an operand to the
5384 muldi3 pattern. */
5385 /* ??? This shouldn't be necessary after instruction scheduling is
5386 enabled, because paradoxical subregs are not accepted by
5387 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5388 stop the paradoxical subreg stupidity in the *_operand functions
5389 in recog.c. */
5390 if (GET_CODE (x) == MEM
5391 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5392 || GET_MODE (x) == QImode))
5393 return GR_REGS;
5395 /* This can happen because of the ior/and/etc patterns that accept FP
5396 registers as operands. If the third operand is a constant, then it
5397 needs to be reloaded into a FP register. */
5398 if (GET_CODE (x) == CONST_INT)
5399 return GR_REGS;
5401 /* This can happen because of register elimination in a muldi3 insn.
5402 E.g. `26107 * (unsigned long)&u'. */
5403 if (GET_CODE (x) == PLUS)
5404 return GR_REGS;
5405 break;
5407 case PR_REGS:
5408 /* ??? This happens if we cse/gcse a BImode value across a call,
5409 and the function has a nonlocal goto. This is because global
5410 does not allocate call crossing pseudos to hard registers when
5411 crtl->has_nonlocal_goto is true. This is relatively
5412 common for C++ programs that use exceptions. To reproduce,
5413 return NO_REGS and compile libstdc++. */
5414 if (GET_CODE (x) == MEM)
5415 return GR_REGS;
5417 /* This can happen when we take a BImode subreg of a DImode value,
5418 and that DImode value winds up in some non-GR register. */
5419 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5420 return GR_REGS;
5421 break;
5423 default:
5424 break;
5427 return NO_REGS;
5431 /* Implement targetm.unspec_may_trap_p hook. */
5432 static int
5433 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5435 if (GET_CODE (x) == UNSPEC)
5437 switch (XINT (x, 1))
5439 case UNSPEC_LDA:
5440 case UNSPEC_LDS:
5441 case UNSPEC_LDSA:
5442 case UNSPEC_LDCCLR:
5443 case UNSPEC_CHKACLR:
5444 case UNSPEC_CHKS:
5445 /* These unspecs are just wrappers. */
5446 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5450 return default_unspec_may_trap_p (x, flags);
5454 /* Parse the -mfixed-range= option string. */
5456 static void
5457 fix_range (const char *const_str)
5459 int i, first, last;
5460 char *str, *dash, *comma;
5462 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5463 REG2 are either register names or register numbers. The effect
5464 of this option is to mark the registers in the range from REG1 to
5465 REG2 as ``fixed'' so they won't be used by the compiler. This is
5466 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5468 i = strlen (const_str);
5469 str = (char *) alloca (i + 1);
5470 memcpy (str, const_str, i + 1);
5472 while (1)
5474 dash = strchr (str, '-');
5475 if (!dash)
5477 warning (0, "value of -mfixed-range must have form REG1-REG2");
5478 return;
5480 *dash = '\0';
5482 comma = strchr (dash + 1, ',');
5483 if (comma)
5484 *comma = '\0';
5486 first = decode_reg_name (str);
5487 if (first < 0)
5489 warning (0, "unknown register name: %s", str);
5490 return;
5493 last = decode_reg_name (dash + 1);
5494 if (last < 0)
5496 warning (0, "unknown register name: %s", dash + 1);
5497 return;
5500 *dash = '-';
5502 if (first > last)
5504 warning (0, "%s-%s is an empty range", str, dash + 1);
5505 return;
5508 for (i = first; i <= last; ++i)
5509 fixed_regs[i] = call_used_regs[i] = 1;
5511 if (!comma)
5512 break;
5514 *comma = ',';
5515 str = comma + 1;
5519 /* Implement TARGET_HANDLE_OPTION. */
5521 static bool
5522 ia64_handle_option (size_t code, const char *arg, int value)
5524 switch (code)
5526 case OPT_mfixed_range_:
5527 fix_range (arg);
5528 return true;
5530 case OPT_mtls_size_:
5531 if (value != 14 && value != 22 && value != 64)
5532 error ("bad value %<%s%> for -mtls-size= switch", arg);
5533 return true;
5535 case OPT_mtune_:
5537 static struct pta
5539 const char *name; /* processor name or nickname. */
5540 enum processor_type processor;
5542 const processor_alias_table[] =
5544 {"itanium2", PROCESSOR_ITANIUM2},
5545 {"mckinley", PROCESSOR_ITANIUM2},
5547 int const pta_size = ARRAY_SIZE (processor_alias_table);
5548 int i;
5550 for (i = 0; i < pta_size; i++)
5551 if (!strcmp (arg, processor_alias_table[i].name))
5553 ia64_tune = processor_alias_table[i].processor;
5554 break;
5556 if (i == pta_size)
5557 error ("bad value %<%s%> for -mtune= switch", arg);
5558 return true;
5561 default:
5562 return true;
5566 /* Implement OVERRIDE_OPTIONS. */
5568 void
5569 ia64_override_options (void)
5571 if (TARGET_AUTO_PIC)
5572 target_flags |= MASK_CONST_GP;
5574 /* Numerous experiments show that IRA-based loop pressure
5575 calculation works better for RTL loop invariant motion on targets
5576 with enough (>= 32) registers. It is an expensive optimization,
5577 so it is enabled only when optimizing for peak performance. */
5578 if (optimize >= 3)
5579 flag_ira_loop_pressure = 1;
5582 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5584 init_machine_status = ia64_init_machine_status;
5586 if (align_functions <= 0)
5587 align_functions = 64;
5588 if (align_loops <= 0)
5589 align_loops = 32;
5590 if (TARGET_ABI_OPEN_VMS)
5591 flag_no_common = 1;
5593 ia64_override_options_after_change();
5596 /* Implement targetm.override_options_after_change. */
5598 static void
5599 ia64_override_options_after_change (void)
5601 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5602 flag_schedule_insns_after_reload = 0;
5604 if (optimize >= 3
5605 && ! sel_sched_switch_set)
5607 flag_selective_scheduling2 = 1;
5608 flag_sel_sched_pipelining = 1;
5610 if (mflag_sched_control_spec == 2)
5612 /* Control speculation is on by default for the selective scheduler,
5613 but not for the Haifa scheduler. */
5614 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5616 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5618 /* FIXME: remove this when we implement breaking auto-inc insns
5619 as a transformation. */
5620 flag_auto_inc_dec = 0;
5624 /* Initialize the record of emitted frame related registers. */
5626 void ia64_init_expanders (void)
5628 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5631 static struct machine_function *
5632 ia64_init_machine_status (void)
5634 return ggc_alloc_cleared_machine_function ();
5637 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5638 static enum attr_type ia64_safe_type (rtx);
5640 static enum attr_itanium_class
5641 ia64_safe_itanium_class (rtx insn)
5643 if (recog_memoized (insn) >= 0)
5644 return get_attr_itanium_class (insn);
5645 else if (DEBUG_INSN_P (insn))
5646 return ITANIUM_CLASS_IGNORE;
5647 else
5648 return ITANIUM_CLASS_UNKNOWN;
5651 static enum attr_type
5652 ia64_safe_type (rtx insn)
5654 if (recog_memoized (insn) >= 0)
5655 return get_attr_type (insn);
5656 else
5657 return TYPE_UNKNOWN;
5660 /* The following collection of routines emit instruction group stop bits as
5661 necessary to avoid dependencies. */
5663 /* Need to track some additional registers as far as serialization is
5664 concerned so we can properly handle br.call and br.ret. We could
5665 make these registers visible to gcc, but since these registers are
5666 never explicitly used in gcc generated code, it seems wasteful to
5667 do so (plus it would make the call and return patterns needlessly
5668 complex). */
5669 #define REG_RP (BR_REG (0))
5670 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5671 /* This is used for volatile asms which may require a stop bit immediately
5672 before and after them. */
5673 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5674 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5675 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5677 /* For each register, we keep track of how it has been written in the
5678 current instruction group.
5680 If a register is written unconditionally (no qualifying predicate),
5681 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5683 If a register is written if its qualifying predicate P is true, we
5684 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5685 may be written again by the complement of P (P^1) and when this happens,
5686 WRITE_COUNT gets set to 2.
5688 The result of this is that whenever an insn attempts to write a register
5689 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5691 If a predicate register is written by a floating-point insn, we set
5692 WRITTEN_BY_FP to true.
5694 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5695 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
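/* Illustrative sketch (not part of the original source): assuming p6/p7 are
   the complementary predicate pair written by a compare, the two predicated
   writes below can share one instruction group -- the second write sees
   WRITE_COUNT == 1 with the complementary FIRST_PRED, so no stop bit is
   needed between them:

       cmp.eq p6, p7 = r8, r9 ;;
       (p6) mov r14 = 1
       (p7) mov r14 = 2

   An unconditional write sets WRITE_COUNT to 2 at once, so any further write
   to the same register within the group forces a stop bit.  */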
5697 #if GCC_VERSION >= 4000
5698 #define RWS_FIELD_TYPE __extension__ unsigned short
5699 #else
5700 #define RWS_FIELD_TYPE unsigned int
5701 #endif
5702 struct reg_write_state
5704 RWS_FIELD_TYPE write_count : 2;
5705 RWS_FIELD_TYPE first_pred : 10;
5706 RWS_FIELD_TYPE written_by_fp : 1;
5707 RWS_FIELD_TYPE written_by_and : 1;
5708 RWS_FIELD_TYPE written_by_or : 1;
5711 /* Cumulative info for the current instruction group. */
5712 struct reg_write_state rws_sum[NUM_REGS];
5713 #ifdef ENABLE_CHECKING
5714 /* Bitmap recording whether a register has been written in the current insn. */
5715 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5716 / HOST_BITS_PER_WIDEST_FAST_INT];
5718 static inline void
5719 rws_insn_set (int regno)
5721 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5722 SET_HARD_REG_BIT (rws_insn, regno);
5725 static inline int
5726 rws_insn_test (int regno)
5728 return TEST_HARD_REG_BIT (rws_insn, regno);
5730 #else
5731 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5732 unsigned char rws_insn[2];
5734 static inline void
5735 rws_insn_set (int regno)
5737 if (regno == REG_AR_CFM)
5738 rws_insn[0] = 1;
5739 else if (regno == REG_VOLATILE)
5740 rws_insn[1] = 1;
5743 static inline int
5744 rws_insn_test (int regno)
5746 if (regno == REG_AR_CFM)
5747 return rws_insn[0];
5748 if (regno == REG_VOLATILE)
5749 return rws_insn[1];
5750 return 0;
5752 #endif
5754 /* Indicates whether this is the first instruction after a stop bit,
5755 in which case we don't need another stop bit. Without this,
5756 ia64_variable_issue will die when scheduling an alloc. */
5757 static int first_instruction;
5759 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5760 RTL for one instruction. */
5761 struct reg_flags
5763 unsigned int is_write : 1; /* Is register being written? */
5764 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5765 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5766 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5767 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5768 unsigned int is_sibcall : 1; /* Is this a sibling call (rather than a normal call)? */
5771 static void rws_update (int, struct reg_flags, int);
5772 static int rws_access_regno (int, struct reg_flags, int);
5773 static int rws_access_reg (rtx, struct reg_flags, int);
5774 static void update_set_flags (rtx, struct reg_flags *);
5775 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5776 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5777 static void init_insn_group_barriers (void);
5778 static int group_barrier_needed (rtx);
5779 static int safe_group_barrier_needed (rtx);
5780 static int in_safe_group_barrier;
5782 /* Update rws_sum for REGNO, which is being written by the current instruction,
5783 with predicate PRED, and associated register flags in FLAGS. */
5785 static void
5786 rws_update (int regno, struct reg_flags flags, int pred)
5788 if (pred)
5789 rws_sum[regno].write_count++;
5790 else
5791 rws_sum[regno].write_count = 2;
5792 rws_sum[regno].written_by_fp |= flags.is_fp;
5793 /* ??? Not tracking and/or across differing predicates. */
5794 rws_sum[regno].written_by_and = flags.is_and;
5795 rws_sum[regno].written_by_or = flags.is_or;
5796 rws_sum[regno].first_pred = pred;
5799 /* Handle an access to register REGNO of type FLAGS using predicate register
5800 PRED. Update rws_sum array. Return 1 if this access creates
5801 a dependency with an earlier instruction in the same group. */
5803 static int
5804 rws_access_regno (int regno, struct reg_flags flags, int pred)
5806 int need_barrier = 0;
5808 gcc_assert (regno < NUM_REGS);
5810 if (! PR_REGNO_P (regno))
5811 flags.is_and = flags.is_or = 0;
5813 if (flags.is_write)
5815 int write_count;
5817 rws_insn_set (regno);
5818 write_count = rws_sum[regno].write_count;
5820 switch (write_count)
5822 case 0:
5823 /* The register has not been written yet. */
5824 if (!in_safe_group_barrier)
5825 rws_update (regno, flags, pred);
5826 break;
5828 case 1:
5829 /* The register has been written via a predicate. If this is
5830 not a complementary predicate, then we need a barrier. */
5831 /* ??? This assumes that P and P+1 are always complementary
5832 predicates for P even. */
5833 if (flags.is_and && rws_sum[regno].written_by_and)
5835 else if (flags.is_or && rws_sum[regno].written_by_or)
5837 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5838 need_barrier = 1;
5839 if (!in_safe_group_barrier)
5840 rws_update (regno, flags, pred);
5841 break;
5843 case 2:
5844 /* The register has been unconditionally written already. We
5845 need a barrier. */
5846 if (flags.is_and && rws_sum[regno].written_by_and)
5848 else if (flags.is_or && rws_sum[regno].written_by_or)
5850 else
5851 need_barrier = 1;
5852 if (!in_safe_group_barrier)
5854 rws_sum[regno].written_by_and = flags.is_and;
5855 rws_sum[regno].written_by_or = flags.is_or;
5857 break;
5859 default:
5860 gcc_unreachable ();
5863 else
5865 if (flags.is_branch)
5867 /* Branches have several RAW exceptions that allow us to avoid
5868 barriers. */
5870 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5871 /* RAW dependencies on branch regs are permissible as long
5872 as the writer is a non-branch instruction. Since we
5873 never generate code that uses a branch register written
5874 by a branch instruction, handling this case is
5875 easy. */
5876 return 0;
5878 if (REGNO_REG_CLASS (regno) == PR_REGS
5879 && ! rws_sum[regno].written_by_fp)
5880 /* The predicates of a branch are available within the
5881 same insn group as long as the predicate was written by
5882 something other than a floating-point instruction. */
5883 return 0;
5886 if (flags.is_and && rws_sum[regno].written_by_and)
5887 return 0;
5888 if (flags.is_or && rws_sum[regno].written_by_or)
5889 return 0;
5891 switch (rws_sum[regno].write_count)
5893 case 0:
5894 /* The register has not been written yet. */
5895 break;
5897 case 1:
5898 /* The register has been written via a predicate. If this is
5899 not a complementary predicate, then we need a barrier. */
5900 /* ??? This assumes that P and P+1 are always complementary
5901 predicates for P even. */
5902 if ((rws_sum[regno].first_pred ^ 1) != pred)
5903 need_barrier = 1;
5904 break;
5906 case 2:
5907 /* The register has been unconditionally written already. We
5908 need a barrier. */
5909 need_barrier = 1;
5910 break;
5912 default:
5913 gcc_unreachable ();
5917 return need_barrier;
5920 static int
5921 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5923 int regno = REGNO (reg);
5924 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5926 if (n == 1)
5927 return rws_access_regno (regno, flags, pred);
5928 else
5930 int need_barrier = 0;
5931 while (--n >= 0)
5932 need_barrier |= rws_access_regno (regno + n, flags, pred);
5933 return need_barrier;
5937 /* Examine X, which is a SET rtx, and update the register flags
5938 stored in *PFLAGS to reflect what the SET does. */
5940 static void
5941 update_set_flags (rtx x, struct reg_flags *pflags)
5943 rtx src = SET_SRC (x);
5945 switch (GET_CODE (src))
5947 case CALL:
5948 return;
5950 case IF_THEN_ELSE:
5951 /* There are four cases here:
5952 (1) The destination is (pc), in which case this is a branch,
5953 nothing here applies.
5954 (2) The destination is ar.lc, in which case this is a
5955 doloop_end_internal.
5956 (3) The destination is an fp register, in which case this is
5957 an fselect instruction.
5958 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5959 this is a check load.
5960 In all cases, nothing we do in this function applies. */
5961 return;
5963 default:
5964 if (COMPARISON_P (src)
5965 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5966 /* Set pflags->is_fp to 1 so that we know we're dealing
5967 with a floating point comparison when processing the
5968 destination of the SET. */
5969 pflags->is_fp = 1;
5971 /* Discover if this is a parallel comparison. We only handle
5972 and.orcm and or.andcm at present, since we must retain a
5973 strict inverse on the predicate pair. */
5974 else if (GET_CODE (src) == AND)
5975 pflags->is_and = 1;
5976 else if (GET_CODE (src) == IOR)
5977 pflags->is_or = 1;
5979 break;
5983 /* Subroutine of rtx_needs_barrier; this function determines whether the
5984 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5985 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5986 for this insn. */
5988 static int
5989 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5991 int need_barrier = 0;
5992 rtx dst;
5993 rtx src = SET_SRC (x);
5995 if (GET_CODE (src) == CALL)
5996 /* We don't need to worry about the result registers that
5997 get written by a subroutine call. */
5998 return rtx_needs_barrier (src, flags, pred);
5999 else if (SET_DEST (x) == pc_rtx)
6001 /* X is a conditional branch. */
6002 /* ??? This seems redundant, as the caller sets this bit for
6003 all JUMP_INSNs. */
6004 if (!ia64_spec_check_src_p (src))
6005 flags.is_branch = 1;
6006 return rtx_needs_barrier (src, flags, pred);
6009 if (ia64_spec_check_src_p (src))
6010 /* Avoid checking one register twice (in condition
6011 and in 'then' section) for ldc pattern. */
6013 gcc_assert (REG_P (XEXP (src, 2)));
6014 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6016 /* We process MEM below. */
6017 src = XEXP (src, 1);
6020 need_barrier |= rtx_needs_barrier (src, flags, pred);
6022 dst = SET_DEST (x);
6023 if (GET_CODE (dst) == ZERO_EXTRACT)
6025 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6026 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6028 return need_barrier;
6031 /* Handle an access to rtx X of type FLAGS using predicate register
6032 PRED. Return 1 if this access creates a dependency with an earlier
6033 instruction in the same group. */
6035 static int
6036 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6038 int i, j;
6039 int is_complemented = 0;
6040 int need_barrier = 0;
6041 const char *format_ptr;
6042 struct reg_flags new_flags;
6043 rtx cond;
6045 if (! x)
6046 return 0;
6048 new_flags = flags;
6050 switch (GET_CODE (x))
6052 case SET:
6053 update_set_flags (x, &new_flags);
6054 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6055 if (GET_CODE (SET_SRC (x)) != CALL)
6057 new_flags.is_write = 1;
6058 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6060 break;
6062 case CALL:
6063 new_flags.is_write = 0;
6064 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6066 /* Avoid multiple register writes, in case this is a pattern with
6067 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6068 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6070 new_flags.is_write = 1;
6071 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6072 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6073 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6075 break;
6077 case COND_EXEC:
6078 /* X is a predicated instruction. */
6080 cond = COND_EXEC_TEST (x);
6081 gcc_assert (!pred);
6082 need_barrier = rtx_needs_barrier (cond, flags, 0);
6084 if (GET_CODE (cond) == EQ)
6085 is_complemented = 1;
6086 cond = XEXP (cond, 0);
6087 gcc_assert (GET_CODE (cond) == REG
6088 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6089 pred = REGNO (cond);
6090 if (is_complemented)
6091 ++pred;
6093 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6094 return need_barrier;
6096 case CLOBBER:
6097 case USE:
6098 /* Clobber & use are for earlier compiler phases only. */
6099 break;
6101 case ASM_OPERANDS:
6102 case ASM_INPUT:
6103 /* We always emit stop bits for traditional asms. We emit stop bits
6104 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6105 if (GET_CODE (x) != ASM_OPERANDS
6106 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6108 /* Avoid writing the register multiple times if we have multiple
6109 asm outputs. This avoids a failure in rws_access_reg. */
6110 if (! rws_insn_test (REG_VOLATILE))
6112 new_flags.is_write = 1;
6113 rws_access_regno (REG_VOLATILE, new_flags, pred);
6115 return 1;
6118 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6119 We cannot just fall through here since then we would be confused
6120 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
6121 a traditional asm, unlike its normal usage. */
6123 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6124 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6125 need_barrier = 1;
6126 break;
6128 case PARALLEL:
6129 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6131 rtx pat = XVECEXP (x, 0, i);
6132 switch (GET_CODE (pat))
6134 case SET:
6135 update_set_flags (pat, &new_flags);
6136 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6137 break;
6139 case USE:
6140 case CALL:
6141 case ASM_OPERANDS:
6142 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6143 break;
6145 case CLOBBER:
6146 case RETURN:
6147 break;
6149 default:
6150 gcc_unreachable ();
6153 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6155 rtx pat = XVECEXP (x, 0, i);
6156 if (GET_CODE (pat) == SET)
6158 if (GET_CODE (SET_SRC (pat)) != CALL)
6160 new_flags.is_write = 1;
6161 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6162 pred);
6165 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6166 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6168 break;
6170 case SUBREG:
6171 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6172 break;
6173 case REG:
6174 if (REGNO (x) == AR_UNAT_REGNUM)
6176 for (i = 0; i < 64; ++i)
6177 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6179 else
6180 need_barrier = rws_access_reg (x, flags, pred);
6181 break;
6183 case MEM:
6184 /* Find the regs used in memory address computation. */
6185 new_flags.is_write = 0;
6186 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6187 break;
6189 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6190 case SYMBOL_REF: case LABEL_REF: case CONST:
6191 break;
6193 /* Operators with side-effects. */
6194 case POST_INC: case POST_DEC:
6195 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6197 new_flags.is_write = 0;
6198 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6199 new_flags.is_write = 1;
6200 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6201 break;
6203 case POST_MODIFY:
6204 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6206 new_flags.is_write = 0;
6207 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6208 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6209 new_flags.is_write = 1;
6210 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6211 break;
6213 /* Handle common unary and binary ops for efficiency. */
6214 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6215 case MOD: case UDIV: case UMOD: case AND: case IOR:
6216 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6217 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6218 case NE: case EQ: case GE: case GT: case LE:
6219 case LT: case GEU: case GTU: case LEU: case LTU:
6220 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6221 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6222 break;
6224 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6225 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6226 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6227 case SQRT: case FFS: case POPCOUNT:
6228 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6229 break;
6231 case VEC_SELECT:
6232 /* VEC_SELECT's second argument is a PARALLEL with integers that
6233 describe the elements selected. On ia64, those integers are
6234 always constants. Avoid walking the PARALLEL so that we don't
6235 get confused with "normal" parallels and then die. */
6236 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6237 break;
6239 case UNSPEC:
6240 switch (XINT (x, 1))
6242 case UNSPEC_LTOFF_DTPMOD:
6243 case UNSPEC_LTOFF_DTPREL:
6244 case UNSPEC_DTPREL:
6245 case UNSPEC_LTOFF_TPREL:
6246 case UNSPEC_TPREL:
6247 case UNSPEC_PRED_REL_MUTEX:
6248 case UNSPEC_PIC_CALL:
6249 case UNSPEC_MF:
6250 case UNSPEC_FETCHADD_ACQ:
6251 case UNSPEC_BSP_VALUE:
6252 case UNSPEC_FLUSHRS:
6253 case UNSPEC_BUNDLE_SELECTOR:
6254 break;
6256 case UNSPEC_GR_SPILL:
6257 case UNSPEC_GR_RESTORE:
6259 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6260 HOST_WIDE_INT bit = (offset >> 3) & 63;
6262 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6263 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6264 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6265 new_flags, pred);
6266 break;
6269 case UNSPEC_FR_SPILL:
6270 case UNSPEC_FR_RESTORE:
6271 case UNSPEC_GETF_EXP:
6272 case UNSPEC_SETF_EXP:
6273 case UNSPEC_ADDP4:
6274 case UNSPEC_FR_SQRT_RECIP_APPROX:
6275 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6276 case UNSPEC_LDA:
6277 case UNSPEC_LDS:
6278 case UNSPEC_LDS_A:
6279 case UNSPEC_LDSA:
6280 case UNSPEC_CHKACLR:
6281 case UNSPEC_CHKS:
6282 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6283 break;
6285 case UNSPEC_FR_RECIP_APPROX:
6286 case UNSPEC_SHRP:
6287 case UNSPEC_COPYSIGN:
6288 case UNSPEC_FR_RECIP_APPROX_RES:
6289 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6290 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6291 break;
6293 case UNSPEC_CMPXCHG_ACQ:
6294 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6295 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6296 break;
6298 default:
6299 gcc_unreachable ();
6301 break;
6303 case UNSPEC_VOLATILE:
6304 switch (XINT (x, 1))
6306 case UNSPECV_ALLOC:
6307 /* Alloc must always be the first instruction of a group.
6308 We force this by always returning true. */
6309 /* ??? We might get better scheduling if we explicitly check for
6310 input/local/output register dependencies, and modify the
6311 scheduler so that alloc is always reordered to the start of
6312 the current group. We could then eliminate all of the
6313 first_instruction code. */
6314 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6316 new_flags.is_write = 1;
6317 rws_access_regno (REG_AR_CFM, new_flags, pred);
6318 return 1;
6320 case UNSPECV_SET_BSP:
6321 need_barrier = 1;
6322 break;
6324 case UNSPECV_BLOCKAGE:
6325 case UNSPECV_INSN_GROUP_BARRIER:
6326 case UNSPECV_BREAK:
6327 case UNSPECV_PSAC_ALL:
6328 case UNSPECV_PSAC_NORMAL:
6329 return 0;
6331 default:
6332 gcc_unreachable ();
6334 break;
6336 case RETURN:
6337 new_flags.is_write = 0;
6338 need_barrier = rws_access_regno (REG_RP, flags, pred);
6339 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6341 new_flags.is_write = 1;
6342 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6343 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6344 break;
6346 default:
6347 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6348 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6349 switch (format_ptr[i])
6351 case '0': /* unused field */
6352 case 'i': /* integer */
6353 case 'n': /* note */
6354 case 'w': /* wide integer */
6355 case 's': /* pointer to string */
6356 case 'S': /* optional pointer to string */
6357 break;
6359 case 'e':
6360 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6361 need_barrier = 1;
6362 break;
6364 case 'E':
6365 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6366 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6367 need_barrier = 1;
6368 break;
6370 default:
6371 gcc_unreachable ();
6373 break;
6375 return need_barrier;
6378 /* Clear out the state for group_barrier_needed at the start of a
6379 sequence of insns. */
6381 static void
6382 init_insn_group_barriers (void)
6384 memset (rws_sum, 0, sizeof (rws_sum));
6385 first_instruction = 1;
6388 /* Given the current state, determine whether a group barrier (a stop bit) is
6389 necessary before INSN. Return nonzero if so. This modifies the state to
6390 include the effects of INSN as a side-effect. */
6392 static int
6393 group_barrier_needed (rtx insn)
6395 rtx pat;
6396 int need_barrier = 0;
6397 struct reg_flags flags;
6399 memset (&flags, 0, sizeof (flags));
6400 switch (GET_CODE (insn))
6402 case NOTE:
6403 case DEBUG_INSN:
6404 break;
6406 case BARRIER:
6407 /* A barrier doesn't imply an instruction group boundary. */
6408 break;
6410 case CODE_LABEL:
6411 memset (rws_insn, 0, sizeof (rws_insn));
6412 return 1;
6414 case CALL_INSN:
6415 flags.is_branch = 1;
6416 flags.is_sibcall = SIBLING_CALL_P (insn);
6417 memset (rws_insn, 0, sizeof (rws_insn));
6419 /* Don't bundle a call following another call. */
6420 if ((pat = prev_active_insn (insn))
6421 && GET_CODE (pat) == CALL_INSN)
6423 need_barrier = 1;
6424 break;
6427 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6428 break;
6430 case JUMP_INSN:
6431 if (!ia64_spec_check_p (insn))
6432 flags.is_branch = 1;
6434 /* Don't bundle a jump following a call. */
6435 if ((pat = prev_active_insn (insn))
6436 && GET_CODE (pat) == CALL_INSN)
6438 need_barrier = 1;
6439 break;
6441 /* FALLTHRU */
6443 case INSN:
6444 if (GET_CODE (PATTERN (insn)) == USE
6445 || GET_CODE (PATTERN (insn)) == CLOBBER)
6446 /* Don't care about USE and CLOBBER "insns"---those are used to
6447 indicate to the optimizer that it shouldn't get rid of
6448 certain operations. */
6449 break;
6451 pat = PATTERN (insn);
6453 /* Ug. Hack hacks hacked elsewhere. */
6454 switch (recog_memoized (insn))
6456 /* We play dependency tricks with the epilogue in order
6457 to get proper schedules. Undo this for dv analysis. */
6458 case CODE_FOR_epilogue_deallocate_stack:
6459 case CODE_FOR_prologue_allocate_stack:
6460 pat = XVECEXP (pat, 0, 0);
6461 break;
6463 /* The pattern we use for br.cloop confuses the code above.
6464 The second element of the vector is representative. */
6465 case CODE_FOR_doloop_end_internal:
6466 pat = XVECEXP (pat, 0, 1);
6467 break;
6469 /* Doesn't generate code. */
6470 case CODE_FOR_pred_rel_mutex:
6471 case CODE_FOR_prologue_use:
6472 return 0;
6474 default:
6475 break;
6478 memset (rws_insn, 0, sizeof (rws_insn));
6479 need_barrier = rtx_needs_barrier (pat, flags, 0);
6481 /* Check to see if the previous instruction was a volatile
6482 asm. */
6483 if (! need_barrier)
6484 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6486 break;
6488 default:
6489 gcc_unreachable ();
6492 if (first_instruction && INSN_P (insn)
6493 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6494 && GET_CODE (PATTERN (insn)) != USE
6495 && GET_CODE (PATTERN (insn)) != CLOBBER)
6497 need_barrier = 0;
6498 first_instruction = 0;
6501 return need_barrier;
6504 /* Like group_barrier_needed, but do not clobber the current state. */
6506 static int
6507 safe_group_barrier_needed (rtx insn)
6509 int saved_first_instruction;
6510 int t;
6512 saved_first_instruction = first_instruction;
6513 in_safe_group_barrier = 1;
6515 t = group_barrier_needed (insn);
6517 first_instruction = saved_first_instruction;
6518 in_safe_group_barrier = 0;
6520 return t;
6523 /* Scan the current function and insert stop bits as necessary to
6524 eliminate dependencies. This function assumes that a final
6525 instruction scheduling pass has been run which has already
6526 inserted most of the necessary stop bits. This function only
6527 inserts new ones at basic block boundaries, since these are
6528 invisible to the scheduler. */
6530 static void
6531 emit_insn_group_barriers (FILE *dump)
6533 rtx insn;
6534 rtx last_label = 0;
6535 int insns_since_last_label = 0;
6537 init_insn_group_barriers ();
6539 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6541 if (GET_CODE (insn) == CODE_LABEL)
6543 if (insns_since_last_label)
6544 last_label = insn;
6545 insns_since_last_label = 0;
6547 else if (GET_CODE (insn) == NOTE
6548 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6550 if (insns_since_last_label)
6551 last_label = insn;
6552 insns_since_last_label = 0;
6554 else if (GET_CODE (insn) == INSN
6555 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6556 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6558 init_insn_group_barriers ();
6559 last_label = 0;
6561 else if (NONDEBUG_INSN_P (insn))
6563 insns_since_last_label = 1;
6565 if (group_barrier_needed (insn))
6567 if (last_label)
6569 if (dump)
6570 fprintf (dump, "Emitting stop before label %d\n",
6571 INSN_UID (last_label));
6572 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6573 insn = last_label;
6575 init_insn_group_barriers ();
6576 last_label = 0;
6583 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6584 This function has to emit all necessary group barriers. */
6586 static void
6587 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6589 rtx insn;
6591 init_insn_group_barriers ();
6593 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6595 if (GET_CODE (insn) == BARRIER)
6597 rtx last = prev_active_insn (insn);
6599 if (! last)
6600 continue;
6601 if (GET_CODE (last) == JUMP_INSN
6602 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6603 last = prev_active_insn (last);
6604 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6605 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6607 init_insn_group_barriers ();
6609 else if (NONDEBUG_INSN_P (insn))
6611 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6612 init_insn_group_barriers ();
6613 else if (group_barrier_needed (insn))
6615 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6616 init_insn_group_barriers ();
6617 group_barrier_needed (insn);
6625 /* Instruction scheduling support. */
6627 #define NR_BUNDLES 10
6629 /* A list of names of all available bundles. */
6631 static const char *bundle_name [NR_BUNDLES] =
6633 ".mii",
6634 ".mmi",
6635 ".mfi",
6636 ".mmf",
6637 #if NR_BUNDLES == 10
6638 ".bbb",
6639 ".mbb",
6640 #endif
6641 ".mib",
6642 ".mmb",
6643 ".mfb",
6644 ".mlx"
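/* Informal reading of the template names above (a summary, not from the
   original source): each letter names a slot type -- 'm' memory, 'i' integer,
   'f' floating point, 'b' branch -- and "lx" is the two-slot long-immediate
   form used by movl.  ".mfi" therefore carries one memory op, one FP op and
   one integer op; the ".bbb"/".mbb" templates are only present when
   NR_BUNDLES == 10.  */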
6647 /* Nonzero if we should insert stop bits into the schedule. */
6649 int ia64_final_schedule = 0;
6651 /* Codes of the corresponding queried units: */
6653 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6654 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6656 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6657 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6659 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6661 /* The following variable value is an insn group barrier. */
6663 static rtx dfa_stop_insn;
6665 /* The following variable value is the last issued insn. */
6667 static rtx last_scheduled_insn;
6669 /* The following variable value is a pointer to a DFA state used as
6670 a temporary variable. */
6672 static state_t temp_dfa_state = NULL;
6674 /* The following variable value is the DFA state after issuing the last
6675 insn. */
6677 static state_t prev_cycle_state = NULL;
6679 /* The following array element values are TRUE if the corresponding
6680 insn requires stop bits to be added before it. */
6682 static char *stops_p = NULL;
6684 /* The following variable is used to set up the array mentioned above. */
6686 static int stop_before_p = 0;
6688 /* The following variable value is the length of the arrays `clocks' and
6689 `add_cycles'. */
6691 static int clocks_length;
6693 /* The following variable value is the number of data speculations in progress. */
6694 static int pending_data_specs = 0;
6696 /* Number of memory references on the current and three future processor cycles. */
6697 static char mem_ops_in_group[4];
6699 /* Number of the current processor cycle (from the scheduler's point of view). */
6700 static int current_cycle;
6702 static rtx ia64_single_set (rtx);
6703 static void ia64_emit_insn_before (rtx, rtx);
6705 /* Map a bundle number to its pseudo-op. */
6707 const char *
6708 get_bundle_name (int b)
6710 return bundle_name[b];
6714 /* Return the maximum number of instructions a cpu can issue. */
6716 static int
6717 ia64_issue_rate (void)
6719 return 6;
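/* A note for the reader (an interpretation, not from the original source):
   6 corresponds to the two three-slot bundles an Itanium core can issue per
   clock; the per-slot constraints are modeled by the DFA descriptions
   referenced elsewhere in this file.  */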
6722 /* Helper function - like single_set, but look inside COND_EXEC. */
6724 static rtx
6725 ia64_single_set (rtx insn)
6727 rtx x = PATTERN (insn), ret;
6728 if (GET_CODE (x) == COND_EXEC)
6729 x = COND_EXEC_CODE (x);
6730 if (GET_CODE (x) == SET)
6731 return x;
6733 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6734 Although they are not a classical single set, the second set is there just
6735 to keep the insn from moving past FP-relative stack accesses. */
6736 switch (recog_memoized (insn))
6738 case CODE_FOR_prologue_allocate_stack:
6739 case CODE_FOR_epilogue_deallocate_stack:
6740 ret = XVECEXP (x, 0, 0);
6741 break;
6743 default:
6744 ret = single_set_2 (insn, x);
6745 break;
6748 return ret;
6751 /* Adjust the cost of a scheduling dependency.
6752 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
6753 COST is the current cost and DW is the dependency weakness. */
6754 static int
6755 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6757 enum reg_note dep_type = (enum reg_note) dep_type1;
6758 enum attr_itanium_class dep_class;
6759 enum attr_itanium_class insn_class;
6761 insn_class = ia64_safe_itanium_class (insn);
6762 dep_class = ia64_safe_itanium_class (dep_insn);
6764 /* Treat true memory dependencies separately. Ignore apparent true
6765 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6766 if (dep_type == REG_DEP_TRUE
6767 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6768 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6769 return 0;
6771 if (dw == MIN_DEP_WEAK)
6772 /* Store and load are likely to alias, use higher cost to avoid stall. */
6773 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6774 else if (dw > MIN_DEP_WEAK)
6776 /* Store and load are less likely to alias. */
6777 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6778 /* Assume there will be no cache conflict for floating-point data.
6779 For integer data, L1 conflict penalty is huge (17 cycles), so we
6780 never assume it will not cause a conflict. */
6781 return 0;
6782 else
6783 return cost;
6786 if (dep_type != REG_DEP_OUTPUT)
6787 return cost;
6789 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6790 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6791 return 0;
6793 return cost;
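/* Worked examples of the rules above (illustrative only): a true
   store -> load dependence whose accesses are likely to alias (DW ==
   MIN_DEP_WEAK) costs PARAM_SCHED_MEM_TRUE_DEP_COST, while an apparent true
   dependence from a store to a branch or call (through the MEM inside a
   SYMBOL_REF) costs 0.  */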
6796 /* Like emit_insn_before, but skip cycle_display notes.
6797 ??? When cycle display notes are implemented, update this. */
6799 static void
6800 ia64_emit_insn_before (rtx insn, rtx before)
6802 emit_insn_before (insn, before);
6805 /* The following function marks insns that produce addresses for load
6806 and store insns. Such insns will be placed into M slots because this
6807 decreases the latency on Itanium 1 (see function
6808 `ia64_produce_address_p' and the DFA descriptions). */
6810 static void
6811 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6813 rtx insn, next, next_tail;
6815 /* Before reload, which_alternative is not set, which means that
6816 ia64_safe_itanium_class will produce wrong results for (at least)
6817 move instructions. */
6818 if (!reload_completed)
6819 return;
6821 next_tail = NEXT_INSN (tail);
6822 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6823 if (INSN_P (insn))
6824 insn->call = 0;
6825 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6826 if (INSN_P (insn)
6827 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6829 sd_iterator_def sd_it;
6830 dep_t dep;
6831 bool has_mem_op_consumer_p = false;
6833 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6835 enum attr_itanium_class c;
6837 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6838 continue;
6840 next = DEP_CON (dep);
6841 c = ia64_safe_itanium_class (next);
6842 if ((c == ITANIUM_CLASS_ST
6843 || c == ITANIUM_CLASS_STF)
6844 && ia64_st_address_bypass_p (insn, next))
6846 has_mem_op_consumer_p = true;
6847 break;
6849 else if ((c == ITANIUM_CLASS_LD
6850 || c == ITANIUM_CLASS_FLD
6851 || c == ITANIUM_CLASS_FLDP)
6852 && ia64_ld_address_bypass_p (insn, next))
6854 has_mem_op_consumer_p = true;
6855 break;
6859 insn->call = has_mem_op_consumer_p;
6863 /* We're beginning a new block. Initialize data structures as necessary. */
6865 static void
6866 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6867 int sched_verbose ATTRIBUTE_UNUSED,
6868 int max_ready ATTRIBUTE_UNUSED)
6870 #ifdef ENABLE_CHECKING
6871 rtx insn;
6873 if (!sel_sched_p () && reload_completed)
6874 for (insn = NEXT_INSN (current_sched_info->prev_head);
6875 insn != current_sched_info->next_tail;
6876 insn = NEXT_INSN (insn))
6877 gcc_assert (!SCHED_GROUP_P (insn));
6878 #endif
6879 last_scheduled_insn = NULL_RTX;
6880 init_insn_group_barriers ();
6882 current_cycle = 0;
6883 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
6886 /* We're beginning a scheduling pass. Check assertion. */
6888 static void
6889 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6890 int sched_verbose ATTRIBUTE_UNUSED,
6891 int max_ready ATTRIBUTE_UNUSED)
6893 gcc_assert (pending_data_specs == 0);
6896 /* Scheduling pass is now finished. Free/reset static variable. */
6897 static void
6898 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6899 int sched_verbose ATTRIBUTE_UNUSED)
6901 gcc_assert (pending_data_specs == 0);
6904 /* Return TRUE if INSN is a load (either normal or speculative, but not a
6905 speculation check), FALSE otherwise. */
6906 static bool
6907 is_load_p (rtx insn)
6909 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
6911 return
6912 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
6913 && get_attr_check_load (insn) == CHECK_LOAD_NO);
6916 /* If INSN is a memory reference, record it in the MEM_OPS_IN_GROUP global array
6917 (taking into account the 3-cycle cache reference postponing for stores: Intel
6918 Itanium 2 Reference Manual for Software Development and Optimization,
6919 6.7.3.1). */
6920 static void
6921 record_memory_reference (rtx insn)
6923 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
6925 switch (insn_class) {
6926 case ITANIUM_CLASS_FLD:
6927 case ITANIUM_CLASS_LD:
6928 mem_ops_in_group[current_cycle % 4]++;
6929 break;
6930 case ITANIUM_CLASS_STF:
6931 case ITANIUM_CLASS_ST:
6932 mem_ops_in_group[(current_cycle + 3) % 4]++;
6933 break;
6934 default:;
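/* Illustrative arithmetic (not from the original source): a load issued on
   cycle N is counted in mem_ops_in_group[N % 4], while a store issued on
   cycle N is charged to mem_ops_in_group[(N + 3) % 4], i.e. three cycles
   later, matching the delayed cache access of stores described above.  */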
6938 /* We are about to begin issuing insns for this clock cycle.
6939 Override the default sort algorithm to better slot instructions. */
6941 static int
6942 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6943 int *pn_ready, int clock_var,
6944 int reorder_type)
6946 int n_asms;
6947 int n_ready = *pn_ready;
6948 rtx *e_ready = ready + n_ready;
6949 rtx *insnp;
6951 if (sched_verbose)
6952 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6954 if (reorder_type == 0)
6956 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6957 n_asms = 0;
6958 for (insnp = ready; insnp < e_ready; insnp++)
6959 if (insnp < e_ready)
6961 rtx insn = *insnp;
6962 enum attr_type t = ia64_safe_type (insn);
6963 if (t == TYPE_UNKNOWN)
6965 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6966 || asm_noperands (PATTERN (insn)) >= 0)
6968 rtx lowest = ready[n_asms];
6969 ready[n_asms] = insn;
6970 *insnp = lowest;
6971 n_asms++;
6973 else
6975 rtx highest = ready[n_ready - 1];
6976 ready[n_ready - 1] = insn;
6977 *insnp = highest;
6978 return 1;
6983 if (n_asms < n_ready)
6985 /* Some normal insns to process. Skip the asms. */
6986 ready += n_asms;
6987 n_ready -= n_asms;
6989 else if (n_ready > 0)
6990 return 1;
6993 if (ia64_final_schedule)
6995 int deleted = 0;
6996 int nr_need_stop = 0;
6998 for (insnp = ready; insnp < e_ready; insnp++)
6999 if (safe_group_barrier_needed (*insnp))
7000 nr_need_stop++;
7002 if (reorder_type == 1 && n_ready == nr_need_stop)
7003 return 0;
7004 if (reorder_type == 0)
7005 return 1;
7006 insnp = e_ready;
7007 /* Move down everything that needs a stop bit, preserving
7008 relative order. */
7009 while (insnp-- > ready + deleted)
7010 while (insnp >= ready + deleted)
7012 rtx insn = *insnp;
7013 if (! safe_group_barrier_needed (insn))
7014 break;
7015 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7016 *ready = insn;
7017 deleted++;
7019 n_ready -= deleted;
7020 ready += deleted;
7023 current_cycle = clock_var;
7024 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7026 int moved = 0;
7028 insnp = e_ready;
7029 /* Move down loads/stores, preserving relative order. */
7030 while (insnp-- > ready + moved)
7031 while (insnp >= ready + moved)
7033 rtx insn = *insnp;
7034 if (! is_load_p (insn))
7035 break;
7036 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7037 *ready = insn;
7038 moved++;
7040 n_ready -= moved;
7041 ready += moved;
7044 return 1;
7047 /* We are about to begin issuing insns for this clock cycle. Override
7048 the default sort algorithm to better slot instructions. */
7050 static int
7051 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7052 int clock_var)
7054 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7055 pn_ready, clock_var, 0);
7058 /* Like ia64_sched_reorder, but called after issuing each insn.
7059 Override the default sort algorithm to better slot instructions. */
7061 static int
7062 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7063 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7064 int *pn_ready, int clock_var)
7066 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7067 clock_var, 1);
7070 /* We are about to issue INSN. Return the number of insns left on the
7071 ready queue that can be issued this cycle. */
7073 static int
7074 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7075 int sched_verbose ATTRIBUTE_UNUSED,
7076 rtx insn ATTRIBUTE_UNUSED,
7077 int can_issue_more ATTRIBUTE_UNUSED)
7079 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7080 /* Modulo scheduling does not extend h_i_d when emitting
7081 new instructions. Don't use h_i_d if we don't have to. */
7083 if (DONE_SPEC (insn) & BEGIN_DATA)
7084 pending_data_specs++;
7085 if (CHECK_SPEC (insn) & BEGIN_DATA)
7086 pending_data_specs--;
7089 if (DEBUG_INSN_P (insn))
7090 return 1;
7092 last_scheduled_insn = insn;
7093 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7094 if (reload_completed)
7096 int needed = group_barrier_needed (insn);
7098 gcc_assert (!needed);
7099 if (GET_CODE (insn) == CALL_INSN)
7100 init_insn_group_barriers ();
7101 stops_p [INSN_UID (insn)] = stop_before_p;
7102 stop_before_p = 0;
7104 record_memory_reference (insn);
7106 return 1;
7109 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7110 can be chosen. */
7112 static int
7113 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7115 gcc_assert (insn && INSN_P (insn));
7116 return ((!reload_completed
7117 || !safe_group_barrier_needed (insn))
7118 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7119 && (!mflag_sched_mem_insns_hard_limit
7120 || !is_load_p (insn)
7121 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7124 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7125 can be chosen. */
7127 static bool
7128 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7130 gcc_assert (insn && INSN_P (insn));
7131 /* The size of the ALAT is 32. Since we perform conservative data speculation,
7132 we keep the ALAT half-empty. */
7133 return (pending_data_specs < 16
7134 || !(TODO_SPEC (insn) & BEGIN_DATA));
7137 /* The following variable value is a pseudo-insn used by the DFA insn
7138 scheduler to change the DFA state when the simulated clock is
7139 increased. */
7141 static rtx dfa_pre_cycle_insn;
7143 /* Returns 1 when a meaningful insn was scheduled between the last group
7144 barrier and LAST. */
7145 static int
7146 scheduled_good_insn (rtx last)
7148 if (last && recog_memoized (last) >= 0)
7149 return 1;
7151 for ( ;
7152 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7153 && !stops_p[INSN_UID (last)];
7154 last = PREV_INSN (last))
7155 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7156 the ebb we're scheduling. */
7157 if (INSN_P (last) && recog_memoized (last) >= 0)
7158 return 1;
7160 return 0;
7163 /* We are about to begin issuing INSN. Return nonzero if we cannot
7164 issue it on the given cycle CLOCK, and return zero if we should not sort
7165 the ready queue on the next clock start. */
7167 static int
7168 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7169 int clock, int *sort_p)
7171 gcc_assert (insn && INSN_P (insn));
7173 if (DEBUG_INSN_P (insn))
7174 return 0;
7176 /* When a group barrier is needed for insn, last_scheduled_insn
7177 should be set. */
7178 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7179 || last_scheduled_insn);
7181 if ((reload_completed
7182 && (safe_group_barrier_needed (insn)
7183 || (mflag_sched_stop_bits_after_every_cycle
7184 && last_clock != clock
7185 && last_scheduled_insn
7186 && scheduled_good_insn (last_scheduled_insn))))
7187 || (last_scheduled_insn
7188 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7189 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7190 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7192 init_insn_group_barriers ();
7194 if (verbose && dump)
7195 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7196 last_clock == clock ? " + cycle advance" : "");
7198 stop_before_p = 1;
7199 current_cycle = clock;
7200 mem_ops_in_group[current_cycle % 4] = 0;
7202 if (last_clock == clock)
7204 state_transition (curr_state, dfa_stop_insn);
7205 if (TARGET_EARLY_STOP_BITS)
7206 *sort_p = (last_scheduled_insn == NULL_RTX
7207 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7208 else
7209 *sort_p = 0;
7210 return 1;
7213 if (last_scheduled_insn)
7215 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7216 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7217 state_reset (curr_state);
7218 else
7220 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7221 state_transition (curr_state, dfa_stop_insn);
7222 state_transition (curr_state, dfa_pre_cycle_insn);
7223 state_transition (curr_state, NULL);
7227 return 0;
7230 /* Implement targetm.sched.h_i_d_extended hook.
7231 Extend internal data structures. */
7232 static void
7233 ia64_h_i_d_extended (void)
7235 if (stops_p != NULL)
7237 int new_clocks_length = get_max_uid () * 3 / 2;
7238 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7239 clocks_length = new_clocks_length;
7244 /* This structure describes the data used by the backend to guide scheduling.
7245 When the current scheduling point is switched, this data should be saved
7246 and restored later, if the scheduler returns to this point. */
7247 struct _ia64_sched_context
7249 state_t prev_cycle_state;
7250 rtx last_scheduled_insn;
7251 struct reg_write_state rws_sum[NUM_REGS];
7252 struct reg_write_state rws_insn[NUM_REGS];
7253 int first_instruction;
7254 int pending_data_specs;
7255 int current_cycle;
7256 char mem_ops_in_group[4];
7258 typedef struct _ia64_sched_context *ia64_sched_context_t;
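/* A rough sketch of the lifecycle these hooks implement (an assumption about
   how the scheduler drives them through the targetm.sched vector, not code
   from this file):

       void *sc = ia64_alloc_sched_context ();
       ia64_init_sched_context (sc, true);   <- or false to copy the globals
       ia64_set_sched_context (sc);          <- resume from the saved point
       ia64_clear_sched_context (sc);
       ia64_free_sched_context (sc);
*/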
7260 /* Allocates a scheduling context. */
7261 static void *
7262 ia64_alloc_sched_context (void)
7264 return xmalloc (sizeof (struct _ia64_sched_context));
7267 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7268 the global context otherwise. */
7269 static void
7270 ia64_init_sched_context (void *_sc, bool clean_p)
7272 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7274 sc->prev_cycle_state = xmalloc (dfa_state_size);
7275 if (clean_p)
7277 state_reset (sc->prev_cycle_state);
7278 sc->last_scheduled_insn = NULL_RTX;
7279 memset (sc->rws_sum, 0, sizeof (rws_sum));
7280 memset (sc->rws_insn, 0, sizeof (rws_insn));
7281 sc->first_instruction = 1;
7282 sc->pending_data_specs = 0;
7283 sc->current_cycle = 0;
7284 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7286 else
7288 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7289 sc->last_scheduled_insn = last_scheduled_insn;
7290 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7291 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7292 sc->first_instruction = first_instruction;
7293 sc->pending_data_specs = pending_data_specs;
7294 sc->current_cycle = current_cycle;
7295 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7299 /* Sets the global scheduling context to the one pointed to by _SC. */
7300 static void
7301 ia64_set_sched_context (void *_sc)
7303 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7305 gcc_assert (sc != NULL);
7307 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7308 last_scheduled_insn = sc->last_scheduled_insn;
7309 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7310 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7311 first_instruction = sc->first_instruction;
7312 pending_data_specs = sc->pending_data_specs;
7313 current_cycle = sc->current_cycle;
7314 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7317 /* Clears the data in the _SC scheduling context. */
7318 static void
7319 ia64_clear_sched_context (void *_sc)
7321 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7323 free (sc->prev_cycle_state);
7324 sc->prev_cycle_state = NULL;
7327 /* Frees the _SC scheduling context. */
7328 static void
7329 ia64_free_sched_context (void *_sc)
7331 gcc_assert (_sc != NULL);
7333 free (_sc);
7336 typedef rtx (* gen_func_t) (rtx, rtx);
7338 /* Return a function that will generate a load of mode MODE_NO
7339 with speculation types TS. */
7340 static gen_func_t
7341 get_spec_load_gen_function (ds_t ts, int mode_no)
7343 static gen_func_t gen_ld_[] = {
7344 gen_movbi,
7345 gen_movqi_internal,
7346 gen_movhi_internal,
7347 gen_movsi_internal,
7348 gen_movdi_internal,
7349 gen_movsf_internal,
7350 gen_movdf_internal,
7351 gen_movxf_internal,
7352 gen_movti_internal,
7353 gen_zero_extendqidi2,
7354 gen_zero_extendhidi2,
7355 gen_zero_extendsidi2,
7358 static gen_func_t gen_ld_a[] = {
7359 gen_movbi_advanced,
7360 gen_movqi_advanced,
7361 gen_movhi_advanced,
7362 gen_movsi_advanced,
7363 gen_movdi_advanced,
7364 gen_movsf_advanced,
7365 gen_movdf_advanced,
7366 gen_movxf_advanced,
7367 gen_movti_advanced,
7368 gen_zero_extendqidi2_advanced,
7369 gen_zero_extendhidi2_advanced,
7370 gen_zero_extendsidi2_advanced,
7372 static gen_func_t gen_ld_s[] = {
7373 gen_movbi_speculative,
7374 gen_movqi_speculative,
7375 gen_movhi_speculative,
7376 gen_movsi_speculative,
7377 gen_movdi_speculative,
7378 gen_movsf_speculative,
7379 gen_movdf_speculative,
7380 gen_movxf_speculative,
7381 gen_movti_speculative,
7382 gen_zero_extendqidi2_speculative,
7383 gen_zero_extendhidi2_speculative,
7384 gen_zero_extendsidi2_speculative,
7386 static gen_func_t gen_ld_sa[] = {
7387 gen_movbi_speculative_advanced,
7388 gen_movqi_speculative_advanced,
7389 gen_movhi_speculative_advanced,
7390 gen_movsi_speculative_advanced,
7391 gen_movdi_speculative_advanced,
7392 gen_movsf_speculative_advanced,
7393 gen_movdf_speculative_advanced,
7394 gen_movxf_speculative_advanced,
7395 gen_movti_speculative_advanced,
7396 gen_zero_extendqidi2_speculative_advanced,
7397 gen_zero_extendhidi2_speculative_advanced,
7398 gen_zero_extendsidi2_speculative_advanced,
7400 static gen_func_t gen_ld_s_a[] = {
7401 gen_movbi_speculative_a,
7402 gen_movqi_speculative_a,
7403 gen_movhi_speculative_a,
7404 gen_movsi_speculative_a,
7405 gen_movdi_speculative_a,
7406 gen_movsf_speculative_a,
7407 gen_movdf_speculative_a,
7408 gen_movxf_speculative_a,
7409 gen_movti_speculative_a,
7410 gen_zero_extendqidi2_speculative_a,
7411 gen_zero_extendhidi2_speculative_a,
7412 gen_zero_extendsidi2_speculative_a,
7415 gen_func_t *gen_ld;
7417 if (ts & BEGIN_DATA)
7419 if (ts & BEGIN_CONTROL)
7420 gen_ld = gen_ld_sa;
7421 else
7422 gen_ld = gen_ld_a;
7424 else if (ts & BEGIN_CONTROL)
7426 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7427 || ia64_needs_block_p (ts))
7428 gen_ld = gen_ld_s;
7429 else
7430 gen_ld = gen_ld_s_a;
7432 else if (ts == 0)
7433 gen_ld = gen_ld_;
7434 else
7435 gcc_unreachable ();
7437 return gen_ld[mode_no];
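/* Example of the selection above (illustrative, not from the original
   source): TS == BEGIN_DATA with MODE_NO == 4 (DImode) picks gen_ld_a[4],
   i.e. gen_movdi_advanced, which should emit an advanced load (ld8.a);
   adding BEGIN_CONTROL selects gen_ld_sa[4], gen_movdi_speculative_advanced
   (ld8.sa).  */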
7440 /* Constants that help map 'enum machine_mode' to int. */
7441 enum SPEC_MODES
7443 SPEC_MODE_INVALID = -1,
7444 SPEC_MODE_FIRST = 0,
7445 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7446 SPEC_MODE_FOR_EXTEND_LAST = 3,
7447 SPEC_MODE_LAST = 8
7450 enum
7452 /* Offset to reach ZERO_EXTEND patterns. */
7453 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7456 /* Return the index of MODE. */
7457 static int
7458 ia64_mode_to_int (enum machine_mode mode)
7460 switch (mode)
7462 case BImode: return 0; /* SPEC_MODE_FIRST */
7463 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7464 case HImode: return 2;
7465 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7466 case DImode: return 4;
7467 case SFmode: return 5;
7468 case DFmode: return 6;
7469 case XFmode: return 7;
7470 case TImode:
7471 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7472 mentioned in itanium[12].md. Predicate fp_register_operand also
7473 needs to be defined. Bottom line: better disable for now. */
7474 return SPEC_MODE_INVALID;
7475 default: return SPEC_MODE_INVALID;
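/* Worked example (illustrative): a QImode load maps to index 1; if the insn
   zero-extends the result to DImode, get_mode_no_for_insn below adds
   SPEC_GEN_EXTEND_OFFSET (8), giving index 9, which selects the
   gen_zero_extendqidi2_* entries of the tables above.  */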
7479 /* Provide information about speculation capabilities. */
7480 static void
7481 ia64_set_sched_flags (spec_info_t spec_info)
7483 unsigned int *flags = &(current_sched_info->flags);
7485 if (*flags & SCHED_RGN
7486 || *flags & SCHED_EBB
7487 || *flags & SEL_SCHED)
7489 int mask = 0;
7491 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7492 || (mflag_sched_ar_data_spec && reload_completed))
7494 mask |= BEGIN_DATA;
7496 if (!sel_sched_p ()
7497 && ((mflag_sched_br_in_data_spec && !reload_completed)
7498 || (mflag_sched_ar_in_data_spec && reload_completed)))
7499 mask |= BE_IN_DATA;
7502 if (mflag_sched_control_spec
7503 && (!sel_sched_p ()
7504 || reload_completed))
7506 mask |= BEGIN_CONTROL;
7508 if (!sel_sched_p () && mflag_sched_in_control_spec)
7509 mask |= BE_IN_CONTROL;
7512 spec_info->mask = mask;
7514 if (mask)
7516 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7518 if (mask & BE_IN_SPEC)
7519 *flags |= NEW_BBS;
7521 spec_info->flags = 0;
7523 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7524 spec_info->flags |= PREFER_NON_DATA_SPEC;
7526 if (mask & CONTROL_SPEC)
7528 if (mflag_sched_prefer_non_control_spec_insns)
7529 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7531 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7532 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7535 if (sched_verbose >= 1)
7536 spec_info->dump = sched_dump;
7537 else
7538 spec_info->dump = 0;
7540 if (mflag_sched_count_spec_in_critical_path)
7541 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7544 else
7545 spec_info->mask = 0;
7548 /* If INSN is an appropriate load, return the index of its mode.
7549 Return -1 otherwise. */
7550 static int
7551 get_mode_no_for_insn (rtx insn)
7553 rtx reg, mem, mode_rtx;
7554 int mode_no;
7555 bool extend_p;
7557 extract_insn_cached (insn);
7559 /* We use WHICH_ALTERNATIVE only after reload. This will
7560 guarantee that reload won't touch a speculative insn. */
7562 if (recog_data.n_operands != 2)
7563 return -1;
7565 reg = recog_data.operand[0];
7566 mem = recog_data.operand[1];
7568 /* We should use MEM's mode since REG's mode in the presence of
7569 ZERO_EXTEND will always be DImode. */
7570 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7571 /* Process non-speculative ld. */
7573 if (!reload_completed)
7575 /* Do not speculate into regs like ar.lc. */
7576 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7577 return -1;
7579 if (!MEM_P (mem))
7580 return -1;
7583 rtx mem_reg = XEXP (mem, 0);
7585 if (!REG_P (mem_reg))
7586 return -1;
7589 mode_rtx = mem;
7591 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7593 gcc_assert (REG_P (reg) && MEM_P (mem));
7594 mode_rtx = mem;
7596 else
7597 return -1;
7599 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7600 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7601 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7602 /* Process speculative ld or ld.c. */
7604 gcc_assert (REG_P (reg) && MEM_P (mem));
7605 mode_rtx = mem;
7607 else
7609 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7611 if (attr_class == ITANIUM_CLASS_CHK_A
7612 || attr_class == ITANIUM_CLASS_CHK_S_I
7613 || attr_class == ITANIUM_CLASS_CHK_S_F)
7614 /* Process chk. */
7615 mode_rtx = reg;
7616 else
7617 return -1;
7620 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7622 if (mode_no == SPEC_MODE_INVALID)
7623 return -1;
7625 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7627 if (extend_p)
7629 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7630 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7631 return -1;
7633 mode_no += SPEC_GEN_EXTEND_OFFSET;
7636 return mode_no;
7639 /* If X is an unspec part of a speculative load, return its code.
7640 Return -1 otherwise. */
7641 static int
7642 get_spec_unspec_code (const_rtx x)
7644 if (GET_CODE (x) != UNSPEC)
7645 return -1;
7648 int code;
7650 code = XINT (x, 1);
7652 switch (code)
7654 case UNSPEC_LDA:
7655 case UNSPEC_LDS:
7656 case UNSPEC_LDS_A:
7657 case UNSPEC_LDSA:
7658 return code;
7660 default:
7661 return -1;
7666 /* Implement skip_rtx_p hook. */
7667 static bool
7668 ia64_skip_rtx_p (const_rtx x)
7670 return get_spec_unspec_code (x) != -1;
7673 /* If INSN is a speculative load, return its UNSPEC code.
7674 Return -1 otherwise. */
7675 static int
7676 get_insn_spec_code (const_rtx insn)
7678 rtx pat, reg, mem;
7680 pat = PATTERN (insn);
7682 if (GET_CODE (pat) == COND_EXEC)
7683 pat = COND_EXEC_CODE (pat);
7685 if (GET_CODE (pat) != SET)
7686 return -1;
7688 reg = SET_DEST (pat);
7689 if (!REG_P (reg))
7690 return -1;
7692 mem = SET_SRC (pat);
7693 if (GET_CODE (mem) == ZERO_EXTEND)
7694 mem = XEXP (mem, 0);
7696 return get_spec_unspec_code (mem);
7699 /* If INSN is a speculative load, return a ds with the speculation types.
7700 Otherwise [if INSN is a normal instruction] return 0. */
7701 static ds_t
7702 ia64_get_insn_spec_ds (rtx insn)
7704 int code = get_insn_spec_code (insn);
7706 switch (code)
7708 case UNSPEC_LDA:
7709 return BEGIN_DATA;
7711 case UNSPEC_LDS:
7712 case UNSPEC_LDS_A:
7713 return BEGIN_CONTROL;
7715 case UNSPEC_LDSA:
7716 return BEGIN_DATA | BEGIN_CONTROL;
7718 default:
7719 return 0;
7723 /* If INSN is a speculative load, return a ds with the speculation types that
7724 will be checked.
7725 Otherwise [if INSN is a normal instruction] return 0. */
7726 static ds_t
7727 ia64_get_insn_checked_ds (rtx insn)
7729 int code = get_insn_spec_code (insn);
7731 switch (code)
7733 case UNSPEC_LDA:
7734 return BEGIN_DATA | BEGIN_CONTROL;
7736 case UNSPEC_LDS:
7737 return BEGIN_CONTROL;
7739 case UNSPEC_LDS_A:
7740 case UNSPEC_LDSA:
7741 return BEGIN_DATA | BEGIN_CONTROL;
7743 default:
7744 return 0;
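/* Informal summary of the mapping implemented by the two functions above:
   UNSPEC_LDA marks an advanced load (ld.a, data speculation), UNSPEC_LDS a
   control-speculative load (ld.s), UNSPEC_LDSA a combined ld.sa, and
   UNSPEC_LDS_A a control-speculative load whose check nevertheless covers
   both kinds of speculation; the "checked" variant above reports which
   speculation kinds the corresponding chk.a/chk.s or ld.c will verify.  */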
7748 /* Return the speculative pattern for INSN with speculation types TS and
7749 machine mode index MODE_NO, preserving any COND_EXEC wrapper of the
7750 original pattern. */
7752 static rtx
7753 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7755 rtx pat, new_pat;
7756 gen_func_t gen_load;
7758 gen_load = get_spec_load_gen_function (ts, mode_no);
7760 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7761 copy_rtx (recog_data.operand[1]));
7763 pat = PATTERN (insn);
7764 if (GET_CODE (pat) == COND_EXEC)
7765 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7766 new_pat);
7768 return new_pat;
7771 static bool
7772 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7773 ds_t ds ATTRIBUTE_UNUSED)
7775 return false;
7778 /* Implement targetm.sched.speculate_insn hook.
7779 Check whether INSN can be made TS-speculative.
7780 If it cannot, return -1.
7781 If it can, generate the speculative pattern in NEW_PAT and return 1.
7782 If the current pattern of INSN already provides TS speculation,
7783 return 0. */
7784 static int
7785 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7787 int mode_no;
7788 int res;
7790 gcc_assert (!(ts & ~SPECULATIVE));
7792 if (ia64_spec_check_p (insn))
7793 return -1;
7795 if ((ts & BE_IN_SPEC)
7796 && !insn_can_be_in_speculative_p (insn, ts))
7797 return -1;
7799 mode_no = get_mode_no_for_insn (insn);
7801 if (mode_no != SPEC_MODE_INVALID)
7803 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7804 res = 0;
7805 else
7807 res = 1;
7808 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7811 else
7812 res = -1;
7814 return res;
7817 /* Return a function that will generate a check for speculation TS with
7818 mode MODE_NO.
7819 If a simple check is needed, pass true for SIMPLE_CHECK_P.
7820 If a clearing check is needed, pass true for CLEARING_CHECK_P. */
7821 static gen_func_t
7822 get_spec_check_gen_function (ds_t ts, int mode_no,
7823 bool simple_check_p, bool clearing_check_p)
7825 static gen_func_t gen_ld_c_clr[] = {
7826 gen_movbi_clr,
7827 gen_movqi_clr,
7828 gen_movhi_clr,
7829 gen_movsi_clr,
7830 gen_movdi_clr,
7831 gen_movsf_clr,
7832 gen_movdf_clr,
7833 gen_movxf_clr,
7834 gen_movti_clr,
7835 gen_zero_extendqidi2_clr,
7836 gen_zero_extendhidi2_clr,
7837 gen_zero_extendsidi2_clr,
7839 static gen_func_t gen_ld_c_nc[] = {
7840 gen_movbi_nc,
7841 gen_movqi_nc,
7842 gen_movhi_nc,
7843 gen_movsi_nc,
7844 gen_movdi_nc,
7845 gen_movsf_nc,
7846 gen_movdf_nc,
7847 gen_movxf_nc,
7848 gen_movti_nc,
7849 gen_zero_extendqidi2_nc,
7850 gen_zero_extendhidi2_nc,
7851 gen_zero_extendsidi2_nc,
7853 static gen_func_t gen_chk_a_clr[] = {
7854 gen_advanced_load_check_clr_bi,
7855 gen_advanced_load_check_clr_qi,
7856 gen_advanced_load_check_clr_hi,
7857 gen_advanced_load_check_clr_si,
7858 gen_advanced_load_check_clr_di,
7859 gen_advanced_load_check_clr_sf,
7860 gen_advanced_load_check_clr_df,
7861 gen_advanced_load_check_clr_xf,
7862 gen_advanced_load_check_clr_ti,
7863 gen_advanced_load_check_clr_di,
7864 gen_advanced_load_check_clr_di,
7865 gen_advanced_load_check_clr_di,
7867 static gen_func_t gen_chk_a_nc[] = {
7868 gen_advanced_load_check_nc_bi,
7869 gen_advanced_load_check_nc_qi,
7870 gen_advanced_load_check_nc_hi,
7871 gen_advanced_load_check_nc_si,
7872 gen_advanced_load_check_nc_di,
7873 gen_advanced_load_check_nc_sf,
7874 gen_advanced_load_check_nc_df,
7875 gen_advanced_load_check_nc_xf,
7876 gen_advanced_load_check_nc_ti,
7877 gen_advanced_load_check_nc_di,
7878 gen_advanced_load_check_nc_di,
7879 gen_advanced_load_check_nc_di,
7881 static gen_func_t gen_chk_s[] = {
7882 gen_speculation_check_bi,
7883 gen_speculation_check_qi,
7884 gen_speculation_check_hi,
7885 gen_speculation_check_si,
7886 gen_speculation_check_di,
7887 gen_speculation_check_sf,
7888 gen_speculation_check_df,
7889 gen_speculation_check_xf,
7890 gen_speculation_check_ti,
7891 gen_speculation_check_di,
7892 gen_speculation_check_di,
7893 gen_speculation_check_di,
7896 gen_func_t *gen_check;
7898 if (ts & BEGIN_DATA)
7900 /* We don't need recovery because even if this is an ld.sa, the
7901 ALAT entry will be allocated only if the NAT bit is set to zero.
7902 So it is enough to use ld.c here. */
7904 if (simple_check_p)
7906 gcc_assert (mflag_sched_spec_ldc);
7908 if (clearing_check_p)
7909 gen_check = gen_ld_c_clr;
7910 else
7911 gen_check = gen_ld_c_nc;
7913 else
7915 if (clearing_check_p)
7916 gen_check = gen_chk_a_clr;
7917 else
7918 gen_check = gen_chk_a_nc;
7921 else if (ts & BEGIN_CONTROL)
7923 if (simple_check_p)
7924 /* We might want to use ld.sa -> ld.c instead of
7925 ld.s -> chk.s. */
7927 gcc_assert (!ia64_needs_block_p (ts));
7929 if (clearing_check_p)
7930 gen_check = gen_ld_c_clr;
7931 else
7932 gen_check = gen_ld_c_nc;
7934 else
7936 gen_check = gen_chk_s;
7939 else
7940 gcc_unreachable ();
7942 gcc_assert (mode_no >= 0);
7943 return gen_check[mode_no];
7946 /* Return true if speculation TS needs a branchy recovery check. */
7947 static bool
7948 ia64_needs_block_p (ds_t ts)
7950 if (ts & BEGIN_DATA)
7951 return !mflag_sched_spec_ldc;
7953 gcc_assert ((ts & BEGIN_CONTROL) != 0);
7955 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
7958 /* Generate a recovery check for INSN.
7959 If LABEL is nonzero, generate a branchy recovery check.
7960 Otherwise, generate a simple check. */
7961 static rtx
7962 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
7964 rtx op1, pat, check_pat;
7965 gen_func_t gen_check;
7966 int mode_no;
7968 mode_no = get_mode_no_for_insn (insn);
7969 gcc_assert (mode_no >= 0);
7971 if (label)
7972 op1 = label;
7973 else
7975 gcc_assert (!ia64_needs_block_p (ds));
7976 op1 = copy_rtx (recog_data.operand[1]);
7979 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
7980 true);
7982 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
7984 pat = PATTERN (insn);
7985 if (GET_CODE (pat) == COND_EXEC)
7986 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7987 check_pat);
7989 return check_pat;
7992 /* Return nonzero if X is a recovery (speculation) check. */
7993 static int
7994 ia64_spec_check_p (rtx x)
7996 x = PATTERN (x);
7997 if (GET_CODE (x) == COND_EXEC)
7998 x = COND_EXEC_CODE (x);
7999 if (GET_CODE (x) == SET)
8000 return ia64_spec_check_src_p (SET_SRC (x));
8001 return 0;
8004 /* Return nonzero if SRC belongs to a recovery check. */
8005 static int
8006 ia64_spec_check_src_p (rtx src)
8008 if (GET_CODE (src) == IF_THEN_ELSE)
8010 rtx t;
8012 t = XEXP (src, 0);
8013 if (GET_CODE (t) == NE)
8015 t = XEXP (t, 0);
8017 if (GET_CODE (t) == UNSPEC)
8019 int code;
8021 code = XINT (t, 1);
8023 if (code == UNSPEC_LDCCLR
8024 || code == UNSPEC_LDCNC
8025 || code == UNSPEC_CHKACLR
8026 || code == UNSPEC_CHKANC
8027 || code == UNSPEC_CHKS)
8029 gcc_assert (code != 0);
8030 return code;
8035 return 0;
8039 /* The following page contains abstract data `bundle states' which are
8040 used for bundling insns (inserting nops and template generation). */
8042 /* The following describes the state of insn bundling. */
8044 struct bundle_state
8046 /* Unique bundle state number to identify them in the debugging
8047 output */
8048 int unique_num;
8049 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8050 /* number of nops before and after the insn */
8051 short before_nops_num, after_nops_num;
8052 int insn_num; /* insn number (0 for the initial state, 1 for
8053 the 1st insn) */
8054 int cost; /* cost of the state in cycles */
8055 int accumulated_insns_num; /* number of all previous insns including
8056 nops. An L insn is counted as 2 insns */
8057 int branch_deviation; /* deviation of previous branches from 3rd slots */
8058 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8059 struct bundle_state *next; /* next state with the same insn_num */
8060 struct bundle_state *originator; /* originator (previous insn state) */
8061 /* All bundle states are in the following chain. */
8062 struct bundle_state *allocated_states_chain;
8063 /* The DFA State after issuing the insn and the nops. */
8064 state_t dfa_state;
8067 /* The following maps an insn number to the corresponding bundle state. */
8069 static struct bundle_state **index_to_bundle_states;
8071 /* The unique number of the next bundle state. */
8073 static int bundle_states_num;
8075 /* All allocated bundle states are in the following chain. */
8077 static struct bundle_state *allocated_bundle_states_chain;
8079 /* All allocated but not used bundle states are in the following
8080 chain. */
8082 static struct bundle_state *free_bundle_state_chain;
8085 /* The following function returns a free bundle state. */
8087 static struct bundle_state *
8088 get_free_bundle_state (void)
8090 struct bundle_state *result;
8092 if (free_bundle_state_chain != NULL)
8094 result = free_bundle_state_chain;
8095 free_bundle_state_chain = result->next;
8097 else
8099 result = XNEW (struct bundle_state);
8100 result->dfa_state = xmalloc (dfa_state_size);
8101 result->allocated_states_chain = allocated_bundle_states_chain;
8102 allocated_bundle_states_chain = result;
8104 result->unique_num = bundle_states_num++;
8105 return result;
8109 /* The following function frees the given bundle state. */
8111 static void
8112 free_bundle_state (struct bundle_state *state)
8114 state->next = free_bundle_state_chain;
8115 free_bundle_state_chain = state;
8118 /* Start work with abstract data `bundle states'. */
8120 static void
8121 initiate_bundle_states (void)
8123 bundle_states_num = 0;
8124 free_bundle_state_chain = NULL;
8125 allocated_bundle_states_chain = NULL;
8128 /* Finish work with abstract data `bundle states'. */
8130 static void
8131 finish_bundle_states (void)
8133 struct bundle_state *curr_state, *next_state;
8135 for (curr_state = allocated_bundle_states_chain;
8136 curr_state != NULL;
8137 curr_state = next_state)
8139 next_state = curr_state->allocated_states_chain;
8140 free (curr_state->dfa_state);
8141 free (curr_state);
8145 /* Hash table of the bundle states. The key is dfa_state and insn_num
8146 of the bundle states. */
8148 static htab_t bundle_state_table;
8150 /* The function returns the hash of BUNDLE_STATE. */
8152 static unsigned
8153 bundle_state_hash (const void *bundle_state)
8155 const struct bundle_state *const state
8156 = (const struct bundle_state *) bundle_state;
8157 unsigned result, i;
8159 for (result = i = 0; i < dfa_state_size; i++)
8160 result += (((unsigned char *) state->dfa_state) [i]
8161 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8162 return result + state->insn_num;
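/* An illustrative sketch, not part of this file: the key of a bundle state
   is the opaque DFA state buffer together with insn_num, so the hash simply
   mixes every byte of the buffer and adds the insn number, mirroring
   bundle_state_hash above on a plain byte buffer.  */
#if 0
#include <limits.h>
#include <stddef.h>

static unsigned
byte_buffer_hash (const unsigned char *buf, size_t size, int insn_num)
{
  unsigned result = 0;
  size_t i;

  for (i = 0; i < size; i++)
    result += (unsigned) buf[i] << ((i % CHAR_BIT) * 3 + CHAR_BIT);
  return result + (unsigned) insn_num;
}
#endif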
8165 /* The function returns nonzero if the bundle state keys are equal. */
8167 static int
8168 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8170 const struct bundle_state *const state1
8171 = (const struct bundle_state *) bundle_state_1;
8172 const struct bundle_state *const state2
8173 = (const struct bundle_state *) bundle_state_2;
8175 return (state1->insn_num == state2->insn_num
8176 && memcmp (state1->dfa_state, state2->dfa_state,
8177 dfa_state_size) == 0);
8180 /* The function inserts the BUNDLE_STATE into the hash table. The
8181 function returns nonzero if the bundle has been inserted into the
8182 table. The table contains the best bundle state with the given key. */
8184 static int
8185 insert_bundle_state (struct bundle_state *bundle_state)
8187 void **entry_ptr;
8189 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8190 if (*entry_ptr == NULL)
8192 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8193 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8194 *entry_ptr = (void *) bundle_state;
8195 return TRUE;
8197 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8198 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8199 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8200 > bundle_state->accumulated_insns_num
8201 || (((struct bundle_state *)
8202 *entry_ptr)->accumulated_insns_num
8203 == bundle_state->accumulated_insns_num
8204 && (((struct bundle_state *)
8205 *entry_ptr)->branch_deviation
8206 > bundle_state->branch_deviation
8207 || (((struct bundle_state *)
8208 *entry_ptr)->branch_deviation
8209 == bundle_state->branch_deviation
8210 && ((struct bundle_state *)
8211 *entry_ptr)->middle_bundle_stops
8212 > bundle_state->middle_bundle_stops))))))
8215 struct bundle_state temp;
8217 temp = *(struct bundle_state *) *entry_ptr;
8218 *(struct bundle_state *) *entry_ptr = *bundle_state;
8219 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8220 *bundle_state = temp;
8222 return FALSE;
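/* An illustrative sketch, not part of this file: the nested condition in
   insert_bundle_state above is a lexicographic "is A better than B" test on
   (cost, accumulated_insns_num, branch_deviation, middle_bundle_stops),
   all of which are minimized.  Written out as a helper it would look like
   this (hypothetical, for exposition only).  */
#if 0
static int
bundle_state_better_p (const struct bundle_state *a,
		       const struct bundle_state *b)
{
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->accumulated_insns_num != b->accumulated_insns_num)
    return a->accumulated_insns_num < b->accumulated_insns_num;
  if (a->branch_deviation != b->branch_deviation)
    return a->branch_deviation < b->branch_deviation;
  return a->middle_bundle_stops < b->middle_bundle_stops;
}
#endif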
8225 /* Start work with the hash table. */
8227 static void
8228 initiate_bundle_state_table (void)
8230 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8231 (htab_del) 0);
8234 /* Finish work with the hash table. */
8236 static void
8237 finish_bundle_state_table (void)
8239 htab_delete (bundle_state_table);
8244 /* The following variable is an insn `nop' used to check bundle states
8245 with different numbers of inserted nops. */
8247 static rtx ia64_nop;
8249 /* The following function tries to issue NOPS_NUM nops for the current
8250 state without advancing the processor cycle. If it fails, the
8251 function returns FALSE and frees the current state. */
8253 static int
8254 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8256 int i;
8258 for (i = 0; i < nops_num; i++)
8259 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8261 free_bundle_state (curr_state);
8262 return FALSE;
8264 return TRUE;
8267 /* The following function tries to issue INSN for the current
8268 state without advancing the processor cycle. If it fails, the
8269 function returns FALSE and frees the current state. */
8271 static int
8272 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8274 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8276 free_bundle_state (curr_state);
8277 return FALSE;
8279 return TRUE;
8282 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN,
8283 starting from ORIGINATOR, without advancing the processor cycle. If
8284 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8285 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8286 If it is successful, the function creates a new bundle state and
8287 inserts it into the hash table and into `index_to_bundle_states'. */
8289 static void
8290 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8291 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8293 struct bundle_state *curr_state;
8295 curr_state = get_free_bundle_state ();
8296 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8297 curr_state->insn = insn;
8298 curr_state->insn_num = originator->insn_num + 1;
8299 curr_state->cost = originator->cost;
8300 curr_state->originator = originator;
8301 curr_state->before_nops_num = before_nops_num;
8302 curr_state->after_nops_num = 0;
8303 curr_state->accumulated_insns_num
8304 = originator->accumulated_insns_num + before_nops_num;
8305 curr_state->branch_deviation = originator->branch_deviation;
8306 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8307 gcc_assert (insn);
8308 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8310 gcc_assert (GET_MODE (insn) != TImode);
8311 if (!try_issue_nops (curr_state, before_nops_num))
8312 return;
8313 if (!try_issue_insn (curr_state, insn))
8314 return;
8315 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8316 if (curr_state->accumulated_insns_num % 3 != 0)
8317 curr_state->middle_bundle_stops++;
8318 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8319 && curr_state->accumulated_insns_num % 3 != 0)
8321 free_bundle_state (curr_state);
8322 return;
8325 else if (GET_MODE (insn) != TImode)
8327 if (!try_issue_nops (curr_state, before_nops_num))
8328 return;
8329 if (!try_issue_insn (curr_state, insn))
8330 return;
8331 curr_state->accumulated_insns_num++;
8332 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8333 && asm_noperands (PATTERN (insn)) < 0);
8335 if (ia64_safe_type (insn) == TYPE_L)
8336 curr_state->accumulated_insns_num++;
8338 else
8340 /* If this is an insn that must be first in a group, then don't allow
8341 nops to be emitted before it. Currently, alloc is the only such
8342 supported instruction. */
8343 /* ??? The bundling automatons should handle this for us, but they do
8344 not yet have support for the first_insn attribute. */
8345 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8347 free_bundle_state (curr_state);
8348 return;
8351 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8352 state_transition (curr_state->dfa_state, NULL);
8353 curr_state->cost++;
8354 if (!try_issue_nops (curr_state, before_nops_num))
8355 return;
8356 if (!try_issue_insn (curr_state, insn))
8357 return;
8358 curr_state->accumulated_insns_num++;
8359 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8360 || asm_noperands (PATTERN (insn)) >= 0)
8362 /* Finish bundle containing asm insn. */
8363 curr_state->after_nops_num
8364 = 3 - curr_state->accumulated_insns_num % 3;
8365 curr_state->accumulated_insns_num
8366 += 3 - curr_state->accumulated_insns_num % 3;
8368 else if (ia64_safe_type (insn) == TYPE_L)
8369 curr_state->accumulated_insns_num++;
8371 if (ia64_safe_type (insn) == TYPE_B)
8372 curr_state->branch_deviation
8373 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8374 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8376 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8378 state_t dfa_state;
8379 struct bundle_state *curr_state1;
8380 struct bundle_state *allocated_states_chain;
8382 curr_state1 = get_free_bundle_state ();
8383 dfa_state = curr_state1->dfa_state;
8384 allocated_states_chain = curr_state1->allocated_states_chain;
8385 *curr_state1 = *curr_state;
8386 curr_state1->dfa_state = dfa_state;
8387 curr_state1->allocated_states_chain = allocated_states_chain;
8388 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8389 dfa_state_size);
8390 curr_state = curr_state1;
8392 if (!try_issue_nops (curr_state,
8393 3 - curr_state->accumulated_insns_num % 3))
8394 return;
8395 curr_state->after_nops_num
8396 = 3 - curr_state->accumulated_insns_num % 3;
8397 curr_state->accumulated_insns_num
8398 += 3 - curr_state->accumulated_insns_num % 3;
8400 if (!insert_bundle_state (curr_state))
8401 free_bundle_state (curr_state);
8402 return;
8405 /* The following function returns the position in the two-bundle window
8406 for the given STATE. */
8408 static int
8409 get_max_pos (state_t state)
8411 if (cpu_unit_reservation_p (state, pos_6))
8412 return 6;
8413 else if (cpu_unit_reservation_p (state, pos_5))
8414 return 5;
8415 else if (cpu_unit_reservation_p (state, pos_4))
8416 return 4;
8417 else if (cpu_unit_reservation_p (state, pos_3))
8418 return 3;
8419 else if (cpu_unit_reservation_p (state, pos_2))
8420 return 2;
8421 else if (cpu_unit_reservation_p (state, pos_1))
8422 return 1;
8423 else
8424 return 0;
8427 /* The function returns the code of a possible template for the given
8428 position and state. The function should be called only with
8429 position equal to 3 or 6. We avoid generating F NOPs by putting
8430 templates containing F insns at the end of the template search,
8431 because an undocumented anomaly in McKinley-derived cores can
8432 cause stalls if an F-unit insn (including a NOP) is issued within a
8433 six-cycle window after reading certain application registers (such
8434 as ar.bsp). Furthermore, power considerations also argue against
8435 the use of F-unit instructions unless they're really needed. */
8437 static int
8438 get_template (state_t state, int pos)
8440 switch (pos)
8442 case 3:
8443 if (cpu_unit_reservation_p (state, _0mmi_))
8444 return 1;
8445 else if (cpu_unit_reservation_p (state, _0mii_))
8446 return 0;
8447 else if (cpu_unit_reservation_p (state, _0mmb_))
8448 return 7;
8449 else if (cpu_unit_reservation_p (state, _0mib_))
8450 return 6;
8451 else if (cpu_unit_reservation_p (state, _0mbb_))
8452 return 5;
8453 else if (cpu_unit_reservation_p (state, _0bbb_))
8454 return 4;
8455 else if (cpu_unit_reservation_p (state, _0mmf_))
8456 return 3;
8457 else if (cpu_unit_reservation_p (state, _0mfi_))
8458 return 2;
8459 else if (cpu_unit_reservation_p (state, _0mfb_))
8460 return 8;
8461 else if (cpu_unit_reservation_p (state, _0mlx_))
8462 return 9;
8463 else
8464 gcc_unreachable ();
8465 case 6:
8466 if (cpu_unit_reservation_p (state, _1mmi_))
8467 return 1;
8468 else if (cpu_unit_reservation_p (state, _1mii_))
8469 return 0;
8470 else if (cpu_unit_reservation_p (state, _1mmb_))
8471 return 7;
8472 else if (cpu_unit_reservation_p (state, _1mib_))
8473 return 6;
8474 else if (cpu_unit_reservation_p (state, _1mbb_))
8475 return 5;
8476 else if (cpu_unit_reservation_p (state, _1bbb_))
8477 return 4;
8478 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8479 return 3;
8480 else if (cpu_unit_reservation_p (state, _1mfi_))
8481 return 2;
8482 else if (cpu_unit_reservation_p (state, _1mfb_))
8483 return 8;
8484 else if (cpu_unit_reservation_p (state, _1mlx_))
8485 return 9;
8486 else
8487 gcc_unreachable ();
8488 default:
8489 gcc_unreachable ();
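/* An illustrative table, not part of this file: the template numbers
   returned by get_template above, read off the cpu unit names they are
   matched against (NR_BUNDLES == 10).  */
#if 0
static const char *const bundle_template_name[10] = {
  "mii", /* 0 */  "mmi", /* 1 */  "mfi", /* 2 */  "mmf", /* 3 */
  "bbb", /* 4 */  "mbb", /* 5 */  "mib", /* 6 */  "mmb", /* 7 */
  "mfb", /* 8 */  "mlx", /* 9 */
};
#endif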
8493 /* True when INSN is important for bundling. */
8494 static bool
8495 important_for_bundling_p (rtx insn)
8497 return (INSN_P (insn)
8498 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8499 && GET_CODE (PATTERN (insn)) != USE
8500 && GET_CODE (PATTERN (insn)) != CLOBBER);
8503 /* The following function returns the first insn important for insn
8504 bundling at or after INSN and before TAIL. */
8506 static rtx
8507 get_next_important_insn (rtx insn, rtx tail)
8509 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8510 if (important_for_bundling_p (insn))
8511 return insn;
8512 return NULL_RTX;
8515 /* Add a bundle selector TEMPLATE0 before INSN. */
8517 static void
8518 ia64_add_bundle_selector_before (int template0, rtx insn)
8520 rtx b = gen_bundle_selector (GEN_INT (template0));
8522 ia64_emit_insn_before (b, insn);
8523 #if NR_BUNDLES == 10
8524 if ((template0 == 4 || template0 == 5)
8525 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
8527 int i;
8528 rtx note = NULL_RTX;
8530 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
8531 first or second slot. If it is and has a REG_EH_REGION note, copy
8532 the note to the following nops, as br.call sets rp to the address of
8533 the following bundle and therefore an EH region end must be on a
8534 bundle boundary. */
8535 insn = PREV_INSN (insn);
8536 for (i = 0; i < 3; i++)
8539 insn = next_active_insn (insn);
8540 while (GET_CODE (insn) == INSN
8541 && get_attr_empty (insn) == EMPTY_YES);
8542 if (GET_CODE (insn) == CALL_INSN)
8543 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8544 else if (note)
8546 int code;
8548 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8549 || code == CODE_FOR_nop_b);
8550 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8551 note = NULL_RTX;
8552 else
8553 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8557 #endif
8560 /* The following function does insn bundling. Bundling means
8561 inserting templates and nop insns to fit insn groups into permitted
8562 templates. Instruction scheduling uses an NDFA (non-deterministic
8563 finite automaton) encoding information about the templates and the
8564 inserted nops. The nondeterminism of the automaton permits following
8565 all possible insn sequences very quickly.
8567 Unfortunately it is not possible to get information about inserted
8568 nop insns and used templates from the automaton states. The
8569 automaton only says that we can issue an insn, possibly inserting
8570 some nops before it and using some template. Therefore insn
8571 bundling in this function is implemented by using a DFA
8572 (deterministic finite automaton). We follow all possible insn
8573 sequences by inserting 0-2 nops (that is what the NDFA describes for
8574 insn scheduling) before/after each insn being bundled. We know the
8575 start of a simulated processor cycle from insn scheduling (an insn
8576 starting a new cycle has TImode).
8578 A simple implementation of insn bundling would create an enormous
8579 number of possible insn sequences satisfying the information about
8580 new cycle ticks taken from the insn scheduling. To make the algorithm
8581 practical we use dynamic programming. Each decision (about
8582 inserting nops and implicitly about previous decisions) is described
8583 by the structure bundle_state (see above). If we generate the same
8584 bundle state (the key is the automaton state after issuing the insns
8585 and nops for it), we reuse the already generated one. As a consequence
8586 we reject some decisions which cannot improve the solution and
8587 reduce the memory used by the algorithm.
8589 When we reach the end of the EBB (extended basic block), we choose
8590 the best sequence and then, moving back through the EBB, insert
8591 templates for the best alternative. The templates are taken by
8592 querying the automaton state for each insn in the chosen bundle states.
8594 So the algorithm makes two (forward and backward) passes through
8595 the EBB. */
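/* An illustrative sketch, not part of this file: the shape of the two
   passes described above, with every ia64 detail stripped out.  The
   helpers and types here are hypothetical stand-ins; the real work is
   done by issue_nops_and_insn, insert_bundle_state and the backward walk
   over the originator chain in bundling () below.  */
#if 0
struct dp_state { struct dp_state *next, *originator; };

extern struct dp_state **states_of;	/* indexed by insn number */
extern void try_issue (struct dp_state *, int nops, int insn);
extern void emit_nops_and_templates (struct dp_state *);

static void
dp_shape (int n_insns, struct dp_state *best)
{
  int insn, nops;
  struct dp_state *s;

  /* Forward pass: for every insn, extend every surviving state of the
     previous insn by 0, 1 or 2 nops plus the insn itself; states with
     the same (DFA state, insn number) key are merged so that only the
     best one survives.  */
  for (insn = 1; insn <= n_insns; insn++)
    for (s = states_of[insn - 1]; s; s = s->next)
      for (nops = 0; nops <= 2; nops++)
	try_issue (s, nops, insn);

  /* Backward pass: walk the originator chain of the best final state,
     emitting nops and bundle templates along the way.  */
  for (s = best; s->originator; s = s->originator)
    emit_nops_and_templates (s);
}
#endif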
8597 static void
8598 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8600 struct bundle_state *curr_state, *next_state, *best_state;
8601 rtx insn, next_insn;
8602 int insn_num;
8603 int i, bundle_end_p, only_bundle_end_p, asm_p;
8604 int pos = 0, max_pos, template0, template1;
8605 rtx b;
8606 rtx nop;
8607 enum attr_type type;
8609 insn_num = 0;
8610 /* Count insns in the EBB. */
8611 for (insn = NEXT_INSN (prev_head_insn);
8612 insn && insn != tail;
8613 insn = NEXT_INSN (insn))
8614 if (INSN_P (insn))
8615 insn_num++;
8616 if (insn_num == 0)
8617 return;
8618 bundling_p = 1;
8619 dfa_clean_insn_cache ();
8620 initiate_bundle_state_table ();
8621 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8622 /* First (forward) pass -- generation of bundle states. */
8623 curr_state = get_free_bundle_state ();
8624 curr_state->insn = NULL;
8625 curr_state->before_nops_num = 0;
8626 curr_state->after_nops_num = 0;
8627 curr_state->insn_num = 0;
8628 curr_state->cost = 0;
8629 curr_state->accumulated_insns_num = 0;
8630 curr_state->branch_deviation = 0;
8631 curr_state->middle_bundle_stops = 0;
8632 curr_state->next = NULL;
8633 curr_state->originator = NULL;
8634 state_reset (curr_state->dfa_state);
8635 index_to_bundle_states [0] = curr_state;
8636 insn_num = 0;
8637 /* Shift the cycle mark if it is put on an insn which could be ignored. */
8638 for (insn = NEXT_INSN (prev_head_insn);
8639 insn != tail;
8640 insn = NEXT_INSN (insn))
8641 if (INSN_P (insn)
8642 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8643 || GET_CODE (PATTERN (insn)) == USE
8644 || GET_CODE (PATTERN (insn)) == CLOBBER)
8645 && GET_MODE (insn) == TImode)
8647 PUT_MODE (insn, VOIDmode);
8648 for (next_insn = NEXT_INSN (insn);
8649 next_insn != tail;
8650 next_insn = NEXT_INSN (next_insn))
8651 if (INSN_P (next_insn)
8652 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8653 && GET_CODE (PATTERN (next_insn)) != USE
8654 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8655 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8657 PUT_MODE (next_insn, TImode);
8658 break;
8661 /* Forward pass: generation of bundle states. */
8662 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8663 insn != NULL_RTX;
8664 insn = next_insn)
8666 gcc_assert (INSN_P (insn)
8667 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8668 && GET_CODE (PATTERN (insn)) != USE
8669 && GET_CODE (PATTERN (insn)) != CLOBBER);
8670 type = ia64_safe_type (insn);
8671 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8672 insn_num++;
8673 index_to_bundle_states [insn_num] = NULL;
8674 for (curr_state = index_to_bundle_states [insn_num - 1];
8675 curr_state != NULL;
8676 curr_state = next_state)
8678 pos = curr_state->accumulated_insns_num % 3;
8679 next_state = curr_state->next;
8680 /* We must fill up the current bundle in order to start a
8681 subsequent asm insn in a new bundle. An asm insn is always
8682 placed in a separate bundle. */
8683 only_bundle_end_p
8684 = (next_insn != NULL_RTX
8685 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8686 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8687 /* We may fill up the current bundle if it is the cycle end
8688 without a group barrier. */
8689 bundle_end_p
8690 = (only_bundle_end_p || next_insn == NULL_RTX
8691 || (GET_MODE (next_insn) == TImode
8692 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8693 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8694 || type == TYPE_S)
8695 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8696 only_bundle_end_p);
8697 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8698 only_bundle_end_p);
8699 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8700 only_bundle_end_p);
8702 gcc_assert (index_to_bundle_states [insn_num]);
8703 for (curr_state = index_to_bundle_states [insn_num];
8704 curr_state != NULL;
8705 curr_state = curr_state->next)
8706 if (verbose >= 2 && dump)
8708 /* This structure is taken from the generated code of the
8709 pipeline hazard recognizer (see file insn-attrtab.c).
8710 Please don't forget to change the structure if a new
8711 automaton is added to the .md file. */
8712 struct DFA_chip
8714 unsigned short one_automaton_state;
8715 unsigned short oneb_automaton_state;
8716 unsigned short two_automaton_state;
8717 unsigned short twob_automaton_state;
8720 fprintf
8721 (dump,
8722 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8723 curr_state->unique_num,
8724 (curr_state->originator == NULL
8725 ? -1 : curr_state->originator->unique_num),
8726 curr_state->cost,
8727 curr_state->before_nops_num, curr_state->after_nops_num,
8728 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8729 curr_state->middle_bundle_stops,
8730 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8731 INSN_UID (insn));
8735 /* We should find a solution because the 2nd insn scheduling has
8736 found one. */
8737 gcc_assert (index_to_bundle_states [insn_num]);
8738 /* Find a state corresponding to the best insn sequence. */
8739 best_state = NULL;
8740 for (curr_state = index_to_bundle_states [insn_num];
8741 curr_state != NULL;
8742 curr_state = curr_state->next)
8743 /* We only consider states whose last bundle is completely filled.
8744 We prefer insn sequences first with minimal cost, then with
8745 minimal inserted nops, and finally with branch insns placed in
8746 the 3rd slots. */
8747 if (curr_state->accumulated_insns_num % 3 == 0
8748 && (best_state == NULL || best_state->cost > curr_state->cost
8749 || (best_state->cost == curr_state->cost
8750 && (curr_state->accumulated_insns_num
8751 < best_state->accumulated_insns_num
8752 || (curr_state->accumulated_insns_num
8753 == best_state->accumulated_insns_num
8754 && (curr_state->branch_deviation
8755 < best_state->branch_deviation
8756 || (curr_state->branch_deviation
8757 == best_state->branch_deviation
8758 && curr_state->middle_bundle_stops
8759 < best_state->middle_bundle_stops)))))))
8760 best_state = curr_state;
8761 /* Second (backward) pass: adding nops and templates. */
8762 gcc_assert (best_state);
8763 insn_num = best_state->before_nops_num;
8764 template0 = template1 = -1;
8765 for (curr_state = best_state;
8766 curr_state->originator != NULL;
8767 curr_state = curr_state->originator)
8769 insn = curr_state->insn;
8770 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8771 || asm_noperands (PATTERN (insn)) >= 0);
8772 insn_num++;
8773 if (verbose >= 2 && dump)
8775 struct DFA_chip
8777 unsigned short one_automaton_state;
8778 unsigned short oneb_automaton_state;
8779 unsigned short two_automaton_state;
8780 unsigned short twob_automaton_state;
8783 fprintf
8784 (dump,
8785 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8786 curr_state->unique_num,
8787 (curr_state->originator == NULL
8788 ? -1 : curr_state->originator->unique_num),
8789 curr_state->cost,
8790 curr_state->before_nops_num, curr_state->after_nops_num,
8791 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8792 curr_state->middle_bundle_stops,
8793 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8794 INSN_UID (insn));
8796 /* Find the position in the current bundle window. The window can
8797 contain at most two bundles. A two-bundle window means that
8798 the processor will make two bundle rotations. */
8799 max_pos = get_max_pos (curr_state->dfa_state);
8800 if (max_pos == 6
8801 /* The following (negative template number) means that the
8802 processor did one bundle rotation. */
8803 || (max_pos == 3 && template0 < 0))
8805 /* We are at the end of the window -- find template(s) for
8806 its bundle(s). */
8807 pos = max_pos;
8808 if (max_pos == 3)
8809 template0 = get_template (curr_state->dfa_state, 3);
8810 else
8812 template1 = get_template (curr_state->dfa_state, 3);
8813 template0 = get_template (curr_state->dfa_state, 6);
8816 if (max_pos > 3 && template1 < 0)
8817 /* This may happen when we have a stop inside a bundle. */
8819 gcc_assert (pos <= 3);
8820 template1 = get_template (curr_state->dfa_state, 3);
8821 pos += 3;
8823 if (!asm_p)
8824 /* Emit nops after the current insn. */
8825 for (i = 0; i < curr_state->after_nops_num; i++)
8827 nop = gen_nop ();
8828 emit_insn_after (nop, insn);
8829 pos--;
8830 gcc_assert (pos >= 0);
8831 if (pos % 3 == 0)
8833 /* We are at the start of a bundle: emit the template
8834 (it should be defined). */
8835 gcc_assert (template0 >= 0);
8836 ia64_add_bundle_selector_before (template0, nop);
8837 /* If we have a two-bundle window, we make one bundle
8838 rotation. Otherwise template0 will be undefined
8839 (a negative value). */
8840 template0 = template1;
8841 template1 = -1;
8844 /* Move the position backward in the window. A group barrier has
8845 no slot. An asm insn takes a whole bundle. */
8846 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8847 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8848 && asm_noperands (PATTERN (insn)) < 0)
8849 pos--;
8850 /* Long insn takes 2 slots. */
8851 if (ia64_safe_type (insn) == TYPE_L)
8852 pos--;
8853 gcc_assert (pos >= 0);
8854 if (pos % 3 == 0
8855 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8856 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8857 && asm_noperands (PATTERN (insn)) < 0)
8859 /* The current insn is at the bundle start: emit the
8860 template. */
8861 gcc_assert (template0 >= 0);
8862 ia64_add_bundle_selector_before (template0, insn);
8863 b = PREV_INSN (insn);
8864 insn = b;
8865 /* See comment above in analogous place for emitting nops
8866 after the insn. */
8867 template0 = template1;
8868 template1 = -1;
8870 /* Emit nops before the current insn. */
8871 for (i = 0; i < curr_state->before_nops_num; i++)
8873 nop = gen_nop ();
8874 ia64_emit_insn_before (nop, insn);
8875 nop = PREV_INSN (insn);
8876 insn = nop;
8877 pos--;
8878 gcc_assert (pos >= 0);
8879 if (pos % 3 == 0)
8881 /* See comment above in analogous place for emitting nops
8882 after the insn. */
8883 gcc_assert (template0 >= 0);
8884 ia64_add_bundle_selector_before (template0, insn);
8885 b = PREV_INSN (insn);
8886 insn = b;
8887 template0 = template1;
8888 template1 = -1;
8893 #ifdef ENABLE_CHECKING
8895 /* Assert the correct calculation of middle_bundle_stops. */
8896 int num = best_state->middle_bundle_stops;
8897 bool start_bundle = true, end_bundle = false;
8899 for (insn = NEXT_INSN (prev_head_insn);
8900 insn && insn != tail;
8901 insn = NEXT_INSN (insn))
8903 if (!INSN_P (insn))
8904 continue;
8905 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
8906 start_bundle = true;
8907 else
8909 rtx next_insn;
8911 for (next_insn = NEXT_INSN (insn);
8912 next_insn && next_insn != tail;
8913 next_insn = NEXT_INSN (next_insn))
8914 if (INSN_P (next_insn)
8915 && (ia64_safe_itanium_class (next_insn)
8916 != ITANIUM_CLASS_IGNORE
8917 || recog_memoized (next_insn)
8918 == CODE_FOR_bundle_selector)
8919 && GET_CODE (PATTERN (next_insn)) != USE
8920 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
8921 break;
8923 end_bundle = next_insn == NULL_RTX
8924 || next_insn == tail
8925 || (INSN_P (next_insn)
8926 && recog_memoized (next_insn)
8927 == CODE_FOR_bundle_selector);
8928 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
8929 && !start_bundle && !end_bundle
8930 && next_insn
8931 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
8932 && asm_noperands (PATTERN (next_insn)) < 0)
8933 num--;
8935 start_bundle = false;
8939 gcc_assert (num == 0);
8941 #endif
8943 free (index_to_bundle_states);
8944 finish_bundle_state_table ();
8945 bundling_p = 0;
8946 dfa_clean_insn_cache ();
8949 /* The following function is called at the end of scheduling a BB or
8950 EBB. After reload, it inserts stop bits and does insn bundling. */
8952 static void
8953 ia64_sched_finish (FILE *dump, int sched_verbose)
8955 if (sched_verbose)
8956 fprintf (dump, "// Finishing schedule.\n");
8957 if (!reload_completed)
8958 return;
8959 if (reload_completed)
8961 final_emit_insn_group_barriers (dump);
8962 bundling (dump, sched_verbose, current_sched_info->prev_head,
8963 current_sched_info->next_tail);
8964 if (sched_verbose && dump)
8965 fprintf (dump, "// finishing %d-%d\n",
8966 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8967 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8969 return;
8973 /* The following function inserts stop bits in a scheduled BB or EBB. */
8975 static void
8976 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8978 rtx insn;
8979 int need_barrier_p = 0;
8980 int seen_good_insn = 0;
8982 init_insn_group_barriers ();
8984 for (insn = NEXT_INSN (current_sched_info->prev_head);
8985 insn != current_sched_info->next_tail;
8986 insn = NEXT_INSN (insn))
8988 if (GET_CODE (insn) == BARRIER)
8990 rtx last = prev_active_insn (insn);
8992 if (! last)
8993 continue;
8994 if (GET_CODE (last) == JUMP_INSN
8995 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8996 last = prev_active_insn (last);
8997 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8998 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9000 init_insn_group_barriers ();
9001 seen_good_insn = 0;
9002 need_barrier_p = 0;
9004 else if (NONDEBUG_INSN_P (insn))
9006 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9008 init_insn_group_barriers ();
9009 seen_good_insn = 0;
9010 need_barrier_p = 0;
9012 else if (need_barrier_p || group_barrier_needed (insn)
9013 || (mflag_sched_stop_bits_after_every_cycle
9014 && GET_MODE (insn) == TImode
9015 && seen_good_insn))
9017 if (TARGET_EARLY_STOP_BITS)
9019 rtx last;
9021 for (last = insn;
9022 last != current_sched_info->prev_head;
9023 last = PREV_INSN (last))
9024 if (INSN_P (last) && GET_MODE (last) == TImode
9025 && stops_p [INSN_UID (last)])
9026 break;
9027 if (last == current_sched_info->prev_head)
9028 last = insn;
9029 last = prev_active_insn (last);
9030 if (last
9031 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9032 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9033 last);
9034 init_insn_group_barriers ();
9035 for (last = NEXT_INSN (last);
9036 last != insn;
9037 last = NEXT_INSN (last))
9038 if (INSN_P (last))
9040 group_barrier_needed (last);
9041 if (recog_memoized (last) >= 0
9042 && important_for_bundling_p (last))
9043 seen_good_insn = 1;
9046 else
9048 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9049 insn);
9050 init_insn_group_barriers ();
9051 seen_good_insn = 0;
9053 group_barrier_needed (insn);
9054 if (recog_memoized (insn) >= 0
9055 && important_for_bundling_p (insn))
9056 seen_good_insn = 1;
9058 else if (recog_memoized (insn) >= 0
9059 && important_for_bundling_p (insn))
9060 seen_good_insn = 1;
9061 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9062 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9063 || asm_noperands (PATTERN (insn)) >= 0);
9070 /* The following function returns the lookahead depth used by the
9071 first-cycle multipass DFA insn scheduler. */
9073 static int
9074 ia64_first_cycle_multipass_dfa_lookahead (void)
9076 return (reload_completed ? 6 : 4);
9079 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9081 static void
9082 ia64_init_dfa_pre_cycle_insn (void)
9084 if (temp_dfa_state == NULL)
9086 dfa_state_size = state_size ();
9087 temp_dfa_state = xmalloc (dfa_state_size);
9088 prev_cycle_state = xmalloc (dfa_state_size);
9090 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9091 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9092 recog_memoized (dfa_pre_cycle_insn);
9093 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9094 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9095 recog_memoized (dfa_stop_insn);
9098 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9099 used by the DFA insn scheduler. */
9101 static rtx
9102 ia64_dfa_pre_cycle_insn (void)
9104 return dfa_pre_cycle_insn;
9107 /* The following function returns TRUE if PRODUCER (of type ilog or
9108 ld) produces an address for CONSUMER (of type st or stf). */
9111 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9113 rtx dest, reg, mem;
9115 gcc_assert (producer && consumer);
9116 dest = ia64_single_set (producer);
9117 gcc_assert (dest);
9118 reg = SET_DEST (dest);
9119 gcc_assert (reg);
9120 if (GET_CODE (reg) == SUBREG)
9121 reg = SUBREG_REG (reg);
9122 gcc_assert (GET_CODE (reg) == REG);
9124 dest = ia64_single_set (consumer);
9125 gcc_assert (dest);
9126 mem = SET_DEST (dest);
9127 gcc_assert (mem && GET_CODE (mem) == MEM);
9128 return reg_mentioned_p (reg, mem);
9131 /* The following function returns TRUE if PRODUCER (of type ilog or
9132 ld) produces an address for CONSUMER (of type ld or fld). */
9135 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9137 rtx dest, src, reg, mem;
9139 gcc_assert (producer && consumer);
9140 dest = ia64_single_set (producer);
9141 gcc_assert (dest);
9142 reg = SET_DEST (dest);
9143 gcc_assert (reg);
9144 if (GET_CODE (reg) == SUBREG)
9145 reg = SUBREG_REG (reg);
9146 gcc_assert (GET_CODE (reg) == REG);
9148 src = ia64_single_set (consumer);
9149 gcc_assert (src);
9150 mem = SET_SRC (src);
9151 gcc_assert (mem);
9153 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9154 mem = XVECEXP (mem, 0, 0);
9155 else if (GET_CODE (mem) == IF_THEN_ELSE)
9156 /* ??? Is this bypass necessary for ld.c? */
9158 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9159 mem = XEXP (mem, 1);
9162 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9163 mem = XEXP (mem, 0);
9165 if (GET_CODE (mem) == UNSPEC)
9167 int c = XINT (mem, 1);
9169 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9170 || c == UNSPEC_LDSA);
9171 mem = XVECEXP (mem, 0, 0);
9174 /* Note that LO_SUM is used for GOT loads. */
9175 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9177 return reg_mentioned_p (reg, mem);
9180 /* The following function returns TRUE if INSN produces an address for a
9181 load/store insn. We will place such insns into an M slot because that
9182 decreases their latency. */
9185 ia64_produce_address_p (rtx insn)
9187 return insn->call;
9191 /* Emit pseudo-ops for the assembler to describe predicate relations.
9192 At present this assumes that we only consider predicate pairs to
9193 be mutex, and that the assembler can deduce proper values from
9194 straight-line code. */
9196 static void
9197 emit_predicate_relation_info (void)
9199 basic_block bb;
9201 FOR_EACH_BB_REVERSE (bb)
9203 int r;
9204 rtx head = BB_HEAD (bb);
9206 /* We only need such notes at code labels. */
9207 if (GET_CODE (head) != CODE_LABEL)
9208 continue;
9209 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9210 head = NEXT_INSN (head);
9212 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9213 grabbing the entire block of predicate registers. */
9214 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9215 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9217 rtx p = gen_rtx_REG (BImode, r);
9218 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9219 if (head == BB_END (bb))
9220 BB_END (bb) = n;
9221 head = n;
9225 /* Look for conditional calls that do not return, and protect predicate
9226 relations around them. Otherwise the assembler will assume the call
9227 returns, and complain about uses of call-clobbered predicates after
9228 the call. */
9229 FOR_EACH_BB_REVERSE (bb)
9231 rtx insn = BB_HEAD (bb);
9233 while (1)
9235 if (GET_CODE (insn) == CALL_INSN
9236 && GET_CODE (PATTERN (insn)) == COND_EXEC
9237 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9239 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9240 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9241 if (BB_HEAD (bb) == insn)
9242 BB_HEAD (bb) = b;
9243 if (BB_END (bb) == insn)
9244 BB_END (bb) = a;
9247 if (insn == BB_END (bb))
9248 break;
9249 insn = NEXT_INSN (insn);
9254 /* Perform machine dependent operations on the rtl chain. */
9256 static void
9257 ia64_reorg (void)
9259 /* We are freeing block_for_insn in the toplev to keep compatibility
9260 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9261 compute_bb_for_insn ();
9263 /* If optimizing, we'll have split before scheduling. */
9264 if (optimize == 0)
9265 split_all_insns ();
9267 if (optimize && ia64_flag_schedule_insns2
9268 && dbg_cnt (ia64_sched2))
9270 timevar_push (TV_SCHED2);
9271 ia64_final_schedule = 1;
9273 initiate_bundle_states ();
9274 ia64_nop = make_insn_raw (gen_nop ());
9275 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9276 recog_memoized (ia64_nop);
9277 clocks_length = get_max_uid () + 1;
9278 stops_p = XCNEWVEC (char, clocks_length);
9280 if (ia64_tune == PROCESSOR_ITANIUM2)
9282 pos_1 = get_cpu_unit_code ("2_1");
9283 pos_2 = get_cpu_unit_code ("2_2");
9284 pos_3 = get_cpu_unit_code ("2_3");
9285 pos_4 = get_cpu_unit_code ("2_4");
9286 pos_5 = get_cpu_unit_code ("2_5");
9287 pos_6 = get_cpu_unit_code ("2_6");
9288 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9289 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9290 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9291 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9292 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9293 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9294 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9295 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9296 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9297 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9298 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9299 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9300 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9301 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9302 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9303 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9304 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9305 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9306 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9307 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9309 else
9311 pos_1 = get_cpu_unit_code ("1_1");
9312 pos_2 = get_cpu_unit_code ("1_2");
9313 pos_3 = get_cpu_unit_code ("1_3");
9314 pos_4 = get_cpu_unit_code ("1_4");
9315 pos_5 = get_cpu_unit_code ("1_5");
9316 pos_6 = get_cpu_unit_code ("1_6");
9317 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9318 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9319 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9320 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9321 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9322 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9323 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9324 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9325 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9326 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9327 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9328 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9329 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9330 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9331 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9332 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9333 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9334 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9335 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9336 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9339 if (flag_selective_scheduling2
9340 && !maybe_skip_selective_scheduling ())
9341 run_selective_scheduling ();
9342 else
9343 schedule_ebbs ();
9345 /* Redo the alignment computation, as it might have gone wrong. */
9346 compute_alignments ();
9348 /* We cannot reuse this one because it has been corrupted by the
9349 evil glat. */
9350 finish_bundle_states ();
9351 free (stops_p);
9352 stops_p = NULL;
9353 emit_insn_group_barriers (dump_file);
9355 ia64_final_schedule = 0;
9356 timevar_pop (TV_SCHED2);
9358 else
9359 emit_all_insn_group_barriers (dump_file);
9361 df_analyze ();
9363 /* A call must not be the last instruction in a function, so that the
9364 return address is still within the function and unwinding works
9365 properly. Note that IA-64 differs from dwarf2 on this point. */
9366 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
9368 rtx insn;
9369 int saw_stop = 0;
9371 insn = get_last_insn ();
9372 if (! INSN_P (insn))
9373 insn = prev_active_insn (insn);
9374 if (insn)
9376 /* Skip over insns that expand to nothing. */
9377 while (GET_CODE (insn) == INSN
9378 && get_attr_empty (insn) == EMPTY_YES)
9380 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9381 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9382 saw_stop = 1;
9383 insn = prev_active_insn (insn);
9385 if (GET_CODE (insn) == CALL_INSN)
9387 if (! saw_stop)
9388 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9389 emit_insn (gen_break_f ());
9390 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9395 emit_predicate_relation_info ();
9397 if (ia64_flag_var_tracking)
9399 timevar_push (TV_VAR_TRACKING);
9400 variable_tracking_main ();
9401 timevar_pop (TV_VAR_TRACKING);
9403 df_finish_pass (false);
9406 /* Return true if REGNO is used by the epilogue. */
9409 ia64_epilogue_uses (int regno)
9411 switch (regno)
9413 case R_GR (1):
9414 /* With a call to a function in another module, we will write a new
9415 value to "gp". After returning from such a call, we need to make
9416 sure the function restores the original gp-value, even if the
9417 function itself does not use the gp anymore. */
9418 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9420 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9421 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9422 /* For functions defined with the syscall_linkage attribute, all
9423 input registers are marked as live at all function exits. This
9424 prevents the register allocator from using the input registers,
9425 which in turn makes it possible to restart a system call after
9426 an interrupt without having to save/restore the input registers.
9427 This also prevents kernel data from leaking to application code. */
9428 return lookup_attribute ("syscall_linkage",
9429 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9431 case R_BR (0):
9432 /* Conditional return patterns can't represent the use of `b0' as
9433 the return address, so we force the value live this way. */
9434 return 1;
9436 case AR_PFS_REGNUM:
9437 /* Likewise for ar.pfs, which is used by br.ret. */
9438 return 1;
9440 default:
9441 return 0;
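/* An illustrative declaration, not part of this file: the syscall_linkage
   attribute tested above is attached to a function declaration like this
   (the function name is hypothetical); for such functions the input
   registers are treated as live at every exit.  */
#if 0
extern long restartable_syscall_stub (long, long, long)
     __attribute__ ((syscall_linkage));
#endif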
9445 /* Return true if REGNO is used by the frame unwinder. */
9448 ia64_eh_uses (int regno)
9450 unsigned int r;
9452 if (! reload_completed)
9453 return 0;
9455 if (regno == 0)
9456 return 0;
9458 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9459 if (regno == current_frame_info.r[r]
9460 || regno == emitted_frame_related_regs[r])
9461 return 1;
9463 return 0;
9466 /* Return true if this goes in small data/bss. */
9468 /* ??? We could also support our own long data here, generating movl/add/ld8
9469 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9470 code faster because there is one less load. This also includes incomplete
9471 types which can't go in sdata/sbss. */
9473 static bool
9474 ia64_in_small_data_p (const_tree exp)
9476 if (TARGET_NO_SDATA)
9477 return false;
9479 /* We want to merge strings, so we never consider them small data. */
9480 if (TREE_CODE (exp) == STRING_CST)
9481 return false;
9483 /* Functions are never small data. */
9484 if (TREE_CODE (exp) == FUNCTION_DECL)
9485 return false;
9487 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9489 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9491 if (strcmp (section, ".sdata") == 0
9492 || strncmp (section, ".sdata.", 7) == 0
9493 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9494 || strcmp (section, ".sbss") == 0
9495 || strncmp (section, ".sbss.", 6) == 0
9496 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9497 return true;
9499 else
9501 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9503 /* If this is an incomplete type with size 0, then we can't put it
9504 in sdata because it might be too big when completed. */
9505 if (size > 0 && size <= ia64_section_threshold)
9506 return true;
9509 return false;
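/* An illustrative sketch, not part of this file: the two ways a variable
   can satisfy the predicate above -- by being small enough for the
   ia64_section_threshold size test, or by being placed in a small data
   section explicitly.  Whether the first variable actually qualifies
   depends on the threshold in effect; the names are hypothetical.  */
#if 0
static int small_by_size;                                  /* size test */
static int small_by_section __attribute__ ((section (".sdata")));
#endif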
9512 /* Output assembly directives for prologue regions. */
9514 /* True if the current basic block is the last one in the function. */
9516 static bool last_block;
9518 /* True if we need a copy_state command at the start of the next block. */
9520 static bool need_copy_state;
9522 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9523 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9524 #endif
9526 /* Emit a debugging label after a call-frame-related insn. We'd
9527 rather output the label right away, but we'd have to output it
9528 after, not before, the instruction, and the instruction has not
9529 been output yet. So we emit the label after the insn, delete it to
9530 avoid introducing basic blocks, and mark it as preserved, such that
9531 it is still output, given that it is referenced in debug info. */
9533 static const char *
9534 ia64_emit_deleted_label_after_insn (rtx insn)
9536 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9537 rtx lb = gen_label_rtx ();
9538 rtx label_insn = emit_label_after (lb, insn);
9540 LABEL_PRESERVE_P (lb) = 1;
9542 delete_insn (label_insn);
9544 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9546 return xstrdup (label);
9549 /* Define the CFA after INSN with the steady-state definition. */
9551 static void
9552 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9554 rtx fp = frame_pointer_needed
9555 ? hard_frame_pointer_rtx
9556 : stack_pointer_rtx;
9557 const char *label = ia64_emit_deleted_label_after_insn (insn);
9559 if (!frame)
9560 return;
9562 dwarf2out_def_cfa
9563 (label, REGNO (fp),
9564 ia64_initial_elimination_offset
9565 (REGNO (arg_pointer_rtx), REGNO (fp))
9566 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9569 /* The generic dwarf2 frame debug info generator does not define a
9570 separate region for the very end of the epilogue, so refrain from
9571 doing so in the IA64-specific code as well. */
9573 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9575 /* The function emits unwind directives for the start of an epilogue. */
9577 static void
9578 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9580 /* If this isn't the last block of the function, then we need to label the
9581 current state, and copy it back in at the start of the next block. */
9583 if (!last_block)
9585 if (unwind)
9586 fprintf (asm_out_file, "\t.label_state %d\n",
9587 ++cfun->machine->state_num);
9588 need_copy_state = true;
9591 if (unwind)
9592 fprintf (asm_out_file, "\t.restore sp\n");
9593 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9594 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9595 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
9598 /* This function processes a SET pattern looking for specific patterns
9599 which result in emitting an assembly directive required for unwinding. */
9601 static int
9602 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
9604 rtx src = SET_SRC (pat);
9605 rtx dest = SET_DEST (pat);
9606 int src_regno, dest_regno;
9608 /* Look for the ALLOC insn. */
9609 if (GET_CODE (src) == UNSPEC_VOLATILE
9610 && XINT (src, 1) == UNSPECV_ALLOC
9611 && GET_CODE (dest) == REG)
9613 dest_regno = REGNO (dest);
9615 /* If this is the final destination for ar.pfs, then this must
9616 be the alloc in the prologue. */
9617 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
9619 if (unwind)
9620 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
9621 ia64_dbx_register_number (dest_regno));
9623 else
9625 /* This must be an alloc before a sibcall. We must drop the
9626 old frame info. The easiest way to drop the old frame
9627 info is to ensure we had a ".restore sp" directive
9628 followed by a new prologue. If the procedure doesn't
9629 have a memory-stack frame, we'll issue a dummy ".restore
9630 sp" now. */
9631 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
9632 /* If we haven't done process_epilogue () yet, do it now.  */
9633 process_epilogue (asm_out_file, insn, unwind, frame);
9634 if (unwind)
9635 fprintf (asm_out_file, "\t.prologue\n");
9637 return 1;
9640 /* Look for SP = .... */
9641 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
9643 if (GET_CODE (src) == PLUS)
9645 rtx op0 = XEXP (src, 0);
9646 rtx op1 = XEXP (src, 1);
9648 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9650 if (INTVAL (op1) < 0)
9652 gcc_assert (!frame_pointer_needed);
9653 if (unwind)
9654 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9655 -INTVAL (op1));
9656 ia64_dwarf2out_def_steady_cfa (insn, frame);
9658 else
9659 process_epilogue (asm_out_file, insn, unwind, frame);
9661 else
9663 gcc_assert (GET_CODE (src) == REG
9664 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
9665 process_epilogue (asm_out_file, insn, unwind, frame);
9668 return 1;
9671 /* Register move we need to look at. */
9672 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9674 src_regno = REGNO (src);
9675 dest_regno = REGNO (dest);
9677 switch (src_regno)
9679 case BR_REG (0):
9680 /* Saving return address pointer. */
9681 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9682 if (unwind)
9683 fprintf (asm_out_file, "\t.save rp, r%d\n",
9684 ia64_dbx_register_number (dest_regno));
9685 return 1;
9687 case PR_REG (0):
9688 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9689 if (unwind)
9690 fprintf (asm_out_file, "\t.save pr, r%d\n",
9691 ia64_dbx_register_number (dest_regno));
9692 return 1;
9694 case AR_UNAT_REGNUM:
9695 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9696 if (unwind)
9697 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9698 ia64_dbx_register_number (dest_regno));
9699 return 1;
9701 case AR_LC_REGNUM:
9702 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9703 if (unwind)
9704 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9705 ia64_dbx_register_number (dest_regno));
9706 return 1;
9708 case STACK_POINTER_REGNUM:
9709 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9710 && frame_pointer_needed);
9711 if (unwind)
9712 fprintf (asm_out_file, "\t.vframe r%d\n",
9713 ia64_dbx_register_number (dest_regno));
9714 ia64_dwarf2out_def_steady_cfa (insn, frame);
9715 return 1;
9717 default:
9718 /* Everything else should indicate being stored to memory. */
9719 gcc_unreachable ();
9723 /* Memory store we need to look at. */
9724 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9726 long off;
9727 rtx base;
9728 const char *saveop;
9730 if (GET_CODE (XEXP (dest, 0)) == REG)
9732 base = XEXP (dest, 0);
9733 off = 0;
9735 else
9737 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9738 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9739 base = XEXP (XEXP (dest, 0), 0);
9740 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9743 if (base == hard_frame_pointer_rtx)
9745 saveop = ".savepsp";
9746 off = - off;
9748 else
9750 gcc_assert (base == stack_pointer_rtx);
9751 saveop = ".savesp";
9754 src_regno = REGNO (src);
9755 switch (src_regno)
9757 case BR_REG (0):
9758 gcc_assert (!current_frame_info.r[reg_save_b0]);
9759 if (unwind)
9760 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9761 return 1;
9763 case PR_REG (0):
9764 gcc_assert (!current_frame_info.r[reg_save_pr]);
9765 if (unwind)
9766 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9767 return 1;
9769 case AR_LC_REGNUM:
9770 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9771 if (unwind)
9772 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9773 return 1;
9775 case AR_PFS_REGNUM:
9776 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9777 if (unwind)
9778 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9779 return 1;
9781 case AR_UNAT_REGNUM:
9782 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9783 if (unwind)
9784 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9785 return 1;
9787 case GR_REG (4):
9788 case GR_REG (5):
9789 case GR_REG (6):
9790 case GR_REG (7):
9791 if (unwind)
9792 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9793 1 << (src_regno - GR_REG (4)));
9794 return 1;
9796 case BR_REG (1):
9797 case BR_REG (2):
9798 case BR_REG (3):
9799 case BR_REG (4):
9800 case BR_REG (5):
9801 if (unwind)
9802 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9803 1 << (src_regno - BR_REG (1)));
9804 return 1;
9806 case FR_REG (2):
9807 case FR_REG (3):
9808 case FR_REG (4):
9809 case FR_REG (5):
9810 if (unwind)
9811 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9812 1 << (src_regno - FR_REG (2)));
9813 return 1;
9815 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9816 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9817 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9818 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9819 if (unwind)
9820 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9821 1 << (src_regno - FR_REG (12)));
9822 return 1;
9824 default:
9825 return 0;
9829 return 0;
9833 /* This function looks at a single insn and emits any directives
9834 required to unwind this insn. */
9835 void
9836 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9838 bool unwind = (flag_unwind_tables
9839 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9840 bool frame = dwarf2out_do_frame ();
9842 if (unwind || frame)
9844 rtx pat;
9846 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9848 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9850 /* Restore unwind state from immediately before the epilogue. */
9851 if (need_copy_state)
9853 if (unwind)
9855 fprintf (asm_out_file, "\t.body\n");
9856 fprintf (asm_out_file, "\t.copy_state %d\n",
9857 cfun->machine->state_num);
9859 if (IA64_CHANGE_CFA_IN_EPILOGUE)
9860 ia64_dwarf2out_def_steady_cfa (insn, frame);
9861 need_copy_state = false;
9865 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9866 return;
9868 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9869 if (pat)
9870 pat = XEXP (pat, 0);
9871 else
9872 pat = PATTERN (insn);
9874 switch (GET_CODE (pat))
9876 case SET:
9877 process_set (asm_out_file, pat, insn, unwind, frame);
9878 break;
9880 case PARALLEL:
9882 int par_index;
9883 int limit = XVECLEN (pat, 0);
9884 for (par_index = 0; par_index < limit; par_index++)
9886 rtx x = XVECEXP (pat, 0, par_index);
9887 if (GET_CODE (x) == SET)
9888 process_set (asm_out_file, x, insn, unwind, frame);
9890 break;
9893 default:
9894 gcc_unreachable ();
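/* Codes for the IA-64-specific builtins registered in ia64_init_builtins
   and expanded in ia64_expand_builtin below.  */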
9900 enum ia64_builtins
9902 IA64_BUILTIN_BSP,
9903 IA64_BUILTIN_COPYSIGNQ,
9904 IA64_BUILTIN_FABSQ,
9905 IA64_BUILTIN_FLUSHRS,
9906 IA64_BUILTIN_INFQ,
9907 IA64_BUILTIN_HUGE_VALQ
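/* Set up the target-specific __fpreg, __float80 and __float128 types and
   register the IA-64 builtin functions enumerated above.  */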
9910 void
9911 ia64_init_builtins (void)
9913 tree fpreg_type;
9914 tree float80_type;
9916 /* The __fpreg type. */
9917 fpreg_type = make_node (REAL_TYPE);
9918 TYPE_PRECISION (fpreg_type) = 82;
9919 layout_type (fpreg_type);
9920 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9922 /* The __float80 type. */
9923 float80_type = make_node (REAL_TYPE);
9924 TYPE_PRECISION (float80_type) = 80;
9925 layout_type (float80_type);
9926 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9928 /* The __float128 type. */
9929 if (!TARGET_HPUX)
9931 tree ftype, decl;
9932 tree float128_type = make_node (REAL_TYPE);
9934 TYPE_PRECISION (float128_type) = 128;
9935 layout_type (float128_type);
9936 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9938 /* TFmode support builtins. */
9939 ftype = build_function_type (float128_type, void_list_node);
9940 add_builtin_function ("__builtin_infq", ftype,
9941 IA64_BUILTIN_INFQ, BUILT_IN_MD,
9942 NULL, NULL_TREE);
9944 add_builtin_function ("__builtin_huge_valq", ftype,
9945 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
9946 NULL, NULL_TREE);
9948 ftype = build_function_type_list (float128_type,
9949 float128_type,
9950 NULL_TREE);
9951 decl = add_builtin_function ("__builtin_fabsq", ftype,
9952 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
9953 "__fabstf2", NULL_TREE);
9954 TREE_READONLY (decl) = 1;
9956 ftype = build_function_type_list (float128_type,
9957 float128_type,
9958 float128_type,
9959 NULL_TREE);
9960 decl = add_builtin_function ("__builtin_copysignq", ftype,
9961 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
9962 "__copysigntf3", NULL_TREE);
9963 TREE_READONLY (decl) = 1;
9965 else
9966 /* Under HPUX, this is a synonym for "long double". */
9967 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9968 "__float128");
9970 /* Fwrite on VMS is non-standard. */
9971 if (TARGET_ABI_OPEN_VMS)
9973 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
9974 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
9977 #define def_builtin(name, type, code) \
9978 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9979 NULL, NULL_TREE)
9981 def_builtin ("__builtin_ia64_bsp",
9982 build_function_type (ptr_type_node, void_list_node),
9983 IA64_BUILTIN_BSP);
9985 def_builtin ("__builtin_ia64_flushrs",
9986 build_function_type (void_type_node, void_list_node),
9987 IA64_BUILTIN_FLUSHRS);
9989 #undef def_builtin
9991 if (TARGET_HPUX)
9993 if (built_in_decls [BUILT_IN_FINITE])
9994 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9995 "_Isfinite");
9996 if (built_in_decls [BUILT_IN_FINITEF])
9997 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9998 "_Isfinitef");
9999 if (built_in_decls [BUILT_IN_FINITEL])
10000 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
10001 "_Isfinitef128");
10006 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10007 enum machine_mode mode ATTRIBUTE_UNUSED,
10008 int ignore ATTRIBUTE_UNUSED)
10010 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10011 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10013 switch (fcode)
10015 case IA64_BUILTIN_BSP:
10016 if (! target || ! register_operand (target, DImode))
10017 target = gen_reg_rtx (DImode);
10018 emit_insn (gen_bsp_value (target));
10019 #ifdef POINTERS_EXTEND_UNSIGNED
10020 target = convert_memory_address (ptr_mode, target);
10021 #endif
10022 return target;
10024 case IA64_BUILTIN_FLUSHRS:
10025 emit_insn (gen_flushrs ());
10026 return const0_rtx;
10028 case IA64_BUILTIN_INFQ:
10029 case IA64_BUILTIN_HUGE_VALQ:
10031 REAL_VALUE_TYPE inf;
10032 rtx tmp;
10034 real_inf (&inf);
10035 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
10037 tmp = validize_mem (force_const_mem (mode, tmp));
10039 if (target == 0)
10040 target = gen_reg_rtx (mode);
10042 emit_move_insn (target, tmp);
10043 return target;
10046 case IA64_BUILTIN_FABSQ:
10047 case IA64_BUILTIN_COPYSIGNQ:
10048 return expand_call (exp, target, ignore);
10050 default:
10051 gcc_unreachable ();
10054 return NULL_RTX;
10057 /* On HP-UX IA64, aggregate parameters are passed in the most
10058 significant bits of the stack slot. */
10060 enum direction
10061 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10063 /* Exception to normal case for structures/unions/etc. */
10065 if (type && AGGREGATE_TYPE_P (type)
10066 && int_size_in_bytes (type) < UNITS_PER_WORD)
10067 return upward;
10069 /* Fall back to the default. */
10070 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10073 /* Emit text to declare externally defined variables and functions, because
10074 the Intel assembler does not support undefined externals. */
10076 void
10077 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10079 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10080 set in order to avoid putting out names that are never really
10081 used. */
10082 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10084 /* maybe_assemble_visibility will return 1 if the assembler
10085 visibility directive is output. */
10086 int need_visibility = ((*targetm.binds_local_p) (decl)
10087 && maybe_assemble_visibility (decl));
10089 #ifdef DO_CRTL_NAMES
10090 DO_CRTL_NAMES;
10091 #endif
10093 /* GNU as does not need anything here, but the HP linker does
10094 need something for external functions. */
10095 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10096 && TREE_CODE (decl) == FUNCTION_DECL)
10097 (*targetm.asm_out.globalize_decl_name) (file, decl);
10098 else if (need_visibility && !TARGET_GNU_AS)
10099 (*targetm.asm_out.globalize_label) (file, name);
10103 /* Set SImode div/mod functions, since init_integral_libfuncs only
10104 initializes modes of word_mode and larger. Rename the TFmode libfuncs
10105 using the HPUX conventions. __divtf3 is used for XFmode; we need to
10106 keep it for backward compatibility. */
10108 static void
10109 ia64_init_libfuncs (void)
10111 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10112 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10113 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10114 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10116 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10117 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10118 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10119 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10120 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10122 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10123 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10124 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10125 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10126 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10127 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10129 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10130 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10131 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10132 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10133 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10135 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10136 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10137 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10138 /* HP-UX 11.23 libc does not have a function for unsigned
10139 SImode-to-TFmode conversion. */
10140 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10143 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10145 static void
10146 ia64_hpux_init_libfuncs (void)
10148 ia64_init_libfuncs ();
10150 /* The HP SI millicode division and mod functions expect DI arguments.
10151 By turning them off completely we avoid using both libgcc and the
10152 non-standard millicode routines and use the HP DI millicode routines
10153 instead. */
10155 set_optab_libfunc (sdiv_optab, SImode, 0);
10156 set_optab_libfunc (udiv_optab, SImode, 0);
10157 set_optab_libfunc (smod_optab, SImode, 0);
10158 set_optab_libfunc (umod_optab, SImode, 0);
10160 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10161 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10162 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10163 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10165 /* HP-UX libc has TF min/max/abs routines in it. */
10166 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10167 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10168 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10170 /* ia64_expand_compare uses this. */
10171 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10173 /* These should never be used. */
10174 set_optab_libfunc (eq_optab, TFmode, 0);
10175 set_optab_libfunc (ne_optab, TFmode, 0);
10176 set_optab_libfunc (gt_optab, TFmode, 0);
10177 set_optab_libfunc (ge_optab, TFmode, 0);
10178 set_optab_libfunc (lt_optab, TFmode, 0);
10179 set_optab_libfunc (le_optab, TFmode, 0);
10182 /* Rename the division and modulus functions in VMS. */
10184 static void
10185 ia64_vms_init_libfuncs (void)
10187 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10188 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10189 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10190 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10191 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10192 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10193 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10194 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10195 abort_libfunc = init_one_libfunc ("decc$abort");
10196 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10197 #ifdef MEM_LIBFUNCS_INIT
10198 MEM_LIBFUNCS_INIT;
10199 #endif
10202 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10203 the HPUX conventions. */
10205 static void
10206 ia64_sysv4_init_libfuncs (void)
10208 ia64_init_libfuncs ();
10210 /* These functions are not part of the HPUX TFmode interface. We
10211 use them instead of _U_Qfcmp, which doesn't work the way we
10212 expect. */
10213 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10214 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10215 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10216 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10217 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10218 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10220 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10221 glibc doesn't have them. */
10224 /* Use soft-fp. */
10226 static void
10227 ia64_soft_fp_init_libfuncs (void)
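/* On VMS, pointers may be either 32 bits (SImode) or 64 bits (DImode).  */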
10231 static bool
10232 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10234 return (mode == SImode || mode == DImode);
10237 /* For HPUX, it is illegal to have relocations in shared segments. */
10239 static int
10240 ia64_hpux_reloc_rw_mask (void)
10242 return 3;
10245 /* For others, relax this so that relocations to local data go in
10246 read-only segments, but we still cannot allow global relocations
10247 in read-only segments. */
10249 static int
10250 ia64_reloc_rw_mask (void)
10252 return flag_pic ? 3 : 2;
10255 /* Return the section to use for X. The only special thing we do here
10256 is to honor small data. */
10258 static section *
10259 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10260 unsigned HOST_WIDE_INT align)
10262 if (GET_MODE_SIZE (mode) > 0
10263 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10264 && !TARGET_NO_SDATA)
10265 return sdata_section;
10266 else
10267 return default_elf_select_rtx_section (mode, x, align);
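/* Compute section flags: mark the various small-data sections with
   SECTION_SMALL and, on VMS, common_object overlays with
   SECTION_VMS_OVERLAY, on top of the default flags.  */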
10270 static unsigned int
10271 ia64_section_type_flags (tree decl, const char *name, int reloc)
10273 unsigned int flags = 0;
10275 if (strcmp (name, ".sdata") == 0
10276 || strncmp (name, ".sdata.", 7) == 0
10277 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10278 || strncmp (name, ".sdata2.", 8) == 0
10279 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10280 || strcmp (name, ".sbss") == 0
10281 || strncmp (name, ".sbss.", 6) == 0
10282 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10283 flags = SECTION_SMALL;
10285 #if TARGET_ABI_OPEN_VMS
10286 if (decl && DECL_ATTRIBUTES (decl)
10287 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10288 flags |= SECTION_VMS_OVERLAY;
10289 #endif
10291 flags |= default_section_type_flags (decl, name, reloc);
10292 return flags;
10295 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10296 structure type and the address of that return value should be passed
10297 in out0, rather than in r8. */
10299 static bool
10300 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10302 tree ret_type = TREE_TYPE (fntype);
10304 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10305 as the structure return address parameter, if the return value
10306 type has a non-trivial copy constructor or destructor. It is not
10307 clear if this same convention should be used for other
10308 programming languages. Until G++ 3.4, we incorrectly used r8 for
10309 these return values. */
10310 return (abi_version_at_least (2)
10311 && ret_type
10312 && TYPE_MODE (ret_type) == BLKmode
10313 && TREE_ADDRESSABLE (ret_type)
10314 && strcmp (lang_hooks.name, "GNU C++") == 0);
10317 /* Output the assembler code for a thunk function. THUNK_DECL is the
10318 declaration for the thunk function itself, FUNCTION is the decl for
10319 the target function. DELTA is an immediate constant offset to be
10320 added to THIS. If VCALL_OFFSET is nonzero, the word at
10321 *(*this + vcall_offset) should be added to THIS. */
10323 static void
10324 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10325 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10326 tree function)
10328 rtx this_rtx, insn, funexp;
10329 unsigned int this_parmno;
10330 unsigned int this_regno;
10331 rtx delta_rtx;
10333 reload_completed = 1;
10334 epilogue_completed = 1;
10336 /* Set things up as ia64_expand_prologue might. */
10337 last_scratch_gr_reg = 15;
10339 memset (&current_frame_info, 0, sizeof (current_frame_info));
10340 current_frame_info.spill_cfa_off = -16;
10341 current_frame_info.n_input_regs = 1;
10342 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10344 /* Mark the end of the (empty) prologue. */
10345 emit_note (NOTE_INSN_PROLOGUE_END);
10347 /* Figure out whether "this" will be the first parameter (the
10348 typical case) or the second parameter (as happens when the
10349 virtual function returns certain class objects). */
10350 this_parmno
10351 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10352 ? 1 : 0);
10353 this_regno = IN_REG (this_parmno);
10354 if (!TARGET_REG_NAMES)
10355 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10357 this_rtx = gen_rtx_REG (Pmode, this_regno);
10359 /* Apply the constant offset, if required. */
10360 delta_rtx = GEN_INT (delta);
10361 if (TARGET_ILP32)
10363 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10364 REG_POINTER (tmp) = 1;
10365 if (delta && satisfies_constraint_I (delta_rtx))
10367 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10368 delta = 0;
10370 else
10371 emit_insn (gen_ptr_extend (this_rtx, tmp));
10373 if (delta)
10375 if (!satisfies_constraint_I (delta_rtx))
10377 rtx tmp = gen_rtx_REG (Pmode, 2);
10378 emit_move_insn (tmp, delta_rtx);
10379 delta_rtx = tmp;
10381 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10384 /* Apply the offset from the vtable, if required. */
10385 if (vcall_offset)
10387 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10388 rtx tmp = gen_rtx_REG (Pmode, 2);
10390 if (TARGET_ILP32)
10392 rtx t = gen_rtx_REG (ptr_mode, 2);
10393 REG_POINTER (t) = 1;
10394 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10395 if (satisfies_constraint_I (vcall_offset_rtx))
10397 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10398 vcall_offset = 0;
10400 else
10401 emit_insn (gen_ptr_extend (tmp, t));
10403 else
10404 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10406 if (vcall_offset)
10408 if (!satisfies_constraint_J (vcall_offset_rtx))
10410 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10411 emit_move_insn (tmp2, vcall_offset_rtx);
10412 vcall_offset_rtx = tmp2;
10414 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10417 if (TARGET_ILP32)
10418 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10419 else
10420 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10422 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10425 /* Generate a tail call to the target function. */
10426 if (! TREE_USED (function))
10428 assemble_external (function);
10429 TREE_USED (function) = 1;
10431 funexp = XEXP (DECL_RTL (function), 0);
10432 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10433 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10434 insn = get_last_insn ();
10435 SIBLING_CALL_P (insn) = 1;
10437 /* Code generation for calls relies on splitting. */
10438 reload_completed = 1;
10439 epilogue_completed = 1;
10440 try_split (PATTERN (insn), insn, 0);
10442 emit_barrier ();
10444 /* Run just enough of rest_of_compilation to get the insns emitted.
10445 There's not really enough bulk here to make other passes such as
10446 instruction scheduling worthwhile. Note that use_thunk calls
10447 assemble_start_function and assemble_end_function. */
10449 insn_locators_alloc ();
10450 emit_all_insn_group_barriers (NULL);
10451 insn = get_insns ();
10452 shorten_branches (insn);
10453 final_start_function (insn, file, 1);
10454 final (insn, file, 1);
10455 final_end_function ();
10457 reload_completed = 0;
10458 epilogue_completed = 0;
10461 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10463 static rtx
10464 ia64_struct_value_rtx (tree fntype,
10465 int incoming ATTRIBUTE_UNUSED)
10467 if (TARGET_ABI_OPEN_VMS ||
10468 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10469 return NULL_RTX;
10470 return gen_rtx_REG (Pmode, GR_REG (8));
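/* Return true if MODE is supported for scalar objects on IA-64.  */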
10473 static bool
10474 ia64_scalar_mode_supported_p (enum machine_mode mode)
10476 switch (mode)
10478 case QImode:
10479 case HImode:
10480 case SImode:
10481 case DImode:
10482 case TImode:
10483 return true;
10485 case SFmode:
10486 case DFmode:
10487 case XFmode:
10488 case RFmode:
10489 return true;
10491 case TFmode:
10492 return true;
10494 default:
10495 return false;
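/* Return true if MODE is supported as a vector mode on IA-64.  */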
10499 static bool
10500 ia64_vector_mode_supported_p (enum machine_mode mode)
10502 switch (mode)
10504 case V8QImode:
10505 case V4HImode:
10506 case V2SImode:
10507 return true;
10509 case V2SFmode:
10510 return true;
10512 default:
10513 return false;
10517 /* Implement the FUNCTION_PROFILER macro. */
10519 void
10520 ia64_output_function_profiler (FILE *file, int labelno)
10522 bool indirect_call;
10524 /* If the function needs a static chain and the static chain
10525 register is r15, we use an indirect call so as to bypass
10526 the PLT stub in case the executable is dynamically linked,
10527 because the stub clobbers r15 as per 5.3.6 of the psABI.
10528 We don't need to do that in non-canonical PIC mode. */
10530 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10532 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10533 indirect_call = true;
10535 else
10536 indirect_call = false;
10538 if (TARGET_GNU_AS)
10539 fputs ("\t.prologue 4, r40\n", file);
10540 else
10541 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10542 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10544 if (NO_PROFILE_COUNTERS)
10545 fputs ("\tmov out3 = r0\n", file);
10546 else
10548 char buf[20];
10549 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10551 if (TARGET_AUTO_PIC)
10552 fputs ("\tmovl out3 = @gprel(", file);
10553 else
10554 fputs ("\taddl out3 = @ltoff(", file);
10555 assemble_name (file, buf);
10556 if (TARGET_AUTO_PIC)
10557 fputs (")\n", file);
10558 else
10559 fputs ("), r1\n", file);
10562 if (indirect_call)
10563 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10564 fputs ("\t;;\n", file);
10566 fputs ("\t.save rp, r42\n", file);
10567 fputs ("\tmov out2 = b0\n", file);
10568 if (indirect_call)
10569 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10570 fputs ("\t.body\n", file);
10571 fputs ("\tmov out1 = r1\n", file);
10572 if (indirect_call)
10574 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10575 fputs ("\tmov b6 = r16\n", file);
10576 fputs ("\tld8 r1 = [r14]\n", file);
10577 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10579 else
10580 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10583 static GTY(()) rtx mcount_func_rtx;
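/* Return (and cache) an rtx for the _mcount profiling libfunc.  */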
10584 static rtx
10585 gen_mcount_func_rtx (void)
10587 if (!mcount_func_rtx)
10588 mcount_func_rtx = init_one_libfunc ("_mcount");
10589 return mcount_func_rtx;
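/* Emit rtl for a profiling call to _mcount, passing the caller's return
   address (b0), the current IP and the address of the counter label.  */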
10592 void
10593 ia64_profile_hook (int labelno)
10595 rtx label, ip;
10597 if (NO_PROFILE_COUNTERS)
10598 label = const0_rtx;
10599 else
10601 char buf[30];
10602 const char *label_name;
10603 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10604 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10605 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10606 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10608 ip = gen_reg_rtx (Pmode);
10609 emit_insn (gen_ip_value (ip));
10610 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10611 VOIDmode, 3,
10612 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10613 ip, Pmode,
10614 label, Pmode);
10617 /* Return the mangling of TYPE if it is an extended fundamental type. */
10619 static const char *
10620 ia64_mangle_type (const_tree type)
10622 type = TYPE_MAIN_VARIANT (type);
10624 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10625 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10626 return NULL;
10628 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10629 mangled as "e". */
10630 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10631 return "g";
10632 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10633 an extended mangling. Elsewhere, "e" is available since long
10634 double is 80 bits. */
10635 if (TYPE_MODE (type) == XFmode)
10636 return TARGET_HPUX ? "u9__float80" : "e";
10637 if (TYPE_MODE (type) == RFmode)
10638 return "u7__fpreg";
10639 return NULL;
10642 /* Return the diagnostic message string if conversion from FROMTYPE to
10643 TOTYPE is not allowed, NULL otherwise. */
10644 static const char *
10645 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10647 /* Reject nontrivial conversion to or from __fpreg. */
10648 if (TYPE_MODE (fromtype) == RFmode
10649 && TYPE_MODE (totype) != RFmode
10650 && TYPE_MODE (totype) != VOIDmode)
10651 return N_("invalid conversion from %<__fpreg%>");
10652 if (TYPE_MODE (totype) == RFmode
10653 && TYPE_MODE (fromtype) != RFmode)
10654 return N_("invalid conversion to %<__fpreg%>");
10655 return NULL;
10658 /* Return the diagnostic message string if the unary operation OP is
10659 not permitted on TYPE, NULL otherwise. */
10660 static const char *
10661 ia64_invalid_unary_op (int op, const_tree type)
10663 /* Reject operations on __fpreg other than unary + or &. */
10664 if (TYPE_MODE (type) == RFmode
10665 && op != CONVERT_EXPR
10666 && op != ADDR_EXPR)
10667 return N_("invalid operation on %<__fpreg%>");
10668 return NULL;
10671 /* Return the diagnostic message string if the binary operation OP is
10672 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10673 static const char *
10674 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10676 /* Reject operations on __fpreg. */
10677 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10678 return N_("invalid operation on %<__fpreg%>");
10679 return NULL;
10682 /* Implement overriding of the optimization options. */
10683 void
10684 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
10685 int size ATTRIBUTE_UNUSED)
10687 /* Let the scheduler form additional regions. */
10688 set_param_value ("max-sched-extend-regions-iters", 2);
10690 /* Set the default values for cache-related parameters. */
10691 set_param_value ("simultaneous-prefetches", 6);
10692 set_param_value ("l1-cache-line-size", 32);
10694 set_param_value ("sched-mem-true-dep-cost", 4);
10697 /* HP-UX version_id attribute.
10698 For object foo, if the version_id is set to 1234, put out an alias
10699 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
10700 other than an alias statement because it is an illegal symbol name. */
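/* For instance (illustrative only, not taken from an actual testcase),
   a declaration such as

     extern int foo __attribute__ ((version_id ("1234")));

   is expected to result in '.alias foo "foo{1234}"' in the output.  */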
10702 static tree
10703 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10704 tree name ATTRIBUTE_UNUSED,
10705 tree args,
10706 int flags ATTRIBUTE_UNUSED,
10707 bool *no_add_attrs)
10709 tree arg = TREE_VALUE (args);
10711 if (TREE_CODE (arg) != STRING_CST)
10713 error ("version attribute is not a string");
10714 *no_add_attrs = true;
10715 return NULL_TREE;
10717 return NULL_TREE;
10720 /* Target hook for c_mode_for_suffix. */
10722 static enum machine_mode
10723 ia64_c_mode_for_suffix (char suffix)
10725 if (suffix == 'q')
10726 return TFmode;
10727 if (suffix == 'w')
10728 return XFmode;
10730 return VOIDmode;
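/* With the hook above, a floating constant written with a 'q' suffix
   (e.g. 1.0q) is given TFmode/__float128, and one written with a 'w'
   suffix (e.g. 1.0w) is given XFmode/__float80.  */

/* Worker function for TARGET_PROMOTE_FUNCTION_MODE.  */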
10733 static enum machine_mode
10734 ia64_promote_function_mode (const_tree type,
10735 enum machine_mode mode,
10736 int *punsignedp,
10737 const_tree funtype,
10738 int for_return)
10740 /* Special processing required for OpenVMS ... */
10742 if (!TARGET_ABI_OPEN_VMS)
10743 return default_promote_function_mode (type, mode, punsignedp, funtype,
10744 for_return);
10746 /* HP OpenVMS Calling Standard dated June, 2004, that describes
10747 HP OpenVMS I64 Version 8.2EFT,
10748 chapter 4 "OpenVMS I64 Conventions"
10749 section 4.7 "Procedure Linkage"
10750 subsection 4.7.5.2, "Normal Register Parameters"
10752 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
10753 values passed in registers are zero-filled; signed integral values as
10754 well as unsigned 32-bit integral values are sign-extended to 64 bits.
10755 For all other types passed in the general registers, unused bits are
10756 undefined." */
10758 if (!AGGREGATE_TYPE_P (type)
10759 && GET_MODE_CLASS (mode) == MODE_INT
10760 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
10762 if (mode == SImode)
10763 *punsignedp = 0;
10764 return DImode;
10766 else
10767 return promote_mode (type, mode, punsignedp);
10770 static GTY(()) rtx ia64_dconst_0_5_rtx;
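/* Return a DFmode CONST_DOUBLE for 0.5, created on first use.  */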
10773 ia64_dconst_0_5 (void)
10775 if (! ia64_dconst_0_5_rtx)
10777 REAL_VALUE_TYPE rv;
10778 real_from_string (&rv, "0.5");
10779 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
10781 return ia64_dconst_0_5_rtx;
10784 static GTY(()) rtx ia64_dconst_0_375_rtx;
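/* Return a DFmode CONST_DOUBLE for 0.375, created on first use.  */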
10787 ia64_dconst_0_375 (void)
10789 if (! ia64_dconst_0_375_rtx)
10791 REAL_VALUE_TYPE rv;
10792 real_from_string (&rv, "0.375");
10793 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
10795 return ia64_dconst_0_375_rtx;
10799 #include "gt-ia64.h"