Remove LIBGCC2_HAS_?F_MODE target macros.
[official-gcc.git] / gcc / config / ia64 / ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "stor-layout.h"
30 #include "calls.h"
31 #include "varasm.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "function.h"
44 #include "ggc.h"
45 #include "basic-block.h"
46 #include "libfuncs.h"
47 #include "diagnostic-core.h"
48 #include "sched-int.h"
49 #include "timevar.h"
50 #include "target.h"
51 #include "target-def.h"
52 #include "common/common-target.h"
53 #include "tm_p.h"
54 #include "hash-table.h"
55 #include "langhooks.h"
56 #include "vec.h"
57 #include "basic-block.h"
58 #include "tree-ssa-alias.h"
59 #include "internal-fn.h"
60 #include "gimple-fold.h"
61 #include "tree-eh.h"
62 #include "gimple-expr.h"
63 #include "is-a.h"
64 #include "gimple.h"
65 #include "gimplify.h"
66 #include "intl.h"
67 #include "df.h"
68 #include "debug.h"
69 #include "params.h"
70 #include "dbgcnt.h"
71 #include "tm-constrs.h"
72 #include "sel-sched.h"
73 #include "reload.h"
74 #include "opts.h"
75 #include "dumpfile.h"
76 #include "builtins.h"
78 /* This is used for communication between ASM_OUTPUT_LABEL and
79 ASM_OUTPUT_LABELREF. */
80 int ia64_asm_output_label = 0;
82 /* Register names for ia64_expand_prologue. */
83 static const char * const ia64_reg_numbers[96] =
84 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
85 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
86 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
87 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
88 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
89 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
90 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
91 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
92 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
93 "r104","r105","r106","r107","r108","r109","r110","r111",
94 "r112","r113","r114","r115","r116","r117","r118","r119",
95 "r120","r121","r122","r123","r124","r125","r126","r127"};
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_input_reg_names[8] =
99 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
101 /* ??? These strings could be shared with REGISTER_NAMES. */
102 static const char * const ia64_local_reg_names[80] =
103 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
104 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
105 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
106 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
107 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
108 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
109 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
110 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
111 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
112 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
114 /* ??? These strings could be shared with REGISTER_NAMES. */
115 static const char * const ia64_output_reg_names[8] =
116 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
118 /* Variables which are this size or smaller are put in the sdata/sbss
119 sections. */
121 unsigned int ia64_section_threshold;
123 /* The following variable is used by the DFA insn scheduler. The value is
124 TRUE if we do insn bundling instead of insn scheduling. */
125 int bundling_p = 0;
127 enum ia64_frame_regs
129 reg_fp,
130 reg_save_b0,
131 reg_save_pr,
132 reg_save_ar_pfs,
133 reg_save_ar_unat,
134 reg_save_ar_lc,
135 reg_save_gp,
136 number_of_ia64_frame_regs
139 /* Structure to be filled in by ia64_compute_frame_size with register
140 save masks and offsets for the current function. */
142 struct ia64_frame_info
144 HOST_WIDE_INT total_size; /* size of the stack frame, not including
145 the caller's scratch area. */
146 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
147 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
148 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
149 HARD_REG_SET mask; /* mask of saved registers. */
150 unsigned int gr_used_mask; /* mask of registers in use as gr spill
151 registers or long-term scratches. */
152 int n_spilled; /* number of spilled registers. */
153 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
154 int n_input_regs; /* number of input registers used. */
155 int n_local_regs; /* number of local registers used. */
156 int n_output_regs; /* number of output registers used. */
157 int n_rotate_regs; /* number of rotating registers used. */
159 char need_regstk; /* true if a .regstk directive needed. */
160 char initialized; /* true if the data is finalized. */
163 /* Current frame information calculated by ia64_compute_frame_size. */
164 static struct ia64_frame_info current_frame_info;
165 /* The actual registers that are emitted. */
166 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
168 static int ia64_first_cycle_multipass_dfa_lookahead (void);
169 static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
170 static void ia64_init_dfa_pre_cycle_insn (void);
171 static rtx ia64_dfa_pre_cycle_insn (void);
172 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static void * ia64_alloc_sched_context (void);
176 static void ia64_init_sched_context (void *, bool);
177 static void ia64_set_sched_context (void *);
178 static void ia64_clear_sched_context (void *);
179 static void ia64_free_sched_context (void *);
180 static int ia64_mode_to_int (enum machine_mode);
181 static void ia64_set_sched_flags (spec_info_t);
182 static ds_t ia64_get_insn_spec_ds (rtx_insn *);
183 static ds_t ia64_get_insn_checked_ds (rtx_insn *);
184 static bool ia64_skip_rtx_p (const_rtx);
185 static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
186 static bool ia64_needs_block_p (ds_t);
187 static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
188 static int ia64_spec_check_p (rtx);
189 static int ia64_spec_check_src_p (rtx);
190 static rtx gen_tls_get_addr (void);
191 static rtx gen_thread_pointer (void);
192 static int find_gr_spill (enum ia64_frame_regs, int);
193 static int next_scratch_gr_reg (void);
194 static void mark_reg_gr_used_mask (rtx, void *);
195 static void ia64_compute_frame_size (HOST_WIDE_INT);
196 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
197 static void finish_spill_pointers (void);
198 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
199 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
200 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
201 static rtx gen_movdi_x (rtx, rtx, rtx);
202 static rtx gen_fr_spill_x (rtx, rtx, rtx);
203 static rtx gen_fr_restore_x (rtx, rtx, rtx);
205 static void ia64_option_override (void);
206 static bool ia64_can_eliminate (const int, const int);
207 static enum machine_mode hfa_element_mode (const_tree, bool);
208 static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
209 tree, int *, int);
210 static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
211 tree, bool);
212 static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
213 const_tree, bool, bool);
214 static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
215 const_tree, bool);
216 static rtx ia64_function_incoming_arg (cumulative_args_t,
217 enum machine_mode, const_tree, bool);
218 static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
219 const_tree, bool);
220 static unsigned int ia64_function_arg_boundary (enum machine_mode,
221 const_tree);
222 static bool ia64_function_ok_for_sibcall (tree, tree);
223 static bool ia64_return_in_memory (const_tree, const_tree);
224 static rtx ia64_function_value (const_tree, const_tree, bool);
225 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
226 static bool ia64_function_value_regno_p (const unsigned int);
227 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
228 reg_class_t);
229 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
230 bool);
231 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
232 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
233 static void fix_range (const char *);
234 static struct machine_function * ia64_init_machine_status (void);
235 static void emit_insn_group_barriers (FILE *);
236 static void emit_all_insn_group_barriers (FILE *);
237 static void final_emit_insn_group_barriers (FILE *);
238 static void emit_predicate_relation_info (void);
239 static void ia64_reorg (void);
240 static bool ia64_in_small_data_p (const_tree);
241 static void process_epilogue (FILE *, rtx, bool, bool);
243 static bool ia64_assemble_integer (rtx, unsigned int, int);
244 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
245 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
246 static void ia64_output_function_end_prologue (FILE *);
248 static void ia64_print_operand (FILE *, rtx, int);
249 static void ia64_print_operand_address (FILE *, rtx);
250 static bool ia64_print_operand_punct_valid_p (unsigned char code);
252 static int ia64_issue_rate (void);
253 static int ia64_adjust_cost_2 (rtx_insn *, int, rtx_insn *, int, dw_t);
254 static void ia64_sched_init (FILE *, int, int);
255 static void ia64_sched_init_global (FILE *, int, int);
256 static void ia64_sched_finish_global (FILE *, int);
257 static void ia64_sched_finish (FILE *, int);
258 static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
259 static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
260 static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
261 static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
263 static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
264 static void ia64_asm_emit_except_personality (rtx);
265 static void ia64_asm_init_sections (void);
267 static enum unwind_info_type ia64_debug_unwind_info (void);
269 static struct bundle_state *get_free_bundle_state (void);
270 static void free_bundle_state (struct bundle_state *);
271 static void initiate_bundle_states (void);
272 static void finish_bundle_states (void);
273 static int insert_bundle_state (struct bundle_state *);
274 static void initiate_bundle_state_table (void);
275 static void finish_bundle_state_table (void);
276 static int try_issue_nops (struct bundle_state *, int);
277 static int try_issue_insn (struct bundle_state *, rtx);
278 static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
279 int, int);
280 static int get_max_pos (state_t);
281 static int get_template (state_t, int);
283 static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
284 static bool important_for_bundling_p (rtx_insn *);
285 static bool unknown_for_bundling_p (rtx_insn *);
286 static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
288 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
289 HOST_WIDE_INT, tree);
290 static void ia64_file_start (void);
291 static void ia64_globalize_decl_name (FILE *, tree);
293 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
294 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
295 static section *ia64_select_rtx_section (enum machine_mode, rtx,
296 unsigned HOST_WIDE_INT);
297 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
298 ATTRIBUTE_UNUSED;
299 static unsigned int ia64_section_type_flags (tree, const char *, int);
300 static void ia64_init_libfuncs (void)
301 ATTRIBUTE_UNUSED;
302 static void ia64_hpux_init_libfuncs (void)
303 ATTRIBUTE_UNUSED;
304 static void ia64_sysv4_init_libfuncs (void)
305 ATTRIBUTE_UNUSED;
306 static void ia64_vms_init_libfuncs (void)
307 ATTRIBUTE_UNUSED;
308 static void ia64_soft_fp_init_libfuncs (void)
309 ATTRIBUTE_UNUSED;
310 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
311 ATTRIBUTE_UNUSED;
312 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
313 ATTRIBUTE_UNUSED;
315 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
316 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
317 static void ia64_encode_section_info (tree, rtx, int);
318 static rtx ia64_struct_value_rtx (tree, int);
319 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
320 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
321 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
322 static bool ia64_libgcc_floating_mode_supported_p (enum machine_mode mode);
323 static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
324 static bool ia64_legitimate_address_p (enum machine_mode, rtx, bool);
325 static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
326 static const char *ia64_mangle_type (const_tree);
327 static const char *ia64_invalid_conversion (const_tree, const_tree);
328 static const char *ia64_invalid_unary_op (int, const_tree);
329 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
330 static enum machine_mode ia64_c_mode_for_suffix (char);
331 static void ia64_trampoline_init (rtx, tree, rtx);
332 static void ia64_override_options_after_change (void);
333 static bool ia64_member_type_forces_blk (const_tree, enum machine_mode);
335 static tree ia64_builtin_decl (unsigned, bool);
337 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
338 static enum machine_mode ia64_get_reg_raw_mode (int regno);
339 static section * ia64_hpux_function_section (tree, enum node_frequency,
340 bool, bool);
342 static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
343 const unsigned char *sel);
345 #define MAX_VECT_LEN 8
347 struct expand_vec_perm_d
349 rtx target, op0, op1;
350 unsigned char perm[MAX_VECT_LEN];
351 enum machine_mode vmode;
352 unsigned char nelt;
353 bool one_operand_p;
354 bool testing_p;
357 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
360 /* Table of valid machine attributes. */
361 static const struct attribute_spec ia64_attribute_table[] =
363 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
364 affects_type_identity } */
365 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
366 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
367 false },
368 #if TARGET_ABI_OPEN_VMS
369 { "common_object", 1, 1, true, false, false,
370 ia64_vms_common_object_attribute, false },
371 #endif
372 { "version_id", 1, 1, true, false, false,
373 ia64_handle_version_id_attribute, false },
374 { NULL, 0, 0, false, false, false, NULL, false }
377 /* Initialize the GCC target structure. */
378 #undef TARGET_ATTRIBUTE_TABLE
379 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
381 #undef TARGET_INIT_BUILTINS
382 #define TARGET_INIT_BUILTINS ia64_init_builtins
384 #undef TARGET_EXPAND_BUILTIN
385 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
387 #undef TARGET_BUILTIN_DECL
388 #define TARGET_BUILTIN_DECL ia64_builtin_decl
390 #undef TARGET_ASM_BYTE_OP
391 #define TARGET_ASM_BYTE_OP "\tdata1\t"
392 #undef TARGET_ASM_ALIGNED_HI_OP
393 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
394 #undef TARGET_ASM_ALIGNED_SI_OP
395 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
396 #undef TARGET_ASM_ALIGNED_DI_OP
397 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
398 #undef TARGET_ASM_UNALIGNED_HI_OP
399 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
400 #undef TARGET_ASM_UNALIGNED_SI_OP
401 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
402 #undef TARGET_ASM_UNALIGNED_DI_OP
403 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
404 #undef TARGET_ASM_INTEGER
405 #define TARGET_ASM_INTEGER ia64_assemble_integer
407 #undef TARGET_OPTION_OVERRIDE
408 #define TARGET_OPTION_OVERRIDE ia64_option_override
410 #undef TARGET_ASM_FUNCTION_PROLOGUE
411 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
412 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
413 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
414 #undef TARGET_ASM_FUNCTION_EPILOGUE
415 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
417 #undef TARGET_PRINT_OPERAND
418 #define TARGET_PRINT_OPERAND ia64_print_operand
419 #undef TARGET_PRINT_OPERAND_ADDRESS
420 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
421 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
422 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
424 #undef TARGET_IN_SMALL_DATA_P
425 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
427 #undef TARGET_SCHED_ADJUST_COST_2
428 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
429 #undef TARGET_SCHED_ISSUE_RATE
430 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
431 #undef TARGET_SCHED_VARIABLE_ISSUE
432 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
433 #undef TARGET_SCHED_INIT
434 #define TARGET_SCHED_INIT ia64_sched_init
435 #undef TARGET_SCHED_FINISH
436 #define TARGET_SCHED_FINISH ia64_sched_finish
437 #undef TARGET_SCHED_INIT_GLOBAL
438 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
439 #undef TARGET_SCHED_FINISH_GLOBAL
440 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
441 #undef TARGET_SCHED_REORDER
442 #define TARGET_SCHED_REORDER ia64_sched_reorder
443 #undef TARGET_SCHED_REORDER2
444 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
446 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
447 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
449 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
450 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
452 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
453 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
454 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
455 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
457 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
458 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
459 ia64_first_cycle_multipass_dfa_lookahead_guard
461 #undef TARGET_SCHED_DFA_NEW_CYCLE
462 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
464 #undef TARGET_SCHED_H_I_D_EXTENDED
465 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
467 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
468 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
470 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
471 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
473 #undef TARGET_SCHED_SET_SCHED_CONTEXT
474 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
476 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
477 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
479 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
480 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
482 #undef TARGET_SCHED_SET_SCHED_FLAGS
483 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
485 #undef TARGET_SCHED_GET_INSN_SPEC_DS
486 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
488 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
489 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
491 #undef TARGET_SCHED_SPECULATE_INSN
492 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
494 #undef TARGET_SCHED_NEEDS_BLOCK_P
495 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
497 #undef TARGET_SCHED_GEN_SPEC_CHECK
498 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
500 #undef TARGET_SCHED_SKIP_RTX_P
501 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
503 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
504 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
505 #undef TARGET_ARG_PARTIAL_BYTES
506 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
507 #undef TARGET_FUNCTION_ARG
508 #define TARGET_FUNCTION_ARG ia64_function_arg
509 #undef TARGET_FUNCTION_INCOMING_ARG
510 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
511 #undef TARGET_FUNCTION_ARG_ADVANCE
512 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
513 #undef TARGET_FUNCTION_ARG_BOUNDARY
514 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
516 #undef TARGET_ASM_OUTPUT_MI_THUNK
517 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
518 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
519 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
521 #undef TARGET_ASM_FILE_START
522 #define TARGET_ASM_FILE_START ia64_file_start
524 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
525 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
527 #undef TARGET_REGISTER_MOVE_COST
528 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
529 #undef TARGET_MEMORY_MOVE_COST
530 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
531 #undef TARGET_RTX_COSTS
532 #define TARGET_RTX_COSTS ia64_rtx_costs
533 #undef TARGET_ADDRESS_COST
534 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
536 #undef TARGET_UNSPEC_MAY_TRAP_P
537 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
539 #undef TARGET_MACHINE_DEPENDENT_REORG
540 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
542 #undef TARGET_ENCODE_SECTION_INFO
543 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
545 #undef TARGET_SECTION_TYPE_FLAGS
546 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
548 #ifdef HAVE_AS_TLS
549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
551 #endif
553 /* ??? Investigate. */
554 #if 0
555 #undef TARGET_PROMOTE_PROTOTYPES
556 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
557 #endif
559 #undef TARGET_FUNCTION_VALUE
560 #define TARGET_FUNCTION_VALUE ia64_function_value
561 #undef TARGET_LIBCALL_VALUE
562 #define TARGET_LIBCALL_VALUE ia64_libcall_value
563 #undef TARGET_FUNCTION_VALUE_REGNO_P
564 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
566 #undef TARGET_STRUCT_VALUE_RTX
567 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
568 #undef TARGET_RETURN_IN_MEMORY
569 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
570 #undef TARGET_SETUP_INCOMING_VARARGS
571 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
572 #undef TARGET_STRICT_ARGUMENT_NAMING
573 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
574 #undef TARGET_MUST_PASS_IN_STACK
575 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
576 #undef TARGET_GET_RAW_RESULT_MODE
577 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
578 #undef TARGET_GET_RAW_ARG_MODE
579 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
581 #undef TARGET_MEMBER_TYPE_FORCES_BLK
582 #define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
584 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
585 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
587 #undef TARGET_ASM_UNWIND_EMIT
588 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
589 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
590 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
591 #undef TARGET_ASM_INIT_SECTIONS
592 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
594 #undef TARGET_DEBUG_UNWIND_INFO
595 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
597 #undef TARGET_SCALAR_MODE_SUPPORTED_P
598 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
599 #undef TARGET_VECTOR_MODE_SUPPORTED_P
600 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
602 #undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
603 #define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \
604 ia64_libgcc_floating_mode_supported_p
606 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
607 in an order different from the specified program order. */
608 #undef TARGET_RELAXED_ORDERING
609 #define TARGET_RELAXED_ORDERING true
611 #undef TARGET_LEGITIMATE_CONSTANT_P
612 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
613 #undef TARGET_LEGITIMATE_ADDRESS_P
614 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
616 #undef TARGET_CANNOT_FORCE_CONST_MEM
617 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
619 #undef TARGET_MANGLE_TYPE
620 #define TARGET_MANGLE_TYPE ia64_mangle_type
622 #undef TARGET_INVALID_CONVERSION
623 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
624 #undef TARGET_INVALID_UNARY_OP
625 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
626 #undef TARGET_INVALID_BINARY_OP
627 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
629 #undef TARGET_C_MODE_FOR_SUFFIX
630 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
632 #undef TARGET_CAN_ELIMINATE
633 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
635 #undef TARGET_TRAMPOLINE_INIT
636 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
638 #undef TARGET_CAN_USE_DOLOOP_P
639 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
640 #undef TARGET_INVALID_WITHIN_DOLOOP
641 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
643 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
644 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
646 #undef TARGET_PREFERRED_RELOAD_CLASS
647 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
649 #undef TARGET_DELAY_SCHED2
650 #define TARGET_DELAY_SCHED2 true
652 /* Variable tracking should be run after all optimizations which
653 change order of insns. It also needs a valid CFG. */
654 #undef TARGET_DELAY_VARTRACK
655 #define TARGET_DELAY_VARTRACK true
657 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
658 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
660 struct gcc_target targetm = TARGET_INITIALIZER;
662 typedef enum
664 ADDR_AREA_NORMAL, /* normal address area */
665 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
667 ia64_addr_area;
669 static GTY(()) tree small_ident1;
670 static GTY(()) tree small_ident2;
672 static void
673 init_idents (void)
675 if (small_ident1 == 0)
677 small_ident1 = get_identifier ("small");
678 small_ident2 = get_identifier ("__small__");
682 /* Retrieve the address area that has been chosen for the given decl. */
684 static ia64_addr_area
685 ia64_get_addr_area (tree decl)
687 tree model_attr;
689 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
690 if (model_attr)
692 tree id;
694 init_idents ();
695 id = TREE_VALUE (TREE_VALUE (model_attr));
696 if (id == small_ident1 || id == small_ident2)
697 return ADDR_AREA_SMALL;
699 return ADDR_AREA_NORMAL;
702 static tree
703 ia64_handle_model_attribute (tree *node, tree name, tree args,
704 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
706 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
707 ia64_addr_area area;
708 tree arg, decl = *node;
710 init_idents ();
711 arg = TREE_VALUE (args);
712 if (arg == small_ident1 || arg == small_ident2)
714 addr_area = ADDR_AREA_SMALL;
716 else
718 warning (OPT_Wattributes, "invalid argument of %qE attribute",
719 name);
720 *no_add_attrs = true;
723 switch (TREE_CODE (decl))
725 case VAR_DECL:
726 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
727 == FUNCTION_DECL)
728 && !TREE_STATIC (decl))
730 error_at (DECL_SOURCE_LOCATION (decl),
731 "an address area attribute cannot be specified for "
732 "local variables");
733 *no_add_attrs = true;
735 area = ia64_get_addr_area (decl);
736 if (area != ADDR_AREA_NORMAL && addr_area != area)
738 error ("address area of %q+D conflicts with previous "
739 "declaration", decl);
740 *no_add_attrs = true;
742 break;
744 case FUNCTION_DECL:
745 error_at (DECL_SOURCE_LOCATION (decl),
746 "address area attribute cannot be specified for "
747 "functions");
748 *no_add_attrs = true;
749 break;
751 default:
752 warning (OPT_Wattributes, "%qE attribute ignored",
753 name);
754 *no_add_attrs = true;
755 break;
758 return NULL_TREE;
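/* Usage sketch (illustrative, not part of the original file): the "model"
   attribute handled above is written in user code as below; "small" asks
   for the address area reachable by "addl" (within 2 MB).  The handler
   accepts "small"/"__small__", and rejects the attribute on automatic
   variables and on functions.  */
#if 0
static int counter __attribute__ ((model ("small")));  /* accepted */
void f (void) __attribute__ ((model ("small")));        /* error: not allowed for functions */
#endif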
761 /* Part of the low level implementation of DEC Ada pragma Common_Object which
762 enables the shared use of variables stored in overlaid linker areas
763 corresponding to the use of Fortran COMMON. */
765 static tree
766 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
767 int flags ATTRIBUTE_UNUSED,
768 bool *no_add_attrs)
770 tree decl = *node;
771 tree id;
773 gcc_assert (DECL_P (decl));
775 DECL_COMMON (decl) = 1;
776 id = TREE_VALUE (args);
777 if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
779 error ("%qE attribute requires a string constant argument", name);
780 *no_add_attrs = true;
781 return NULL_TREE;
783 return NULL_TREE;
786 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
788 void
789 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
790 unsigned HOST_WIDE_INT size,
791 unsigned int align)
793 tree attr = DECL_ATTRIBUTES (decl);
795 if (attr)
796 attr = lookup_attribute ("common_object", attr);
797 if (attr)
799 tree id = TREE_VALUE (TREE_VALUE (attr));
800 const char *name;
802 if (TREE_CODE (id) == IDENTIFIER_NODE)
803 name = IDENTIFIER_POINTER (id);
804 else if (TREE_CODE (id) == STRING_CST)
805 name = TREE_STRING_POINTER (id);
806 else
807 abort ();
809 fprintf (file, "\t.vms_common\t\"%s\",", name);
811 else
812 fprintf (file, "%s", COMMON_ASM_OP);
814 /* Code from elfos.h. */
815 assemble_name (file, name);
816 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u",
817 size, align / BITS_PER_UNIT);
819 fputc ('\n', file);
822 static void
823 ia64_encode_addr_area (tree decl, rtx symbol)
825 int flags;
827 flags = SYMBOL_REF_FLAGS (symbol);
828 switch (ia64_get_addr_area (decl))
830 case ADDR_AREA_NORMAL: break;
831 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
832 default: gcc_unreachable ();
834 SYMBOL_REF_FLAGS (symbol) = flags;
837 static void
838 ia64_encode_section_info (tree decl, rtx rtl, int first)
840 default_encode_section_info (decl, rtl, first);
842 /* Careful not to prod global register variables. */
843 if (TREE_CODE (decl) == VAR_DECL
844 && GET_CODE (DECL_RTL (decl)) == MEM
845 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
846 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
847 ia64_encode_addr_area (decl, XEXP (rtl, 0));
850 /* Return 1 if the operands of a move are ok. */
852 int
853 ia64_move_ok (rtx dst, rtx src)
855 /* If we're under init_recog_no_volatile, we'll not be able to use
856 memory_operand. So check the code directly and don't worry about
857 the validity of the underlying address, which should have been
858 checked elsewhere anyway. */
859 if (GET_CODE (dst) != MEM)
860 return 1;
861 if (GET_CODE (src) == MEM)
862 return 0;
863 if (register_operand (src, VOIDmode))
864 return 1;
866 /* Otherwise, this must be a constant, and at that either 0 or 0.0 or 1.0. */
867 if (INTEGRAL_MODE_P (GET_MODE (dst)))
868 return src == const0_rtx;
869 else
870 return satisfies_constraint_G (src);
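/* Illustrative summary of the checks above: any move whose destination is
   not memory is accepted; memory-to-memory moves are rejected; a store to
   memory is accepted only when the source is a register, integer zero, or
   a G-constraint float constant (0.0/1.0); anything else must first be
   forced into a register by the caller.  */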
873 /* Return 1 if the operands are ok for a floating point load pair. */
875 int
876 ia64_load_pair_ok (rtx dst, rtx src)
878 /* ??? There is a thinko in the implementation of the "x" constraint and the
879 FP_REGS class. The constraint will also reject (reg f30:TI) so we must
880 also return false for it. */
881 if (GET_CODE (dst) != REG
882 || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
883 return 0;
884 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
885 return 0;
886 switch (GET_CODE (XEXP (src, 0)))
888 case REG:
889 case POST_INC:
890 break;
891 case POST_DEC:
892 return 0;
893 case POST_MODIFY:
895 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
897 if (GET_CODE (adjust) != CONST_INT
898 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
899 return 0;
901 break;
902 default:
903 abort ();
905 return 1;
908 int
909 addp4_optimize_ok (rtx op1, rtx op2)
911 return (basereg_operand (op1, GET_MODE(op1)) !=
912 basereg_operand (op2, GET_MODE(op2)));
915 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
916 Return the length of the field, or <= 0 on failure. */
918 int
919 ia64_depz_field_mask (rtx rop, rtx rshift)
921 unsigned HOST_WIDE_INT op = INTVAL (rop);
922 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
924 /* Get rid of the zero bits we're shifting in. */
925 op >>= shift;
927 /* We must now have a solid block of 1's at bit 0. */
928 return exact_log2 (op + 1);
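/* Worked example (illustrative): for rop = 0xff00 and rshift = 8 the mask
   shifted down is 0xff and exact_log2 (0xff + 1) == 8, i.e. an 8-bit
   dep.z field.  A non-contiguous mask such as rop = 0xf0f0 with rshift = 4
   leaves 0xf0f, and exact_log2 (0xf10) == -1, so the combination is
   rejected.  */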
931 /* Return the TLS model to use for ADDR. */
933 static enum tls_model
934 tls_symbolic_operand_type (rtx addr)
936 enum tls_model tls_kind = TLS_MODEL_NONE;
938 if (GET_CODE (addr) == CONST)
940 if (GET_CODE (XEXP (addr, 0)) == PLUS
941 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
942 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
944 else if (GET_CODE (addr) == SYMBOL_REF)
945 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
947 return tls_kind;
950 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
951 as a base register. */
953 static inline bool
954 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
956 if (strict
957 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
958 return true;
959 else if (!strict
960 && (GENERAL_REGNO_P (REGNO (reg))
961 || !HARD_REGISTER_P (reg)))
962 return true;
963 else
964 return false;
967 static bool
968 ia64_legitimate_address_reg (const_rtx reg, bool strict)
970 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
971 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
972 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
973 return true;
975 return false;
978 static bool
979 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
981 if (GET_CODE (disp) == PLUS
982 && rtx_equal_p (reg, XEXP (disp, 0))
983 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
984 || (CONST_INT_P (XEXP (disp, 1))
985 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
986 return true;
988 return false;
991 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
993 static bool
994 ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
995 rtx x, bool strict)
997 if (ia64_legitimate_address_reg (x, strict))
998 return true;
999 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
1000 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1001 && XEXP (x, 0) != arg_pointer_rtx)
1002 return true;
1003 else if (GET_CODE (x) == POST_MODIFY
1004 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1005 && XEXP (x, 0) != arg_pointer_rtx
1006 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
1007 return true;
1008 else
1009 return false;
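/* Illustrative examples of addresses accepted above: a bare base register
   (reg r14); (post_inc (reg r14)) or (post_dec (reg r14)); and
   (post_modify (reg r14) (plus (reg r14) X)) where X is another valid
   base register or a constant in the signed 9-bit range [-256, 255].
   Plain base+displacement and indexed forms are not legitimate ia64
   addresses.  The register r14 here is only an example.  */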
1012 /* Return true if X is a constant that is valid for some immediate
1013 field in an instruction. */
1015 static bool
1016 ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
1018 switch (GET_CODE (x))
1020 case CONST_INT:
1021 case LABEL_REF:
1022 return true;
1024 case CONST_DOUBLE:
1025 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1026 return true;
1027 return satisfies_constraint_G (x);
1029 case CONST:
1030 case SYMBOL_REF:
1031 /* ??? Short term workaround for PR 28490. We must make the code here
1032 match the code in ia64_expand_move and move_operand, even though they
1033 are both technically wrong. */
1034 if (tls_symbolic_operand_type (x) == 0)
1036 HOST_WIDE_INT addend = 0;
1037 rtx op = x;
1039 if (GET_CODE (op) == CONST
1040 && GET_CODE (XEXP (op, 0)) == PLUS
1041 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1043 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1044 op = XEXP (XEXP (op, 0), 0);
1047 if (any_offset_symbol_operand (op, mode)
1048 || function_operand (op, mode))
1049 return true;
1050 if (aligned_offset_symbol_operand (op, mode))
1051 return (addend & 0x3fff) == 0;
1052 return false;
1054 return false;
1056 case CONST_VECTOR:
1057 if (mode == V2SFmode)
1058 return satisfies_constraint_Y (x);
1060 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1061 && GET_MODE_SIZE (mode) <= 8);
1063 default:
1064 return false;
1068 /* Don't allow TLS addresses to get spilled to memory. */
1070 static bool
1071 ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
1073 if (mode == RFmode)
1074 return true;
1075 return tls_symbolic_operand_type (x) != 0;
1078 /* Expand a symbolic constant load. */
1080 bool
1081 ia64_expand_load_address (rtx dest, rtx src)
1083 gcc_assert (GET_CODE (dest) == REG);
1085 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1086 having to pointer-extend the value afterward. Other forms of address
1087 computation below are also more natural to compute as 64-bit quantities.
1088 If we've been given an SImode destination register, change it. */
1089 if (GET_MODE (dest) != Pmode)
1090 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1091 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1093 if (TARGET_NO_PIC)
1094 return false;
1095 if (small_addr_symbolic_operand (src, VOIDmode))
1096 return false;
1098 if (TARGET_AUTO_PIC)
1099 emit_insn (gen_load_gprel64 (dest, src));
1100 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1101 emit_insn (gen_load_fptr (dest, src));
1102 else if (sdata_symbolic_operand (src, VOIDmode))
1103 emit_insn (gen_load_gprel (dest, src));
1104 else
1106 HOST_WIDE_INT addend = 0;
1107 rtx tmp;
1109 /* We did split constant offsets in ia64_expand_move, and we did try
1110 to keep them split in move_operand, but we also allowed reload to
1111 rematerialize arbitrary constants rather than spill the value to
1112 the stack and reload it. So we have to be prepared here to split
1113 them apart again. */
1114 if (GET_CODE (src) == CONST)
1116 HOST_WIDE_INT hi, lo;
1118 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1119 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1120 hi = hi - lo;
1122 if (lo != 0)
1124 addend = lo;
1125 src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
1129 tmp = gen_rtx_HIGH (Pmode, src);
1130 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1131 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1133 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1134 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1136 if (addend)
1138 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1139 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1143 return true;
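/* Worked example (illustrative) of the offset split above: a CONST offset
   of 0x5000 gives lo = ((0x5000 & 0x3fff) ^ 0x2000) - 0x2000 = 0x1000 and
   hi = 0x4000; sym+0x4000 goes through the HIGH/LO_SUM sequence and the
   remaining 0x1000 is added at the end.  The xor/subtract trick sign
   extends the low 14 bits, so an offset of 0x3000 splits as hi = 0x4000,
   lo = -0x1000.  */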
1146 static GTY(()) rtx gen_tls_tga;
1147 static rtx
1148 gen_tls_get_addr (void)
1150 if (!gen_tls_tga)
1151 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1152 return gen_tls_tga;
1155 static GTY(()) rtx thread_pointer_rtx;
1156 static rtx
1157 gen_thread_pointer (void)
1159 if (!thread_pointer_rtx)
1160 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1161 return thread_pointer_rtx;
1164 static rtx
1165 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1166 rtx orig_op1, HOST_WIDE_INT addend)
1168 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
1169 rtx_insn *insns;
1170 rtx orig_op0 = op0;
1171 HOST_WIDE_INT addend_lo, addend_hi;
1173 switch (tls_kind)
1175 case TLS_MODEL_GLOBAL_DYNAMIC:
1176 start_sequence ();
1178 tga_op1 = gen_reg_rtx (Pmode);
1179 emit_insn (gen_load_dtpmod (tga_op1, op1));
1181 tga_op2 = gen_reg_rtx (Pmode);
1182 emit_insn (gen_load_dtprel (tga_op2, op1));
1184 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1185 LCT_CONST, Pmode, 2, tga_op1,
1186 Pmode, tga_op2, Pmode);
1188 insns = get_insns ();
1189 end_sequence ();
1191 if (GET_MODE (op0) != Pmode)
1192 op0 = tga_ret;
1193 emit_libcall_block (insns, op0, tga_ret, op1);
1194 break;
1196 case TLS_MODEL_LOCAL_DYNAMIC:
1197 /* ??? This isn't the completely proper way to do local-dynamic.
1198 If the call to __tls_get_addr is used only by a single symbol,
1199 then we should (somehow) move the dtprel to the second arg
1200 to avoid the extra add. */
1201 start_sequence ();
1203 tga_op1 = gen_reg_rtx (Pmode);
1204 emit_insn (gen_load_dtpmod (tga_op1, op1));
1206 tga_op2 = const0_rtx;
1208 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1209 LCT_CONST, Pmode, 2, tga_op1,
1210 Pmode, tga_op2, Pmode);
1212 insns = get_insns ();
1213 end_sequence ();
1215 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1216 UNSPEC_LD_BASE);
1217 tmp = gen_reg_rtx (Pmode);
1218 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1220 if (!register_operand (op0, Pmode))
1221 op0 = gen_reg_rtx (Pmode);
1222 if (TARGET_TLS64)
1224 emit_insn (gen_load_dtprel (op0, op1));
1225 emit_insn (gen_adddi3 (op0, tmp, op0));
1227 else
1228 emit_insn (gen_add_dtprel (op0, op1, tmp));
1229 break;
1231 case TLS_MODEL_INITIAL_EXEC:
1232 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1233 addend_hi = addend - addend_lo;
1235 op1 = plus_constant (Pmode, op1, addend_hi);
1236 addend = addend_lo;
1238 tmp = gen_reg_rtx (Pmode);
1239 emit_insn (gen_load_tprel (tmp, op1));
1241 if (!register_operand (op0, Pmode))
1242 op0 = gen_reg_rtx (Pmode);
1243 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1244 break;
1246 case TLS_MODEL_LOCAL_EXEC:
1247 if (!register_operand (op0, Pmode))
1248 op0 = gen_reg_rtx (Pmode);
1250 op1 = orig_op1;
1251 addend = 0;
1252 if (TARGET_TLS64)
1254 emit_insn (gen_load_tprel (op0, op1));
1255 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1257 else
1258 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1259 break;
1261 default:
1262 gcc_unreachable ();
1265 if (addend)
1266 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1267 orig_op0, 1, OPTAB_DIRECT);
1268 if (orig_op0 == op0)
1269 return NULL_RTX;
1270 if (GET_MODE (orig_op0) == Pmode)
1271 return op0;
1272 return gen_lowpart (GET_MODE (orig_op0), op0);
1275 rtx
1276 ia64_expand_move (rtx op0, rtx op1)
1278 enum machine_mode mode = GET_MODE (op0);
1280 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1281 op1 = force_reg (mode, op1);
1283 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1285 HOST_WIDE_INT addend = 0;
1286 enum tls_model tls_kind;
1287 rtx sym = op1;
1289 if (GET_CODE (op1) == CONST
1290 && GET_CODE (XEXP (op1, 0)) == PLUS
1291 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1293 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1294 sym = XEXP (XEXP (op1, 0), 0);
1297 tls_kind = tls_symbolic_operand_type (sym);
1298 if (tls_kind)
1299 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1301 if (any_offset_symbol_operand (sym, mode))
1302 addend = 0;
1303 else if (aligned_offset_symbol_operand (sym, mode))
1305 HOST_WIDE_INT addend_lo, addend_hi;
1307 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1308 addend_hi = addend - addend_lo;
1310 if (addend_lo != 0)
1312 op1 = plus_constant (mode, sym, addend_hi);
1313 addend = addend_lo;
1315 else
1316 addend = 0;
1318 else
1319 op1 = sym;
1321 if (reload_completed)
1323 /* We really should have taken care of this offset earlier. */
1324 gcc_assert (addend == 0);
1325 if (ia64_expand_load_address (op0, op1))
1326 return NULL_RTX;
1329 if (addend)
1331 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1333 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1335 op1 = expand_simple_binop (mode, PLUS, subtarget,
1336 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1337 if (op0 == op1)
1338 return NULL_RTX;
1342 return op1;
1345 /* Split a move from OP1 to OP0 conditional on COND. */
1347 void
1348 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1350 rtx_insn *insn, *first = get_last_insn ();
1352 emit_move_insn (op0, op1);
1354 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1355 if (INSN_P (insn))
1356 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1357 PATTERN (insn));
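/* Illustrative sketch: every insn emitted for the move is rewritten in
   place as a conditional execution, e.g. with COND = (ne (reg p6)
   (const_int 0)) a pattern (set (reg r14) (reg r15)) becomes
   (cond_exec (ne (reg p6) (const_int 0)) (set (reg r14) (reg r15))).
   The registers p6, r14 and r15 here are only examples.  */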
1360 /* Split a post-reload TImode or TFmode reference into two DImode
1361 components. This is made extra difficult by the fact that we do
1362 not get any scratch registers to work with, because reload cannot
1363 be prevented from giving us a scratch that overlaps the register
1364 pair involved. So instead, when addressing memory, we tweak the
1365 pointer register up and back down with POST_INCs. Or up and not
1366 back down when we can get away with it.
1368 REVERSED is true when the loads must be done in reversed order
1369 (high word first) for correctness. DEAD is true when the pointer
1370 dies with the second insn we generate and therefore the second
1371 address must not carry a postmodify.
1373 May return an insn which is to be emitted after the moves. */
1375 static rtx
1376 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1378 rtx fixup = 0;
1380 switch (GET_CODE (in))
1382 case REG:
1383 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1384 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1385 break;
1387 case CONST_INT:
1388 case CONST_DOUBLE:
1389 /* Cannot occur reversed. */
1390 gcc_assert (!reversed);
1392 if (GET_MODE (in) != TFmode)
1393 split_double (in, &out[0], &out[1]);
1394 else
1395 /* split_double does not understand how to split a TFmode
1396 quantity into a pair of DImode constants. */
1398 REAL_VALUE_TYPE r;
1399 unsigned HOST_WIDE_INT p[2];
1400 long l[4]; /* TFmode is 128 bits */
1402 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1403 real_to_target (l, &r, TFmode);
1405 if (FLOAT_WORDS_BIG_ENDIAN)
1407 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1408 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1410 else
1412 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1413 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1415 out[0] = GEN_INT (p[0]);
1416 out[1] = GEN_INT (p[1]);
1418 break;
1420 case MEM:
1422 rtx base = XEXP (in, 0);
1423 rtx offset;
1425 switch (GET_CODE (base))
1427 case REG:
1428 if (!reversed)
1430 out[0] = adjust_automodify_address
1431 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1432 out[1] = adjust_automodify_address
1433 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1435 else
1437 /* Reversal requires a pre-increment, which can only
1438 be done as a separate insn. */
1439 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1440 out[0] = adjust_automodify_address
1441 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1442 out[1] = adjust_address (in, DImode, 0);
1444 break;
1446 case POST_INC:
1447 gcc_assert (!reversed && !dead);
1449 /* Just do the increment in two steps. */
1450 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1451 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1452 break;
1454 case POST_DEC:
1455 gcc_assert (!reversed && !dead);
1457 /* Add 8, subtract 24. */
1458 base = XEXP (base, 0);
1459 out[0] = adjust_automodify_address
1460 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1461 out[1] = adjust_automodify_address
1462 (in, DImode,
1463 gen_rtx_POST_MODIFY (Pmode, base,
1464 plus_constant (Pmode, base, -24)),
1466 break;
1468 case POST_MODIFY:
1469 gcc_assert (!reversed && !dead);
1471 /* Extract and adjust the modification. This case is
1472 trickier than the others, because we might have an
1473 index register, or we might have a combined offset that
1474 doesn't fit a signed 9-bit displacement field. We can
1475 assume the incoming expression is already legitimate. */
1476 offset = XEXP (base, 1);
1477 base = XEXP (base, 0);
1479 out[0] = adjust_automodify_address
1480 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1482 if (GET_CODE (XEXP (offset, 1)) == REG)
1484 /* Can't adjust the postmodify to match. Emit the
1485 original, then a separate addition insn. */
1486 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1487 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1489 else
1491 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1492 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1494 /* Again the postmodify cannot be made to match,
1495 but in this case it's more efficient to get rid
1496 of the postmodify entirely and fix up with an
1497 add insn. */
1498 out[1] = adjust_automodify_address (in, DImode, base, 8);
1499 fixup = gen_adddi3
1500 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1502 else
1504 /* Combined offset still fits in the displacement field.
1505 (We cannot overflow it at the high end.) */
1506 out[1] = adjust_automodify_address
1507 (in, DImode, gen_rtx_POST_MODIFY
1508 (Pmode, base, gen_rtx_PLUS
1509 (Pmode, base,
1510 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1514 break;
1516 default:
1517 gcc_unreachable ();
1519 break;
1522 default:
1523 gcc_unreachable ();
1526 return fixup;
1529 /* Split a TImode or TFmode move instruction after reload.
1530 This is used by *movtf_internal and *movti_internal. */
1531 void
1532 ia64_split_tmode_move (rtx operands[])
1534 rtx in[2], out[2], insn;
1535 rtx fixup[2];
1536 bool dead = false;
1537 bool reversed = false;
1539 /* It is possible for reload to decide to overwrite a pointer with
1540 the value it points to. In that case we have to do the loads in
1541 the appropriate order so that the pointer is not destroyed too
1542 early. Also we must not generate a postmodify for that second
1543 load, or rws_access_regno will die. And we must not generate a
1544 postmodify for the second load if the destination register
1545 overlaps with the base register. */
1546 if (GET_CODE (operands[1]) == MEM
1547 && reg_overlap_mentioned_p (operands[0], operands[1]))
1549 rtx base = XEXP (operands[1], 0);
1550 while (GET_CODE (base) != REG)
1551 base = XEXP (base, 0);
1553 if (REGNO (base) == REGNO (operands[0]))
1554 reversed = true;
1556 if (refers_to_regno_p (REGNO (operands[0]),
1557 REGNO (operands[0])+2,
1558 base, 0))
1559 dead = true;
1561 /* Another reason to do the moves in reversed order is if the first
1562 element of the target register pair is also the second element of
1563 the source register pair. */
1564 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1565 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1566 reversed = true;
1568 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1569 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1571 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1572 if (GET_CODE (EXP) == MEM \
1573 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1574 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1575 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1576 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1578 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1579 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1580 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1582 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1583 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1584 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1586 if (fixup[0])
1587 emit_insn (fixup[0]);
1588 if (fixup[1])
1589 emit_insn (fixup[1]);
1591 #undef MAYBE_ADD_REG_INC_NOTE
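/* Illustrative sketch: a post-reload copy such as
   (set (reg:TI r14) (mem:TI (reg:DI r2))) is emitted by the code above as
   two DImode moves, the first loading r14 through (post_inc (reg r2)) and
   the second loading r15 through (post_dec (reg r2)) so that r2 ends up
   unchanged; when the pointer dies with the second load the POST_DEC is
   omitted, and REG_INC notes are attached for any autoincrements.  The
   register numbers are only examples.  */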
1594 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1595 through memory plus an extra GR scratch register. Except that you can
1596 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1597 SECONDARY_RELOAD_CLASS, but not both.
1599 We got into problems in the first place by allowing a construct like
1600 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1601 This solution attempts to prevent this situation from occurring. When
1602 we see something like the above, we spill the inner register to memory. */
1604 static rtx
1605 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1607 if (GET_CODE (in) == SUBREG
1608 && GET_MODE (SUBREG_REG (in)) == TImode
1609 && GET_CODE (SUBREG_REG (in)) == REG)
1611 rtx memt = assign_stack_temp (TImode, 16);
1612 emit_move_insn (memt, SUBREG_REG (in));
1613 return adjust_address (memt, mode, 0);
1615 else if (force && GET_CODE (in) == REG)
1617 rtx memx = assign_stack_temp (mode, 16);
1618 emit_move_insn (memx, in);
1619 return memx;
1621 else
1622 return in;
1625 /* Expand the movxf or movrf pattern (MODE says which) with the given
1626 OPERANDS, returning true if the pattern should then invoke
1627 DONE. */
1629 bool
1630 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1632 rtx op0 = operands[0];
1634 if (GET_CODE (op0) == SUBREG)
1635 op0 = SUBREG_REG (op0);
1637 /* We must support XFmode loads into general registers for stdarg/vararg,
1638 unprototyped calls, and a rare case where a long double is passed as
1639 an argument after a float HFA fills the FP registers. We split them into
1640 DImode loads for convenience. We also need to support XFmode stores
1641 for the last case. This case does not happen for stdarg/vararg routines,
1642 because we do a block store to memory of unnamed arguments. */
1644 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1646 rtx out[2];
1648 /* We're hoping to transform everything that deals with XFmode
1649 quantities and GR registers early in the compiler. */
1650 gcc_assert (can_create_pseudo_p ());
1652 /* Struct to register can just use TImode instead. */
1653 if ((GET_CODE (operands[1]) == SUBREG
1654 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1655 || (GET_CODE (operands[1]) == REG
1656 && GR_REGNO_P (REGNO (operands[1]))))
1658 rtx op1 = operands[1];
1660 if (GET_CODE (op1) == SUBREG)
1661 op1 = SUBREG_REG (op1);
1662 else
1663 op1 = gen_rtx_REG (TImode, REGNO (op1));
1665 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1666 return true;
1669 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1671 /* Don't word-swap when reading in the constant. */
1672 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1673 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1674 0, mode));
1675 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1676 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1677 0, mode));
1678 return true;
1681 /* If the quantity is in a register not known to be GR, spill it. */
1682 if (register_operand (operands[1], mode))
1683 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1685 gcc_assert (GET_CODE (operands[1]) == MEM);
1687 /* Don't word-swap when reading in the value. */
1688 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1689 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1691 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1692 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1693 return true;
1696 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1698 /* We're hoping to transform everything that deals with XFmode
1699 quantities and GR registers early in the compiler. */
1700 gcc_assert (can_create_pseudo_p ());
1702 /* Op0 can't be a GR_REG here, as that case is handled above.
1703 If op0 is a register, then we spill op1, so that we now have a
1704 MEM operand. This requires creating an XFmode subreg of a TImode reg
1705 to force the spill. */
1706 if (register_operand (operands[0], mode))
1708 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1709 op1 = gen_rtx_SUBREG (mode, op1, 0);
1710 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1713 else
1715 rtx in[2];
1717 gcc_assert (GET_CODE (operands[0]) == MEM);
1719 /* Don't word-swap when writing out the value. */
1720 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1721 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1723 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1724 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1725 return true;
1729 if (!reload_in_progress && !reload_completed)
1731 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1733 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1735 rtx memt, memx, in = operands[1];
1736 if (CONSTANT_P (in))
1737 in = validize_mem (force_const_mem (mode, in));
1738 if (GET_CODE (in) == MEM)
1739 memt = adjust_address (in, TImode, 0);
1740 else
1742 memt = assign_stack_temp (TImode, 16);
1743 memx = adjust_address (memt, mode, 0);
1744 emit_move_insn (memx, in);
1746 emit_move_insn (op0, memt);
1747 return true;
1750 if (!ia64_move_ok (operands[0], operands[1]))
1751 operands[1] = force_reg (mode, operands[1]);
1754 return false;
1757 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1758 with the expression that holds the compare result (in VOIDmode). */
1760 static GTY(()) rtx cmptf_libfunc;
1762 void
1763 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1765 enum rtx_code code = GET_CODE (*expr);
1766 rtx cmp;
1768 /* If we have a BImode input, then we already have a compare result, and
1769 do not need to emit another comparison. */
1770 if (GET_MODE (*op0) == BImode)
1772 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1773 cmp = *op0;
1775 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1776    magic number as its third argument that indicates what to do.
1777 The return value is an integer to be compared against zero. */
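  /* For example, a signed "a <= b" maps to magic = QCMP_LT|QCMP_EQ|QCMP_INV
     (8 + 4 + 1 = 13) with ncode = NE, i.e. the result of
     _U_Qfcmp (a, b, 13) is tested for being nonzero.  */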
1778 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1780 enum qfcmp_magic {
1781 QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
1782 QCMP_UNORD = 2,
1783 QCMP_EQ = 4,
1784 QCMP_LT = 8,
1785 QCMP_GT = 16
1787 int magic;
1788 enum rtx_code ncode;
1789 rtx ret, insns;
1791 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1792 switch (code)
1794 /* 1 = equal, 0 = not equal. Equality operators do
1795 not raise FP_INVALID when given a NaN operand. */
1796 case EQ: magic = QCMP_EQ; ncode = NE; break;
1797 case NE: magic = QCMP_EQ; ncode = EQ; break;
1798 /* isunordered() from C99. */
1799 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1800 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1801 /* Relational operators raise FP_INVALID when given
1802 a NaN operand. */
1803 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1804 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1805 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1806 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1807 /* Unordered relational operators do not raise FP_INVALID
1808 when given a NaN operand. */
1809 case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
1810 case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1811 case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
1812 case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1813 /* Not supported. */
1814 case UNEQ:
1815 case LTGT:
1816 default: gcc_unreachable ();
1819 start_sequence ();
1821 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1822 *op0, TFmode, *op1, TFmode,
1823 GEN_INT (magic), DImode);
1824 cmp = gen_reg_rtx (BImode);
1825 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1826 gen_rtx_fmt_ee (ncode, BImode,
1827 ret, const0_rtx)));
1829 insns = get_insns ();
1830 end_sequence ();
1832 emit_libcall_block (insns, cmp, cmp,
1833 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1834 code = NE;
1836 else
1838 cmp = gen_reg_rtx (BImode);
1839 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1840 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1841 code = NE;
1844 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1845 *op0 = cmp;
1846 *op1 = const0_rtx;
1849 /* Generate an integral vector comparison. Return true if the condition has
1850 been reversed, and so the sense of the comparison should be inverted. */
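/* For example, NE is canonicalized to EQ with NEGATE set, and GE is first
   reversed to LT (setting NEGATE) and then swapped to GT, so only EQ, GT
   and GTU ever reach the expansion below.  */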
1852 static bool
1853 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1854 rtx dest, rtx op0, rtx op1)
1856 bool negate = false;
1857 rtx x;
1859 /* Canonicalize the comparison to EQ, GT, GTU. */
1860 switch (code)
1862 case EQ:
1863 case GT:
1864 case GTU:
1865 break;
1867 case NE:
1868 case LE:
1869 case LEU:
1870 code = reverse_condition (code);
1871 negate = true;
1872 break;
1874 case GE:
1875 case GEU:
1876 code = reverse_condition (code);
1877 negate = true;
1878 /* FALLTHRU */
1880 case LT:
1881 case LTU:
1882 code = swap_condition (code);
1883 x = op0, op0 = op1, op1 = x;
1884 break;
1886 default:
1887 gcc_unreachable ();
1890 /* Unsigned parallel compare is not supported by the hardware. Play some
1891 tricks to turn this into a signed comparison against 0. */
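  /* Sketch of the tricks used below: for V2SImode, x >u y exactly when
     (x - 0x80000000) >s (y - 0x80000000), so both operands are biased
     first; for V8QImode/V4HImode, x >u y exactly when the unsigned
     saturating subtraction x -us y is nonzero, so the code compares
     US_MINUS against zero and flips NEGATE.  */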
1892 if (code == GTU)
1894 switch (mode)
1896 case V2SImode:
1898 rtx t1, t2, mask;
1900 /* Subtract (-(INT MAX) - 1) from both operands to make
1901 them signed. */
1902 mask = GEN_INT (0x80000000);
1903 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1904 mask = force_reg (mode, mask);
1905 t1 = gen_reg_rtx (mode);
1906 emit_insn (gen_subv2si3 (t1, op0, mask));
1907 t2 = gen_reg_rtx (mode);
1908 emit_insn (gen_subv2si3 (t2, op1, mask));
1909 op0 = t1;
1910 op1 = t2;
1911 code = GT;
1913 break;
1915 case V8QImode:
1916 case V4HImode:
1917 /* Perform a parallel unsigned saturating subtraction. */
1918 x = gen_reg_rtx (mode);
1919 emit_insn (gen_rtx_SET (VOIDmode, x,
1920 gen_rtx_US_MINUS (mode, op0, op1)));
1922 code = EQ;
1923 op0 = x;
1924 op1 = CONST0_RTX (mode);
1925 negate = !negate;
1926 break;
1928 default:
1929 gcc_unreachable ();
1933 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1934 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1936 return negate;
1939 /* Emit an integral vector conditional move. */
1941 void
1942 ia64_expand_vecint_cmov (rtx operands[])
1944 enum machine_mode mode = GET_MODE (operands[0]);
1945 enum rtx_code code = GET_CODE (operands[3]);
1946 bool negate;
1947 rtx cmp, x, ot, of;
1949 cmp = gen_reg_rtx (mode);
1950 negate = ia64_expand_vecint_compare (code, mode, cmp,
1951 operands[4], operands[5]);
1953 ot = operands[1+negate];
1954 of = operands[2-negate];
1956 if (ot == CONST0_RTX (mode))
1958 if (of == CONST0_RTX (mode))
1960 emit_move_insn (operands[0], ot);
1961 return;
1964 x = gen_rtx_NOT (mode, cmp);
1965 x = gen_rtx_AND (mode, x, of);
1966 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1968 else if (of == CONST0_RTX (mode))
1970 x = gen_rtx_AND (mode, cmp, ot);
1971 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1973 else
1975 rtx t, f;
1977 t = gen_reg_rtx (mode);
1978 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1979 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1981 f = gen_reg_rtx (mode);
1982 x = gen_rtx_NOT (mode, cmp);
1983 x = gen_rtx_AND (mode, x, operands[2-negate]);
1984 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1986 x = gen_rtx_IOR (mode, t, f);
1987 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1991 /* Emit an integral vector min or max operation. Return true if all done. */
1993 bool
1994 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1995 rtx operands[])
1997 rtx xops[6];
1999 /* These four combinations are supported directly. */
2000 if (mode == V8QImode && (code == UMIN || code == UMAX))
2001 return false;
2002 if (mode == V4HImode && (code == SMIN || code == SMAX))
2003 return false;
2005 /* This combination can be implemented with only saturating subtraction. */
2006 if (mode == V4HImode && code == UMAX)
2008 rtx x, tmp = gen_reg_rtx (mode);
2010 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2011 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
2013 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2014 return true;
2017 /* Everything else implemented via vector comparisons. */
2018 xops[0] = operands[0];
2019 xops[4] = xops[1] = operands[1];
2020 xops[5] = xops[2] = operands[2];
2022 switch (code)
2024 case UMIN:
2025 code = LTU;
2026 break;
2027 case UMAX:
2028 code = GTU;
2029 break;
2030 case SMIN:
2031 code = LT;
2032 break;
2033 case SMAX:
2034 code = GT;
2035 break;
2036 default:
2037 gcc_unreachable ();
2039 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2041 ia64_expand_vecint_cmov (xops);
2042 return true;
2045 /* The vectors LO and HI each contain N halves of a double-wide vector.
2046 Reassemble either the first N/2 or the second N/2 elements. */
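/* For example, for V8QImode (nelt == 8) in the little-endian case
   (d.op0 = lo, d.op1 = hi), the permutation built below is
   { 0, 8, 1, 9, 2, 10, 3, 11 } when HIGHP is false and
   { 4, 12, 5, 13, 6, 14, 7, 15 } when HIGHP is true.  */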
2048 void
2049 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2051 enum machine_mode vmode = GET_MODE (lo);
2052 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2053 struct expand_vec_perm_d d;
2054 bool ok;
2056 d.target = gen_lowpart (vmode, out);
2057 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2058 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2059 d.vmode = vmode;
2060 d.nelt = nelt;
2061 d.one_operand_p = false;
2062 d.testing_p = false;
2064 high = (highp ? nelt / 2 : 0);
2065 for (i = 0; i < nelt / 2; ++i)
2067 d.perm[i * 2] = i + high;
2068 d.perm[i * 2 + 1] = i + high + nelt;
2071 ok = ia64_expand_vec_perm_const_1 (&d);
2072 gcc_assert (ok);
2075 /* Return a vector of the sign-extension of VEC. */
2077 static rtx
2078 ia64_unpack_sign (rtx vec, bool unsignedp)
2080 enum machine_mode mode = GET_MODE (vec);
2081 rtx zero = CONST0_RTX (mode);
2083 if (unsignedp)
2084 return zero;
2085 else
2087 rtx sign = gen_reg_rtx (mode);
2088 bool neg;
2090 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2091 gcc_assert (!neg);
2093 return sign;
2097 /* Emit an integral vector unpack operation. */
2099 void
2100 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2102 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2103 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2106 /* Emit an integral vector widening sum operation. */
2108 void
2109 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2111 enum machine_mode wmode;
2112 rtx l, h, t, sign;
2114 sign = ia64_unpack_sign (operands[1], unsignedp);
2116 wmode = GET_MODE (operands[0]);
2117 l = gen_reg_rtx (wmode);
2118 h = gen_reg_rtx (wmode);
2120 ia64_unpack_assemble (l, operands[1], sign, false);
2121 ia64_unpack_assemble (h, operands[1], sign, true);
2123 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2124 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2125 if (t != operands[0])
2126 emit_move_insn (operands[0], t);
2129 /* Emit the appropriate sequence for a call. */
2131 void
2132 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2133 int sibcall_p)
2135 rtx insn, b0;
2137 addr = XEXP (addr, 0);
2138 addr = convert_memory_address (DImode, addr);
2139 b0 = gen_rtx_REG (DImode, R_BR (0));
2141 /* ??? Should do this for functions known to bind local too. */
2142 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2144 if (sibcall_p)
2145 insn = gen_sibcall_nogp (addr);
2146 else if (! retval)
2147 insn = gen_call_nogp (addr, b0);
2148 else
2149 insn = gen_call_value_nogp (retval, addr, b0);
2150 insn = emit_call_insn (insn);
2152 else
2154 if (sibcall_p)
2155 insn = gen_sibcall_gp (addr);
2156 else if (! retval)
2157 insn = gen_call_gp (addr, b0);
2158 else
2159 insn = gen_call_value_gp (retval, addr, b0);
2160 insn = emit_call_insn (insn);
2162 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2165 if (sibcall_p)
2166 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2168 if (TARGET_ABI_OPEN_VMS)
2169 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2170 gen_rtx_REG (DImode, GR_REG (25)));
2173 static void
2174 reg_emitted (enum ia64_frame_regs r)
2176 if (emitted_frame_related_regs[r] == 0)
2177 emitted_frame_related_regs[r] = current_frame_info.r[r];
2178 else
2179 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2182 static int
2183 get_reg (enum ia64_frame_regs r)
2185 reg_emitted (r);
2186 return current_frame_info.r[r];
2189 static bool
2190 is_emitted (int regno)
2192 unsigned int r;
2194 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2195 if (emitted_frame_related_regs[r] == regno)
2196 return true;
2197 return false;
2200 void
2201 ia64_reload_gp (void)
2203 rtx tmp;
2205 if (current_frame_info.r[reg_save_gp])
2207 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2209 else
2211 HOST_WIDE_INT offset;
2212 rtx offset_r;
2214 offset = (current_frame_info.spill_cfa_off
2215 + current_frame_info.spill_size);
2216 if (frame_pointer_needed)
2218 tmp = hard_frame_pointer_rtx;
2219 offset = -offset;
2221 else
2223 tmp = stack_pointer_rtx;
2224 offset = current_frame_info.total_size - offset;
2227 offset_r = GEN_INT (offset);
2228 if (satisfies_constraint_I (offset_r))
2229 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2230 else
2232 emit_move_insn (pic_offset_table_rtx, offset_r);
2233 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2234 pic_offset_table_rtx, tmp));
2237 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2240 emit_move_insn (pic_offset_table_rtx, tmp);
2243 void
2244 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2245 rtx scratch_b, int noreturn_p, int sibcall_p)
2247 rtx insn;
2248 bool is_desc = false;
2250 /* If we find we're calling through a register, then we're actually
2251 calling through a descriptor, so load up the values. */
2252 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2254 rtx tmp;
2255 bool addr_dead_p;
2257 /* ??? We are currently constrained to *not* use peep2, because
2258 we can legitimately change the global lifetime of the GP
2259 (in the form of killing where previously live). This is
2260 because a call through a descriptor doesn't use the previous
2261 value of the GP, while a direct call does, and we do not
2262 commit to either form until the split here.
2264 That said, this means that we lack precise life info for
2265 whether ADDR is dead after this call. This is not terribly
2266 important, since we can fix things up essentially for free
2267 with the POST_DEC below, but it's nice to not use it when we
2268 can immediately tell it's not necessary. */
2269 addr_dead_p = ((noreturn_p || sibcall_p
2270 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2271 REGNO (addr)))
2272 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2274 /* Load the code address into scratch_b. */
2275 tmp = gen_rtx_POST_INC (Pmode, addr);
2276 tmp = gen_rtx_MEM (Pmode, tmp);
2277 emit_move_insn (scratch_r, tmp);
2278 emit_move_insn (scratch_b, scratch_r);
2280 /* Load the GP address. If ADDR is not dead here, then we must
2281 revert the change made above via the POST_INCREMENT. */
2282 if (!addr_dead_p)
2283 tmp = gen_rtx_POST_DEC (Pmode, addr);
2284 else
2285 tmp = addr;
2286 tmp = gen_rtx_MEM (Pmode, tmp);
2287 emit_move_insn (pic_offset_table_rtx, tmp);
2289 is_desc = true;
2290 addr = scratch_b;
2293 if (sibcall_p)
2294 insn = gen_sibcall_nogp (addr);
2295 else if (retval)
2296 insn = gen_call_value_nogp (retval, addr, retaddr);
2297 else
2298 insn = gen_call_nogp (addr, retaddr);
2299 emit_call_insn (insn);
2301 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2302 ia64_reload_gp ();
2305 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2307 This differs from the generic code in that we know about the zero-extending
2308 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2309 also know that ld.acq+cmpxchg.rel equals a full barrier.
2311 The loop we want to generate looks like
2313 cmp_reg = mem;
2314 label:
2315 old_reg = cmp_reg;
2316 new_reg = cmp_reg op val;
2317 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2318 if (cmp_reg != old_reg)
2319 goto label;
2321 Note that we only do the plain load from memory once. Subsequent
2322 iterations use the value loaded by the compare-and-swap pattern. */
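/* For example, a SImode or DImode atomic add whose constant increment is
   accepted by fetchadd_operand (the immediates fetchadd can encode,
   e.g. +/-1, 4, 8 or 16) is handled by the fetchadd special case in the
   expander; everything else goes through the cmpxchg loop sketched
   above.  */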
2324 void
2325 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2326 rtx old_dst, rtx new_dst, enum memmodel model)
2328 enum machine_mode mode = GET_MODE (mem);
2329 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2330 enum insn_code icode;
2332 /* Special case for using fetchadd. */
2333 if ((mode == SImode || mode == DImode)
2334 && (code == PLUS || code == MINUS)
2335 && fetchadd_operand (val, mode))
2337 if (code == MINUS)
2338 val = GEN_INT (-INTVAL (val));
2340 if (!old_dst)
2341 old_dst = gen_reg_rtx (mode);
2343 switch (model)
2345 case MEMMODEL_ACQ_REL:
2346 case MEMMODEL_SEQ_CST:
2347 emit_insn (gen_memory_barrier ());
2348 /* FALLTHRU */
2349 case MEMMODEL_RELAXED:
2350 case MEMMODEL_ACQUIRE:
2351 case MEMMODEL_CONSUME:
2352 if (mode == SImode)
2353 icode = CODE_FOR_fetchadd_acq_si;
2354 else
2355 icode = CODE_FOR_fetchadd_acq_di;
2356 break;
2357 case MEMMODEL_RELEASE:
2358 if (mode == SImode)
2359 icode = CODE_FOR_fetchadd_rel_si;
2360 else
2361 icode = CODE_FOR_fetchadd_rel_di;
2362 break;
2364 default:
2365 gcc_unreachable ();
2368 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2370 if (new_dst)
2372 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2373 true, OPTAB_WIDEN);
2374 if (new_reg != new_dst)
2375 emit_move_insn (new_dst, new_reg);
2377 return;
2380 /* Because of the volatile mem read, we get an ld.acq, which is the
2381 front half of the full barrier. The end half is the cmpxchg.rel.
2382    For relaxed and release memory models, we don't need this, but we
2383    don't bother trying to prevent it either. */
2384 gcc_assert (model == MEMMODEL_RELAXED
2385 || model == MEMMODEL_RELEASE
2386 || MEM_VOLATILE_P (mem));
2388 old_reg = gen_reg_rtx (DImode);
2389 cmp_reg = gen_reg_rtx (DImode);
2390 label = gen_label_rtx ();
2392 if (mode != DImode)
2394 val = simplify_gen_subreg (DImode, val, mode, 0);
2395 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2397 else
2398 emit_move_insn (cmp_reg, mem);
2400 emit_label (label);
2402 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2403 emit_move_insn (old_reg, cmp_reg);
2404 emit_move_insn (ar_ccv, cmp_reg);
2406 if (old_dst)
2407 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2409 new_reg = cmp_reg;
2410 if (code == NOT)
2412 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2413 true, OPTAB_DIRECT);
2414 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2416 else
2417 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2418 true, OPTAB_DIRECT);
2420 if (mode != DImode)
2421 new_reg = gen_lowpart (mode, new_reg);
2422 if (new_dst)
2423 emit_move_insn (new_dst, new_reg);
2425 switch (model)
2427 case MEMMODEL_RELAXED:
2428 case MEMMODEL_ACQUIRE:
2429 case MEMMODEL_CONSUME:
2430 switch (mode)
2432 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2433 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2434 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2435 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2436 default:
2437 gcc_unreachable ();
2439 break;
2441 case MEMMODEL_RELEASE:
2442 case MEMMODEL_ACQ_REL:
2443 case MEMMODEL_SEQ_CST:
2444 switch (mode)
2446 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2447 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2448 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2449 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2450 default:
2451 gcc_unreachable ();
2453 break;
2455 default:
2456 gcc_unreachable ();
2459 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2461 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2464 /* Begin the assembly file. */
2466 static void
2467 ia64_file_start (void)
2469 default_file_start ();
2470 emit_safe_across_calls ();
2473 void
2474 emit_safe_across_calls (void)
2476 unsigned int rs, re;
2477 int out_state;
2479 rs = 1;
2480 out_state = 0;
2481 while (1)
2483 while (rs < 64 && call_used_regs[PR_REG (rs)])
2484 rs++;
2485 if (rs >= 64)
2486 break;
2487 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2488 continue;
2489 if (out_state == 0)
2491 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2492 out_state = 1;
2494 else
2495 fputc (',', asm_out_file);
2496 if (re == rs + 1)
2497 fprintf (asm_out_file, "p%u", rs);
2498 else
2499 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2500 rs = re + 1;
2502 if (out_state)
2503 fputc ('\n', asm_out_file);
2506 /* Globalize a declaration. */
2508 static void
2509 ia64_globalize_decl_name (FILE * stream, tree decl)
2511 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2512 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2513 if (version_attr)
2515 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2516 const char *p = TREE_STRING_POINTER (v);
2517 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2519 targetm.asm_out.globalize_label (stream, name);
2520 if (TREE_CODE (decl) == FUNCTION_DECL)
2521 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2524 /* Helper function for ia64_compute_frame_size: find an appropriate general
2525 register to spill some special register to. SPECIAL_SPILL_MASK contains
2526 bits in GR0 to GR31 that have already been allocated by this routine.
2527 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2529 static int
2530 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2532 int regno;
2534 if (emitted_frame_related_regs[r] != 0)
2536 regno = emitted_frame_related_regs[r];
2537 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2538 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2539 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2540 else if (crtl->is_leaf
2541 && regno >= GR_REG (1) && regno <= GR_REG (31))
2542 current_frame_info.gr_used_mask |= 1 << regno;
2544 return regno;
2547 /* If this is a leaf function, first try an otherwise unused
2548 call-clobbered register. */
2549 if (crtl->is_leaf)
2551 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2552 if (! df_regs_ever_live_p (regno)
2553 && call_used_regs[regno]
2554 && ! fixed_regs[regno]
2555 && ! global_regs[regno]
2556 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2557 && ! is_emitted (regno))
2559 current_frame_info.gr_used_mask |= 1 << regno;
2560 return regno;
2564 if (try_locals)
2566 regno = current_frame_info.n_local_regs;
2567 /* If there is a frame pointer, then we can't use loc79, because
2568 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2569 reg_name switching code in ia64_expand_prologue. */
2570 while (regno < (80 - frame_pointer_needed))
2571 if (! is_emitted (LOC_REG (regno++)))
2573 current_frame_info.n_local_regs = regno;
2574 return LOC_REG (regno - 1);
2578 /* Failed to find a general register to spill to. Must use stack. */
2579 return 0;
2582 /* In order to make for nice schedules, we try to allocate every temporary
2583 to a different register. We must of course stay away from call-saved,
2584 fixed, and global registers. We must also stay away from registers
2585 allocated in current_frame_info.gr_used_mask, since those include regs
2586 used all through the prologue.
2588 Any register allocated here must be used immediately. The idea is to
2589 aid scheduling, not to solve data flow problems. */
2591 static int last_scratch_gr_reg;
2593 static int
2594 next_scratch_gr_reg (void)
2596 int i, regno;
2598 for (i = 0; i < 32; ++i)
2600 regno = (last_scratch_gr_reg + i + 1) & 31;
2601 if (call_used_regs[regno]
2602 && ! fixed_regs[regno]
2603 && ! global_regs[regno]
2604 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2606 last_scratch_gr_reg = regno;
2607 return regno;
2611 /* There must be _something_ available. */
2612 gcc_unreachable ();
2615 /* Helper function for ia64_compute_frame_size, called through
2616 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2618 static void
2619 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2621 unsigned int regno = REGNO (reg);
2622 if (regno < 32)
2624 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2625 for (i = 0; i < n; ++i)
2626 current_frame_info.gr_used_mask |= 1 << (regno + i);
2631 /* Returns the number of bytes offset between the frame pointer and the stack
2632 pointer for the current function. SIZE is the number of bytes of space
2633 needed for local variables. */
2635 static void
2636 ia64_compute_frame_size (HOST_WIDE_INT size)
2638 HOST_WIDE_INT total_size;
2639 HOST_WIDE_INT spill_size = 0;
2640 HOST_WIDE_INT extra_spill_size = 0;
2641 HOST_WIDE_INT pretend_args_size;
2642 HARD_REG_SET mask;
2643 int n_spilled = 0;
2644 int spilled_gr_p = 0;
2645 int spilled_fr_p = 0;
2646 unsigned int regno;
2647 int min_regno;
2648 int max_regno;
2649 int i;
2651 if (current_frame_info.initialized)
2652 return;
2654 memset (&current_frame_info, 0, sizeof current_frame_info);
2655 CLEAR_HARD_REG_SET (mask);
2657 /* Don't allocate scratches to the return register. */
2658 diddle_return_value (mark_reg_gr_used_mask, NULL);
2660 /* Don't allocate scratches to the EH scratch registers. */
2661 if (cfun->machine->ia64_eh_epilogue_sp)
2662 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2663 if (cfun->machine->ia64_eh_epilogue_bsp)
2664 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2666 /* Static stack checking uses r2 and r3. */
2667 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
2668 current_frame_info.gr_used_mask |= 0xc;
2670 /* Find the size of the register stack frame. We have only 80 local
2671 registers, because we reserve 8 for the inputs and 8 for the
2672 outputs. */
2674 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2675 since we'll be adjusting that down later. */
2676 regno = LOC_REG (78) + ! frame_pointer_needed;
2677 for (; regno >= LOC_REG (0); regno--)
2678 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2679 break;
2680 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2682 /* For functions marked with the syscall_linkage attribute, we must mark
2683 all eight input registers as in use, so that locals aren't visible to
2684 the caller. */
2686 if (cfun->machine->n_varargs > 0
2687 || lookup_attribute ("syscall_linkage",
2688 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2689 current_frame_info.n_input_regs = 8;
2690 else
2692 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2693 if (df_regs_ever_live_p (regno))
2694 break;
2695 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2698 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2699 if (df_regs_ever_live_p (regno))
2700 break;
2701 i = regno - OUT_REG (0) + 1;
2703 #ifndef PROFILE_HOOK
2704 /* When -p profiling, we need one output register for the mcount argument.
2705 Likewise for -a profiling for the bb_init_func argument. For -ax
2706 profiling, we need two output registers for the two bb_init_trace_func
2707 arguments. */
2708 if (crtl->profile)
2709 i = MAX (i, 1);
2710 #endif
2711 current_frame_info.n_output_regs = i;
2713 /* ??? No rotating register support yet. */
2714 current_frame_info.n_rotate_regs = 0;
2716 /* Discover which registers need spilling, and how much room that
2717 will take. Begin with floating point and general registers,
2718 which will always wind up on the stack. */
2720 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2721 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2723 SET_HARD_REG_BIT (mask, regno);
2724 spill_size += 16;
2725 n_spilled += 1;
2726 spilled_fr_p = 1;
2729 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2730 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2732 SET_HARD_REG_BIT (mask, regno);
2733 spill_size += 8;
2734 n_spilled += 1;
2735 spilled_gr_p = 1;
2738 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2739 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2741 SET_HARD_REG_BIT (mask, regno);
2742 spill_size += 8;
2743 n_spilled += 1;
2746 /* Now come all special registers that might get saved in other
2747 general registers. */
2749 if (frame_pointer_needed)
2751 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2752 /* If we did not get a register, then we take LOC79. This is guaranteed
2753 to be free, even if regs_ever_live is already set, because this is
2754 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2755 as we don't count loc79 above. */
2756 if (current_frame_info.r[reg_fp] == 0)
2758 current_frame_info.r[reg_fp] = LOC_REG (79);
2759 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2763 if (! crtl->is_leaf)
2765 /* Emit a save of BR0 if we call other functions. Do this even
2766 if this function doesn't return, as EH depends on this to be
2767 able to unwind the stack. */
2768 SET_HARD_REG_BIT (mask, BR_REG (0));
2770 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2771 if (current_frame_info.r[reg_save_b0] == 0)
2773 extra_spill_size += 8;
2774 n_spilled += 1;
2777 /* Similarly for ar.pfs. */
2778 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2779 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2780 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2782 extra_spill_size += 8;
2783 n_spilled += 1;
2786 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2787 registers are clobbered, so we fall back to the stack. */
2788 current_frame_info.r[reg_save_gp]
2789 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2790 if (current_frame_info.r[reg_save_gp] == 0)
2792 SET_HARD_REG_BIT (mask, GR_REG (1));
2793 spill_size += 8;
2794 n_spilled += 1;
2797 else
2799 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2801 SET_HARD_REG_BIT (mask, BR_REG (0));
2802 extra_spill_size += 8;
2803 n_spilled += 1;
2806 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2808 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2809 current_frame_info.r[reg_save_ar_pfs]
2810 = find_gr_spill (reg_save_ar_pfs, 1);
2811 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2813 extra_spill_size += 8;
2814 n_spilled += 1;
2819 /* Unwind descriptor hackery: things are most efficient if we allocate
2820 consecutive GR save registers for RP, PFS, FP in that order. However,
2821 it is absolutely critical that FP get the only hard register that's
2822 guaranteed to be free, so we allocated it first. If all three did
2823 happen to be allocated hard regs, and are consecutive, rearrange them
2824 into the preferred order now.
2826 If we have already emitted code for any of those registers,
2827 then it's already too late to change. */
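  /* For instance, if FP was allocated first and got r33, with B0 = r34
     and AR.PFS = r35, the block below renumbers them to B0 = r33,
     AR.PFS = r34, FP = r35, i.e. the preferred RP/PFS/FP order.  */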
2828 min_regno = MIN (current_frame_info.r[reg_fp],
2829 MIN (current_frame_info.r[reg_save_b0],
2830 current_frame_info.r[reg_save_ar_pfs]));
2831 max_regno = MAX (current_frame_info.r[reg_fp],
2832 MAX (current_frame_info.r[reg_save_b0],
2833 current_frame_info.r[reg_save_ar_pfs]));
2834 if (min_regno > 0
2835 && min_regno + 2 == max_regno
2836 && (current_frame_info.r[reg_fp] == min_regno + 1
2837 || current_frame_info.r[reg_save_b0] == min_regno + 1
2838 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2839 && (emitted_frame_related_regs[reg_save_b0] == 0
2840 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2841 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2842 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2843 && (emitted_frame_related_regs[reg_fp] == 0
2844 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2846 current_frame_info.r[reg_save_b0] = min_regno;
2847 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2848 current_frame_info.r[reg_fp] = min_regno + 2;
2851 /* See if we need to store the predicate register block. */
2852 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2853 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2854 break;
2855 if (regno <= PR_REG (63))
2857 SET_HARD_REG_BIT (mask, PR_REG (0));
2858 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2859 if (current_frame_info.r[reg_save_pr] == 0)
2861 extra_spill_size += 8;
2862 n_spilled += 1;
2865 /* ??? Mark them all as used so that register renaming and such
2866 are free to use them. */
2867 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2868 df_set_regs_ever_live (regno, true);
2871 /* If we're forced to use st8.spill, we're forced to save and restore
2872 ar.unat as well. The check for existing liveness allows inline asm
2873 to touch ar.unat. */
2874 if (spilled_gr_p || cfun->machine->n_varargs
2875 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2877 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2878 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2879 current_frame_info.r[reg_save_ar_unat]
2880 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2881 if (current_frame_info.r[reg_save_ar_unat] == 0)
2883 extra_spill_size += 8;
2884 n_spilled += 1;
2888 if (df_regs_ever_live_p (AR_LC_REGNUM))
2890 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2891 current_frame_info.r[reg_save_ar_lc]
2892 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2893 if (current_frame_info.r[reg_save_ar_lc] == 0)
2895 extra_spill_size += 8;
2896 n_spilled += 1;
2900 /* If we have an odd number of words of pretend arguments written to
2901 the stack, then the FR save area will be unaligned. We round the
2902 size of this area up to keep things 16 byte aligned. */
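  /* E.g. a single 8-byte block of pretend arguments gets padded to 16
     bytes here so that the 16-byte FR spills that follow stay aligned.  */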
2903 if (spilled_fr_p)
2904 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2905 else
2906 pretend_args_size = crtl->args.pretend_args_size;
2908 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2909 + crtl->outgoing_args_size);
2910 total_size = IA64_STACK_ALIGN (total_size);
2912 /* We always use the 16-byte scratch area provided by the caller, but
2913 if we are a leaf function, there's no one to which we need to provide
2914 a scratch area. However, if the function allocates dynamic stack space,
2915 the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
2916 so we need to cope. */
2917 if (crtl->is_leaf && !cfun->calls_alloca)
2918 total_size = MAX (0, total_size - 16);
2920 current_frame_info.total_size = total_size;
2921 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2922 current_frame_info.spill_size = spill_size;
2923 current_frame_info.extra_spill_size = extra_spill_size;
2924 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2925 current_frame_info.n_spilled = n_spilled;
2926 current_frame_info.initialized = reload_completed;
2929 /* Worker function for TARGET_CAN_ELIMINATE. */
2931 bool
2932 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2934 return (to == BR_REG (0) ? crtl->is_leaf : true);
2937 /* Compute the initial difference between the specified pair of registers. */
2939 HOST_WIDE_INT
2940 ia64_initial_elimination_offset (int from, int to)
2942 HOST_WIDE_INT offset;
2944 ia64_compute_frame_size (get_frame_size ());
2945 switch (from)
2947 case FRAME_POINTER_REGNUM:
2948 switch (to)
2950 case HARD_FRAME_POINTER_REGNUM:
2951 offset = -current_frame_info.total_size;
2952 if (!crtl->is_leaf || cfun->calls_alloca)
2953 offset += 16 + crtl->outgoing_args_size;
2954 break;
2956 case STACK_POINTER_REGNUM:
2957 offset = 0;
2958 if (!crtl->is_leaf || cfun->calls_alloca)
2959 offset += 16 + crtl->outgoing_args_size;
2960 break;
2962 default:
2963 gcc_unreachable ();
2965 break;
2967 case ARG_POINTER_REGNUM:
2968 /* Arguments start above the 16 byte save area, unless stdarg
2969 in which case we store through the 16 byte save area. */
2970 switch (to)
2972 case HARD_FRAME_POINTER_REGNUM:
2973 offset = 16 - crtl->args.pretend_args_size;
2974 break;
2976 case STACK_POINTER_REGNUM:
2977 offset = (current_frame_info.total_size
2978 + 16 - crtl->args.pretend_args_size);
2979 break;
2981 default:
2982 gcc_unreachable ();
2984 break;
2986 default:
2987 gcc_unreachable ();
2990 return offset;
2993 /* If there are more than a trivial number of register spills, we use
2994 two interleaved iterators so that we can get two memory references
2995 per insn group.
2997 In order to simplify things in the prologue and epilogue expanders,
2998 we use helper functions to fix up the memory references after the
2999 fact with the appropriate offsets to a POST_MODIFY memory mode.
3000 The following data structure tracks the state of the two iterators
3001 while insns are being emitted. */
3003 struct spill_fill_data
3005 rtx_insn *init_after; /* point at which to emit initializations */
3006 rtx init_reg[2]; /* initial base register */
3007 rtx iter_reg[2]; /* the iterator registers */
3008 rtx *prev_addr[2]; /* address of last memory use */
3009 rtx_insn *prev_insn[2]; /* the insn corresponding to prev_addr */
3010 HOST_WIDE_INT prev_off[2]; /* last offset */
3011 int n_iter; /* number of iterators in use */
3012 int next_iter; /* next iterator to use */
3013 unsigned int save_gr_used_mask;
3016 static struct spill_fill_data spill_fill_data;
3018 static void
3019 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3021 int i;
3023 spill_fill_data.init_after = get_last_insn ();
3024 spill_fill_data.init_reg[0] = init_reg;
3025 spill_fill_data.init_reg[1] = init_reg;
3026 spill_fill_data.prev_addr[0] = NULL;
3027 spill_fill_data.prev_addr[1] = NULL;
3028 spill_fill_data.prev_insn[0] = NULL;
3029 spill_fill_data.prev_insn[1] = NULL;
3030 spill_fill_data.prev_off[0] = cfa_off;
3031 spill_fill_data.prev_off[1] = cfa_off;
3032 spill_fill_data.next_iter = 0;
3033 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3035 spill_fill_data.n_iter = 1 + (n_spills > 2);
3036 for (i = 0; i < spill_fill_data.n_iter; ++i)
3038 int regno = next_scratch_gr_reg ();
3039 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3040 current_frame_info.gr_used_mask |= 1 << regno;
3044 static void
3045 finish_spill_pointers (void)
3047 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3050 static rtx
3051 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3053 int iter = spill_fill_data.next_iter;
3054 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3055 rtx disp_rtx = GEN_INT (disp);
3056 rtx mem;
3058 if (spill_fill_data.prev_addr[iter])
3060 if (satisfies_constraint_N (disp_rtx))
3062 *spill_fill_data.prev_addr[iter]
3063 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3064 gen_rtx_PLUS (DImode,
3065 spill_fill_data.iter_reg[iter],
3066 disp_rtx));
3067 add_reg_note (spill_fill_data.prev_insn[iter],
3068 REG_INC, spill_fill_data.iter_reg[iter]);
3070 else
3072 /* ??? Could use register post_modify for loads. */
3073 if (!satisfies_constraint_I (disp_rtx))
3075 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3076 emit_move_insn (tmp, disp_rtx);
3077 disp_rtx = tmp;
3079 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3080 spill_fill_data.iter_reg[iter], disp_rtx));
3083 /* Micro-optimization: if we've created a frame pointer, it's at
3084 CFA 0, which may allow the real iterator to be initialized lower,
3085 slightly increasing parallelism. Also, if there are few saves
3086 it may eliminate the iterator entirely. */
3087 else if (disp == 0
3088 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3089 && frame_pointer_needed)
3091 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3092 set_mem_alias_set (mem, get_varargs_alias_set ());
3093 return mem;
3095 else
3097 rtx seq;
3098 rtx_insn *insn;
3100 if (disp == 0)
3101 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3102 spill_fill_data.init_reg[iter]);
3103 else
3105 start_sequence ();
3107 if (!satisfies_constraint_I (disp_rtx))
3109 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3110 emit_move_insn (tmp, disp_rtx);
3111 disp_rtx = tmp;
3114 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3115 spill_fill_data.init_reg[iter],
3116 disp_rtx));
3118 seq = get_insns ();
3119 end_sequence ();
3122 /* Careful for being the first insn in a sequence. */
3123 if (spill_fill_data.init_after)
3124 insn = emit_insn_after (seq, spill_fill_data.init_after);
3125 else
3127 rtx_insn *first = get_insns ();
3128 if (first)
3129 insn = emit_insn_before (seq, first);
3130 else
3131 insn = emit_insn (seq);
3133 spill_fill_data.init_after = insn;
3136 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3138 /* ??? Not all of the spills are for varargs, but some of them are.
3139 The rest of the spills belong in an alias set of their own. But
3140 it doesn't actually hurt to include them here. */
3141 set_mem_alias_set (mem, get_varargs_alias_set ());
3143 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3144 spill_fill_data.prev_off[iter] = cfa_off;
3146 if (++iter >= spill_fill_data.n_iter)
3147 iter = 0;
3148 spill_fill_data.next_iter = iter;
3150 return mem;
3153 static void
3154 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3155 rtx frame_reg)
3157 int iter = spill_fill_data.next_iter;
3158 rtx mem;
3159 rtx_insn *insn;
3161 mem = spill_restore_mem (reg, cfa_off);
3162 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3163 spill_fill_data.prev_insn[iter] = insn;
3165 if (frame_reg)
3167 rtx base;
3168 HOST_WIDE_INT off;
3170 RTX_FRAME_RELATED_P (insn) = 1;
3172 /* Don't even pretend that the unwind code can intuit its way
3173 through a pair of interleaved post_modify iterators. Just
3174 provide the correct answer. */
3176 if (frame_pointer_needed)
3178 base = hard_frame_pointer_rtx;
3179 off = - cfa_off;
3181 else
3183 base = stack_pointer_rtx;
3184 off = current_frame_info.total_size - cfa_off;
3187 add_reg_note (insn, REG_CFA_OFFSET,
3188 gen_rtx_SET (VOIDmode,
3189 gen_rtx_MEM (GET_MODE (reg),
3190 plus_constant (Pmode,
3191 base, off)),
3192 frame_reg));
3196 static void
3197 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3199 int iter = spill_fill_data.next_iter;
3200 rtx_insn *insn;
3202 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3203 GEN_INT (cfa_off)));
3204 spill_fill_data.prev_insn[iter] = insn;
3207 /* Wrapper functions that discard the CONST_INT spill offset. These
3208 exist so that we can give gr_spill/gr_fill the offset they need and
3209 use a consistent function interface. */
3211 static rtx
3212 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3214 return gen_movdi (dest, src);
3217 static rtx
3218 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3220 return gen_fr_spill (dest, src);
3223 static rtx
3224 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3226 return gen_fr_restore (dest, src);
3229 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
3231 /* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
3232 #define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
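/* Working the formula above through, e.g. N = 96 stacked registers need
   (96 + 96/63 + 1) * 8 = (96 + 1 + 1) * 8 = 784 bytes of backing store,
   which is the same (96 + 2) * 8 figure quoted in the reserve-size
   comment further below.  */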
3234 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
3235 inclusive. These are offsets from the current stack pointer. BS_SIZE
3236 is the size of the backing store. ??? This clobbers r2 and r3. */
3238 static void
3239 ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
3240 int bs_size)
3242 rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
3243 rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
3244 rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
3246 /* On the IA-64 there is a second stack in memory, namely the Backing Store
3247 of the Register Stack Engine. We also need to probe it after checking
3248 that the 2 stacks don't overlap. */
3249 emit_insn (gen_bsp_value (r3));
3250 emit_move_insn (r2, GEN_INT (-(first + size)));
3252 /* Compare current value of BSP and SP registers. */
3253 emit_insn (gen_rtx_SET (VOIDmode, p6,
3254 gen_rtx_fmt_ee (LTU, BImode,
3255 r3, stack_pointer_rtx)));
3257 /* Compute the address of the probe for the Backing Store (which grows
3258 towards higher addresses). We probe only at the first offset of
3259    the next page because some OSes (e.g. Linux/ia64) only extend the
3260    backing store when this specific address is hit (but generate a SEGV
3261    on other addresses). Page size is the worst case (4KB). The reserve
3262 size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
3263 Also compute the address of the last probe for the memory stack
3264 (which grows towards lower addresses). */
3265 emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
3266 emit_insn (gen_rtx_SET (VOIDmode, r2,
3267 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3269 /* Compare them and raise SEGV if the former has topped the latter. */
3270 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3271 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3272 gen_rtx_SET (VOIDmode, p6,
3273 gen_rtx_fmt_ee (GEU, BImode,
3274 r3, r2))));
3275 emit_insn (gen_rtx_SET (VOIDmode,
3276 gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
3277 const0_rtx),
3278 const0_rtx));
3279 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3280 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3281 gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
3282 GEN_INT (11))));
3284 /* Probe the Backing Store if necessary. */
3285 if (bs_size > 0)
3286 emit_stack_probe (r3);
3288 /* Probe the memory stack if necessary. */
3289 if (size == 0)
3292 /* See if we have a constant small number of probes to generate. If so,
3293 that's the easy case. */
3294 else if (size <= PROBE_INTERVAL)
3295 emit_stack_probe (r2);
3297 /* The run-time loop is made up of 8 insns in the generic case while this
3298 compile-time loop is made up of 5+2*(n-2) insns for n # of intervals. */
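  /* Per that formula: 5 insns for 2 intervals, 7 for 3, 9 for 4.  */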
3299 else if (size <= 4 * PROBE_INTERVAL)
3301 HOST_WIDE_INT i;
3303 emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
3304 emit_insn (gen_rtx_SET (VOIDmode, r2,
3305 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3306 emit_stack_probe (r2);
3308 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
3309 it exceeds SIZE. If only two probes are needed, this will not
3310 generate any code. Then probe at FIRST + SIZE. */
3311 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
3313 emit_insn (gen_rtx_SET (VOIDmode, r2,
3314 plus_constant (Pmode, r2, -PROBE_INTERVAL)));
3315 emit_stack_probe (r2);
3318 emit_insn (gen_rtx_SET (VOIDmode, r2,
3319 plus_constant (Pmode, r2,
3320 (i - PROBE_INTERVAL) - size)));
3321 emit_stack_probe (r2);
3324 /* Otherwise, do the same as above, but in a loop. Note that we must be
3325 extra careful with variables wrapping around because we might be at
3326 the very top (or the very bottom) of the address space and we have
3327 to be able to handle this case properly; in particular, we use an
3328 equality test for the loop condition. */
3329 else
3331 HOST_WIDE_INT rounded_size;
3333 emit_move_insn (r2, GEN_INT (-first));
3336 /* Step 1: round SIZE to the previous multiple of the interval. */
3338 rounded_size = size & -PROBE_INTERVAL;
3341 /* Step 2: compute initial and final value of the loop counter. */
3343 /* TEST_ADDR = SP + FIRST. */
3344 emit_insn (gen_rtx_SET (VOIDmode, r2,
3345 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3347 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
3348 if (rounded_size > (1 << 21))
3350 emit_move_insn (r3, GEN_INT (-rounded_size));
3351 emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
3353 else
3354 emit_insn (gen_rtx_SET (VOIDmode, r3,
3355 gen_rtx_PLUS (Pmode, r2,
3356 GEN_INT (-rounded_size))));
3359 /* Step 3: the loop
3361 while (TEST_ADDR != LAST_ADDR)
3363 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
3364 probe at TEST_ADDR
3367 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
3368 until it is equal to ROUNDED_SIZE. */
3370 emit_insn (gen_probe_stack_range (r2, r2, r3));
3373 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
3374 that SIZE is equal to ROUNDED_SIZE. */
3376 /* TEMP = SIZE - ROUNDED_SIZE. */
3377 if (size != rounded_size)
3379 emit_insn (gen_rtx_SET (VOIDmode, r2,
3380 plus_constant (Pmode, r2,
3381 rounded_size - size)));
3382 emit_stack_probe (r2);
3386 /* Make sure nothing is scheduled before we are done. */
3387 emit_insn (gen_blockage ());
3390 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
3391 absolute addresses. */
3393 const char *
3394 output_probe_stack_range (rtx reg1, rtx reg2)
3396 static int labelno = 0;
3397 char loop_lab[32], end_lab[32];
3398 rtx xops[3];
3400 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
3401 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
3403 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
3405 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
3406 xops[0] = reg1;
3407 xops[1] = reg2;
3408 xops[2] = gen_rtx_REG (BImode, PR_REG (6));
3409 output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
3410 fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
3411 assemble_name_raw (asm_out_file, end_lab);
3412 fputc ('\n', asm_out_file);
3414 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
3415 xops[1] = GEN_INT (-PROBE_INTERVAL);
3416 output_asm_insn ("addl %0 = %1, %0", xops);
3417 fputs ("\t;;\n", asm_out_file);
3419 /* Probe at TEST_ADDR and branch. */
3420 output_asm_insn ("probe.w.fault %0, 0", xops);
3421 fprintf (asm_out_file, "\tbr ");
3422 assemble_name_raw (asm_out_file, loop_lab);
3423 fputc ('\n', asm_out_file);
3425 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
3427 return "";
3430 /* Called after register allocation to add any instructions needed for the
3431 prologue. Using a prologue insn is favored compared to putting all of the
3432 instructions in output_function_prologue(), since it allows the scheduler
3433 to intermix instructions with the saves of the caller saved registers. In
3434 some cases, it might be necessary to emit a barrier instruction as the last
3435 insn to prevent such scheduling.
3437 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3438 so that the debug info generation code can handle them properly.
3440 The register save area is laid out like so:
3441 cfa+16
3442 [ varargs spill area ]
3443 [ fr register spill area ]
3444 [ br register spill area ]
3445 [ ar register spill area ]
3446 [ pr register spill area ]
3447 [ gr register spill area ] */
3449 /* ??? Get inefficient code when the frame size is larger than can fit in an
3450 adds instruction. */
3452 void
3453 ia64_expand_prologue (void)
3455 rtx_insn *insn;
3456 rtx ar_pfs_save_reg, ar_unat_save_reg;
3457 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3458 rtx reg, alt_reg;
3460 ia64_compute_frame_size (get_frame_size ());
3461 last_scratch_gr_reg = 15;
3463 if (flag_stack_usage_info)
3464 current_function_static_stack_size = current_frame_info.total_size;
3466 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3468 HOST_WIDE_INT size = current_frame_info.total_size;
3469 int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
3470 + current_frame_info.n_local_regs);
3472 if (crtl->is_leaf && !cfun->calls_alloca)
3474 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
3475 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3476 size - STACK_CHECK_PROTECT,
3477 bs_size);
3478 else if (size + bs_size > STACK_CHECK_PROTECT)
3479 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
3481 else if (size + bs_size > 0)
3482 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
3485 if (dump_file)
3487 fprintf (dump_file, "ia64 frame related registers "
3488 "recorded in current_frame_info.r[]:\n");
3489 #define PRINTREG(a) if (current_frame_info.r[a]) \
3490 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3491 PRINTREG(reg_fp);
3492 PRINTREG(reg_save_b0);
3493 PRINTREG(reg_save_pr);
3494 PRINTREG(reg_save_ar_pfs);
3495 PRINTREG(reg_save_ar_unat);
3496 PRINTREG(reg_save_ar_lc);
3497 PRINTREG(reg_save_gp);
3498 #undef PRINTREG
3501 /* If there is no epilogue, then we don't need some prologue insns.
3502 We need to avoid emitting the dead prologue insns, because flow
3503 will complain about them. */
3504 if (optimize)
3506 edge e;
3507 edge_iterator ei;
3509 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
3510 if ((e->flags & EDGE_FAKE) == 0
3511 && (e->flags & EDGE_FALLTHRU) != 0)
3512 break;
3513 epilogue_p = (e != NULL);
3515 else
3516 epilogue_p = 1;
3518 /* Set the local, input, and output register names. We need to do this
3519 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3520 half. If we use in/loc/out register names, then we get assembler errors
3521 in crtn.S because there is no alloc insn or regstk directive in there. */
3522 if (! TARGET_REG_NAMES)
3524 int inputs = current_frame_info.n_input_regs;
3525 int locals = current_frame_info.n_local_regs;
3526 int outputs = current_frame_info.n_output_regs;
3528 for (i = 0; i < inputs; i++)
3529 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3530 for (i = 0; i < locals; i++)
3531 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3532 for (i = 0; i < outputs; i++)
3533 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3536 /* Set the frame pointer register name. The regnum is logically loc79,
3537 but of course we'll not have allocated that many locals. Rather than
3538 worrying about renumbering the existing rtxs, we adjust the name. */
3539 /* ??? This code means that we can never use one local register when
3540 there is a frame pointer. loc79 gets wasted in this case, as it is
3541 renamed to a register that will never be used. See also the try_locals
3542 code in find_gr_spill. */
3543 if (current_frame_info.r[reg_fp])
3545 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3546 reg_names[HARD_FRAME_POINTER_REGNUM]
3547 = reg_names[current_frame_info.r[reg_fp]];
3548 reg_names[current_frame_info.r[reg_fp]] = tmp;
3551 /* We don't need an alloc instruction if we've used no outputs or locals. */
3552 if (current_frame_info.n_local_regs == 0
3553 && current_frame_info.n_output_regs == 0
3554 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3555 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3557 /* If there is no alloc, but there are input registers used, then we
3558 need a .regstk directive. */
3559 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3560 ar_pfs_save_reg = NULL_RTX;
3562 else
3564 current_frame_info.need_regstk = 0;
3566 if (current_frame_info.r[reg_save_ar_pfs])
3568 regno = current_frame_info.r[reg_save_ar_pfs];
3569 reg_emitted (reg_save_ar_pfs);
3571 else
3572 regno = next_scratch_gr_reg ();
3573 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3575 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3576 GEN_INT (current_frame_info.n_input_regs),
3577 GEN_INT (current_frame_info.n_local_regs),
3578 GEN_INT (current_frame_info.n_output_regs),
3579 GEN_INT (current_frame_info.n_rotate_regs)));
3580 if (current_frame_info.r[reg_save_ar_pfs])
3582 RTX_FRAME_RELATED_P (insn) = 1;
3583 add_reg_note (insn, REG_CFA_REGISTER,
3584 gen_rtx_SET (VOIDmode,
3585 ar_pfs_save_reg,
3586 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3590 /* Set up frame pointer, stack pointer, and spill iterators. */
3592 n_varargs = cfun->machine->n_varargs;
3593 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3594 stack_pointer_rtx, 0);
3596 if (frame_pointer_needed)
3598 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3599 RTX_FRAME_RELATED_P (insn) = 1;
3601 /* Force the unwind info to recognize this as defining a new CFA,
3602 rather than some temp register setup. */
3603 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3606 if (current_frame_info.total_size != 0)
3608 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3609 rtx offset;
3611 if (satisfies_constraint_I (frame_size_rtx))
3612 offset = frame_size_rtx;
3613 else
3615 regno = next_scratch_gr_reg ();
3616 offset = gen_rtx_REG (DImode, regno);
3617 emit_move_insn (offset, frame_size_rtx);
3620 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3621 stack_pointer_rtx, offset));
3623 if (! frame_pointer_needed)
3625 RTX_FRAME_RELATED_P (insn) = 1;
3626 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3627 gen_rtx_SET (VOIDmode,
3628 stack_pointer_rtx,
3629 gen_rtx_PLUS (DImode,
3630 stack_pointer_rtx,
3631 frame_size_rtx)));
3634 /* ??? At this point we must generate a magic insn that appears to
3635 modify the stack pointer, the frame pointer, and all spill
3636 iterators. This would allow the most scheduling freedom. For
3637 now, just hard stop. */
3638 emit_insn (gen_blockage ());
3641 /* Must copy out ar.unat before doing any integer spills. */
3642 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3644 if (current_frame_info.r[reg_save_ar_unat])
3646 ar_unat_save_reg
3647 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3648 reg_emitted (reg_save_ar_unat);
3650 else
3652 alt_regno = next_scratch_gr_reg ();
3653 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3654 current_frame_info.gr_used_mask |= 1 << alt_regno;
3657 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3658 insn = emit_move_insn (ar_unat_save_reg, reg);
3659 if (current_frame_info.r[reg_save_ar_unat])
3661 RTX_FRAME_RELATED_P (insn) = 1;
3662 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3665 /* Even if we're not going to generate an epilogue, we still
3666 need to save the register so that EH works. */
3667 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3668 emit_insn (gen_prologue_use (ar_unat_save_reg));
3670 else
3671 ar_unat_save_reg = NULL_RTX;
3673 /* Spill all varargs registers. Do this before spilling any GR registers,
3674 since we want the UNAT bits for the GR registers to override the UNAT
3675 bits from varargs, which we don't care about. */
3677 cfa_off = -16;
3678 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3680 reg = gen_rtx_REG (DImode, regno);
3681 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3684 /* Locate the bottom of the register save area. */
3685 cfa_off = (current_frame_info.spill_cfa_off
3686 + current_frame_info.spill_size
3687 + current_frame_info.extra_spill_size);
3689 /* Save the predicate register block either in a register or in memory. */
3690 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3692 reg = gen_rtx_REG (DImode, PR_REG (0));
3693 if (current_frame_info.r[reg_save_pr] != 0)
3695 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3696 reg_emitted (reg_save_pr);
3697 insn = emit_move_insn (alt_reg, reg);
3699 /* ??? Denote pr spill/fill by a DImode move that modifies all
3700 64 hard registers. */
3701 RTX_FRAME_RELATED_P (insn) = 1;
3702 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3704 /* Even if we're not going to generate an epilogue, we still
3705 need to save the register so that EH works. */
3706 if (! epilogue_p)
3707 emit_insn (gen_prologue_use (alt_reg));
3709 else
3711 alt_regno = next_scratch_gr_reg ();
3712 alt_reg = gen_rtx_REG (DImode, alt_regno);
3713 insn = emit_move_insn (alt_reg, reg);
3714 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3715 cfa_off -= 8;
3719 /* Handle AR regs in numerical order. All of them get special handling. */
3720 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3721 && current_frame_info.r[reg_save_ar_unat] == 0)
3723 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3724 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3725 cfa_off -= 8;
3728 /* The alloc insn already copied ar.pfs into a general register. The
3729 only thing we have to do now is copy that register to a stack slot
3730 if we'd not allocated a local register for the job. */
3731 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3732 && current_frame_info.r[reg_save_ar_pfs] == 0)
3734 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3735 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3736 cfa_off -= 8;
3739 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3741 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3742 if (current_frame_info.r[reg_save_ar_lc] != 0)
3744 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3745 reg_emitted (reg_save_ar_lc);
3746 insn = emit_move_insn (alt_reg, reg);
3747 RTX_FRAME_RELATED_P (insn) = 1;
3748 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3750 /* Even if we're not going to generate an epilogue, we still
3751 need to save the register so that EH works. */
3752 if (! epilogue_p)
3753 emit_insn (gen_prologue_use (alt_reg));
3755 else
3757 alt_regno = next_scratch_gr_reg ();
3758 alt_reg = gen_rtx_REG (DImode, alt_regno);
3759 emit_move_insn (alt_reg, reg);
3760 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3761 cfa_off -= 8;
3765 /* Save the return pointer. */
3766 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3768 reg = gen_rtx_REG (DImode, BR_REG (0));
3769 if (current_frame_info.r[reg_save_b0] != 0)
3771 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3772 reg_emitted (reg_save_b0);
3773 insn = emit_move_insn (alt_reg, reg);
3774 RTX_FRAME_RELATED_P (insn) = 1;
3775 add_reg_note (insn, REG_CFA_REGISTER,
3776 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3778 /* Even if we're not going to generate an epilogue, we still
3779 need to save the register so that EH works. */
3780 if (! epilogue_p)
3781 emit_insn (gen_prologue_use (alt_reg));
3783 else
3785 alt_regno = next_scratch_gr_reg ();
3786 alt_reg = gen_rtx_REG (DImode, alt_regno);
3787 emit_move_insn (alt_reg, reg);
3788 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3789 cfa_off -= 8;
3793 if (current_frame_info.r[reg_save_gp])
3795 reg_emitted (reg_save_gp);
3796 insn = emit_move_insn (gen_rtx_REG (DImode,
3797 current_frame_info.r[reg_save_gp]),
3798 pic_offset_table_rtx);
3801 /* We should now be at the base of the gr/br/fr spill area. */
3802 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3803 + current_frame_info.spill_size));
3805 /* Spill all general registers. */
3806 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3807 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3809 reg = gen_rtx_REG (DImode, regno);
3810 do_spill (gen_gr_spill, reg, cfa_off, reg);
3811 cfa_off -= 8;
3814 /* Spill the rest of the BR registers. */
3815 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3816 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3818 alt_regno = next_scratch_gr_reg ();
3819 alt_reg = gen_rtx_REG (DImode, alt_regno);
3820 reg = gen_rtx_REG (DImode, regno);
3821 emit_move_insn (alt_reg, reg);
3822 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3823 cfa_off -= 8;
3826 /* Align the frame and spill all FR registers. */
3827 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3828 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3830 gcc_assert (!(cfa_off & 15));
3831 reg = gen_rtx_REG (XFmode, regno);
3832 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3833 cfa_off -= 16;
3836 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3838 finish_spill_pointers ();
3841 /* Output the textual info surrounding the prologue. */
3843 void
3844 ia64_start_function (FILE *file, const char *fnname,
3845 tree decl ATTRIBUTE_UNUSED)
3847 #if TARGET_ABI_OPEN_VMS
3848 vms_start_function (fnname);
3849 #endif
3851 fputs ("\t.proc ", file);
3852 assemble_name (file, fnname);
3853 fputc ('\n', file);
3854 ASM_OUTPUT_LABEL (file, fnname);
3857 /* Called after register allocation to add any instructions needed for the
3858 epilogue. Using an epilogue insn is favored compared to putting all of the
 3859 instructions in output_function_epilogue(), since it allows the scheduler
 3860 to intermix instructions with the restores of the saved registers. In
3861 some cases, it might be necessary to emit a barrier instruction as the last
3862 insn to prevent such scheduling. */
3864 void
3865 ia64_expand_epilogue (int sibcall_p)
3867 rtx_insn *insn;
3868 rtx reg, alt_reg, ar_unat_save_reg;
3869 int regno, alt_regno, cfa_off;
3871 ia64_compute_frame_size (get_frame_size ());
3873 /* If there is a frame pointer, then we use it instead of the stack
3874 pointer, so that the stack pointer does not need to be valid when
3875 the epilogue starts. See EXIT_IGNORE_STACK. */
3876 if (frame_pointer_needed)
3877 setup_spill_pointers (current_frame_info.n_spilled,
3878 hard_frame_pointer_rtx, 0);
3879 else
3880 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3881 current_frame_info.total_size);
3883 if (current_frame_info.total_size != 0)
3885 /* ??? At this point we must generate a magic insn that appears to
3886 modify the spill iterators and the frame pointer. This would
3887 allow the most scheduling freedom. For now, just hard stop. */
3888 emit_insn (gen_blockage ());
3891 /* Locate the bottom of the register save area. */
3892 cfa_off = (current_frame_info.spill_cfa_off
3893 + current_frame_info.spill_size
3894 + current_frame_info.extra_spill_size);
3896 /* Restore the predicate registers. */
3897 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3899 if (current_frame_info.r[reg_save_pr] != 0)
3901 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3902 reg_emitted (reg_save_pr);
3904 else
3906 alt_regno = next_scratch_gr_reg ();
3907 alt_reg = gen_rtx_REG (DImode, alt_regno);
3908 do_restore (gen_movdi_x, alt_reg, cfa_off);
3909 cfa_off -= 8;
3911 reg = gen_rtx_REG (DImode, PR_REG (0));
3912 emit_move_insn (reg, alt_reg);
3915 /* Restore the application registers. */
3917 /* Load the saved unat from the stack, but do not restore it until
3918 after the GRs have been restored. */
3919 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3921 if (current_frame_info.r[reg_save_ar_unat] != 0)
3923 ar_unat_save_reg
3924 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3925 reg_emitted (reg_save_ar_unat);
3927 else
3929 alt_regno = next_scratch_gr_reg ();
3930 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3931 current_frame_info.gr_used_mask |= 1 << alt_regno;
3932 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3933 cfa_off -= 8;
3936 else
3937 ar_unat_save_reg = NULL_RTX;
3939 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3941 reg_emitted (reg_save_ar_pfs);
3942 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3943 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3944 emit_move_insn (reg, alt_reg);
3946 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3948 alt_regno = next_scratch_gr_reg ();
3949 alt_reg = gen_rtx_REG (DImode, alt_regno);
3950 do_restore (gen_movdi_x, alt_reg, cfa_off);
3951 cfa_off -= 8;
3952 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3953 emit_move_insn (reg, alt_reg);
3956 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3958 if (current_frame_info.r[reg_save_ar_lc] != 0)
3960 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3961 reg_emitted (reg_save_ar_lc);
3963 else
3965 alt_regno = next_scratch_gr_reg ();
3966 alt_reg = gen_rtx_REG (DImode, alt_regno);
3967 do_restore (gen_movdi_x, alt_reg, cfa_off);
3968 cfa_off -= 8;
3970 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3971 emit_move_insn (reg, alt_reg);
3974 /* Restore the return pointer. */
3975 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3977 if (current_frame_info.r[reg_save_b0] != 0)
3979 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3980 reg_emitted (reg_save_b0);
3982 else
3984 alt_regno = next_scratch_gr_reg ();
3985 alt_reg = gen_rtx_REG (DImode, alt_regno);
3986 do_restore (gen_movdi_x, alt_reg, cfa_off);
3987 cfa_off -= 8;
3989 reg = gen_rtx_REG (DImode, BR_REG (0));
3990 emit_move_insn (reg, alt_reg);
3993 /* We should now be at the base of the gr/br/fr spill area. */
3994 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3995 + current_frame_info.spill_size));
3997 /* The GP may be stored on the stack in the prologue, but it's
3998 never restored in the epilogue. Skip the stack slot. */
3999 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
4000 cfa_off -= 8;
4002 /* Restore all general registers. */
4003 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
4004 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4006 reg = gen_rtx_REG (DImode, regno);
4007 do_restore (gen_gr_restore, reg, cfa_off);
4008 cfa_off -= 8;
4011 /* Restore the branch registers. */
4012 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
4013 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4015 alt_regno = next_scratch_gr_reg ();
4016 alt_reg = gen_rtx_REG (DImode, alt_regno);
4017 do_restore (gen_movdi_x, alt_reg, cfa_off);
4018 cfa_off -= 8;
4019 reg = gen_rtx_REG (DImode, regno);
4020 emit_move_insn (reg, alt_reg);
4023 /* Restore floating point registers. */
4024 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
4025 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4027 gcc_assert (!(cfa_off & 15));
4028 reg = gen_rtx_REG (XFmode, regno);
4029 do_restore (gen_fr_restore_x, reg, cfa_off);
4030 cfa_off -= 16;
4033 /* Restore ar.unat for real. */
4034 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
4036 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
4037 emit_move_insn (reg, ar_unat_save_reg);
4040 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
4042 finish_spill_pointers ();
4044 if (current_frame_info.total_size
4045 || cfun->machine->ia64_eh_epilogue_sp
4046 || frame_pointer_needed)
4048 /* ??? At this point we must generate a magic insn that appears to
4049 modify the spill iterators, the stack pointer, and the frame
4050 pointer. This would allow the most scheduling freedom. For now,
4051 just hard stop. */
4052 emit_insn (gen_blockage ());
4055 if (cfun->machine->ia64_eh_epilogue_sp)
4056 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
4057 else if (frame_pointer_needed)
4059 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
4060 RTX_FRAME_RELATED_P (insn) = 1;
4061 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
4063 else if (current_frame_info.total_size)
4065 rtx offset, frame_size_rtx;
4067 frame_size_rtx = GEN_INT (current_frame_info.total_size);
4068 if (satisfies_constraint_I (frame_size_rtx))
4069 offset = frame_size_rtx;
4070 else
4072 regno = next_scratch_gr_reg ();
4073 offset = gen_rtx_REG (DImode, regno);
4074 emit_move_insn (offset, frame_size_rtx);
4077 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
4078 offset));
4080 RTX_FRAME_RELATED_P (insn) = 1;
4081 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4082 gen_rtx_SET (VOIDmode,
4083 stack_pointer_rtx,
4084 gen_rtx_PLUS (DImode,
4085 stack_pointer_rtx,
4086 frame_size_rtx)));
4089 if (cfun->machine->ia64_eh_epilogue_bsp)
4090 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
4092 if (! sibcall_p)
4093 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
4094 else
4096 int fp = GR_REG (2);
 4097 /* We need a throw-away register here; r0 and r1 are reserved,
 4098 so r2 is the first available call-clobbered register. If
4099 there was a frame_pointer register, we may have swapped the
4100 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
4101 sure we're using the string "r2" when emitting the register
4102 name for the assembler. */
4103 if (current_frame_info.r[reg_fp]
4104 && current_frame_info.r[reg_fp] == GR_REG (2))
4105 fp = HARD_FRAME_POINTER_REGNUM;
4107 /* We must emit an alloc to force the input registers to become output
4108 registers. Otherwise, if the callee tries to pass its parameters
4109 through to another call without an intervening alloc, then these
4110 values get lost. */
4111 /* ??? We don't need to preserve all input registers. We only need to
4112 preserve those input registers used as arguments to the sibling call.
4113 It is unclear how to compute that number here. */
4114 if (current_frame_info.n_input_regs != 0)
4116 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
4118 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
4119 const0_rtx, const0_rtx,
4120 n_inputs, const0_rtx));
4121 RTX_FRAME_RELATED_P (insn) = 1;
4123 /* ??? We need to mark the alloc as frame-related so that it gets
4124 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
4125 But there's nothing dwarf2 related to be done wrt the register
4126 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
4127 the empty parallel means dwarf2out will not see anything. */
4128 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4129 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
4134 /* Return 1 if br.ret can do all the work required to return from a
4135 function. */
4138 ia64_direct_return (void)
4140 if (reload_completed && ! frame_pointer_needed)
4142 ia64_compute_frame_size (get_frame_size ());
4144 return (current_frame_info.total_size == 0
4145 && current_frame_info.n_spilled == 0
4146 && current_frame_info.r[reg_save_b0] == 0
4147 && current_frame_info.r[reg_save_pr] == 0
4148 && current_frame_info.r[reg_save_ar_pfs] == 0
4149 && current_frame_info.r[reg_save_ar_unat] == 0
4150 && current_frame_info.r[reg_save_ar_lc] == 0);
4152 return 0;
4155 /* Return the magic cookie that we use to hold the return address
4156 during early compilation. */
4159 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
4161 if (count != 0)
4162 return NULL;
4163 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
4166 /* Split this value after reload, now that we know where the return
4167 address is saved. */
4169 void
4170 ia64_split_return_addr_rtx (rtx dest)
4172 rtx src;
4174 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
4176 if (current_frame_info.r[reg_save_b0] != 0)
4178 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
4179 reg_emitted (reg_save_b0);
4181 else
4183 HOST_WIDE_INT off;
4184 unsigned int regno;
4185 rtx off_r;
4187 /* Compute offset from CFA for BR0. */
4188 /* ??? Must be kept in sync with ia64_expand_prologue. */
4189 off = (current_frame_info.spill_cfa_off
4190 + current_frame_info.spill_size);
4191 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4192 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4193 off -= 8;
4195 /* Convert CFA offset to a register based offset. */
4196 if (frame_pointer_needed)
4197 src = hard_frame_pointer_rtx;
4198 else
4200 src = stack_pointer_rtx;
4201 off += current_frame_info.total_size;
4204 /* Load address into scratch register. */
4205 off_r = GEN_INT (off);
4206 if (satisfies_constraint_I (off_r))
4207 emit_insn (gen_adddi3 (dest, src, off_r));
4208 else
4210 emit_move_insn (dest, off_r);
4211 emit_insn (gen_adddi3 (dest, src, dest));
4214 src = gen_rtx_MEM (Pmode, dest);
4217 else
4218 src = gen_rtx_REG (DImode, BR_REG (0));
4220 emit_move_insn (dest, src);
4224 ia64_hard_regno_rename_ok (int from, int to)
4226 /* Don't clobber any of the registers we reserved for the prologue. */
4227 unsigned int r;
4229 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4230 if (to == current_frame_info.r[r]
4231 || from == current_frame_info.r[r]
4232 || to == emitted_frame_related_regs[r]
4233 || from == emitted_frame_related_regs[r])
4234 return 0;
4236 /* Don't use output registers outside the register frame. */
4237 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4238 return 0;
4240 /* Retain even/oddness on predicate register pairs. */
4241 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4242 return (from & 1) == (to & 1);
4244 return 1;
4247 /* Target hook for assembling integer objects. Handle word-sized
4248 aligned objects and detect the cases when @fptr is needed. */
4250 static bool
4251 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4253 if (size == POINTER_SIZE / BITS_PER_UNIT
4254 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4255 && GET_CODE (x) == SYMBOL_REF
4256 && SYMBOL_REF_FUNCTION_P (x))
4258 static const char * const directive[2][2] = {
4259 /* 64-bit pointer */ /* 32-bit pointer */
4260 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4261 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4263 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4264 output_addr_const (asm_out_file, x);
4265 fputs (")\n", asm_out_file);
4266 return true;
4268 return default_assemble_integer (x, size, aligned_p);
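 /* For example, for a 64-bit aligned pointer to a hypothetical function
    "foo" the table above selects

        data8 @fptr(foo)

    while the unaligned 32-bit (ILP32) case would produce

        data4.ua @fptr(foo)

    "foo" being only a placeholder symbol.  */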
4271 /* Emit the function prologue. */
4273 static void
4274 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4276 int mask, grsave, grsave_prev;
4278 if (current_frame_info.need_regstk)
4279 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4280 current_frame_info.n_input_regs,
4281 current_frame_info.n_local_regs,
4282 current_frame_info.n_output_regs,
4283 current_frame_info.n_rotate_regs);
4285 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4286 return;
4288 /* Emit the .prologue directive. */
4290 mask = 0;
4291 grsave = grsave_prev = 0;
4292 if (current_frame_info.r[reg_save_b0] != 0)
4294 mask |= 8;
4295 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4297 if (current_frame_info.r[reg_save_ar_pfs] != 0
4298 && (grsave_prev == 0
4299 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4301 mask |= 4;
4302 if (grsave_prev == 0)
4303 grsave = current_frame_info.r[reg_save_ar_pfs];
4304 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4306 if (current_frame_info.r[reg_fp] != 0
4307 && (grsave_prev == 0
4308 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4310 mask |= 2;
4311 if (grsave_prev == 0)
4312 grsave = HARD_FRAME_POINTER_REGNUM;
4313 grsave_prev = current_frame_info.r[reg_fp];
4315 if (current_frame_info.r[reg_save_pr] != 0
4316 && (grsave_prev == 0
4317 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4319 mask |= 1;
4320 if (grsave_prev == 0)
4321 grsave = current_frame_info.r[reg_save_pr];
4324 if (mask && TARGET_GNU_AS)
4325 fprintf (file, "\t.prologue %d, %d\n", mask,
4326 ia64_dbx_register_number (grsave));
4327 else
4328 fputs ("\t.prologue\n", file);
4330 /* Emit a .spill directive, if necessary, to relocate the base of
4331 the register spill area. */
4332 if (current_frame_info.spill_cfa_off != -16)
4333 fprintf (file, "\t.spill %ld\n",
4334 (long) (current_frame_info.spill_cfa_off
4335 + current_frame_info.spill_size));
4338 /* Emit the .body directive at the scheduled end of the prologue. */
4340 static void
4341 ia64_output_function_end_prologue (FILE *file)
4343 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4344 return;
4346 fputs ("\t.body\n", file);
4349 /* Emit the function epilogue. */
4351 static void
4352 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4353 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4355 int i;
4357 if (current_frame_info.r[reg_fp])
4359 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4360 reg_names[HARD_FRAME_POINTER_REGNUM]
4361 = reg_names[current_frame_info.r[reg_fp]];
4362 reg_names[current_frame_info.r[reg_fp]] = tmp;
4363 reg_emitted (reg_fp);
4365 if (! TARGET_REG_NAMES)
4367 for (i = 0; i < current_frame_info.n_input_regs; i++)
4368 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4369 for (i = 0; i < current_frame_info.n_local_regs; i++)
4370 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4371 for (i = 0; i < current_frame_info.n_output_regs; i++)
4372 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4375 current_frame_info.initialized = 0;
4379 ia64_dbx_register_number (int regno)
4381 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4382 from its home at loc79 to something inside the register frame. We
4383 must perform the same renumbering here for the debug info. */
4384 if (current_frame_info.r[reg_fp])
4386 if (regno == HARD_FRAME_POINTER_REGNUM)
4387 regno = current_frame_info.r[reg_fp];
4388 else if (regno == current_frame_info.r[reg_fp])
4389 regno = HARD_FRAME_POINTER_REGNUM;
4392 if (IN_REGNO_P (regno))
4393 return 32 + regno - IN_REG (0);
4394 else if (LOC_REGNO_P (regno))
4395 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4396 else if (OUT_REGNO_P (regno))
4397 return (32 + current_frame_info.n_input_regs
4398 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4399 else
4400 return regno;
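 /* Worked example with a hypothetical frame of 2 input and 3 local
    registers: in1 maps to debug register 33 (32 + 1), loc0 to 34
    (32 + 2 inputs), out0 to 37 (32 + 2 inputs + 3 locals), and a static
    register such as r4 keeps its own number.  */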
4403 /* Implement TARGET_TRAMPOLINE_INIT.
4405 The trampoline should set the static chain pointer to value placed
4406 into the trampoline and should branch to the specified routine.
4407 To make the normal indirect-subroutine calling convention work,
4408 the trampoline must look like a function descriptor; the first
4409 word being the target address and the second being the target's
4410 global pointer.
4412 We abuse the concept of a global pointer by arranging for it
4413 to point to the data we need to load. The complete trampoline
4414 has the following form:
4416 +-------------------+ \
4417 TRAMP: | __ia64_trampoline | |
4418 +-------------------+ > fake function descriptor
4419 | TRAMP+16 | |
4420 +-------------------+ /
4421 | target descriptor |
4422 +-------------------+
4423 | static link |
4424 +-------------------+
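 /* Viewed as data, the 32-byte block initialized below corresponds to
    the hypothetical C layout

        struct ia64_tramp
        {
          void *fake_fd_entry;    -- address of __ia64_trampoline
          void *fake_fd_gp;       -- TRAMP + 16, used as the fake "gp"
          void *target_fd;        -- descriptor of the real target function
          void *static_chain;     -- static link value
        };

    with each field 8 bytes wide; the struct and field names are purely
    illustrative.  */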
4427 static void
4428 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4430 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4431 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4433 /* The Intel assembler requires that the global __ia64_trampoline symbol
4434 be declared explicitly */
4435 if (!TARGET_GNU_AS)
4437 static bool declared_ia64_trampoline = false;
4439 if (!declared_ia64_trampoline)
4441 declared_ia64_trampoline = true;
4442 (*targetm.asm_out.globalize_label) (asm_out_file,
4443 "__ia64_trampoline");
4447 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4448 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4449 fnaddr = convert_memory_address (Pmode, fnaddr);
4450 static_chain = convert_memory_address (Pmode, static_chain);
4452 /* Load up our iterator. */
4453 addr_reg = copy_to_reg (addr);
4454 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4456 /* The first two words are the fake descriptor:
4457 __ia64_trampoline, ADDR+16. */
4458 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4459 if (TARGET_ABI_OPEN_VMS)
4461 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4462 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4463 relocation against function symbols to make it identical to the
4464 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4465 strict ELF and dereference to get the bare code address. */
4466 rtx reg = gen_reg_rtx (Pmode);
4467 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4468 emit_move_insn (reg, tramp);
4469 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4470 tramp = reg;
4472 emit_move_insn (m_tramp, tramp);
4473 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4474 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4476 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
4477 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4478 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4480 /* The third word is the target descriptor. */
4481 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4482 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4483 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4485 /* The fourth word is the static chain. */
4486 emit_move_insn (m_tramp, static_chain);
4489 /* Do any needed setup for a variadic function. CUM has not been updated
4490 for the last named argument which has type TYPE and mode MODE.
4492 We generate the actual spill instructions during prologue generation. */
4494 static void
4495 ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
4496 tree type, int * pretend_size,
4497 int second_time ATTRIBUTE_UNUSED)
4499 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4501 /* Skip the current argument. */
4502 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4504 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4506 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4507 *pretend_size = n * UNITS_PER_WORD;
4508 cfun->machine->n_varargs = n;
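 /* For example, if the named arguments of a variadic function occupy
    three slots, then n = MAX_ARGUMENT_SLOTS - 3 = 5 (assuming the usual
    eight argument slots), *pretend_size becomes 5 * UNITS_PER_WORD = 40
    bytes, and the prologue code spills the five remaining anonymous
    argument registers.  */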
4512 /* Check whether TYPE is a homogeneous floating point aggregate. If
4513 it is, return the mode of the floating point type that appears
4514 in all leafs. If it is not, return VOIDmode.
 4516 An aggregate is a homogeneous floating point aggregate if all
 4517 fields/elements in it have the same floating point type (e.g.,
4518 SFmode). 128-bit quad-precision floats are excluded.
4520 Variable sized aggregates should never arrive here, since we should
4521 have already decided to pass them by reference. Top-level zero-sized
4522 aggregates are excluded because our parallels crash the middle-end. */
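 /* Hypothetical examples of the classification performed below:

        struct pt  { float x, y; };         -- HFA, element mode SFmode
        struct box { double lo, hi[2]; };   -- HFA, element mode DFmode
        struct mix { float x; double y; };  -- not an HFA (mixed modes)
        _Complex float                      -- HFA-like, inner mode SFmode

    A TFmode (128-bit quad) member disqualifies a type, per the
    exclusion noted above.  */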
4524 static enum machine_mode
4525 hfa_element_mode (const_tree type, bool nested)
4527 enum machine_mode element_mode = VOIDmode;
4528 enum machine_mode mode;
4529 enum tree_code code = TREE_CODE (type);
4530 int know_element_mode = 0;
4531 tree t;
4533 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4534 return VOIDmode;
4536 switch (code)
4538 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4539 case BOOLEAN_TYPE: case POINTER_TYPE:
4540 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4541 case LANG_TYPE: case FUNCTION_TYPE:
4542 return VOIDmode;
4544 /* Fortran complex types are supposed to be HFAs, so we need to handle
4545 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4546 types though. */
4547 case COMPLEX_TYPE:
4548 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4549 && TYPE_MODE (type) != TCmode)
4550 return GET_MODE_INNER (TYPE_MODE (type));
4551 else
4552 return VOIDmode;
4554 case REAL_TYPE:
4555 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4556 mode if this is contained within an aggregate. */
4557 if (nested && TYPE_MODE (type) != TFmode)
4558 return TYPE_MODE (type);
4559 else
4560 return VOIDmode;
4562 case ARRAY_TYPE:
4563 return hfa_element_mode (TREE_TYPE (type), 1);
4565 case RECORD_TYPE:
4566 case UNION_TYPE:
4567 case QUAL_UNION_TYPE:
4568 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4570 if (TREE_CODE (t) != FIELD_DECL)
4571 continue;
4573 mode = hfa_element_mode (TREE_TYPE (t), 1);
4574 if (know_element_mode)
4576 if (mode != element_mode)
4577 return VOIDmode;
4579 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4580 return VOIDmode;
4581 else
4583 know_element_mode = 1;
4584 element_mode = mode;
4587 return element_mode;
4589 default:
4590 /* If we reach here, we probably have some front-end specific type
4591 that the backend doesn't know about. This can happen via the
4592 aggregate_value_p call in init_function_start. All we can do is
4593 ignore unknown tree types. */
4594 return VOIDmode;
4597 return VOIDmode;
4600 /* Return the number of words required to hold a quantity of TYPE and MODE
4601 when passed as an argument. */
4602 static int
4603 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4605 int words;
4607 if (mode == BLKmode)
4608 words = int_size_in_bytes (type);
4609 else
4610 words = GET_MODE_SIZE (mode);
4612 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
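 /* E.g. an SImode argument occupies 1 word, a 12-byte BLKmode aggregate
    (12 + 7) / 8 = 2 words, and a TFmode value 16 / 8 = 2 words,
    UNITS_PER_WORD being 8 on this target.  */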
4615 /* Return the number of registers that should be skipped so the current
4616 argument (described by TYPE and WORDS) will be properly aligned.
4618 Integer and float arguments larger than 8 bytes start at the next
4619 even boundary. Aggregates larger than 8 bytes start at the next
4620 even boundary if the aggregate has 16 byte alignment. Note that
4621 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4622 but are still to be aligned in registers.
4624 ??? The ABI does not specify how to handle aggregates with
4625 alignment from 9 to 15 bytes, or greater than 16. We handle them
4626 all as if they had 16 byte alignment. Such aggregates can occur
4627 only if gcc extensions are used. */
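 /* Worked example (illustrative numbers): with cum->words == 3, i.e. an
    odd slot, a 16-byte-aligned aggregate or a TFmode scalar skips one
    register and starts at slot 4, while an 8-byte integer or double at
    the same position is not shifted.  */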
4628 static int
4629 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4630 const_tree type, int words)
4632 /* No registers are skipped on VMS. */
4633 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4634 return 0;
4636 if (type
4637 && TREE_CODE (type) != INTEGER_TYPE
4638 && TREE_CODE (type) != REAL_TYPE)
4639 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4640 else
4641 return words > 1;
4644 /* Return rtx for register where argument is passed, or zero if it is passed
4645 on the stack. */
4646 /* ??? 128-bit quad-precision floats are always passed in general
4647 registers. */
4649 static rtx
4650 ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
4651 const_tree type, bool named, bool incoming)
4653 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4655 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4656 int words = ia64_function_arg_words (type, mode);
4657 int offset = ia64_function_arg_offset (cum, type, words);
4658 enum machine_mode hfa_mode = VOIDmode;
4660 /* For OPEN VMS, emit the instruction setting up the argument register here,
4661 when we know this will be together with the other arguments setup related
4662 insns. This is not the conceptually best place to do this, but this is
4663 the easiest as we have convenient access to cumulative args info. */
4665 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4666 && named == 1)
4668 unsigned HOST_WIDE_INT regval = cum->words;
4669 int i;
4671 for (i = 0; i < 8; i++)
4672 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4674 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4675 GEN_INT (regval));
4678 /* If all argument slots are used, then it must go on the stack. */
4679 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4680 return 0;
 4682 /* On OpenVMS the argument is either in Rn or Fn. */
4683 if (TARGET_ABI_OPEN_VMS)
4685 if (FLOAT_MODE_P (mode))
4686 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4687 else
4688 return gen_rtx_REG (mode, basereg + cum->words);
4691 /* Check for and handle homogeneous FP aggregates. */
4692 if (type)
4693 hfa_mode = hfa_element_mode (type, 0);
4695 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4696 and unprototyped hfas are passed specially. */
4697 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4699 rtx loc[16];
4700 int i = 0;
4701 int fp_regs = cum->fp_regs;
4702 int int_regs = cum->words + offset;
4703 int hfa_size = GET_MODE_SIZE (hfa_mode);
4704 int byte_size;
4705 int args_byte_size;
4707 /* If prototyped, pass it in FR regs then GR regs.
4708 If not prototyped, pass it in both FR and GR regs.
4710 If this is an SFmode aggregate, then it is possible to run out of
4711 FR regs while GR regs are still left. In that case, we pass the
4712 remaining part in the GR regs. */
4714 /* Fill the FP regs. We do this always. We stop if we reach the end
4715 of the argument, the last FP register, or the last argument slot. */
4717 byte_size = ((mode == BLKmode)
4718 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4719 args_byte_size = int_regs * UNITS_PER_WORD;
4720 offset = 0;
4721 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4722 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4724 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4725 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4726 + fp_regs)),
4727 GEN_INT (offset));
4728 offset += hfa_size;
4729 args_byte_size += hfa_size;
4730 fp_regs++;
4733 /* If no prototype, then the whole thing must go in GR regs. */
4734 if (! cum->prototype)
4735 offset = 0;
4736 /* If this is an SFmode aggregate, then we might have some left over
4737 that needs to go in GR regs. */
4738 else if (byte_size != offset)
4739 int_regs += offset / UNITS_PER_WORD;
4741 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4743 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4745 enum machine_mode gr_mode = DImode;
4746 unsigned int gr_size;
4748 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4749 then this goes in a GR reg left adjusted/little endian, right
4750 adjusted/big endian. */
4751 /* ??? Currently this is handled wrong, because 4-byte hunks are
4752 always right adjusted/little endian. */
4753 if (offset & 0x4)
4754 gr_mode = SImode;
4755 /* If we have an even 4 byte hunk because the aggregate is a
4756 multiple of 4 bytes in size, then this goes in a GR reg right
4757 adjusted/little endian. */
4758 else if (byte_size - offset == 4)
4759 gr_mode = SImode;
4761 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4762 gen_rtx_REG (gr_mode, (basereg
4763 + int_regs)),
4764 GEN_INT (offset));
4766 gr_size = GET_MODE_SIZE (gr_mode);
4767 offset += gr_size;
4768 if (gr_size == UNITS_PER_WORD
4769 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4770 int_regs++;
4771 else if (gr_size > UNITS_PER_WORD)
4772 int_regs += gr_size / UNITS_PER_WORD;
4774 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
 4777 /* Integral values and aggregates go in general registers. If we have run
 4778 out of FR registers, then FP values must also go in general registers. This
 4779 can happen when we have an SFmode HFA.
4780 else if (mode == TFmode || mode == TCmode
4781 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4783 int byte_size = ((mode == BLKmode)
4784 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4785 if (BYTES_BIG_ENDIAN
4786 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4787 && byte_size < UNITS_PER_WORD
4788 && byte_size > 0)
4790 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4791 gen_rtx_REG (DImode,
4792 (basereg + cum->words
4793 + offset)),
4794 const0_rtx);
4795 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4797 else
4798 return gen_rtx_REG (mode, basereg + cum->words + offset);
4802 /* If there is a prototype, then FP values go in a FR register when
4803 named, and in a GR register when unnamed. */
4804 else if (cum->prototype)
4806 if (named)
4807 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4808 /* In big-endian mode, an anonymous SFmode value must be represented
4809 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4810 the value into the high half of the general register. */
4811 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4812 return gen_rtx_PARALLEL (mode,
4813 gen_rtvec (1,
4814 gen_rtx_EXPR_LIST (VOIDmode,
4815 gen_rtx_REG (DImode, basereg + cum->words + offset),
4816 const0_rtx)));
4817 else
4818 return gen_rtx_REG (mode, basereg + cum->words + offset);
4820 /* If there is no prototype, then FP values go in both FR and GR
4821 registers. */
4822 else
4824 /* See comment above. */
4825 enum machine_mode inner_mode =
4826 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4828 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4829 gen_rtx_REG (mode, (FR_ARG_FIRST
4830 + cum->fp_regs)),
4831 const0_rtx);
4832 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4833 gen_rtx_REG (inner_mode,
4834 (basereg + cum->words
4835 + offset)),
4836 const0_rtx);
4838 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
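 /* As a concrete, hypothetical illustration: a named, prototyped
    argument of type struct { float f[4]; } arriving with cum->fp_regs
    == 0 comes back from here as

        (parallel [(expr_list (reg:SF f8)  (const_int 0))
                   (expr_list (reg:SF f9)  (const_int 4))
                   (expr_list (reg:SF f10) (const_int 8))
                   (expr_list (reg:SF f11) (const_int 12))])

    one SFmode FR register per element at 4-byte offsets; had the FR
    registers run out part way, the tail would continue in GR registers
    as DImode/SImode pieces as coded above.  */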
 4842 /* Implement TARGET_FUNCTION_ARG target hook. */
4844 static rtx
4845 ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
4846 const_tree type, bool named)
4848 return ia64_function_arg_1 (cum, mode, type, named, false);
 4851 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4853 static rtx
4854 ia64_function_incoming_arg (cumulative_args_t cum,
4855 enum machine_mode mode,
4856 const_tree type, bool named)
4858 return ia64_function_arg_1 (cum, mode, type, named, true);
4861 /* Return number of bytes, at the beginning of the argument, that must be
 4862 put in registers. 0 if the argument is entirely in registers or entirely
4863 in memory. */
4865 static int
4866 ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
4867 tree type, bool named ATTRIBUTE_UNUSED)
4869 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4871 int words = ia64_function_arg_words (type, mode);
4872 int offset = ia64_function_arg_offset (cum, type, words);
4874 /* If all argument slots are used, then it must go on the stack. */
4875 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4876 return 0;
4878 /* It doesn't matter whether the argument goes in FR or GR regs. If
4879 it fits within the 8 argument slots, then it goes entirely in
4880 registers. If it extends past the last argument slot, then the rest
4881 goes on the stack. */
4883 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4884 return 0;
4886 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
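 /* Worked example (illustrative): with six slots already used
    (cum->words == 6, offset 0), a 24-byte aggregate needs 3 words;
    6 + 3 exceeds the 8 available slots, so (8 - 6) * UNITS_PER_WORD
    = 16 bytes go in registers and the remaining 8 bytes go on the
    stack.  */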
4889 /* Return ivms_arg_type based on machine_mode. */
4891 static enum ivms_arg_type
4892 ia64_arg_type (enum machine_mode mode)
4894 switch (mode)
4896 case SFmode:
4897 return FS;
4898 case DFmode:
4899 return FT;
4900 default:
4901 return I64;
4905 /* Update CUM to point after this argument. This is patterned after
4906 ia64_function_arg. */
4908 static void
4909 ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4910 const_tree type, bool named)
4912 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4913 int words = ia64_function_arg_words (type, mode);
4914 int offset = ia64_function_arg_offset (cum, type, words);
4915 enum machine_mode hfa_mode = VOIDmode;
4917 /* If all arg slots are already full, then there is nothing to do. */
4918 if (cum->words >= MAX_ARGUMENT_SLOTS)
4920 cum->words += words + offset;
4921 return;
4924 cum->atypes[cum->words] = ia64_arg_type (mode);
4925 cum->words += words + offset;
 4927 /* On OpenVMS the argument is either in Rn or Fn. */
4928 if (TARGET_ABI_OPEN_VMS)
4930 cum->int_regs = cum->words;
4931 cum->fp_regs = cum->words;
4932 return;
4935 /* Check for and handle homogeneous FP aggregates. */
4936 if (type)
4937 hfa_mode = hfa_element_mode (type, 0);
4939 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4940 and unprototyped hfas are passed specially. */
4941 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4943 int fp_regs = cum->fp_regs;
4944 /* This is the original value of cum->words + offset. */
4945 int int_regs = cum->words - words;
4946 int hfa_size = GET_MODE_SIZE (hfa_mode);
4947 int byte_size;
4948 int args_byte_size;
4950 /* If prototyped, pass it in FR regs then GR regs.
4951 If not prototyped, pass it in both FR and GR regs.
4953 If this is an SFmode aggregate, then it is possible to run out of
4954 FR regs while GR regs are still left. In that case, we pass the
4955 remaining part in the GR regs. */
4957 /* Fill the FP regs. We do this always. We stop if we reach the end
4958 of the argument, the last FP register, or the last argument slot. */
4960 byte_size = ((mode == BLKmode)
4961 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4962 args_byte_size = int_regs * UNITS_PER_WORD;
4963 offset = 0;
4964 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4965 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4967 offset += hfa_size;
4968 args_byte_size += hfa_size;
4969 fp_regs++;
4972 cum->fp_regs = fp_regs;
 4975 /* Integral values and aggregates go in general registers. So do TFmode FP
 4976 values. If we have run out of FR registers, then other FP values must also
 4977 go in general registers. This can happen when we have an SFmode HFA.
4978 else if (mode == TFmode || mode == TCmode
4979 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4980 cum->int_regs = cum->words;
4982 /* If there is a prototype, then FP values go in a FR register when
4983 named, and in a GR register when unnamed. */
4984 else if (cum->prototype)
4986 if (! named)
4987 cum->int_regs = cum->words;
4988 else
4989 /* ??? Complex types should not reach here. */
4990 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4992 /* If there is no prototype, then FP values go in both FR and GR
4993 registers. */
4994 else
4996 /* ??? Complex types should not reach here. */
4997 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4998 cum->int_regs = cum->words;
5002 /* Arguments with alignment larger than 8 bytes start at the next even
 5003 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
5004 even though their normal alignment is 8 bytes. See ia64_function_arg. */
5006 static unsigned int
5007 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
5009 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
5010 return PARM_BOUNDARY * 2;
5012 if (type)
5014 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
5015 return PARM_BOUNDARY * 2;
5016 else
5017 return PARM_BOUNDARY;
5020 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
5021 return PARM_BOUNDARY * 2;
5022 else
5023 return PARM_BOUNDARY;
5026 /* True if it is OK to do sibling call optimization for the specified
5027 call expression EXP. DECL will be the called function, or NULL if
5028 this is an indirect call. */
5029 static bool
5030 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5032 /* We can't perform a sibcall if the current function has the syscall_linkage
5033 attribute. */
5034 if (lookup_attribute ("syscall_linkage",
5035 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
5036 return false;
5038 /* We must always return with our current GP. This means we can
5039 only sibcall to functions defined in the current module unless
5040 TARGET_CONST_GP is set to true. */
5041 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
5045 /* Implement va_arg. */
5047 static tree
5048 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5049 gimple_seq *post_p)
5051 /* Variable sized types are passed by reference. */
5052 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5054 tree ptrtype = build_pointer_type (type);
5055 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
5056 return build_va_arg_indirect_ref (addr);
5059 /* Aggregate arguments with alignment larger than 8 bytes start at
5060 the next even boundary. Integer and floating point arguments
5061 do so if they are larger than 8 bytes, whether or not they are
5062 also aligned larger than 8 bytes. */
5063 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
5064 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
5066 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
5067 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5068 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
5069 gimplify_assign (unshare_expr (valist), t, pre_p);
5072 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
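 /* The adjustment above amounts to valist = (valist + 15) & -16 with
    UNITS_PER_WORD == 8; e.g. a hypothetical valist ending in 0x38 is
    advanced to the next 0x40 boundary before the standard va_arg
    expansion runs, while an already 16-byte-aligned pointer is left
    unchanged.  */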
 5075 /* Return 1 if the function return value is returned in memory. Return 0 if
 5076 it is in a register. */
5078 static bool
5079 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
5081 enum machine_mode mode;
5082 enum machine_mode hfa_mode;
5083 HOST_WIDE_INT byte_size;
5085 mode = TYPE_MODE (valtype);
5086 byte_size = GET_MODE_SIZE (mode);
5087 if (mode == BLKmode)
5089 byte_size = int_size_in_bytes (valtype);
5090 if (byte_size < 0)
5091 return true;
5094 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
5096 hfa_mode = hfa_element_mode (valtype, 0);
5097 if (hfa_mode != VOIDmode)
5099 int hfa_size = GET_MODE_SIZE (hfa_mode);
5101 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
5102 return true;
5103 else
5104 return false;
5106 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
5107 return true;
5108 else
5109 return false;
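 /* Examples (illustrative): a struct of four doubles is an HFA of
    DFmode and 32 / 8 = 4 <= MAX_ARGUMENT_SLOTS, so it is returned in FP
    registers; a non-HFA aggregate larger than
    UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes (32 bytes with the usual
    four GR return slots) is returned in memory.  */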
5112 /* Return rtx for register that holds the function return value. */
5114 static rtx
5115 ia64_function_value (const_tree valtype,
5116 const_tree fn_decl_or_type,
5117 bool outgoing ATTRIBUTE_UNUSED)
5119 enum machine_mode mode;
5120 enum machine_mode hfa_mode;
5121 int unsignedp;
5122 const_tree func = fn_decl_or_type;
5124 if (fn_decl_or_type
5125 && !DECL_P (fn_decl_or_type))
5126 func = NULL;
5128 mode = TYPE_MODE (valtype);
5129 hfa_mode = hfa_element_mode (valtype, 0);
5131 if (hfa_mode != VOIDmode)
5133 rtx loc[8];
5134 int i;
5135 int hfa_size;
5136 int byte_size;
5137 int offset;
5139 hfa_size = GET_MODE_SIZE (hfa_mode);
5140 byte_size = ((mode == BLKmode)
5141 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
5142 offset = 0;
5143 for (i = 0; offset < byte_size; i++)
5145 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5146 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
5147 GEN_INT (offset));
5148 offset += hfa_size;
5150 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5152 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
5153 return gen_rtx_REG (mode, FR_ARG_FIRST);
5154 else
5156 bool need_parallel = false;
5158 /* In big-endian mode, we need to manage the layout of aggregates
5159 in the registers so that we get the bits properly aligned in
5160 the highpart of the registers. */
5161 if (BYTES_BIG_ENDIAN
5162 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
5163 need_parallel = true;
5165 /* Something like struct S { long double x; char a[0] } is not an
5166 HFA structure, and therefore doesn't go in fp registers. But
5167 the middle-end will give it XFmode anyway, and XFmode values
5168 don't normally fit in integer registers. So we need to smuggle
5169 the value inside a parallel. */
5170 else if (mode == XFmode || mode == XCmode || mode == RFmode)
5171 need_parallel = true;
5173 if (need_parallel)
5175 rtx loc[8];
5176 int offset;
5177 int bytesize;
5178 int i;
5180 offset = 0;
5181 bytesize = int_size_in_bytes (valtype);
5182 /* An empty PARALLEL is invalid here, but the return value
5183 doesn't matter for empty structs. */
5184 if (bytesize == 0)
5185 return gen_rtx_REG (mode, GR_RET_FIRST);
5186 for (i = 0; offset < bytesize; i++)
5188 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5189 gen_rtx_REG (DImode,
5190 GR_RET_FIRST + i),
5191 GEN_INT (offset));
5192 offset += UNITS_PER_WORD;
5194 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5197 mode = promote_function_mode (valtype, mode, &unsignedp,
5198 func ? TREE_TYPE (func) : NULL_TREE,
5199 true);
5201 return gen_rtx_REG (mode, GR_RET_FIRST);
5205 /* Worker function for TARGET_LIBCALL_VALUE. */
5207 static rtx
5208 ia64_libcall_value (enum machine_mode mode,
5209 const_rtx fun ATTRIBUTE_UNUSED)
5211 return gen_rtx_REG (mode,
5212 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5213 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5214 && (mode) != TFmode)
5215 ? FR_RET_FIRST : GR_RET_FIRST));
5218 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5220 static bool
5221 ia64_function_value_regno_p (const unsigned int regno)
5223 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5224 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5227 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5228 We need to emit DTP-relative relocations. */
5230 static void
5231 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5233 gcc_assert (size == 4 || size == 8);
5234 if (size == 4)
5235 fputs ("\tdata4.ua\t@dtprel(", file);
5236 else
5237 fputs ("\tdata8.ua\t@dtprel(", file);
5238 output_addr_const (file, x);
5239 fputs (")", file);
5242 /* Print a memory address as an operand to reference that memory location. */
5244 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5245 also call this from ia64_print_operand for memory addresses. */
5247 static void
5248 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5249 rtx address ATTRIBUTE_UNUSED)
5253 /* Print an operand to an assembler instruction.
5254 C Swap and print a comparison operator.
5255 D Print an FP comparison operator.
5256 E Print 32 - constant, for SImode shifts as extract.
5257 e Print 64 - constant, for DImode rotates.
5258 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5259 a floating point register emitted normally.
5260 G A floating point constant.
5261 I Invert a predicate register by adding 1.
5262 J Select the proper predicate register for a condition.
5263 j Select the inverse predicate register for a condition.
5264 O Append .acq for volatile load.
5265 P Postincrement of a MEM.
5266 Q Append .rel for volatile store.
 5267 R Print .s, .d, or nothing for a single, double, or no truncation.
5268 S Shift amount for shladd instruction.
5269 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5270 for Intel assembler.
5271 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5272 for Intel assembler.
5273 X A pair of floating point registers.
5274 r Print register name, or constant 0 as r0. HP compatibility for
5275 Linux kernel.
5276 v Print vector constant value as an 8-byte integer value. */
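 /* For instance, given the handling below, %C applied to a (ge ...)
    comparison prints "le" (the swapped condition), %D applied to
    (unlt ...) prints "nge", and %P applied to a POST_INC DImode memory
    operand prints ", 8".  */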
5278 static void
5279 ia64_print_operand (FILE * file, rtx x, int code)
5281 const char *str;
5283 switch (code)
5285 case 0:
5286 /* Handled below. */
5287 break;
5289 case 'C':
5291 enum rtx_code c = swap_condition (GET_CODE (x));
5292 fputs (GET_RTX_NAME (c), file);
5293 return;
5296 case 'D':
5297 switch (GET_CODE (x))
5299 case NE:
5300 str = "neq";
5301 break;
5302 case UNORDERED:
5303 str = "unord";
5304 break;
5305 case ORDERED:
5306 str = "ord";
5307 break;
5308 case UNLT:
5309 str = "nge";
5310 break;
5311 case UNLE:
5312 str = "ngt";
5313 break;
5314 case UNGT:
5315 str = "nle";
5316 break;
5317 case UNGE:
5318 str = "nlt";
5319 break;
5320 case UNEQ:
5321 case LTGT:
5322 gcc_unreachable ();
5323 default:
5324 str = GET_RTX_NAME (GET_CODE (x));
5325 break;
5327 fputs (str, file);
5328 return;
5330 case 'E':
5331 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5332 return;
5334 case 'e':
5335 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5336 return;
5338 case 'F':
5339 if (x == CONST0_RTX (GET_MODE (x)))
5340 str = reg_names [FR_REG (0)];
5341 else if (x == CONST1_RTX (GET_MODE (x)))
5342 str = reg_names [FR_REG (1)];
5343 else
5345 gcc_assert (GET_CODE (x) == REG);
5346 str = reg_names [REGNO (x)];
5348 fputs (str, file);
5349 return;
5351 case 'G':
5353 long val[4];
5354 REAL_VALUE_TYPE rv;
5355 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5356 real_to_target (val, &rv, GET_MODE (x));
5357 if (GET_MODE (x) == SFmode)
5358 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5359 else if (GET_MODE (x) == DFmode)
5360 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5361 & 0xffffffff,
5362 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5363 & 0xffffffff);
5364 else
5365 output_operand_lossage ("invalid %%G mode");
5367 return;
5369 case 'I':
5370 fputs (reg_names [REGNO (x) + 1], file);
5371 return;
5373 case 'J':
5374 case 'j':
5376 unsigned int regno = REGNO (XEXP (x, 0));
5377 if (GET_CODE (x) == EQ)
5378 regno += 1;
5379 if (code == 'j')
5380 regno ^= 1;
5381 fputs (reg_names [regno], file);
5383 return;
5385 case 'O':
5386 if (MEM_VOLATILE_P (x))
5387 fputs(".acq", file);
5388 return;
5390 case 'P':
5392 HOST_WIDE_INT value;
5394 switch (GET_CODE (XEXP (x, 0)))
5396 default:
5397 return;
5399 case POST_MODIFY:
5400 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5401 if (GET_CODE (x) == CONST_INT)
5402 value = INTVAL (x);
5403 else
5405 gcc_assert (GET_CODE (x) == REG);
5406 fprintf (file, ", %s", reg_names[REGNO (x)]);
5407 return;
5409 break;
5411 case POST_INC:
5412 value = GET_MODE_SIZE (GET_MODE (x));
5413 break;
5415 case POST_DEC:
5416 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5417 break;
5420 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5421 return;
5424 case 'Q':
5425 if (MEM_VOLATILE_P (x))
5426 fputs(".rel", file);
5427 return;
5429 case 'R':
5430 if (x == CONST0_RTX (GET_MODE (x)))
5431 fputs(".s", file);
5432 else if (x == CONST1_RTX (GET_MODE (x)))
5433 fputs(".d", file);
5434 else if (x == CONST2_RTX (GET_MODE (x)))
5436 else
5437 output_operand_lossage ("invalid %%R value");
5438 return;
5440 case 'S':
5441 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5442 return;
5444 case 'T':
5445 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5447 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5448 return;
5450 break;
5452 case 'U':
5453 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5455 const char *prefix = "0x";
5456 if (INTVAL (x) & 0x80000000)
5458 fprintf (file, "0xffffffff");
5459 prefix = "";
5461 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5462 return;
5464 break;
5466 case 'X':
5468 unsigned int regno = REGNO (x);
5469 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5471 return;
5473 case 'r':
5474 /* If this operand is the constant zero, write it as register zero.
5475 Any register, zero, or CONST_INT value is OK here. */
5476 if (GET_CODE (x) == REG)
5477 fputs (reg_names[REGNO (x)], file);
5478 else if (x == CONST0_RTX (GET_MODE (x)))
5479 fputs ("r0", file);
5480 else if (GET_CODE (x) == CONST_INT)
5481 output_addr_const (file, x);
5482 else
5483 output_operand_lossage ("invalid %%r value");
5484 return;
5486 case 'v':
5487 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5488 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5489 break;
5491 case '+':
5493 const char *which;
5495 /* For conditional branches, returns or calls, substitute
5496 sptk, dptk, dpnt, or spnt for %s. */
5497 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5498 if (x)
5500 int pred_val = XINT (x, 0);
5502 /* Guess top and bottom 10% statically predicted. */
5503 if (pred_val < REG_BR_PROB_BASE / 50
5504 && br_prob_note_reliable_p (x))
5505 which = ".spnt";
5506 else if (pred_val < REG_BR_PROB_BASE / 2)
5507 which = ".dpnt";
5508 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5509 || !br_prob_note_reliable_p (x))
5510 which = ".dptk";
5511 else
5512 which = ".sptk";
5514 else if (CALL_P (current_output_insn))
5515 which = ".sptk";
5516 else
5517 which = ".dptk";
5519 fputs (which, file);
5520 return;
5523 case ',':
5524 x = current_insn_predicate;
5525 if (x)
5527 unsigned int regno = REGNO (XEXP (x, 0));
5528 if (GET_CODE (x) == EQ)
5529 regno += 1;
5530 fprintf (file, "(%s) ", reg_names [regno]);
5532 return;
5534 default:
5535 output_operand_lossage ("ia64_print_operand: unknown code");
5536 return;
5539 switch (GET_CODE (x))
5541 /* This happens for the spill/restore instructions. */
5542 case POST_INC:
5543 case POST_DEC:
5544 case POST_MODIFY:
5545 x = XEXP (x, 0);
5546 /* ... fall through ... */
5548 case REG:
5549 fputs (reg_names [REGNO (x)], file);
5550 break;
5552 case MEM:
5554 rtx addr = XEXP (x, 0);
5555 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5556 addr = XEXP (addr, 0);
5557 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5558 break;
5561 default:
5562 output_addr_const (file, x);
5563 break;
5566 return;
5569 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5571 static bool
5572 ia64_print_operand_punct_valid_p (unsigned char code)
5574 return (code == '+' || code == ',');
5577 /* Compute a (partial) cost for rtx X. Return true if the complete
5578 cost has been computed, and false if subexpressions should be
5579 scanned. In either case, *TOTAL contains the cost result. */
5580 /* ??? This is incomplete. */
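/* A few illustrative values returned below (a sketch, not a complete
   table): a CONST_INT satisfying constraint J costs 0 as the source
   of a SET, an integer MULT wider than HImode costs 10 insns because
   it has to go through the FPU, and the divide/modulo codes are
   priced at 60 insns so that division by a constant is rewritten as
   a multiply.  */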
5582 static bool
5583 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5584 int *total, bool speed ATTRIBUTE_UNUSED)
5586 switch (code)
5588 case CONST_INT:
5589 switch (outer_code)
5591 case SET:
5592 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5593 return true;
5594 case PLUS:
5595 if (satisfies_constraint_I (x))
5596 *total = 0;
5597 else if (satisfies_constraint_J (x))
5598 *total = 1;
5599 else
5600 *total = COSTS_N_INSNS (1);
5601 return true;
5602 default:
5603 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5604 *total = 0;
5605 else
5606 *total = COSTS_N_INSNS (1);
5607 return true;
5610 case CONST_DOUBLE:
5611 *total = COSTS_N_INSNS (1);
5612 return true;
5614 case CONST:
5615 case SYMBOL_REF:
5616 case LABEL_REF:
5617 *total = COSTS_N_INSNS (3);
5618 return true;
5620 case FMA:
5621 *total = COSTS_N_INSNS (4);
5622 return true;
5624 case MULT:
5625 /* For multiplies wider than HImode, we have to go to the FPU,
5626 which normally involves copies. Plus there's the latency
5627 of the multiply itself, and the latency of the instructions to
5628 transfer integer regs to FP regs. */
5629 if (FLOAT_MODE_P (GET_MODE (x)))
5630 *total = COSTS_N_INSNS (4);
5631 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5632 *total = COSTS_N_INSNS (10);
5633 else
5634 *total = COSTS_N_INSNS (2);
5635 return true;
5637 case PLUS:
5638 case MINUS:
5639 if (FLOAT_MODE_P (GET_MODE (x)))
5641 *total = COSTS_N_INSNS (4);
5642 return true;
5644 /* FALLTHRU */
5646 case ASHIFT:
5647 case ASHIFTRT:
5648 case LSHIFTRT:
5649 *total = COSTS_N_INSNS (1);
5650 return true;
5652 case DIV:
5653 case UDIV:
5654 case MOD:
5655 case UMOD:
5656 /* We make divide expensive, so that divide-by-constant will be
5657 optimized to a multiply. */
5658 *total = COSTS_N_INSNS (60);
5659 return true;
5661 default:
5662 return false;
5666 /* Calculate the cost of moving data from a register in class FROM to
5667 one in class TO, using MODE. */
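/* A few illustrative results from the function below: GR<->GR (and
   ADDL<->GR) moves cost 2, a PR<->PR copy costs 3 since it takes two
   insns, and XFmode/RFmode moves involving a non-GR class are priced
   at memory_move_cost so that reload goes through memory instead.  */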
5669 static int
5670 ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
5671 reg_class_t to)
5673 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5674 if (to == ADDL_REGS)
5675 to = GR_REGS;
5676 if (from == ADDL_REGS)
5677 from = GR_REGS;
5679 /* All costs are symmetric, so reduce cases by putting the
5680 lower number class as the destination. */
5681 if (from < to)
5683 reg_class_t tmp = to;
5684 to = from, from = tmp;
5687 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5688 so that we get secondary memory reloads. Between FR_REGS,
5689 we have to make this at least as expensive as memory_move_cost
5690 to avoid spectacularly poor register class preferencing. */
5691 if (mode == XFmode || mode == RFmode)
5693 if (to != GR_REGS || from != GR_REGS)
5694 return memory_move_cost (mode, to, false);
5695 else
5696 return 3;
5699 switch (to)
5701 case PR_REGS:
5702 /* Moving between PR registers takes two insns. */
5703 if (from == PR_REGS)
5704 return 3;
5705 /* Moving between PR and anything but GR is impossible. */
5706 if (from != GR_REGS)
5707 return memory_move_cost (mode, to, false);
5708 break;
5710 case BR_REGS:
5711 /* Moving between BR and anything but GR is impossible. */
5712 if (from != GR_REGS && from != GR_AND_BR_REGS)
5713 return memory_move_cost (mode, to, false);
5714 break;
5716 case AR_I_REGS:
5717 case AR_M_REGS:
5718 /* Moving between AR and anything but GR is impossible. */
5719 if (from != GR_REGS)
5720 return memory_move_cost (mode, to, false);
5721 break;
5723 case GR_REGS:
5724 case FR_REGS:
5725 case FP_REGS:
5726 case GR_AND_FR_REGS:
5727 case GR_AND_BR_REGS:
5728 case ALL_REGS:
5729 break;
5731 default:
5732 gcc_unreachable ();
5735 return 2;
5738 /* Calculate the cost of moving data of MODE from a register to or from
5739 memory. */
5741 static int
5742 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5743 reg_class_t rclass,
5744 bool in ATTRIBUTE_UNUSED)
5746 if (rclass == GENERAL_REGS
5747 || rclass == FR_REGS
5748 || rclass == FP_REGS
5749 || rclass == GR_AND_FR_REGS)
5750 return 4;
5751 else
5752 return 10;
5755 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5756 on RCLASS to use when copying X into that class. */
5758 static reg_class_t
5759 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5761 switch (rclass)
5763 case FR_REGS:
5764 case FP_REGS:
5765 /* Don't allow volatile mem reloads into floating point registers.
5766 This is defined to force reload to choose the r/m case instead
5767 of the f/f case when reloading (set (reg fX) (mem/v)). */
5768 if (MEM_P (x) && MEM_VOLATILE_P (x))
5769 return NO_REGS;
5771 /* Force all unrecognized constants into the constant pool. */
5772 if (CONSTANT_P (x))
5773 return NO_REGS;
5774 break;
5776 case AR_M_REGS:
5777 case AR_I_REGS:
5778 if (!OBJECT_P (x))
5779 return NO_REGS;
5780 break;
5782 default:
5783 break;
5786 return rclass;
5789 /* This function returns the register class required for a secondary
5790 register when copying between one of the registers in RCLASS, and X,
5791 using MODE. A return value of NO_REGS means that no secondary register
5792 is required. */
5794 enum reg_class
5795 ia64_secondary_reload_class (enum reg_class rclass,
5796 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5798 int regno = -1;
5800 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5801 regno = true_regnum (x);
5803 switch (rclass)
5805 case BR_REGS:
5806 case AR_M_REGS:
5807 case AR_I_REGS:
5808 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5809 interaction. We end up with two pseudos with overlapping lifetimes
5810 both of which are equiv to the same constant, and both of which need
5811 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5812 changes depending on the path length, which means the qty_first_reg
5813 check in make_regs_eqv can give different answers at different times.
5814 At some point I'll probably need a reload_indi pattern to handle
5815 this.
5817 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5818 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5819 non-general registers for good measure. */
5820 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5821 return GR_REGS;
5823 /* This is needed if a pseudo used as a call_operand gets spilled to a
5824 stack slot. */
5825 if (GET_CODE (x) == MEM)
5826 return GR_REGS;
5827 break;
5829 case FR_REGS:
5830 case FP_REGS:
5831 /* Need to go through general registers to get to other class regs. */
5832 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5833 return GR_REGS;
5835 /* This can happen when a paradoxical subreg is an operand to the
5836 muldi3 pattern. */
5837 /* ??? This shouldn't be necessary after instruction scheduling is
5838 enabled, because paradoxical subregs are not accepted by
5839 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5840 stop the paradoxical subreg stupidity in the *_operand functions
5841 in recog.c. */
5842 if (GET_CODE (x) == MEM
5843 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5844 || GET_MODE (x) == QImode))
5845 return GR_REGS;
5847 /* This can happen because of the ior/and/etc patterns that accept FP
5848 registers as operands. If the third operand is a constant, then it
5849 needs to be reloaded into a FP register. */
5850 if (GET_CODE (x) == CONST_INT)
5851 return GR_REGS;
5853 /* This can happen because of register elimination in a muldi3 insn.
5854 E.g. `26107 * (unsigned long)&u'. */
5855 if (GET_CODE (x) == PLUS)
5856 return GR_REGS;
5857 break;
5859 case PR_REGS:
5860 /* ??? This happens if we cse/gcse a BImode value across a call,
5861 and the function has a nonlocal goto. This is because global
5862 does not allocate call crossing pseudos to hard registers when
5863 crtl->has_nonlocal_goto is true. This is relatively
5864 common for C++ programs that use exceptions. To reproduce,
5865 return NO_REGS and compile libstdc++. */
5866 if (GET_CODE (x) == MEM)
5867 return GR_REGS;
5869 /* This can happen when we take a BImode subreg of a DImode value,
5870 and that DImode value winds up in some non-GR register. */
5871 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5872 return GR_REGS;
5873 break;
5875 default:
5876 break;
5879 return NO_REGS;
5883 /* Implement targetm.unspec_may_trap_p hook. */
5884 static int
5885 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5887 switch (XINT (x, 1))
5889 case UNSPEC_LDA:
5890 case UNSPEC_LDS:
5891 case UNSPEC_LDSA:
5892 case UNSPEC_LDCCLR:
5893 case UNSPEC_CHKACLR:
5894 case UNSPEC_CHKS:
5895 /* These unspecs are just wrappers. */
5896 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5899 return default_unspec_may_trap_p (x, flags);
5903 /* Parse the -mfixed-range= option string. */
5905 static void
5906 fix_range (const char *const_str)
5908 int i, first, last;
5909 char *str, *dash, *comma;
5911 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
5912 REG2 are either register names or register numbers. The effect
5913 of this option is to mark the registers in the range from REG1 to
5914 REG2 as ``fixed'' so they won't be used by the compiler. This is
5915 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
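/* For example (illustrative): -mfixed-range=f32-f127 reserves the
   upper floating-point registers, and several ranges may be given at
   once, e.g. -mfixed-range=f12-f15,f32-f127.  */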
5917 i = strlen (const_str);
5918 str = (char *) alloca (i + 1);
5919 memcpy (str, const_str, i + 1);
5921 while (1)
5923 dash = strchr (str, '-');
5924 if (!dash)
5926 warning (0, "value of -mfixed-range must have form REG1-REG2");
5927 return;
5929 *dash = '\0';
5931 comma = strchr (dash + 1, ',');
5932 if (comma)
5933 *comma = '\0';
5935 first = decode_reg_name (str);
5936 if (first < 0)
5938 warning (0, "unknown register name: %s", str);
5939 return;
5942 last = decode_reg_name (dash + 1);
5943 if (last < 0)
5945 warning (0, "unknown register name: %s", dash + 1);
5946 return;
5949 *dash = '-';
5951 if (first > last)
5953 warning (0, "%s-%s is an empty range", str, dash + 1);
5954 return;
5957 for (i = first; i <= last; ++i)
5958 fixed_regs[i] = call_used_regs[i] = 1;
5960 if (!comma)
5961 break;
5963 *comma = ',';
5964 str = comma + 1;
5968 /* Implement TARGET_OPTION_OVERRIDE. */
5970 static void
5971 ia64_option_override (void)
5973 unsigned int i;
5974 cl_deferred_option *opt;
5975 vec<cl_deferred_option> *v
5976 = (vec<cl_deferred_option> *) ia64_deferred_options;
5978 if (v)
5979 FOR_EACH_VEC_ELT (*v, i, opt)
5981 switch (opt->opt_index)
5983 case OPT_mfixed_range_:
5984 fix_range (opt->arg);
5985 break;
5987 default:
5988 gcc_unreachable ();
5992 if (TARGET_AUTO_PIC)
5993 target_flags |= MASK_CONST_GP;
5995 /* Numerous experiments show that IRA-based loop pressure
5996 calculation works better for RTL loop invariant motion on targets
5997 with enough (>= 32) registers. It is an expensive optimization,
5998 so it is enabled only when optimizing for peak performance. */
5999 if (optimize >= 3)
6000 flag_ira_loop_pressure = 1;
6003 ia64_section_threshold = (global_options_set.x_g_switch_value
6004 ? g_switch_value
6005 : IA64_DEFAULT_GVALUE);
6007 init_machine_status = ia64_init_machine_status;
6009 if (align_functions <= 0)
6010 align_functions = 64;
6011 if (align_loops <= 0)
6012 align_loops = 32;
6013 if (TARGET_ABI_OPEN_VMS)
6014 flag_no_common = 1;
6016 ia64_override_options_after_change();
6019 /* Implement targetm.override_options_after_change. */
6021 static void
6022 ia64_override_options_after_change (void)
6024 if (optimize >= 3
6025 && !global_options_set.x_flag_selective_scheduling
6026 && !global_options_set.x_flag_selective_scheduling2)
6028 flag_selective_scheduling2 = 1;
6029 flag_sel_sched_pipelining = 1;
6031 if (mflag_sched_control_spec == 2)
6033 /* Control speculation is on by default for the selective scheduler,
6034 but not for the Haifa scheduler. */
6035 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
6037 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
6039 /* FIXME: remove this when we implement breaking autoinsns as
6040 a transformation. */
6041 flag_auto_inc_dec = 0;
6045 /* Initialize the record of emitted frame related registers. */
6047 void ia64_init_expanders (void)
6049 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
6052 static struct machine_function *
6053 ia64_init_machine_status (void)
6055 return ggc_cleared_alloc<machine_function> ();
6058 static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
6059 static enum attr_type ia64_safe_type (rtx_insn *);
6061 static enum attr_itanium_class
6062 ia64_safe_itanium_class (rtx_insn *insn)
6064 if (recog_memoized (insn) >= 0)
6065 return get_attr_itanium_class (insn);
6066 else if (DEBUG_INSN_P (insn))
6067 return ITANIUM_CLASS_IGNORE;
6068 else
6069 return ITANIUM_CLASS_UNKNOWN;
6072 static enum attr_type
6073 ia64_safe_type (rtx_insn *insn)
6075 if (recog_memoized (insn) >= 0)
6076 return get_attr_type (insn);
6077 else
6078 return TYPE_UNKNOWN;
6081 /* The following collection of routines emit instruction group stop bits as
6082 necessary to avoid dependencies. */
6084 /* Need to track some additional registers as far as serialization is
6085 concerned so we can properly handle br.call and br.ret. We could
6086 make these registers visible to gcc, but since these registers are
6087 never explicitly used in gcc generated code, it seems wasteful to
6088 do so (plus it would make the call and return patterns needlessly
6089 complex). */
6090 #define REG_RP (BR_REG (0))
6091 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
6092 /* This is used for volatile asms which may require a stop bit immediately
6093 before and after them. */
6094 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
6095 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
6096 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
6098 /* For each register, we keep track of how it has been written in the
6099 current instruction group.
6101 If a register is written unconditionally (no qualifying predicate),
6102 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6104 If a register is written if its qualifying predicate P is true, we
6105 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6106 may be written again by the complement of P (P^1) and when this happens,
6107 WRITE_COUNT gets set to 2.
6109 The result of this is that whenever an insn attempts to write a register
6110 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6112 If a predicate register is written by a floating-point insn, we set
6113 WRITTEN_BY_FP to true.
6115 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6116 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
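/* A small illustrative timeline of the scheme above: after
   "(p6) mov r14 = r15" the entry for r14 has WRITE_COUNT == 1 and
   FIRST_PRED == p6; an unconditional write sets WRITE_COUNT straight
   to 2; and once WRITE_COUNT is 2, any further write to r14 in the
   same group requires an insn group barrier first.  */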
6118 #if GCC_VERSION >= 4000
6119 #define RWS_FIELD_TYPE __extension__ unsigned short
6120 #else
6121 #define RWS_FIELD_TYPE unsigned int
6122 #endif
6123 struct reg_write_state
6125 RWS_FIELD_TYPE write_count : 2;
6126 RWS_FIELD_TYPE first_pred : 10;
6127 RWS_FIELD_TYPE written_by_fp : 1;
6128 RWS_FIELD_TYPE written_by_and : 1;
6129 RWS_FIELD_TYPE written_by_or : 1;
6132 /* Cumulative info for the current instruction group. */
6133 struct reg_write_state rws_sum[NUM_REGS];
6134 #ifdef ENABLE_CHECKING
6135 /* Bitmap whether a register has been written in the current insn. */
6136 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
6137 / HOST_BITS_PER_WIDEST_FAST_INT];
6139 static inline void
6140 rws_insn_set (int regno)
6142 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
6143 SET_HARD_REG_BIT (rws_insn, regno);
6146 static inline int
6147 rws_insn_test (int regno)
6149 return TEST_HARD_REG_BIT (rws_insn, regno);
6151 #else
6152 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
6153 unsigned char rws_insn[2];
6155 static inline void
6156 rws_insn_set (int regno)
6158 if (regno == REG_AR_CFM)
6159 rws_insn[0] = 1;
6160 else if (regno == REG_VOLATILE)
6161 rws_insn[1] = 1;
6164 static inline int
6165 rws_insn_test (int regno)
6167 if (regno == REG_AR_CFM)
6168 return rws_insn[0];
6169 if (regno == REG_VOLATILE)
6170 return rws_insn[1];
6171 return 0;
6173 #endif
6175 /* Indicates whether this is the first instruction after a stop bit,
6176 in which case we don't need another stop bit. Without this,
6177 ia64_variable_issue will die when scheduling an alloc. */
6178 static int first_instruction;
6180 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
6181 RTL for one instruction. */
6182 struct reg_flags
6184 unsigned int is_write : 1; /* Is register being written? */
6185 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
6186 unsigned int is_branch : 1; /* Is register used as part of a branch? */
6187 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
6188 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6189 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
6192 static void rws_update (int, struct reg_flags, int);
6193 static int rws_access_regno (int, struct reg_flags, int);
6194 static int rws_access_reg (rtx, struct reg_flags, int);
6195 static void update_set_flags (rtx, struct reg_flags *);
6196 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6197 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6198 static void init_insn_group_barriers (void);
6199 static int group_barrier_needed (rtx_insn *);
6200 static int safe_group_barrier_needed (rtx_insn *);
6201 static int in_safe_group_barrier;
6203 /* Update *RWS for REGNO, which is being written by the current instruction,
6204 with predicate PRED, and associated register flags in FLAGS. */
6206 static void
6207 rws_update (int regno, struct reg_flags flags, int pred)
6209 if (pred)
6210 rws_sum[regno].write_count++;
6211 else
6212 rws_sum[regno].write_count = 2;
6213 rws_sum[regno].written_by_fp |= flags.is_fp;
6214 /* ??? Not tracking and/or across differing predicates. */
6215 rws_sum[regno].written_by_and = flags.is_and;
6216 rws_sum[regno].written_by_or = flags.is_or;
6217 rws_sum[regno].first_pred = pred;
6220 /* Handle an access to register REGNO of type FLAGS using predicate register
6221 PRED. Update rws_sum array. Return 1 if this access creates
6222 a dependency with an earlier instruction in the same group. */
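/* Illustrative example for the write path below: once
   "(p6) add r14 = ..." has been seen in the current group, a second
   write to r14 -- even one predicated on p7 -- conservatively
   requests a barrier, because complementary predicate pairs are not
   tracked here; only the and.orcm / or.andcm parallel-compare cases
   are exempted.  */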
6224 static int
6225 rws_access_regno (int regno, struct reg_flags flags, int pred)
6227 int need_barrier = 0;
6229 gcc_assert (regno < NUM_REGS);
6231 if (! PR_REGNO_P (regno))
6232 flags.is_and = flags.is_or = 0;
6234 if (flags.is_write)
6236 int write_count;
6238 rws_insn_set (regno);
6239 write_count = rws_sum[regno].write_count;
6241 switch (write_count)
6243 case 0:
6244 /* The register has not been written yet. */
6245 if (!in_safe_group_barrier)
6246 rws_update (regno, flags, pred);
6247 break;
6249 case 1:
6250 /* The register has been written via a predicate. Treat
6251 it like an unconditional write and do not try to check
6252 for a complementary pred reg in the earlier write. */
6253 if (flags.is_and && rws_sum[regno].written_by_and)
6255 else if (flags.is_or && rws_sum[regno].written_by_or)
6257 else
6258 need_barrier = 1;
6259 if (!in_safe_group_barrier)
6260 rws_update (regno, flags, pred);
6261 break;
6263 case 2:
6264 /* The register has been unconditionally written already. We
6265 need a barrier. */
6266 if (flags.is_and && rws_sum[regno].written_by_and)
6268 else if (flags.is_or && rws_sum[regno].written_by_or)
6270 else
6271 need_barrier = 1;
6272 if (!in_safe_group_barrier)
6274 rws_sum[regno].written_by_and = flags.is_and;
6275 rws_sum[regno].written_by_or = flags.is_or;
6277 break;
6279 default:
6280 gcc_unreachable ();
6283 else
6285 if (flags.is_branch)
6287 /* Branches have several RAW exceptions that allow us to avoid
6288 barriers. */
6290 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6291 /* RAW dependencies on branch regs are permissible as long
6292 as the writer is a non-branch instruction. Since we
6293 never generate code that uses a branch register written
6294 by a branch instruction, handling this case is
6295 easy. */
6296 return 0;
6298 if (REGNO_REG_CLASS (regno) == PR_REGS
6299 && ! rws_sum[regno].written_by_fp)
6300 /* The predicates of a branch are available within the
6301 same insn group as long as the predicate was written by
6302 something other than a floating-point instruction. */
6303 return 0;
6306 if (flags.is_and && rws_sum[regno].written_by_and)
6307 return 0;
6308 if (flags.is_or && rws_sum[regno].written_by_or)
6309 return 0;
6311 switch (rws_sum[regno].write_count)
6313 case 0:
6314 /* The register has not been written yet. */
6315 break;
6317 case 1:
6318 /* The register has been written via a predicate, assume we
6319 need a barrier (don't check for complementary regs). */
6320 need_barrier = 1;
6321 break;
6323 case 2:
6324 /* The register has been unconditionally written already. We
6325 need a barrier. */
6326 need_barrier = 1;
6327 break;
6329 default:
6330 gcc_unreachable ();
6334 return need_barrier;
6337 static int
6338 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6340 int regno = REGNO (reg);
6341 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6343 if (n == 1)
6344 return rws_access_regno (regno, flags, pred);
6345 else
6347 int need_barrier = 0;
6348 while (--n >= 0)
6349 need_barrier |= rws_access_regno (regno + n, flags, pred);
6350 return need_barrier;
6354 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6355 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6357 static void
6358 update_set_flags (rtx x, struct reg_flags *pflags)
6360 rtx src = SET_SRC (x);
6362 switch (GET_CODE (src))
6364 case CALL:
6365 return;
6367 case IF_THEN_ELSE:
6368 /* There are four cases here:
6369 (1) The destination is (pc), in which case this is a branch,
6370 nothing here applies.
6371 (2) The destination is ar.lc, in which case this is a
6372 doloop_end_internal,
6373 (3) The destination is an fp register, in which case this is
6374 an fselect instruction.
6375 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6376 this is a check load.
6377 In all cases, nothing we do in this function applies. */
6378 return;
6380 default:
6381 if (COMPARISON_P (src)
6382 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6383 /* Set pflags->is_fp to 1 so that we know we're dealing
6384 with a floating point comparison when processing the
6385 destination of the SET. */
6386 pflags->is_fp = 1;
6388 /* Discover if this is a parallel comparison. We only handle
6389 and.orcm and or.andcm at present, since we must retain a
6390 strict inverse on the predicate pair. */
6391 else if (GET_CODE (src) == AND)
6392 pflags->is_and = 1;
6393 else if (GET_CODE (src) == IOR)
6394 pflags->is_or = 1;
6396 break;
6400 /* Subroutine of rtx_needs_barrier; this function determines whether the
6401 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6402 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6403 for this insn. */
6405 static int
6406 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6408 int need_barrier = 0;
6409 rtx dst;
6410 rtx src = SET_SRC (x);
6412 if (GET_CODE (src) == CALL)
6413 /* We don't need to worry about the result registers that
6414 get written by subroutine call. */
6415 return rtx_needs_barrier (src, flags, pred);
6416 else if (SET_DEST (x) == pc_rtx)
6418 /* X is a conditional branch. */
6419 /* ??? This seems redundant, as the caller sets this bit for
6420 all JUMP_INSNs. */
6421 if (!ia64_spec_check_src_p (src))
6422 flags.is_branch = 1;
6423 return rtx_needs_barrier (src, flags, pred);
6426 if (ia64_spec_check_src_p (src))
6427 /* Avoid checking one register twice (in condition
6428 and in 'then' section) for ldc pattern. */
6430 gcc_assert (REG_P (XEXP (src, 2)));
6431 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6433 /* We process MEM below. */
6434 src = XEXP (src, 1);
6437 need_barrier |= rtx_needs_barrier (src, flags, pred);
6439 dst = SET_DEST (x);
6440 if (GET_CODE (dst) == ZERO_EXTRACT)
6442 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6443 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6445 return need_barrier;
6448 /* Handle an access to rtx X of type FLAGS using predicate register
6449 PRED. Return 1 if this access creates a dependency with an earlier
6450 instruction in the same group. */
6452 static int
6453 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6455 int i, j;
6456 int is_complemented = 0;
6457 int need_barrier = 0;
6458 const char *format_ptr;
6459 struct reg_flags new_flags;
6460 rtx cond;
6462 if (! x)
6463 return 0;
6465 new_flags = flags;
6467 switch (GET_CODE (x))
6469 case SET:
6470 update_set_flags (x, &new_flags);
6471 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6472 if (GET_CODE (SET_SRC (x)) != CALL)
6474 new_flags.is_write = 1;
6475 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6477 break;
6479 case CALL:
6480 new_flags.is_write = 0;
6481 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6483 /* Avoid multiple register writes, in case this is a pattern with
6484 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6485 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6487 new_flags.is_write = 1;
6488 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6489 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6490 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6492 break;
6494 case COND_EXEC:
6495 /* X is a predicated instruction. */
6497 cond = COND_EXEC_TEST (x);
6498 gcc_assert (!pred);
6499 need_barrier = rtx_needs_barrier (cond, flags, 0);
6501 if (GET_CODE (cond) == EQ)
6502 is_complemented = 1;
6503 cond = XEXP (cond, 0);
6504 gcc_assert (GET_CODE (cond) == REG
6505 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6506 pred = REGNO (cond);
6507 if (is_complemented)
6508 ++pred;
6510 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6511 return need_barrier;
6513 case CLOBBER:
6514 case USE:
6515 /* Clobber & use are for earlier compiler-phases only. */
6516 break;
6518 case ASM_OPERANDS:
6519 case ASM_INPUT:
6520 /* We always emit stop bits for traditional asms. We emit stop bits
6521 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6522 if (GET_CODE (x) != ASM_OPERANDS
6523 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6525 /* Avoid writing the register multiple times if we have multiple
6526 asm outputs. This avoids a failure in rws_access_reg. */
6527 if (! rws_insn_test (REG_VOLATILE))
6529 new_flags.is_write = 1;
6530 rws_access_regno (REG_VOLATILE, new_flags, pred);
6532 return 1;
6535 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6536 We cannot just fall through here since then we would be confused
6537 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
6538 traditional asms unlike their normal usage. */
6540 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6541 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6542 need_barrier = 1;
6543 break;
6545 case PARALLEL:
6546 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6548 rtx pat = XVECEXP (x, 0, i);
6549 switch (GET_CODE (pat))
6551 case SET:
6552 update_set_flags (pat, &new_flags);
6553 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6554 break;
6556 case USE:
6557 case CALL:
6558 case ASM_OPERANDS:
6559 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6560 break;
6562 case CLOBBER:
6563 if (REG_P (XEXP (pat, 0))
6564 && extract_asm_operands (x) != NULL_RTX
6565 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6567 new_flags.is_write = 1;
6568 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6569 new_flags, pred);
6570 new_flags = flags;
6572 break;
6574 case RETURN:
6575 break;
6577 default:
6578 gcc_unreachable ();
6581 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6583 rtx pat = XVECEXP (x, 0, i);
6584 if (GET_CODE (pat) == SET)
6586 if (GET_CODE (SET_SRC (pat)) != CALL)
6588 new_flags.is_write = 1;
6589 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6590 pred);
6593 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6594 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6596 break;
6598 case SUBREG:
6599 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6600 break;
6601 case REG:
6602 if (REGNO (x) == AR_UNAT_REGNUM)
6604 for (i = 0; i < 64; ++i)
6605 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6607 else
6608 need_barrier = rws_access_reg (x, flags, pred);
6609 break;
6611 case MEM:
6612 /* Find the regs used in memory address computation. */
6613 new_flags.is_write = 0;
6614 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6615 break;
6617 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6618 case SYMBOL_REF: case LABEL_REF: case CONST:
6619 break;
6621 /* Operators with side-effects. */
6622 case POST_INC: case POST_DEC:
6623 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6625 new_flags.is_write = 0;
6626 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6627 new_flags.is_write = 1;
6628 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6629 break;
6631 case POST_MODIFY:
6632 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6634 new_flags.is_write = 0;
6635 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6636 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6637 new_flags.is_write = 1;
6638 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6639 break;
6641 /* Handle common unary and binary ops for efficiency. */
6642 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6643 case MOD: case UDIV: case UMOD: case AND: case IOR:
6644 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6645 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6646 case NE: case EQ: case GE: case GT: case LE:
6647 case LT: case GEU: case GTU: case LEU: case LTU:
6648 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6649 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6650 break;
6652 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6653 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6654 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6655 case SQRT: case FFS: case POPCOUNT:
6656 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6657 break;
6659 case VEC_SELECT:
6660 /* VEC_SELECT's second argument is a PARALLEL with integers that
6661 describe the elements selected. On ia64, those integers are
6662 always constants. Avoid walking the PARALLEL so that we don't
6663 get confused with "normal" parallels and then die. */
6664 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6665 break;
6667 case UNSPEC:
6668 switch (XINT (x, 1))
6670 case UNSPEC_LTOFF_DTPMOD:
6671 case UNSPEC_LTOFF_DTPREL:
6672 case UNSPEC_DTPREL:
6673 case UNSPEC_LTOFF_TPREL:
6674 case UNSPEC_TPREL:
6675 case UNSPEC_PRED_REL_MUTEX:
6676 case UNSPEC_PIC_CALL:
6677 case UNSPEC_MF:
6678 case UNSPEC_FETCHADD_ACQ:
6679 case UNSPEC_FETCHADD_REL:
6680 case UNSPEC_BSP_VALUE:
6681 case UNSPEC_FLUSHRS:
6682 case UNSPEC_BUNDLE_SELECTOR:
6683 break;
6685 case UNSPEC_GR_SPILL:
6686 case UNSPEC_GR_RESTORE:
6688 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6689 HOST_WIDE_INT bit = (offset >> 3) & 63;
6691 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6692 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6693 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6694 new_flags, pred);
6695 break;
6698 case UNSPEC_FR_SPILL:
6699 case UNSPEC_FR_RESTORE:
6700 case UNSPEC_GETF_EXP:
6701 case UNSPEC_SETF_EXP:
6702 case UNSPEC_ADDP4:
6703 case UNSPEC_FR_SQRT_RECIP_APPROX:
6704 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6705 case UNSPEC_LDA:
6706 case UNSPEC_LDS:
6707 case UNSPEC_LDS_A:
6708 case UNSPEC_LDSA:
6709 case UNSPEC_CHKACLR:
6710 case UNSPEC_CHKS:
6711 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6712 break;
6714 case UNSPEC_FR_RECIP_APPROX:
6715 case UNSPEC_SHRP:
6716 case UNSPEC_COPYSIGN:
6717 case UNSPEC_FR_RECIP_APPROX_RES:
6718 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6719 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6720 break;
6722 case UNSPEC_CMPXCHG_ACQ:
6723 case UNSPEC_CMPXCHG_REL:
6724 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6725 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6726 break;
6728 default:
6729 gcc_unreachable ();
6731 break;
6733 case UNSPEC_VOLATILE:
6734 switch (XINT (x, 1))
6736 case UNSPECV_ALLOC:
6737 /* Alloc must always be the first instruction of a group.
6738 We force this by always returning true. */
6739 /* ??? We might get better scheduling if we explicitly check for
6740 input/local/output register dependencies, and modify the
6741 scheduler so that alloc is always reordered to the start of
6742 the current group. We could then eliminate all of the
6743 first_instruction code. */
6744 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6746 new_flags.is_write = 1;
6747 rws_access_regno (REG_AR_CFM, new_flags, pred);
6748 return 1;
6750 case UNSPECV_SET_BSP:
6751 case UNSPECV_PROBE_STACK_RANGE:
6752 need_barrier = 1;
6753 break;
6755 case UNSPECV_BLOCKAGE:
6756 case UNSPECV_INSN_GROUP_BARRIER:
6757 case UNSPECV_BREAK:
6758 case UNSPECV_PSAC_ALL:
6759 case UNSPECV_PSAC_NORMAL:
6760 return 0;
6762 case UNSPECV_PROBE_STACK_ADDRESS:
6763 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6764 break;
6766 default:
6767 gcc_unreachable ();
6769 break;
6771 case RETURN:
6772 new_flags.is_write = 0;
6773 need_barrier = rws_access_regno (REG_RP, flags, pred);
6774 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6776 new_flags.is_write = 1;
6777 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6778 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6779 break;
6781 default:
6782 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6783 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6784 switch (format_ptr[i])
6786 case '0': /* unused field */
6787 case 'i': /* integer */
6788 case 'n': /* note */
6789 case 'w': /* wide integer */
6790 case 's': /* pointer to string */
6791 case 'S': /* optional pointer to string */
6792 break;
6794 case 'e':
6795 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6796 need_barrier = 1;
6797 break;
6799 case 'E':
6800 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6801 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6802 need_barrier = 1;
6803 break;
6805 default:
6806 gcc_unreachable ();
6808 break;
6810 return need_barrier;
6813 /* Clear out the state for group_barrier_needed at the start of a
6814 sequence of insns. */
6816 static void
6817 init_insn_group_barriers (void)
6819 memset (rws_sum, 0, sizeof (rws_sum));
6820 first_instruction = 1;
6823 /* Given the current state, determine whether a group barrier (a stop bit) is
6824 necessary before INSN. Return nonzero if so. This modifies the state to
6825 include the effects of INSN as a side-effect. */
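/* Illustrative usage sketch, modelled on emit_all_insn_group_barriers
   further below:

     init_insn_group_barriers ();
     for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
       if (NONDEBUG_INSN_P (insn) && group_barrier_needed (insn))
         {
           emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
           init_insn_group_barriers ();
         }

   Because group_barrier_needed updates the tracked register-write
   state as a side effect, the real loop re-initializes the state
   after emitting a stop bit and then re-records INSN in the fresh
   state.  */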
6827 static int
6828 group_barrier_needed (rtx_insn *insn)
6830 rtx pat;
6831 int need_barrier = 0;
6832 struct reg_flags flags;
6834 memset (&flags, 0, sizeof (flags));
6835 switch (GET_CODE (insn))
6837 case NOTE:
6838 case DEBUG_INSN:
6839 break;
6841 case BARRIER:
6842 /* A barrier doesn't imply an instruction group boundary. */
6843 break;
6845 case CODE_LABEL:
6846 memset (rws_insn, 0, sizeof (rws_insn));
6847 return 1;
6849 case CALL_INSN:
6850 flags.is_branch = 1;
6851 flags.is_sibcall = SIBLING_CALL_P (insn);
6852 memset (rws_insn, 0, sizeof (rws_insn));
6854 /* Don't bundle a call following another call. */
6855 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6857 need_barrier = 1;
6858 break;
6861 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6862 break;
6864 case JUMP_INSN:
6865 if (!ia64_spec_check_p (insn))
6866 flags.is_branch = 1;
6868 /* Don't bundle a jump following a call. */
6869 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6871 need_barrier = 1;
6872 break;
6874 /* FALLTHRU */
6876 case INSN:
6877 if (GET_CODE (PATTERN (insn)) == USE
6878 || GET_CODE (PATTERN (insn)) == CLOBBER)
6879 /* Don't care about USE and CLOBBER "insns"---those are used to
6880 indicate to the optimizer that it shouldn't get rid of
6881 certain operations. */
6882 break;
6884 pat = PATTERN (insn);
6886 /* Ug. Hack hacks hacked elsewhere. */
6887 switch (recog_memoized (insn))
6889 /* We play dependency tricks with the epilogue in order
6890 to get proper schedules. Undo this for dv analysis. */
6891 case CODE_FOR_epilogue_deallocate_stack:
6892 case CODE_FOR_prologue_allocate_stack:
6893 pat = XVECEXP (pat, 0, 0);
6894 break;
6896 /* The pattern we use for br.cloop confuses the code above.
6897 The second element of the vector is representative. */
6898 case CODE_FOR_doloop_end_internal:
6899 pat = XVECEXP (pat, 0, 1);
6900 break;
6902 /* Doesn't generate code. */
6903 case CODE_FOR_pred_rel_mutex:
6904 case CODE_FOR_prologue_use:
6905 return 0;
6907 default:
6908 break;
6911 memset (rws_insn, 0, sizeof (rws_insn));
6912 need_barrier = rtx_needs_barrier (pat, flags, 0);
6914 /* Check to see if the previous instruction was a volatile
6915 asm. */
6916 if (! need_barrier)
6917 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6919 break;
6921 default:
6922 gcc_unreachable ();
6925 if (first_instruction && important_for_bundling_p (insn))
6927 need_barrier = 0;
6928 first_instruction = 0;
6931 return need_barrier;
6934 /* Like group_barrier_needed, but do not clobber the current state. */
6936 static int
6937 safe_group_barrier_needed (rtx_insn *insn)
6939 int saved_first_instruction;
6940 int t;
6942 saved_first_instruction = first_instruction;
6943 in_safe_group_barrier = 1;
6945 t = group_barrier_needed (insn);
6947 first_instruction = saved_first_instruction;
6948 in_safe_group_barrier = 0;
6950 return t;
6953 /* Scan the current function and insert stop bits as necessary to
6954 eliminate dependencies. This function assumes that a final
6955 instruction scheduling pass has been run which has already
6956 inserted most of the necessary stop bits. This function only
6957 inserts new ones at basic block boundaries, since these are
6958 invisible to the scheduler. */
6960 static void
6961 emit_insn_group_barriers (FILE *dump)
6963 rtx_insn *insn;
6964 rtx_insn *last_label = 0;
6965 int insns_since_last_label = 0;
6967 init_insn_group_barriers ();
6969 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6971 if (LABEL_P (insn))
6973 if (insns_since_last_label)
6974 last_label = insn;
6975 insns_since_last_label = 0;
6977 else if (NOTE_P (insn)
6978 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6980 if (insns_since_last_label)
6981 last_label = insn;
6982 insns_since_last_label = 0;
6984 else if (NONJUMP_INSN_P (insn)
6985 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6986 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6988 init_insn_group_barriers ();
6989 last_label = 0;
6991 else if (NONDEBUG_INSN_P (insn))
6993 insns_since_last_label = 1;
6995 if (group_barrier_needed (insn))
6997 if (last_label)
6999 if (dump)
7000 fprintf (dump, "Emitting stop before label %d\n",
7001 INSN_UID (last_label));
7002 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
7003 insn = last_label;
7005 init_insn_group_barriers ();
7006 last_label = 0;
7013 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
7014 This function has to emit all necessary group barriers. */
7016 static void
7017 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7019 rtx_insn *insn;
7021 init_insn_group_barriers ();
7023 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7025 if (BARRIER_P (insn))
7027 rtx_insn *last = prev_active_insn (insn);
7029 if (! last)
7030 continue;
7031 if (JUMP_TABLE_DATA_P (last))
7032 last = prev_active_insn (last);
7033 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7034 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7036 init_insn_group_barriers ();
7038 else if (NONDEBUG_INSN_P (insn))
7040 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7041 init_insn_group_barriers ();
7042 else if (group_barrier_needed (insn))
7044 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
7045 init_insn_group_barriers ();
7046 group_barrier_needed (insn);
7054 /* Instruction scheduling support. */
7056 #define NR_BUNDLES 10
7058 /* A list of names of all available bundles. */
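/* (Illustrative gloss, using the usual ia64 template naming: m =
   memory, i = integer, f = floating point, b = branch, and "lx" =
   the long-immediate template whose L and X slots together hold
   e.g. a movl.)  */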
7060 static const char *bundle_name [NR_BUNDLES] =
7062 ".mii",
7063 ".mmi",
7064 ".mfi",
7065 ".mmf",
7066 #if NR_BUNDLES == 10
7067 ".bbb",
7068 ".mbb",
7069 #endif
7070 ".mib",
7071 ".mmb",
7072 ".mfb",
7073 ".mlx"
7076 /* Nonzero if we should insert stop bits into the schedule. */
7078 int ia64_final_schedule = 0;
7080 /* Codes of the corresponding queried units: */
7082 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
7083 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
7085 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
7086 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
7088 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
7090 /* The following variable value is an insn group barrier. */
7092 static rtx_insn *dfa_stop_insn;
7094 /* The following variable value is the last issued insn. */
7096 static rtx_insn *last_scheduled_insn;
7098 /* The following variable value is pointer to a DFA state used as
7099 temporary variable. */
7101 static state_t temp_dfa_state = NULL;
7103 /* The following variable value is DFA state after issuing the last
7104 insn. */
7106 static state_t prev_cycle_state = NULL;
7108 /* The following array element values are TRUE if the corresponding
7109 insn requires to add stop bits before it. */
7111 static char *stops_p = NULL;
7113 /* The following variable is used to set up the mentioned above array. */
7115 static int stop_before_p = 0;
7117 /* The following variable value is the length of the arrays `clocks' and
7118 `add_cycles'. */
7120 static int clocks_length;
7122 /* The following variable value is the number of data speculations in progress. */
7123 static int pending_data_specs = 0;
7125 /* Number of memory references on current and three future processor cycles. */
7126 static char mem_ops_in_group[4];
7128 /* Number of current processor cycle (from scheduler's point of view). */
7129 static int current_cycle;
7131 static rtx ia64_single_set (rtx_insn *);
7132 static void ia64_emit_insn_before (rtx, rtx);
7134 /* Map a bundle number to its pseudo-op. */
7136 const char *
7137 get_bundle_name (int b)
7139 return bundle_name[b];
7143 /* Return the maximum number of instructions a cpu can issue. */
7145 static int
7146 ia64_issue_rate (void)
7148 return 6;
7151 /* Helper function - like single_set, but look inside COND_EXEC. */
7153 static rtx
7154 ia64_single_set (rtx_insn *insn)
7156 rtx x = PATTERN (insn), ret;
7157 if (GET_CODE (x) == COND_EXEC)
7158 x = COND_EXEC_CODE (x);
7159 if (GET_CODE (x) == SET)
7160 return x;
7162 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
7163 Although they are not classical single set, the second set is there just
7164 to protect it from moving past FP-relative stack accesses. */
7165 switch (recog_memoized (insn))
7167 case CODE_FOR_prologue_allocate_stack:
7168 case CODE_FOR_prologue_allocate_stack_pr:
7169 case CODE_FOR_epilogue_deallocate_stack:
7170 case CODE_FOR_epilogue_deallocate_stack_pr:
7171 ret = XVECEXP (x, 0, 0);
7172 break;
7174 default:
7175 ret = single_set_2 (insn, x);
7176 break;
7179 return ret;
7182 /* Adjust the cost of a scheduling dependency.
7183 Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
7184 COST is the current cost, DW is dependency weakness. */
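/* Illustrative cases from the body below: a store feeding a branch or
   sibcall through an apparent true dependence gets cost 0; a
   store/load pair with minimal dependency weakness (likely to alias)
   is charged PARAM_SCHED_MEM_TRUE_DEP_COST; and output dependencies
   with a store on either side collapse to cost 0.  */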
7185 static int
7186 ia64_adjust_cost_2 (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
7187 int cost, dw_t dw)
7189 enum reg_note dep_type = (enum reg_note) dep_type1;
7190 enum attr_itanium_class dep_class;
7191 enum attr_itanium_class insn_class;
7193 insn_class = ia64_safe_itanium_class (insn);
7194 dep_class = ia64_safe_itanium_class (dep_insn);
7196 /* Treat true memory dependencies separately. Ignore apparent true
7197 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
7198 if (dep_type == REG_DEP_TRUE
7199 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7200 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7201 return 0;
7203 if (dw == MIN_DEP_WEAK)
7204 /* Store and load are likely to alias, use higher cost to avoid stall. */
7205 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7206 else if (dw > MIN_DEP_WEAK)
7208 /* Store and load are less likely to alias. */
7209 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7210 /* Assume there will be no cache conflict for floating-point data.
7211 For integer data, L1 conflict penalty is huge (17 cycles), so we
7212 never assume it will not cause a conflict. */
7213 return 0;
7214 else
7215 return cost;
7218 if (dep_type != REG_DEP_OUTPUT)
7219 return cost;
7221 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7222 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7223 return 0;
7225 return cost;
7228 /* Like emit_insn_before, but skip cycle_display notes.
7229 ??? When cycle display notes are implemented, update this. */
7231 static void
7232 ia64_emit_insn_before (rtx insn, rtx before)
7234 emit_insn_before (insn, before);
7237 /* The following function marks insns that produce addresses for load
7238 and store insns. Such insns will be placed into M slots because this
7239 decreases latency on Itanium1 (see function
7240 `ia64_produce_address_p' and the DFA descriptions). */
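/* For instance (illustrative), when an IALU insn such as
   "add r14 = r32, r33" feeds the address of a later "ld8 ... = [r14]"
   or "st8 [r14] = ..." through a true dependence, the loop below sets
   insn->call = 1 for the add so the DFA can steer it into an M
   slot.  */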
7242 static void
7243 ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
7245 rtx_insn *insn, *next, *next_tail;
7247 /* Before reload, which_alternative is not set, which means that
7248 ia64_safe_itanium_class will produce wrong results for (at least)
7249 move instructions. */
7250 if (!reload_completed)
7251 return;
7253 next_tail = NEXT_INSN (tail);
7254 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7255 if (INSN_P (insn))
7256 insn->call = 0;
7257 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7258 if (INSN_P (insn)
7259 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7261 sd_iterator_def sd_it;
7262 dep_t dep;
7263 bool has_mem_op_consumer_p = false;
7265 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7267 enum attr_itanium_class c;
7269 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7270 continue;
7272 next = DEP_CON (dep);
7273 c = ia64_safe_itanium_class (next);
7274 if ((c == ITANIUM_CLASS_ST
7275 || c == ITANIUM_CLASS_STF)
7276 && ia64_st_address_bypass_p (insn, next))
7278 has_mem_op_consumer_p = true;
7279 break;
7281 else if ((c == ITANIUM_CLASS_LD
7282 || c == ITANIUM_CLASS_FLD
7283 || c == ITANIUM_CLASS_FLDP)
7284 && ia64_ld_address_bypass_p (insn, next))
7286 has_mem_op_consumer_p = true;
7287 break;
7291 insn->call = has_mem_op_consumer_p;
7295 /* We're beginning a new block. Initialize data structures as necessary. */
7297 static void
7298 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7299 int sched_verbose ATTRIBUTE_UNUSED,
7300 int max_ready ATTRIBUTE_UNUSED)
7302 #ifdef ENABLE_CHECKING
7303 rtx_insn *insn;
7305 if (!sel_sched_p () && reload_completed)
7306 for (insn = NEXT_INSN (current_sched_info->prev_head);
7307 insn != current_sched_info->next_tail;
7308 insn = NEXT_INSN (insn))
7309 gcc_assert (!SCHED_GROUP_P (insn));
7310 #endif
7311 last_scheduled_insn = NULL;
7312 init_insn_group_barriers ();
7314 current_cycle = 0;
7315 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7318 /* We're beginning a scheduling pass. Check assertion. */
7320 static void
7321 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7322 int sched_verbose ATTRIBUTE_UNUSED,
7323 int max_ready ATTRIBUTE_UNUSED)
7325 gcc_assert (pending_data_specs == 0);
7328 /* Scheduling pass is now finished. Free/reset static variable. */
7329 static void
7330 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7331 int sched_verbose ATTRIBUTE_UNUSED)
7333 gcc_assert (pending_data_specs == 0);
7336 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7337 speculation check), FALSE otherwise. */
7338 static bool
7339 is_load_p (rtx_insn *insn)
7341 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7343 return
7344 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7345 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7348 /* If INSN is a memory reference, memoize it in the MEM_OPS_IN_GROUP global array
7349 (taking into account the 3-cycle cache reference postponing for stores: Intel
7350 Itanium 2 Reference Manual for Software Development and Optimization,
7351 6.7.3.1). */
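/* Illustrative accounting (see the switch below): a load issued on
   cycle 5 is counted in mem_ops_in_group[5 % 4] == [1], while a store
   issued on the same cycle is counted in mem_ops_in_group[(5 + 3) % 4]
   == [0], reflecting the postponed cache access described above.  */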
7352 static void
7353 record_memory_reference (rtx_insn *insn)
7355 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7357 switch (insn_class) {
7358 case ITANIUM_CLASS_FLD:
7359 case ITANIUM_CLASS_LD:
7360 mem_ops_in_group[current_cycle % 4]++;
7361 break;
7362 case ITANIUM_CLASS_STF:
7363 case ITANIUM_CLASS_ST:
7364 mem_ops_in_group[(current_cycle + 3) % 4]++;
7365 break;
7366 default:;
7370 /* We are about to begin issuing insns for this clock cycle.
7371 Override the default sort algorithm to better slot instructions. */
7373 static int
7374 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7375 int *pn_ready, int clock_var,
7376 int reorder_type)
7378 int n_asms;
7379 int n_ready = *pn_ready;
7380 rtx_insn **e_ready = ready + n_ready;
7381 rtx_insn **insnp;
7383 if (sched_verbose)
7384 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7386 if (reorder_type == 0)
7388 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7389 n_asms = 0;
7390 for (insnp = ready; insnp < e_ready; insnp++)
7391 if (insnp < e_ready)
7393 rtx_insn *insn = *insnp;
7394 enum attr_type t = ia64_safe_type (insn);
7395 if (t == TYPE_UNKNOWN)
7397 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7398 || asm_noperands (PATTERN (insn)) >= 0)
7400 rtx_insn *lowest = ready[n_asms];
7401 ready[n_asms] = insn;
7402 *insnp = lowest;
7403 n_asms++;
7405 else
7407 rtx_insn *highest = ready[n_ready - 1];
7408 ready[n_ready - 1] = insn;
7409 *insnp = highest;
7410 return 1;
7415 if (n_asms < n_ready)
7417 /* Some normal insns to process. Skip the asms. */
7418 ready += n_asms;
7419 n_ready -= n_asms;
7421 else if (n_ready > 0)
7422 return 1;
7425 if (ia64_final_schedule)
7427 int deleted = 0;
7428 int nr_need_stop = 0;
7430 for (insnp = ready; insnp < e_ready; insnp++)
7431 if (safe_group_barrier_needed (*insnp))
7432 nr_need_stop++;
7434 if (reorder_type == 1 && n_ready == nr_need_stop)
7435 return 0;
7436 if (reorder_type == 0)
7437 return 1;
7438 insnp = e_ready;
7439 /* Move down everything that needs a stop bit, preserving
7440 relative order. */
7441 while (insnp-- > ready + deleted)
7442 while (insnp >= ready + deleted)
7444 rtx_insn *insn = *insnp;
7445 if (! safe_group_barrier_needed (insn))
7446 break;
7447 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7448 *ready = insn;
7449 deleted++;
7451 n_ready -= deleted;
7452 ready += deleted;
7455 current_cycle = clock_var;
7456 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7458 int moved = 0;
7460 insnp = e_ready;
7461 /* Move down loads/stores, preserving relative order. */
7462 while (insnp-- > ready + moved)
7463 while (insnp >= ready + moved)
7465 rtx_insn *insn = *insnp;
7466 if (! is_load_p (insn))
7467 break;
7468 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7469 *ready = insn;
7470 moved++;
7472 n_ready -= moved;
7473 ready += moved;
7476 return 1;
7479 /* We are about to begin issuing insns for this clock cycle. Override
7480 the default sort algorithm to better slot instructions. */
7482 static int
7483 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7484 int *pn_ready, int clock_var)
7486 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7487 pn_ready, clock_var, 0);
7490 /* Like ia64_sched_reorder, but called after issuing each insn.
7491 Override the default sort algorithm to better slot instructions. */
7493 static int
7494 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7495 int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
7496 int *pn_ready, int clock_var)
7498 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7499 clock_var, 1);
7502 /* We are about to issue INSN. Return the number of insns left on the
7503 ready queue that can be issued this cycle. */
7505 static int
7506 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7507 int sched_verbose ATTRIBUTE_UNUSED,
7508 rtx_insn *insn,
7509 int can_issue_more ATTRIBUTE_UNUSED)
7511 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7512 /* Modulo scheduling does not extend h_i_d when emitting
7513 new instructions. Don't use h_i_d, if we don't have to. */
7515 if (DONE_SPEC (insn) & BEGIN_DATA)
7516 pending_data_specs++;
7517 if (CHECK_SPEC (insn) & BEGIN_DATA)
7518 pending_data_specs--;
7521 if (DEBUG_INSN_P (insn))
7522 return 1;
7524 last_scheduled_insn = insn;
7525 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7526 if (reload_completed)
7528 int needed = group_barrier_needed (insn);
7530 gcc_assert (!needed);
7531 if (CALL_P (insn))
7532 init_insn_group_barriers ();
7533 stops_p [INSN_UID (insn)] = stop_before_p;
7534 stop_before_p = 0;
7536 record_memory_reference (insn);
7538 return 1;
7541 /* We are choosing insn from the ready queue. Return zero if INSN
7542 can be chosen. */
7544 static int
7545 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
7547 gcc_assert (insn && INSN_P (insn));
7549 /* The size of the ALAT is 32. Since we perform conservative
7550 data speculation, we keep the ALAT half-empty. */
7551 if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
7552 return ready_index == 0 ? -1 : 1;
7554 if (ready_index == 0)
7555 return 0;
7557 if ((!reload_completed
7558 || !safe_group_barrier_needed (insn))
7559 && (!mflag_sched_mem_insns_hard_limit
7560 || !is_load_p (insn)
7561 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
7562 return 0;
7564 return 1;
7567 /* The following variable value is pseudo-insn used by the DFA insn
7568 scheduler to change the DFA state when the simulated clock is
7569 increased. */
7571 static rtx_insn *dfa_pre_cycle_insn;
7573 /* Returns 1 when a meaningful insn was scheduled between the last group
7574 barrier and LAST. */
7575 static int
7576 scheduled_good_insn (rtx_insn *last)
7578 if (last && recog_memoized (last) >= 0)
7579 return 1;
7581 for ( ;
7582 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7583 && !stops_p[INSN_UID (last)];
7584 last = PREV_INSN (last))
7585 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7586 the ebb we're scheduling. */
7587 if (INSN_P (last) && recog_memoized (last) >= 0)
7588 return 1;
7590 return 0;
7593 /* We are about to begin issuing INSN. Return nonzero if we cannot
7594 issue it on given cycle CLOCK and return zero if we should not sort
7595 the ready queue on the next clock start. */
7597 static int
7598 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
7599 int clock, int *sort_p)
7601 gcc_assert (insn && INSN_P (insn));
7603 if (DEBUG_INSN_P (insn))
7604 return 0;
7606 /* When a group barrier is needed for insn, last_scheduled_insn
7607 should be set. */
7608 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7609 || last_scheduled_insn);
7611 if ((reload_completed
7612 && (safe_group_barrier_needed (insn)
7613 || (mflag_sched_stop_bits_after_every_cycle
7614 && last_clock != clock
7615 && last_scheduled_insn
7616 && scheduled_good_insn (last_scheduled_insn))))
7617 || (last_scheduled_insn
7618 && (CALL_P (last_scheduled_insn)
7619 || unknown_for_bundling_p (last_scheduled_insn))))
7621 init_insn_group_barriers ();
7623 if (verbose && dump)
7624 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7625 last_clock == clock ? " + cycle advance" : "");
7627 stop_before_p = 1;
7628 current_cycle = clock;
7629 mem_ops_in_group[current_cycle % 4] = 0;
7631 if (last_clock == clock)
7633 state_transition (curr_state, dfa_stop_insn);
7634 if (TARGET_EARLY_STOP_BITS)
7635 *sort_p = (last_scheduled_insn == NULL_RTX
7636 || ! CALL_P (last_scheduled_insn));
7637 else
7638 *sort_p = 0;
7639 return 1;
7642 if (last_scheduled_insn)
7644 if (unknown_for_bundling_p (last_scheduled_insn))
7645 state_reset (curr_state);
7646 else
7648 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7649 state_transition (curr_state, dfa_stop_insn);
7650 state_transition (curr_state, dfa_pre_cycle_insn);
7651 state_transition (curr_state, NULL);
7655 return 0;
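/* Summary of the DFA bookkeeping above: when the stop bit is placed
   on the same simulated clock as the previous insn, the stop insn is
   fed directly into the current DFA state; when the clock has
   advanced, the state is instead rebuilt from prev_cycle_state by
   replaying the stop insn, the pre-cycle pseudo insn and a cycle
   advance (state_transition with NULL), unless the last scheduled
   insn was an asm, in which case the state is simply reset.  */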
7658 /* Implement targetm.sched.h_i_d_extended hook.
7659 Extend internal data structures. */
7660 static void
7661 ia64_h_i_d_extended (void)
7663 if (stops_p != NULL)
7665 int new_clocks_length = get_max_uid () * 3 / 2;
7666 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7667 clocks_length = new_clocks_length;
7672 /* This structure describes the data used by the backend to guide scheduling.
7673 When the current scheduling point is switched, this data should be saved
7674 and restored later, if the scheduler returns to this point. */
7675 struct _ia64_sched_context
7677 state_t prev_cycle_state;
7678 rtx_insn *last_scheduled_insn;
7679 struct reg_write_state rws_sum[NUM_REGS];
7680 struct reg_write_state rws_insn[NUM_REGS];
7681 int first_instruction;
7682 int pending_data_specs;
7683 int current_cycle;
7684 char mem_ops_in_group[4];
7686 typedef struct _ia64_sched_context *ia64_sched_context_t;
7688 /* Allocates a scheduling context. */
7689 static void *
7690 ia64_alloc_sched_context (void)
7692 return xmalloc (sizeof (struct _ia64_sched_context));
7695 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7696 the global context otherwise. */
7697 static void
7698 ia64_init_sched_context (void *_sc, bool clean_p)
7700 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7702 sc->prev_cycle_state = xmalloc (dfa_state_size);
7703 if (clean_p)
7705 state_reset (sc->prev_cycle_state);
7706 sc->last_scheduled_insn = NULL;
7707 memset (sc->rws_sum, 0, sizeof (rws_sum));
7708 memset (sc->rws_insn, 0, sizeof (rws_insn));
7709 sc->first_instruction = 1;
7710 sc->pending_data_specs = 0;
7711 sc->current_cycle = 0;
7712 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7714 else
7716 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7717 sc->last_scheduled_insn = last_scheduled_insn;
7718 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7719 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7720 sc->first_instruction = first_instruction;
7721 sc->pending_data_specs = pending_data_specs;
7722 sc->current_cycle = current_cycle;
7723 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7727 /* Sets the global scheduling context to the one pointed to by _SC. */
7728 static void
7729 ia64_set_sched_context (void *_sc)
7731 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7733 gcc_assert (sc != NULL);
7735 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7736 last_scheduled_insn = sc->last_scheduled_insn;
7737 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7738 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7739 first_instruction = sc->first_instruction;
7740 pending_data_specs = sc->pending_data_specs;
7741 current_cycle = sc->current_cycle;
7742 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7745 /* Clears the data in the _SC scheduling context. */
7746 static void
7747 ia64_clear_sched_context (void *_sc)
7749 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7751 free (sc->prev_cycle_state);
7752 sc->prev_cycle_state = NULL;
7755 /* Frees the _SC scheduling context. */
7756 static void
7757 ia64_free_sched_context (void *_sc)
7759 gcc_assert (_sc != NULL);
7761 free (_sc);
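/* Minimal usage sketch for the five context functions above, which
   implement the save/restore protocol described at struct
   _ia64_sched_context (the ordering shown is only illustrative):

     void *sc = ia64_alloc_sched_context ();
     ia64_init_sched_context (sc, false);    -- snapshot global state
     ...                                     -- schedule another point
     ia64_set_sched_context (sc);            -- restore the snapshot
     ia64_clear_sched_context (sc);          -- drop the saved DFA state
     ia64_free_sched_context (sc);           */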
7764 typedef rtx (* gen_func_t) (rtx, rtx);
7766 /* Return a function that will generate a load of mode MODE_NO
7767 with speculation types TS. */
7768 static gen_func_t
7769 get_spec_load_gen_function (ds_t ts, int mode_no)
7771 static gen_func_t gen_ld_[] = {
7772 gen_movbi,
7773 gen_movqi_internal,
7774 gen_movhi_internal,
7775 gen_movsi_internal,
7776 gen_movdi_internal,
7777 gen_movsf_internal,
7778 gen_movdf_internal,
7779 gen_movxf_internal,
7780 gen_movti_internal,
7781 gen_zero_extendqidi2,
7782 gen_zero_extendhidi2,
7783 gen_zero_extendsidi2,
7786 static gen_func_t gen_ld_a[] = {
7787 gen_movbi_advanced,
7788 gen_movqi_advanced,
7789 gen_movhi_advanced,
7790 gen_movsi_advanced,
7791 gen_movdi_advanced,
7792 gen_movsf_advanced,
7793 gen_movdf_advanced,
7794 gen_movxf_advanced,
7795 gen_movti_advanced,
7796 gen_zero_extendqidi2_advanced,
7797 gen_zero_extendhidi2_advanced,
7798 gen_zero_extendsidi2_advanced,
7800 static gen_func_t gen_ld_s[] = {
7801 gen_movbi_speculative,
7802 gen_movqi_speculative,
7803 gen_movhi_speculative,
7804 gen_movsi_speculative,
7805 gen_movdi_speculative,
7806 gen_movsf_speculative,
7807 gen_movdf_speculative,
7808 gen_movxf_speculative,
7809 gen_movti_speculative,
7810 gen_zero_extendqidi2_speculative,
7811 gen_zero_extendhidi2_speculative,
7812 gen_zero_extendsidi2_speculative,
7814 static gen_func_t gen_ld_sa[] = {
7815 gen_movbi_speculative_advanced,
7816 gen_movqi_speculative_advanced,
7817 gen_movhi_speculative_advanced,
7818 gen_movsi_speculative_advanced,
7819 gen_movdi_speculative_advanced,
7820 gen_movsf_speculative_advanced,
7821 gen_movdf_speculative_advanced,
7822 gen_movxf_speculative_advanced,
7823 gen_movti_speculative_advanced,
7824 gen_zero_extendqidi2_speculative_advanced,
7825 gen_zero_extendhidi2_speculative_advanced,
7826 gen_zero_extendsidi2_speculative_advanced,
7828 static gen_func_t gen_ld_s_a[] = {
7829 gen_movbi_speculative_a,
7830 gen_movqi_speculative_a,
7831 gen_movhi_speculative_a,
7832 gen_movsi_speculative_a,
7833 gen_movdi_speculative_a,
7834 gen_movsf_speculative_a,
7835 gen_movdf_speculative_a,
7836 gen_movxf_speculative_a,
7837 gen_movti_speculative_a,
7838 gen_zero_extendqidi2_speculative_a,
7839 gen_zero_extendhidi2_speculative_a,
7840 gen_zero_extendsidi2_speculative_a,
7843 gen_func_t *gen_ld;
7845 if (ts & BEGIN_DATA)
7847 if (ts & BEGIN_CONTROL)
7848 gen_ld = gen_ld_sa;
7849 else
7850 gen_ld = gen_ld_a;
7852 else if (ts & BEGIN_CONTROL)
7854 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7855 || ia64_needs_block_p (ts))
7856 gen_ld = gen_ld_s;
7857 else
7858 gen_ld = gen_ld_s_a;
7860 else if (ts == 0)
7861 gen_ld = gen_ld_;
7862 else
7863 gcc_unreachable ();
7865 return gen_ld[mode_no];
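/* Worked example of the selection above: with TS == BEGIN_DATA (data
   speculation only) the gen_ld_a table is chosen, so MODE_NO == 4
   (DImode, see ia64_mode_to_int below) yields gen_movdi_advanced,
   whereas TS == (BEGIN_DATA | BEGIN_CONTROL) selects gen_ld_sa and
   the same MODE_NO yields gen_movdi_speculative_advanced.  */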
7868 /* Constants that help map 'enum machine_mode' to int. */
7869 enum SPEC_MODES
7871 SPEC_MODE_INVALID = -1,
7872 SPEC_MODE_FIRST = 0,
7873 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7874 SPEC_MODE_FOR_EXTEND_LAST = 3,
7875 SPEC_MODE_LAST = 8
7878 enum
7880 /* Offset to reach ZERO_EXTEND patterns. */
7881 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7884 /* Return index of the MODE. */
7885 static int
7886 ia64_mode_to_int (enum machine_mode mode)
7888 switch (mode)
7890 case BImode: return 0; /* SPEC_MODE_FIRST */
7891 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7892 case HImode: return 2;
7893 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7894 case DImode: return 4;
7895 case SFmode: return 5;
7896 case DFmode: return 6;
7897 case XFmode: return 7;
7898 case TImode:
7899 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7900 mentioned in itanium[12].md. Predicate fp_register_operand also
7901 needs to be defined. Bottom line: better disable for now. */
7902 return SPEC_MODE_INVALID;
7903 default: return SPEC_MODE_INVALID;
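/* Worked example of the mapping: a DImode register loaded from an
   SImode memory location through ZERO_EXTEND uses index 3 (SImode)
   + SPEC_GEN_EXTEND_OFFSET (8) == 11, selecting the
   gen_zero_extendsidi2* entries of the generator tables above, while
   a plain SImode load stays at index 3 (the gen_movsi* entries).
   The offset is added in get_mode_no_for_insn below when the load
   zero-extends.  */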
7907 /* Provide information about speculation capabilities. */
7908 static void
7909 ia64_set_sched_flags (spec_info_t spec_info)
7911 unsigned int *flags = &(current_sched_info->flags);
7913 if (*flags & SCHED_RGN
7914 || *flags & SCHED_EBB
7915 || *flags & SEL_SCHED)
7917 int mask = 0;
7919 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7920 || (mflag_sched_ar_data_spec && reload_completed))
7922 mask |= BEGIN_DATA;
7924 if (!sel_sched_p ()
7925 && ((mflag_sched_br_in_data_spec && !reload_completed)
7926 || (mflag_sched_ar_in_data_spec && reload_completed)))
7927 mask |= BE_IN_DATA;
7930 if (mflag_sched_control_spec
7931 && (!sel_sched_p ()
7932 || reload_completed))
7934 mask |= BEGIN_CONTROL;
7936 if (!sel_sched_p () && mflag_sched_in_control_spec)
7937 mask |= BE_IN_CONTROL;
7940 spec_info->mask = mask;
7942 if (mask)
7944 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7946 if (mask & BE_IN_SPEC)
7947 *flags |= NEW_BBS;
7949 spec_info->flags = 0;
7951 if ((mask & CONTROL_SPEC)
7952 && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7953 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7955 if (sched_verbose >= 1)
7956 spec_info->dump = sched_dump;
7957 else
7958 spec_info->dump = 0;
7960 if (mflag_sched_count_spec_in_critical_path)
7961 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7964 else
7965 spec_info->mask = 0;
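/* Example of the mask computed above: in the second (post-reload)
   scheduling pass, with only mflag_sched_ar_data_spec and
   mflag_sched_control_spec enabled, mask becomes
   BEGIN_DATA | BEGIN_CONTROL, so USE_DEPS_LIST and DO_SPECULATION are
   turned on while NEW_BBS stays off because no BE_IN_SPEC bits were
   requested.  */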
7968 /* If INSN is an appropriate load, return the index of its mode.
7969 Return -1 otherwise. */
7970 static int
7971 get_mode_no_for_insn (rtx_insn *insn)
7973 rtx reg, mem, mode_rtx;
7974 int mode_no;
7975 bool extend_p;
7977 extract_insn_cached (insn);
7979 /* We use WHICH_ALTERNATIVE only after reload. This will
7980 guarantee that reload won't touch a speculative insn. */
7982 if (recog_data.n_operands != 2)
7983 return -1;
7985 reg = recog_data.operand[0];
7986 mem = recog_data.operand[1];
7988 /* We should use MEM's mode since REG's mode in the presence of
7989 ZERO_EXTEND will always be DImode. */
7990 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7991 /* Process non-speculative ld. */
7993 if (!reload_completed)
7995 /* Do not speculate into regs like ar.lc. */
7996 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7997 return -1;
7999 if (!MEM_P (mem))
8000 return -1;
8003 rtx mem_reg = XEXP (mem, 0);
8005 if (!REG_P (mem_reg))
8006 return -1;
8009 mode_rtx = mem;
8011 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
8013 gcc_assert (REG_P (reg) && MEM_P (mem));
8014 mode_rtx = mem;
8016 else
8017 return -1;
8019 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
8020 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
8021 || get_attr_check_load (insn) == CHECK_LOAD_YES)
8022 /* Process speculative ld or ld.c. */
8024 gcc_assert (REG_P (reg) && MEM_P (mem));
8025 mode_rtx = mem;
8027 else
8029 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
8031 if (attr_class == ITANIUM_CLASS_CHK_A
8032 || attr_class == ITANIUM_CLASS_CHK_S_I
8033 || attr_class == ITANIUM_CLASS_CHK_S_F)
8034 /* Process chk. */
8035 mode_rtx = reg;
8036 else
8037 return -1;
8040 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
8042 if (mode_no == SPEC_MODE_INVALID)
8043 return -1;
8045 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
8047 if (extend_p)
8049 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
8050 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
8051 return -1;
8053 mode_no += SPEC_GEN_EXTEND_OFFSET;
8056 return mode_no;
8059 /* If X is an unspec part of a speculative load, return its code.
8060 Return -1 otherwise. */
8061 static int
8062 get_spec_unspec_code (const_rtx x)
8064 if (GET_CODE (x) != UNSPEC)
8065 return -1;
8068 int code;
8070 code = XINT (x, 1);
8072 switch (code)
8074 case UNSPEC_LDA:
8075 case UNSPEC_LDS:
8076 case UNSPEC_LDS_A:
8077 case UNSPEC_LDSA:
8078 return code;
8080 default:
8081 return -1;
8086 /* Implement skip_rtx_p hook. */
8087 static bool
8088 ia64_skip_rtx_p (const_rtx x)
8090 return get_spec_unspec_code (x) != -1;
8093 /* If INSN is a speculative load, return its UNSPEC code.
8094 Return -1 otherwise. */
8095 static int
8096 get_insn_spec_code (const_rtx insn)
8098 rtx pat, reg, mem;
8100 pat = PATTERN (insn);
8102 if (GET_CODE (pat) == COND_EXEC)
8103 pat = COND_EXEC_CODE (pat);
8105 if (GET_CODE (pat) != SET)
8106 return -1;
8108 reg = SET_DEST (pat);
8109 if (!REG_P (reg))
8110 return -1;
8112 mem = SET_SRC (pat);
8113 if (GET_CODE (mem) == ZERO_EXTEND)
8114 mem = XEXP (mem, 0);
8116 return get_spec_unspec_code (mem);
8119 /* If INSN is a speculative load, return a ds with the speculation types.
8120 Otherwise [if INSN is a normal instruction] return 0. */
8121 static ds_t
8122 ia64_get_insn_spec_ds (rtx_insn *insn)
8124 int code = get_insn_spec_code (insn);
8126 switch (code)
8128 case UNSPEC_LDA:
8129 return BEGIN_DATA;
8131 case UNSPEC_LDS:
8132 case UNSPEC_LDS_A:
8133 return BEGIN_CONTROL;
8135 case UNSPEC_LDSA:
8136 return BEGIN_DATA | BEGIN_CONTROL;
8138 default:
8139 return 0;
8143 /* If INSN is a speculative load return a ds with the speculation types that
8144 will be checked.
8145 Otherwise [if INSN is a normal instruction] return 0. */
8146 static ds_t
8147 ia64_get_insn_checked_ds (rtx_insn *insn)
8149 int code = get_insn_spec_code (insn);
8151 switch (code)
8153 case UNSPEC_LDA:
8154 return BEGIN_DATA | BEGIN_CONTROL;
8156 case UNSPEC_LDS:
8157 return BEGIN_CONTROL;
8159 case UNSPEC_LDS_A:
8160 case UNSPEC_LDSA:
8161 return BEGIN_DATA | BEGIN_CONTROL;
8163 default:
8164 return 0;
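/* Summary of the two mappings above (speculation performed by the
   load vs. speculation that its check will verify):

     UNSPEC_LDA  (ld.a)   spec: BEGIN_DATA
                          checked: BEGIN_DATA | BEGIN_CONTROL
     UNSPEC_LDS  (ld.s)   spec: BEGIN_CONTROL
                          checked: BEGIN_CONTROL
     UNSPEC_LDS_A         spec: BEGIN_CONTROL
                          checked: BEGIN_DATA | BEGIN_CONTROL
     UNSPEC_LDSA (ld.sa)  spec: BEGIN_DATA | BEGIN_CONTROL
                          checked: BEGIN_DATA | BEGIN_CONTROL  */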
8168 /* Return a speculative load pattern for INSN with speculation type TS and
8169 machine mode index MODE_NO; zero-extending variants are selected through
8170 MODE_NO itself (see SPEC_GEN_EXTEND_OFFSET above). The COND_EXEC
8171 predicate of INSN, if any, is preserved on the new pattern. */
8172 static rtx
8173 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
8175 rtx pat, new_pat;
8176 gen_func_t gen_load;
8178 gen_load = get_spec_load_gen_function (ts, mode_no);
8180 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8181 copy_rtx (recog_data.operand[1]));
8183 pat = PATTERN (insn);
8184 if (GET_CODE (pat) == COND_EXEC)
8185 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8186 new_pat);
8188 return new_pat;
8191 static bool
8192 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8193 ds_t ds ATTRIBUTE_UNUSED)
8195 return false;
8198 /* Implement targetm.sched.speculate_insn hook.
8199 Check if the INSN can be TS speculative.
8200 If 'no' - return -1.
8201 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
8202 If current pattern of the INSN already provides TS speculation,
8203 return 0. */
8204 static int
8205 ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
8207 int mode_no;
8208 int res;
8210 gcc_assert (!(ts & ~SPECULATIVE));
8212 if (ia64_spec_check_p (insn))
8213 return -1;
8215 if ((ts & BE_IN_SPEC)
8216 && !insn_can_be_in_speculative_p (insn, ts))
8217 return -1;
8219 mode_no = get_mode_no_for_insn (insn);
8221 if (mode_no != SPEC_MODE_INVALID)
8223 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8224 res = 0;
8225 else
8227 res = 1;
8228 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8231 else
8232 res = -1;
8234 return res;
8237 /* Return a function that will generate a check for speculation TS with mode
8238 MODE_NO.
8239 If simple check is needed, pass true for SIMPLE_CHECK_P.
8240 If clearing check is needed, pass true for CLEARING_CHECK_P. */
8241 static gen_func_t
8242 get_spec_check_gen_function (ds_t ts, int mode_no,
8243 bool simple_check_p, bool clearing_check_p)
8245 static gen_func_t gen_ld_c_clr[] = {
8246 gen_movbi_clr,
8247 gen_movqi_clr,
8248 gen_movhi_clr,
8249 gen_movsi_clr,
8250 gen_movdi_clr,
8251 gen_movsf_clr,
8252 gen_movdf_clr,
8253 gen_movxf_clr,
8254 gen_movti_clr,
8255 gen_zero_extendqidi2_clr,
8256 gen_zero_extendhidi2_clr,
8257 gen_zero_extendsidi2_clr,
8259 static gen_func_t gen_ld_c_nc[] = {
8260 gen_movbi_nc,
8261 gen_movqi_nc,
8262 gen_movhi_nc,
8263 gen_movsi_nc,
8264 gen_movdi_nc,
8265 gen_movsf_nc,
8266 gen_movdf_nc,
8267 gen_movxf_nc,
8268 gen_movti_nc,
8269 gen_zero_extendqidi2_nc,
8270 gen_zero_extendhidi2_nc,
8271 gen_zero_extendsidi2_nc,
8273 static gen_func_t gen_chk_a_clr[] = {
8274 gen_advanced_load_check_clr_bi,
8275 gen_advanced_load_check_clr_qi,
8276 gen_advanced_load_check_clr_hi,
8277 gen_advanced_load_check_clr_si,
8278 gen_advanced_load_check_clr_di,
8279 gen_advanced_load_check_clr_sf,
8280 gen_advanced_load_check_clr_df,
8281 gen_advanced_load_check_clr_xf,
8282 gen_advanced_load_check_clr_ti,
8283 gen_advanced_load_check_clr_di,
8284 gen_advanced_load_check_clr_di,
8285 gen_advanced_load_check_clr_di,
8287 static gen_func_t gen_chk_a_nc[] = {
8288 gen_advanced_load_check_nc_bi,
8289 gen_advanced_load_check_nc_qi,
8290 gen_advanced_load_check_nc_hi,
8291 gen_advanced_load_check_nc_si,
8292 gen_advanced_load_check_nc_di,
8293 gen_advanced_load_check_nc_sf,
8294 gen_advanced_load_check_nc_df,
8295 gen_advanced_load_check_nc_xf,
8296 gen_advanced_load_check_nc_ti,
8297 gen_advanced_load_check_nc_di,
8298 gen_advanced_load_check_nc_di,
8299 gen_advanced_load_check_nc_di,
8301 static gen_func_t gen_chk_s[] = {
8302 gen_speculation_check_bi,
8303 gen_speculation_check_qi,
8304 gen_speculation_check_hi,
8305 gen_speculation_check_si,
8306 gen_speculation_check_di,
8307 gen_speculation_check_sf,
8308 gen_speculation_check_df,
8309 gen_speculation_check_xf,
8310 gen_speculation_check_ti,
8311 gen_speculation_check_di,
8312 gen_speculation_check_di,
8313 gen_speculation_check_di,
8316 gen_func_t *gen_check;
8318 if (ts & BEGIN_DATA)
8320 /* We don't need recovery because, even if this is ld.sa, the
8321 ALAT entry will be allocated only if the NAT bit is set to zero.
8322 So it is enough to use ld.c here. */
8324 if (simple_check_p)
8326 gcc_assert (mflag_sched_spec_ldc);
8328 if (clearing_check_p)
8329 gen_check = gen_ld_c_clr;
8330 else
8331 gen_check = gen_ld_c_nc;
8333 else
8335 if (clearing_check_p)
8336 gen_check = gen_chk_a_clr;
8337 else
8338 gen_check = gen_chk_a_nc;
8341 else if (ts & BEGIN_CONTROL)
8343 if (simple_check_p)
8344 /* We might want to use ld.sa -> ld.c instead of
8345 ld.s -> chk.s. */
8347 gcc_assert (!ia64_needs_block_p (ts));
8349 if (clearing_check_p)
8350 gen_check = gen_ld_c_clr;
8351 else
8352 gen_check = gen_ld_c_nc;
8354 else
8356 gen_check = gen_chk_s;
8359 else
8360 gcc_unreachable ();
8362 gcc_assert (mode_no >= 0);
8363 return gen_check[mode_no];
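/* Summary of the selection above: for data speculation (BEGIN_DATA) a
   simple check becomes an ld.c (clearing or non-clearing, as
   requested) and a full check becomes a chk.a; for pure control
   speculation a simple check again uses ld.c, otherwise a chk.s is
   generated through the gen_speculation_check_* patterns.  */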
8366 /* Return nonzero if speculation type TS needs a branchy recovery check. */
8367 static bool
8368 ia64_needs_block_p (ds_t ts)
8370 if (ts & BEGIN_DATA)
8371 return !mflag_sched_spec_ldc;
8373 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8375 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8378 /* Generate (or regenerate) a recovery check for INSN. */
8379 static rtx
8380 ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
8382 rtx op1, pat, check_pat;
8383 gen_func_t gen_check;
8384 int mode_no;
8386 mode_no = get_mode_no_for_insn (insn);
8387 gcc_assert (mode_no >= 0);
8389 if (label)
8390 op1 = label;
8391 else
8393 gcc_assert (!ia64_needs_block_p (ds));
8394 op1 = copy_rtx (recog_data.operand[1]);
8397 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8398 true);
8400 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8402 pat = PATTERN (insn);
8403 if (GET_CODE (pat) == COND_EXEC)
8404 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8405 check_pat);
8407 return check_pat;
8410 /* Return nonzero if X is a branchy recovery check. */
8411 static int
8412 ia64_spec_check_p (rtx x)
8414 x = PATTERN (x);
8415 if (GET_CODE (x) == COND_EXEC)
8416 x = COND_EXEC_CODE (x);
8417 if (GET_CODE (x) == SET)
8418 return ia64_spec_check_src_p (SET_SRC (x));
8419 return 0;
8422 /* Return nonzero if SRC belongs to a recovery check. */
8423 static int
8424 ia64_spec_check_src_p (rtx src)
8426 if (GET_CODE (src) == IF_THEN_ELSE)
8428 rtx t;
8430 t = XEXP (src, 0);
8431 if (GET_CODE (t) == NE)
8433 t = XEXP (t, 0);
8435 if (GET_CODE (t) == UNSPEC)
8437 int code;
8439 code = XINT (t, 1);
8441 if (code == UNSPEC_LDCCLR
8442 || code == UNSPEC_LDCNC
8443 || code == UNSPEC_CHKACLR
8444 || code == UNSPEC_CHKANC
8445 || code == UNSPEC_CHKS)
8447 gcc_assert (code != 0);
8448 return code;
8453 return 0;
8457 /* The following page contains abstract data `bundle states' which are
8458 used for bundling insns (inserting nops and template generation). */
8460 /* The following describes state of insn bundling. */
8462 struct bundle_state
8464 /* Unique bundle state number to identify them in the debugging
8465 output */
8466 int unique_num;
8467 rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state */
8468 /* number of nops before and after the insn */
8469 short before_nops_num, after_nops_num;
8470 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
8471 insn) */
8472 int cost; /* cost of the state in cycles */
8473 int accumulated_insns_num; /* number of all previous insns including
8474 nops. L is considered as 2 insns */
8475 int branch_deviation; /* deviation of previous branches from 3rd slots */
8476 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8477 struct bundle_state *next; /* next state with the same insn_num */
8478 struct bundle_state *originator; /* originator (previous insn state) */
8479 /* All bundle states are in the following chain. */
8480 struct bundle_state *allocated_states_chain;
8481 /* The DFA State after issuing the insn and the nops. */
8482 state_t dfa_state;
8485 /* The following maps an insn number to the corresponding bundle state. */
8487 static struct bundle_state **index_to_bundle_states;
8489 /* The unique number of next bundle state. */
8491 static int bundle_states_num;
8493 /* All allocated bundle states are in the following chain. */
8495 static struct bundle_state *allocated_bundle_states_chain;
8497 /* All allocated but not used bundle states are in the following
8498 chain. */
8500 static struct bundle_state *free_bundle_state_chain;
8503 /* The following function returns a free bundle state. */
8505 static struct bundle_state *
8506 get_free_bundle_state (void)
8508 struct bundle_state *result;
8510 if (free_bundle_state_chain != NULL)
8512 result = free_bundle_state_chain;
8513 free_bundle_state_chain = result->next;
8515 else
8517 result = XNEW (struct bundle_state);
8518 result->dfa_state = xmalloc (dfa_state_size);
8519 result->allocated_states_chain = allocated_bundle_states_chain;
8520 allocated_bundle_states_chain = result;
8522 result->unique_num = bundle_states_num++;
8523 return result;
8527 /* The following function frees the given bundle state. */
8529 static void
8530 free_bundle_state (struct bundle_state *state)
8532 state->next = free_bundle_state_chain;
8533 free_bundle_state_chain = state;
8536 /* Start work with abstract data `bundle states'. */
8538 static void
8539 initiate_bundle_states (void)
8541 bundle_states_num = 0;
8542 free_bundle_state_chain = NULL;
8543 allocated_bundle_states_chain = NULL;
8546 /* Finish work with abstract data `bundle states'. */
8548 static void
8549 finish_bundle_states (void)
8551 struct bundle_state *curr_state, *next_state;
8553 for (curr_state = allocated_bundle_states_chain;
8554 curr_state != NULL;
8555 curr_state = next_state)
8557 next_state = curr_state->allocated_states_chain;
8558 free (curr_state->dfa_state);
8559 free (curr_state);
8563 /* Hashtable helpers. */
8565 struct bundle_state_hasher : typed_noop_remove <bundle_state>
8567 typedef bundle_state value_type;
8568 typedef bundle_state compare_type;
8569 static inline hashval_t hash (const value_type *);
8570 static inline bool equal (const value_type *, const compare_type *);
8573 /* The function returns hash of BUNDLE_STATE. */
8575 inline hashval_t
8576 bundle_state_hasher::hash (const value_type *state)
8578 unsigned result, i;
8580 for (result = i = 0; i < dfa_state_size; i++)
8581 result += (((unsigned char *) state->dfa_state) [i]
8582 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8583 return result + state->insn_num;
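/* Worked example of the hash above: with dfa_state_size == 2, state
   bytes { 0x01, 0x02 } and insn_num == 5, the shift amounts are
   (0 % CHAR_BIT) * 3 + CHAR_BIT == 8 and (1 % CHAR_BIT) * 3 + CHAR_BIT
   == 11, giving (0x01 << 8) + (0x02 << 11) + 5 == 256 + 4096 + 5
   == 4357.  */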
8586 /* The function returns nonzero if the bundle state keys are equal. */
8588 inline bool
8589 bundle_state_hasher::equal (const value_type *state1,
8590 const compare_type *state2)
8592 return (state1->insn_num == state2->insn_num
8593 && memcmp (state1->dfa_state, state2->dfa_state,
8594 dfa_state_size) == 0);
8597 /* Hash table of the bundle states. The key is dfa_state and insn_num
8598 of the bundle states. */
8600 static hash_table<bundle_state_hasher> *bundle_state_table;
8602 /* The function inserts the BUNDLE_STATE into the hash table. The
8603 function returns nonzero if the bundle has been inserted into the
8604 table. The table contains the best bundle state with given key. */
8606 static int
8607 insert_bundle_state (struct bundle_state *bundle_state)
8609 struct bundle_state **entry_ptr;
8611 entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
8612 if (*entry_ptr == NULL)
8614 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8615 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8616 *entry_ptr = bundle_state;
8617 return TRUE;
8619 else if (bundle_state->cost < (*entry_ptr)->cost
8620 || (bundle_state->cost == (*entry_ptr)->cost
8621 && ((*entry_ptr)->accumulated_insns_num
8622 > bundle_state->accumulated_insns_num
8623 || ((*entry_ptr)->accumulated_insns_num
8624 == bundle_state->accumulated_insns_num
8625 && ((*entry_ptr)->branch_deviation
8626 > bundle_state->branch_deviation
8627 || ((*entry_ptr)->branch_deviation
8628 == bundle_state->branch_deviation
8629 && (*entry_ptr)->middle_bundle_stops
8630 > bundle_state->middle_bundle_stops))))))
8633 struct bundle_state temp;
8635 temp = **entry_ptr;
8636 **entry_ptr = *bundle_state;
8637 (*entry_ptr)->next = temp.next;
8638 *bundle_state = temp;
8640 return FALSE;
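/* The test above is a lexicographic comparison: the new state replaces
   the hashed one when it has a lower cost, or an equal cost and fewer
   accumulated insns (i.e. fewer inserted nops), or ties on both and a
   smaller branch deviation, or ties on all three and fewer
   middle-of-bundle stop bits.  For example, a state with
   (cost 2, insns 7, deviation 1, stops 0) displaces one with
   (cost 2, insns 8, deviation 0, stops 0), because the insn count is
   compared before the deviation.  */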
8643 /* Start work with the hash table. */
8645 static void
8646 initiate_bundle_state_table (void)
8648 bundle_state_table = new hash_table<bundle_state_hasher> (50);
8651 /* Finish work with the hash table. */
8653 static void
8654 finish_bundle_state_table (void)
8656 delete bundle_state_table;
8657 bundle_state_table = NULL;
8662 /* The following variable is an insn `nop' used to check bundle states
8663 with different number of inserted nops. */
8665 static rtx_insn *ia64_nop;
8667 /* The following function tries to issue NOPS_NUM nops for the current
8668 state without advancing the processor cycle. If it fails, the
8669 function returns FALSE and frees the current state. */
8671 static int
8672 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8674 int i;
8676 for (i = 0; i < nops_num; i++)
8677 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8679 free_bundle_state (curr_state);
8680 return FALSE;
8682 return TRUE;
8685 /* The following function tries to issue INSN for the current
8686 state without advancing the processor cycle. If it fails, the
8687 function returns FALSE and frees the current state. */
8689 static int
8690 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8692 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8694 free_bundle_state (curr_state);
8695 return FALSE;
8697 return TRUE;
8700 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8701 starting with ORIGINATOR without advancing the processor cycle. If
8702 TRY_BUNDLE_END_P is TRUE, the function also/only (if
8703 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8704 If successful, the function creates a new bundle state and inserts it
8705 into the hash table and into `index_to_bundle_states'. */
8707 static void
8708 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8709 rtx_insn *insn, int try_bundle_end_p,
8710 int only_bundle_end_p)
8712 struct bundle_state *curr_state;
8714 curr_state = get_free_bundle_state ();
8715 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8716 curr_state->insn = insn;
8717 curr_state->insn_num = originator->insn_num + 1;
8718 curr_state->cost = originator->cost;
8719 curr_state->originator = originator;
8720 curr_state->before_nops_num = before_nops_num;
8721 curr_state->after_nops_num = 0;
8722 curr_state->accumulated_insns_num
8723 = originator->accumulated_insns_num + before_nops_num;
8724 curr_state->branch_deviation = originator->branch_deviation;
8725 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8726 gcc_assert (insn);
8727 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8729 gcc_assert (GET_MODE (insn) != TImode);
8730 if (!try_issue_nops (curr_state, before_nops_num))
8731 return;
8732 if (!try_issue_insn (curr_state, insn))
8733 return;
8734 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8735 if (curr_state->accumulated_insns_num % 3 != 0)
8736 curr_state->middle_bundle_stops++;
8737 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8738 && curr_state->accumulated_insns_num % 3 != 0)
8740 free_bundle_state (curr_state);
8741 return;
8744 else if (GET_MODE (insn) != TImode)
8746 if (!try_issue_nops (curr_state, before_nops_num))
8747 return;
8748 if (!try_issue_insn (curr_state, insn))
8749 return;
8750 curr_state->accumulated_insns_num++;
8751 gcc_assert (!unknown_for_bundling_p (insn));
8753 if (ia64_safe_type (insn) == TYPE_L)
8754 curr_state->accumulated_insns_num++;
8756 else
8758 /* If this is an insn that must be first in a group, then don't allow
8759 nops to be emitted before it. Currently, alloc is the only such
8760 supported instruction. */
8761 /* ??? The bundling automatons should handle this for us, but they do
8762 not yet have support for the first_insn attribute. */
8763 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8765 free_bundle_state (curr_state);
8766 return;
8769 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8770 state_transition (curr_state->dfa_state, NULL);
8771 curr_state->cost++;
8772 if (!try_issue_nops (curr_state, before_nops_num))
8773 return;
8774 if (!try_issue_insn (curr_state, insn))
8775 return;
8776 curr_state->accumulated_insns_num++;
8777 if (unknown_for_bundling_p (insn))
8779 /* Finish bundle containing asm insn. */
8780 curr_state->after_nops_num
8781 = 3 - curr_state->accumulated_insns_num % 3;
8782 curr_state->accumulated_insns_num
8783 += 3 - curr_state->accumulated_insns_num % 3;
8785 else if (ia64_safe_type (insn) == TYPE_L)
8786 curr_state->accumulated_insns_num++;
8788 if (ia64_safe_type (insn) == TYPE_B)
8789 curr_state->branch_deviation
8790 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8791 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8793 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8795 state_t dfa_state;
8796 struct bundle_state *curr_state1;
8797 struct bundle_state *allocated_states_chain;
8799 curr_state1 = get_free_bundle_state ();
8800 dfa_state = curr_state1->dfa_state;
8801 allocated_states_chain = curr_state1->allocated_states_chain;
8802 *curr_state1 = *curr_state;
8803 curr_state1->dfa_state = dfa_state;
8804 curr_state1->allocated_states_chain = allocated_states_chain;
8805 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8806 dfa_state_size);
8807 curr_state = curr_state1;
8809 if (!try_issue_nops (curr_state,
8810 3 - curr_state->accumulated_insns_num % 3))
8811 return;
8812 curr_state->after_nops_num
8813 = 3 - curr_state->accumulated_insns_num % 3;
8814 curr_state->accumulated_insns_num
8815 += 3 - curr_state->accumulated_insns_num % 3;
8817 if (!insert_bundle_state (curr_state))
8818 free_bundle_state (curr_state);
8819 return;
8822 /* The following function returns the position in the two-bundle
8823 window for the given STATE. */
8825 static int
8826 get_max_pos (state_t state)
8828 if (cpu_unit_reservation_p (state, pos_6))
8829 return 6;
8830 else if (cpu_unit_reservation_p (state, pos_5))
8831 return 5;
8832 else if (cpu_unit_reservation_p (state, pos_4))
8833 return 4;
8834 else if (cpu_unit_reservation_p (state, pos_3))
8835 return 3;
8836 else if (cpu_unit_reservation_p (state, pos_2))
8837 return 2;
8838 else if (cpu_unit_reservation_p (state, pos_1))
8839 return 1;
8840 else
8841 return 0;
8844 /* The function returns the code of a possible template for the given
8845 position and state. It should be called only with POS equal to
8846 3 or 6. We avoid generating F NOPs by putting templates containing
8847 F insns at the end of the template search, because of an undocumented
8848 anomaly in McKinley-derived cores which can cause stalls if an F-unit
8849 insn (including a NOP) is issued within a six-cycle window after
8850 reading certain application registers (such as ar.bsp). Furthermore,
8851 power considerations also argue against the use of F-unit instructions
8852 unless they're really needed. */
8854 static int
8855 get_template (state_t state, int pos)
8857 switch (pos)
8859 case 3:
8860 if (cpu_unit_reservation_p (state, _0mmi_))
8861 return 1;
8862 else if (cpu_unit_reservation_p (state, _0mii_))
8863 return 0;
8864 else if (cpu_unit_reservation_p (state, _0mmb_))
8865 return 7;
8866 else if (cpu_unit_reservation_p (state, _0mib_))
8867 return 6;
8868 else if (cpu_unit_reservation_p (state, _0mbb_))
8869 return 5;
8870 else if (cpu_unit_reservation_p (state, _0bbb_))
8871 return 4;
8872 else if (cpu_unit_reservation_p (state, _0mmf_))
8873 return 3;
8874 else if (cpu_unit_reservation_p (state, _0mfi_))
8875 return 2;
8876 else if (cpu_unit_reservation_p (state, _0mfb_))
8877 return 8;
8878 else if (cpu_unit_reservation_p (state, _0mlx_))
8879 return 9;
8880 else
8881 gcc_unreachable ();
8882 case 6:
8883 if (cpu_unit_reservation_p (state, _1mmi_))
8884 return 1;
8885 else if (cpu_unit_reservation_p (state, _1mii_))
8886 return 0;
8887 else if (cpu_unit_reservation_p (state, _1mmb_))
8888 return 7;
8889 else if (cpu_unit_reservation_p (state, _1mib_))
8890 return 6;
8891 else if (cpu_unit_reservation_p (state, _1mbb_))
8892 return 5;
8893 else if (cpu_unit_reservation_p (state, _1bbb_))
8894 return 4;
8895 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8896 return 3;
8897 else if (cpu_unit_reservation_p (state, _1mfi_))
8898 return 2;
8899 else if (cpu_unit_reservation_p (state, _1mfb_))
8900 return 8;
8901 else if (cpu_unit_reservation_p (state, _1mlx_))
8902 return 9;
8903 else
8904 gcc_unreachable ();
8905 default:
8906 gcc_unreachable ();
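/* The template codes returned above can be read off the cpu-unit names
   that are queried:
     0 = .mii   1 = .mmi   2 = .mfi   3 = .mmf   4 = .bbb
     5 = .mbb   6 = .mib   7 = .mmb   8 = .mfb   9 = .mlx
   which matches the template0 == 4 || template0 == 5 test (.bbb and
   .mbb) in ia64_add_bundle_selector_before below.  */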
8910 /* True when INSN is important for bundling. */
8912 static bool
8913 important_for_bundling_p (rtx_insn *insn)
8915 return (INSN_P (insn)
8916 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8917 && GET_CODE (PATTERN (insn)) != USE
8918 && GET_CODE (PATTERN (insn)) != CLOBBER);
8921 /* The following function returns the next insn important for insn
8922 bundling, starting from INSN and stopping before TAIL. */
8924 static rtx_insn *
8925 get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
8927 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8928 if (important_for_bundling_p (insn))
8929 return insn;
8930 return NULL;
8933 /* True when INSN is unknown, but important, for bundling. */
8935 static bool
8936 unknown_for_bundling_p (rtx_insn *insn)
8938 return (INSN_P (insn)
8939 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
8940 && GET_CODE (PATTERN (insn)) != USE
8941 && GET_CODE (PATTERN (insn)) != CLOBBER);
8944 /* Add a bundle selector TEMPLATE0 before INSN. */
8946 static void
8947 ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
8949 rtx b = gen_bundle_selector (GEN_INT (template0));
8951 ia64_emit_insn_before (b, insn);
8952 #if NR_BUNDLES == 10
8953 if ((template0 == 4 || template0 == 5)
8954 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8956 int i;
8957 rtx note = NULL_RTX;
8959 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8960 first or second slot. If it is and has a REG_EH_REGION note, copy it
8961 to following nops, as br.call sets rp to the address of following
8962 bundle and therefore an EH region end must be on a bundle
8963 boundary. */
8964 insn = PREV_INSN (insn);
8965 for (i = 0; i < 3; i++)
8968 insn = next_active_insn (insn);
8969 while (NONJUMP_INSN_P (insn)
8970 && get_attr_empty (insn) == EMPTY_YES);
8971 if (CALL_P (insn))
8972 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8973 else if (note)
8975 int code;
8977 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8978 || code == CODE_FOR_nop_b);
8979 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8980 note = NULL_RTX;
8981 else
8982 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8986 #endif
8989 /* The following function does insn bundling. Bundling means
8990 inserting templates and nop insns to fit insn groups into permitted
8991 templates. Instruction scheduling uses NDFA (non-deterministic
8992 finite automata) encoding information about the templates and the
8993 inserted nops. The nondeterminism of the automaton permits following
8994 all possible insn sequences very quickly.
8996 Unfortunately it is not possible to get information about inserting
8997 nop insns and used templates from the automata states. The
8998 automaton only says that we can issue an insn, possibly inserting
8999 some nops before it and using some template. Therefore insn
9000 bundling in this function is implemented by using DFA
9001 (deterministic finite automata). We follow all possible insn
9002 sequences by inserting 0-2 nops (that is what the NDFA describes for
9003 insn scheduling) before/after each insn being bundled. We know the
9004 start of simulated processor cycle from insn scheduling (insn
9005 starting a new cycle has TImode).
9007 A simple implementation of insn bundling would create an enormous
9008 number of possible insn sequences satisfying information about new
9009 cycle ticks taken from the insn scheduling. To make the algorithm
9010 practical we use dynamic programming. Each decision (about
9011 inserting nops and implicitly about previous decisions) is described
9012 by structure bundle_state (see above). If we generate the same
9013 bundle state (key is automaton state after issuing the insns and
9014 nops for it), we reuse the already generated one. As a consequence
9015 we reject some decisions which cannot improve the solution and
9016 reduce the memory used by the algorithm.
9018 When we reach the end of EBB (extended basic block), we choose the
9019 best sequence and then, moving back in EBB, insert templates for
9020 the best alternative. The templates are taken from querying
9021 automaton state for each insn in chosen bundle states.
9023 So the algorithm makes two (forward and backward) passes through
9024 EBB. */
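/* Illustrative walk-through, assuming a single simulated cycle that
   issues an M, an I and a B insn: the forward pass calls
   issue_nops_and_insn for each insn with 0, 1 and (for F/B/L/S insns)
   2 preceding nops, so after the B insn the table holds, among
   others, a state whose accumulated_insns_num is a multiple of three
   (a filled bundle, e.g. an .mib template) together with more costly
   states that needed extra nops; insert_bundle_state keeps only the
   best state for every (dfa_state, insn_num) key, and the backward
   pass then walks the originator chain of the cheapest fully filled
   final state, emitting the nops and bundle selectors it recorded.  */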
9026 static void
9027 bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
9029 struct bundle_state *curr_state, *next_state, *best_state;
9030 rtx_insn *insn, *next_insn;
9031 int insn_num;
9032 int i, bundle_end_p, only_bundle_end_p, asm_p;
9033 int pos = 0, max_pos, template0, template1;
9034 rtx_insn *b;
9035 enum attr_type type;
9037 insn_num = 0;
9038 /* Count insns in the EBB. */
9039 for (insn = NEXT_INSN (prev_head_insn);
9040 insn && insn != tail;
9041 insn = NEXT_INSN (insn))
9042 if (INSN_P (insn))
9043 insn_num++;
9044 if (insn_num == 0)
9045 return;
9046 bundling_p = 1;
9047 dfa_clean_insn_cache ();
9048 initiate_bundle_state_table ();
9049 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
9050 /* First (forward) pass -- generation of bundle states. */
9051 curr_state = get_free_bundle_state ();
9052 curr_state->insn = NULL;
9053 curr_state->before_nops_num = 0;
9054 curr_state->after_nops_num = 0;
9055 curr_state->insn_num = 0;
9056 curr_state->cost = 0;
9057 curr_state->accumulated_insns_num = 0;
9058 curr_state->branch_deviation = 0;
9059 curr_state->middle_bundle_stops = 0;
9060 curr_state->next = NULL;
9061 curr_state->originator = NULL;
9062 state_reset (curr_state->dfa_state);
9063 index_to_bundle_states [0] = curr_state;
9064 insn_num = 0;
9065 /* Shift the cycle mark if it is put on an insn which could be ignored. */
9066 for (insn = NEXT_INSN (prev_head_insn);
9067 insn != tail;
9068 insn = NEXT_INSN (insn))
9069 if (INSN_P (insn)
9070 && !important_for_bundling_p (insn)
9071 && GET_MODE (insn) == TImode)
9073 PUT_MODE (insn, VOIDmode);
9074 for (next_insn = NEXT_INSN (insn);
9075 next_insn != tail;
9076 next_insn = NEXT_INSN (next_insn))
9077 if (important_for_bundling_p (next_insn)
9078 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
9080 PUT_MODE (next_insn, TImode);
9081 break;
9084 /* Forward pass: generation of bundle states. */
9085 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
9086 insn != NULL_RTX;
9087 insn = next_insn)
9089 gcc_assert (important_for_bundling_p (insn));
9090 type = ia64_safe_type (insn);
9091 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
9092 insn_num++;
9093 index_to_bundle_states [insn_num] = NULL;
9094 for (curr_state = index_to_bundle_states [insn_num - 1];
9095 curr_state != NULL;
9096 curr_state = next_state)
9098 pos = curr_state->accumulated_insns_num % 3;
9099 next_state = curr_state->next;
9100 /* We must fill up the current bundle in order to start a
9101 subsequent asm insn in a new bundle. Asm insn is always
9102 placed in a separate bundle. */
9103 only_bundle_end_p
9104 = (next_insn != NULL_RTX
9105 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
9106 && unknown_for_bundling_p (next_insn));
9107 /* We may fill up the current bundle if it is the cycle end
9108 without a group barrier. */
9109 bundle_end_p
9110 = (only_bundle_end_p || next_insn == NULL_RTX
9111 || (GET_MODE (next_insn) == TImode
9112 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
9113 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
9114 || type == TYPE_S)
9115 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
9116 only_bundle_end_p);
9117 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
9118 only_bundle_end_p);
9119 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
9120 only_bundle_end_p);
9122 gcc_assert (index_to_bundle_states [insn_num]);
9123 for (curr_state = index_to_bundle_states [insn_num];
9124 curr_state != NULL;
9125 curr_state = curr_state->next)
9126 if (verbose >= 2 && dump)
9128 /* This structure is taken from generated code of the
9129 pipeline hazard recognizer (see file insn-attrtab.c).
9130 Please don't forget to change the structure if a new
9131 automaton is added to .md file. */
9132 struct DFA_chip
9134 unsigned short one_automaton_state;
9135 unsigned short oneb_automaton_state;
9136 unsigned short two_automaton_state;
9137 unsigned short twob_automaton_state;
9140 fprintf
9141 (dump,
9142 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
9143 curr_state->unique_num,
9144 (curr_state->originator == NULL
9145 ? -1 : curr_state->originator->unique_num),
9146 curr_state->cost,
9147 curr_state->before_nops_num, curr_state->after_nops_num,
9148 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9149 curr_state->middle_bundle_stops,
9150 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9151 INSN_UID (insn));
9155 /* We should find a solution because the 2nd insn scheduling has
9156 found one. */
9157 gcc_assert (index_to_bundle_states [insn_num]);
9158 /* Find a state corresponding to the best insn sequence. */
9159 best_state = NULL;
9160 for (curr_state = index_to_bundle_states [insn_num];
9161 curr_state != NULL;
9162 curr_state = curr_state->next)
9163 /* We are only looking at states whose last bundle is fully filled.
9164 First we prefer insn sequences with minimal cost, then with minimal
9165 inserted nops, and finally with branch insns placed in the 3rd
9166 slots. */
9167 if (curr_state->accumulated_insns_num % 3 == 0
9168 && (best_state == NULL || best_state->cost > curr_state->cost
9169 || (best_state->cost == curr_state->cost
9170 && (curr_state->accumulated_insns_num
9171 < best_state->accumulated_insns_num
9172 || (curr_state->accumulated_insns_num
9173 == best_state->accumulated_insns_num
9174 && (curr_state->branch_deviation
9175 < best_state->branch_deviation
9176 || (curr_state->branch_deviation
9177 == best_state->branch_deviation
9178 && curr_state->middle_bundle_stops
9179 < best_state->middle_bundle_stops)))))))
9180 best_state = curr_state;
9181 /* Second (backward) pass: adding nops and templates. */
9182 gcc_assert (best_state);
9183 insn_num = best_state->before_nops_num;
9184 template0 = template1 = -1;
9185 for (curr_state = best_state;
9186 curr_state->originator != NULL;
9187 curr_state = curr_state->originator)
9189 insn = curr_state->insn;
9190 asm_p = unknown_for_bundling_p (insn);
9191 insn_num++;
9192 if (verbose >= 2 && dump)
9194 struct DFA_chip
9196 unsigned short one_automaton_state;
9197 unsigned short oneb_automaton_state;
9198 unsigned short two_automaton_state;
9199 unsigned short twob_automaton_state;
9202 fprintf
9203 (dump,
9204 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9205 curr_state->unique_num,
9206 (curr_state->originator == NULL
9207 ? -1 : curr_state->originator->unique_num),
9208 curr_state->cost,
9209 curr_state->before_nops_num, curr_state->after_nops_num,
9210 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9211 curr_state->middle_bundle_stops,
9212 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9213 INSN_UID (insn));
9215 /* Find the position in the current bundle window. The window can
9216 contain at most two bundles. A two-bundle window means that
9217 the processor will make two bundle rotations. */
9218 max_pos = get_max_pos (curr_state->dfa_state);
9219 if (max_pos == 6
9220 /* The following (negative template number) means that the
9221 processor did one bundle rotation. */
9222 || (max_pos == 3 && template0 < 0))
9224 /* We are at the end of the window -- find template(s) for
9225 its bundle(s). */
9226 pos = max_pos;
9227 if (max_pos == 3)
9228 template0 = get_template (curr_state->dfa_state, 3);
9229 else
9231 template1 = get_template (curr_state->dfa_state, 3);
9232 template0 = get_template (curr_state->dfa_state, 6);
9235 if (max_pos > 3 && template1 < 0)
9236 /* It may happen when we have the stop inside a bundle. */
9238 gcc_assert (pos <= 3);
9239 template1 = get_template (curr_state->dfa_state, 3);
9240 pos += 3;
9242 if (!asm_p)
9243 /* Emit nops after the current insn. */
9244 for (i = 0; i < curr_state->after_nops_num; i++)
9246 rtx nop_pat = gen_nop ();
9247 rtx_insn *nop = emit_insn_after (nop_pat, insn);
9248 pos--;
9249 gcc_assert (pos >= 0);
9250 if (pos % 3 == 0)
9252 /* We are at the start of a bundle: emit the template
9253 (it should be defined). */
9254 gcc_assert (template0 >= 0);
9255 ia64_add_bundle_selector_before (template0, nop);
9256 /* If we have a two-bundle window, we make one bundle
9257 rotation. Otherwise template0 will be undefined
9258 (negative value). */
9259 template0 = template1;
9260 template1 = -1;
9263 /* Move the position backward in the window. A group barrier has
9264 no slot. An asm insn takes a whole bundle. */
9265 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9266 && !unknown_for_bundling_p (insn))
9267 pos--;
9268 /* Long insn takes 2 slots. */
9269 if (ia64_safe_type (insn) == TYPE_L)
9270 pos--;
9271 gcc_assert (pos >= 0);
9272 if (pos % 3 == 0
9273 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9274 && !unknown_for_bundling_p (insn))
9276 /* The current insn is at the bundle start: emit the
9277 template. */
9278 gcc_assert (template0 >= 0);
9279 ia64_add_bundle_selector_before (template0, insn);
9280 b = PREV_INSN (insn);
9281 insn = b;
9282 /* See comment above in analogous place for emitting nops
9283 after the insn. */
9284 template0 = template1;
9285 template1 = -1;
9287 /* Emit nops before the current insn. */
9288 for (i = 0; i < curr_state->before_nops_num; i++)
9290 rtx nop_pat = gen_nop ();
9291 ia64_emit_insn_before (nop_pat, insn);
9292 rtx_insn *nop = PREV_INSN (insn);
9293 insn = nop;
9294 pos--;
9295 gcc_assert (pos >= 0);
9296 if (pos % 3 == 0)
9298 /* See comment above in analogous place for emitting nops
9299 after the insn. */
9300 gcc_assert (template0 >= 0);
9301 ia64_add_bundle_selector_before (template0, insn);
9302 b = PREV_INSN (insn);
9303 insn = b;
9304 template0 = template1;
9305 template1 = -1;
9310 #ifdef ENABLE_CHECKING
9312 /* Assert right calculation of middle_bundle_stops. */
9313 int num = best_state->middle_bundle_stops;
9314 bool start_bundle = true, end_bundle = false;
9316 for (insn = NEXT_INSN (prev_head_insn);
9317 insn && insn != tail;
9318 insn = NEXT_INSN (insn))
9320 if (!INSN_P (insn))
9321 continue;
9322 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9323 start_bundle = true;
9324 else
9326 rtx_insn *next_insn;
9328 for (next_insn = NEXT_INSN (insn);
9329 next_insn && next_insn != tail;
9330 next_insn = NEXT_INSN (next_insn))
9331 if (INSN_P (next_insn)
9332 && (ia64_safe_itanium_class (next_insn)
9333 != ITANIUM_CLASS_IGNORE
9334 || recog_memoized (next_insn)
9335 == CODE_FOR_bundle_selector)
9336 && GET_CODE (PATTERN (next_insn)) != USE
9337 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9338 break;
9340 end_bundle = next_insn == NULL_RTX
9341 || next_insn == tail
9342 || (INSN_P (next_insn)
9343 && recog_memoized (next_insn)
9344 == CODE_FOR_bundle_selector);
9345 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9346 && !start_bundle && !end_bundle
9347 && next_insn
9348 && !unknown_for_bundling_p (next_insn))
9349 num--;
9351 start_bundle = false;
9355 gcc_assert (num == 0);
9357 #endif
9359 free (index_to_bundle_states);
9360 finish_bundle_state_table ();
9361 bundling_p = 0;
9362 dfa_clean_insn_cache ();
9365 /* The following function is called at the end of scheduling BB or
9366 EBB. After reload, it inserts stop bits and does insn bundling. */
9368 static void
9369 ia64_sched_finish (FILE *dump, int sched_verbose)
9371 if (sched_verbose)
9372 fprintf (dump, "// Finishing schedule.\n");
9373 if (!reload_completed)
9374 return;
9375 if (reload_completed)
9377 final_emit_insn_group_barriers (dump);
9378 bundling (dump, sched_verbose, current_sched_info->prev_head,
9379 current_sched_info->next_tail);
9380 if (sched_verbose && dump)
9381 fprintf (dump, "// finishing %d-%d\n",
9382 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9383 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9385 return;
9389 /* The following function inserts stop bits in scheduled BB or EBB. */
9391 static void
9392 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9394 rtx_insn *insn;
9395 int need_barrier_p = 0;
9396 int seen_good_insn = 0;
9398 init_insn_group_barriers ();
9400 for (insn = NEXT_INSN (current_sched_info->prev_head);
9401 insn != current_sched_info->next_tail;
9402 insn = NEXT_INSN (insn))
9404 if (BARRIER_P (insn))
9406 rtx_insn *last = prev_active_insn (insn);
9408 if (! last)
9409 continue;
9410 if (JUMP_TABLE_DATA_P (last))
9411 last = prev_active_insn (last);
9412 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9413 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9415 init_insn_group_barriers ();
9416 seen_good_insn = 0;
9417 need_barrier_p = 0;
9419 else if (NONDEBUG_INSN_P (insn))
9421 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9423 init_insn_group_barriers ();
9424 seen_good_insn = 0;
9425 need_barrier_p = 0;
9427 else if (need_barrier_p || group_barrier_needed (insn)
9428 || (mflag_sched_stop_bits_after_every_cycle
9429 && GET_MODE (insn) == TImode
9430 && seen_good_insn))
9432 if (TARGET_EARLY_STOP_BITS)
9434 rtx_insn *last;
9436 for (last = insn;
9437 last != current_sched_info->prev_head;
9438 last = PREV_INSN (last))
9439 if (INSN_P (last) && GET_MODE (last) == TImode
9440 && stops_p [INSN_UID (last)])
9441 break;
9442 if (last == current_sched_info->prev_head)
9443 last = insn;
9444 last = prev_active_insn (last);
9445 if (last
9446 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9447 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9448 last);
9449 init_insn_group_barriers ();
9450 for (last = NEXT_INSN (last);
9451 last != insn;
9452 last = NEXT_INSN (last))
9453 if (INSN_P (last))
9455 group_barrier_needed (last);
9456 if (recog_memoized (last) >= 0
9457 && important_for_bundling_p (last))
9458 seen_good_insn = 1;
9461 else
9463 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9464 insn);
9465 init_insn_group_barriers ();
9466 seen_good_insn = 0;
9468 group_barrier_needed (insn);
9469 if (recog_memoized (insn) >= 0
9470 && important_for_bundling_p (insn))
9471 seen_good_insn = 1;
9473 else if (recog_memoized (insn) >= 0
9474 && important_for_bundling_p (insn))
9475 seen_good_insn = 1;
9476 need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
9483 /* The following function returns the size of the lookahead window used
9484 by the multipass DFA insn scheduler. */
9486 static int
9487 ia64_first_cycle_multipass_dfa_lookahead (void)
9489 return (reload_completed ? 6 : 4);
9492 /* The following function initiates variable `dfa_pre_cycle_insn'. */
9494 static void
9495 ia64_init_dfa_pre_cycle_insn (void)
9497 if (temp_dfa_state == NULL)
9499 dfa_state_size = state_size ();
9500 temp_dfa_state = xmalloc (dfa_state_size);
9501 prev_cycle_state = xmalloc (dfa_state_size);
9503 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9504 SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9505 recog_memoized (dfa_pre_cycle_insn);
9506 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9507 SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9508 recog_memoized (dfa_stop_insn);
9511 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9512 used by the DFA insn scheduler. */
9514 static rtx
9515 ia64_dfa_pre_cycle_insn (void)
9517 return dfa_pre_cycle_insn;
9520 /* The following function returns TRUE if PRODUCER (of type ilog or
9521 ld) produces address for CONSUMER (of type st or stf). */
9524 ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9526 rtx dest, reg, mem;
9528 gcc_assert (producer && consumer);
9529 dest = ia64_single_set (producer);
9530 gcc_assert (dest);
9531 reg = SET_DEST (dest);
9532 gcc_assert (reg);
9533 if (GET_CODE (reg) == SUBREG)
9534 reg = SUBREG_REG (reg);
9535 gcc_assert (GET_CODE (reg) == REG);
9537 dest = ia64_single_set (consumer);
9538 gcc_assert (dest);
9539 mem = SET_DEST (dest);
9540 gcc_assert (mem && GET_CODE (mem) == MEM);
9541 return reg_mentioned_p (reg, mem);
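/* Hypothetical example (not taken from real output): if PRODUCER is
   "add r14 = r2, r3" and CONSUMER is "st8 [r14] = r5", the producer's
   destination r14 is mentioned in the consumer's address, so the
   function returns TRUE and the store-address bypass declared in the
   itanium[12].md files applies.  */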
9544 /* The following function returns TRUE if PRODUCER (of type ilog or
9545 ld) produces address for CONSUMER (of type ld or fld). */
9548 ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9550 rtx dest, src, reg, mem;
9552 gcc_assert (producer && consumer);
9553 dest = ia64_single_set (producer);
9554 gcc_assert (dest);
9555 reg = SET_DEST (dest);
9556 gcc_assert (reg);
9557 if (GET_CODE (reg) == SUBREG)
9558 reg = SUBREG_REG (reg);
9559 gcc_assert (GET_CODE (reg) == REG);
9561 src = ia64_single_set (consumer);
9562 gcc_assert (src);
9563 mem = SET_SRC (src);
9564 gcc_assert (mem);
9566 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9567 mem = XVECEXP (mem, 0, 0);
9568 else if (GET_CODE (mem) == IF_THEN_ELSE)
9569 /* ??? Is this bypass necessary for ld.c? */
9571 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9572 mem = XEXP (mem, 1);
9575 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9576 mem = XEXP (mem, 0);
9578 if (GET_CODE (mem) == UNSPEC)
9580 int c = XINT (mem, 1);
9582 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9583 || c == UNSPEC_LDSA);
9584 mem = XVECEXP (mem, 0, 0);
9587 /* Note that LO_SUM is used for GOT loads. */
9588 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9590 return reg_mentioned_p (reg, mem);
9593 /* The following function returns TRUE if INSN produces an address for
9594 a load/store insn. We will place such insns into an M slot because
9595 that decreases their latency. */
9598 ia64_produce_address_p (rtx insn)
9600 return insn->call;
9604 /* Emit pseudo-ops for the assembler to describe predicate relations.
9605 At present this assumes that we only consider predicate pairs to
9606 be mutex, and that the assembler can deduce proper values from
9607 straight-line code. */
9609 static void
9610 emit_predicate_relation_info (void)
9612 basic_block bb;
9614 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9616 int r;
9617 rtx_insn *head = BB_HEAD (bb);
9619 /* We only need such notes at code labels. */
9620 if (! LABEL_P (head))
9621 continue;
9622 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9623 head = NEXT_INSN (head);
9625 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9626 grabbing the entire block of predicate registers. */
9627 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9628 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9630 rtx p = gen_rtx_REG (BImode, r);
9631 rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
9632 if (head == BB_END (bb))
9633 BB_END (bb) = n;
9634 head = n;
9638 /* Look for conditional calls that do not return, and protect predicate
9639 relations around them. Otherwise the assembler will assume the call
9640 returns, and complain about uses of call-clobbered predicates after
9641 the call. */
9642 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9644 rtx_insn *insn = BB_HEAD (bb);
9646 while (1)
9648 if (CALL_P (insn)
9649 && GET_CODE (PATTERN (insn)) == COND_EXEC
9650 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9652 rtx_insn *b =
9653 emit_insn_before (gen_safe_across_calls_all (), insn);
9654 rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9655 if (BB_HEAD (bb) == insn)
9656 BB_HEAD (bb) = b;
9657 if (BB_END (bb) == insn)
9658 BB_END (bb) = a;
9661 if (insn == BB_END (bb))
9662 break;
9663 insn = NEXT_INSN (insn);
9668 /* Perform machine dependent operations on the rtl chain INSNS. */
9670 static void
9671 ia64_reorg (void)
9673 /* We are freeing block_for_insn in the toplev to keep compatibility
9674 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9675 compute_bb_for_insn ();
9677 /* If optimizing, we'll have split before scheduling. */
9678 if (optimize == 0)
9679 split_all_insns ();
9681 if (optimize && flag_schedule_insns_after_reload
9682 && dbg_cnt (ia64_sched2))
9684 basic_block bb;
9685 timevar_push (TV_SCHED2);
9686 ia64_final_schedule = 1;
9688 /* We can't let modulo-sched prevent us from scheduling any bbs,
9689 since we need the final schedule to produce bundle information. */
9690 FOR_EACH_BB_FN (bb, cfun)
9691 bb->flags &= ~BB_DISABLE_SCHEDULE;
9693 initiate_bundle_states ();
9694 ia64_nop = make_insn_raw (gen_nop ());
9695 SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
9696 recog_memoized (ia64_nop);
9697 clocks_length = get_max_uid () + 1;
9698 stops_p = XCNEWVEC (char, clocks_length);
9700 if (ia64_tune == PROCESSOR_ITANIUM2)
9702 pos_1 = get_cpu_unit_code ("2_1");
9703 pos_2 = get_cpu_unit_code ("2_2");
9704 pos_3 = get_cpu_unit_code ("2_3");
9705 pos_4 = get_cpu_unit_code ("2_4");
9706 pos_5 = get_cpu_unit_code ("2_5");
9707 pos_6 = get_cpu_unit_code ("2_6");
9708 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9709 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9710 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9711 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9712 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9713 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9714 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9715 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9716 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9717 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9718 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9719 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9720 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9721 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9722 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9723 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9724 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9725 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9726 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9727 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9729 else
9731 pos_1 = get_cpu_unit_code ("1_1");
9732 pos_2 = get_cpu_unit_code ("1_2");
9733 pos_3 = get_cpu_unit_code ("1_3");
9734 pos_4 = get_cpu_unit_code ("1_4");
9735 pos_5 = get_cpu_unit_code ("1_5");
9736 pos_6 = get_cpu_unit_code ("1_6");
9737 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9738 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9739 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9740 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9741 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9742 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9743 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9744 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9745 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9746 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9747 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9748 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9749 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9750 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9751 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9752 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9753 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9754 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9755 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9756 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9759 if (flag_selective_scheduling2
9760 && !maybe_skip_selective_scheduling ())
9761 run_selective_scheduling ();
9762 else
9763 schedule_ebbs ();
9765 /* Redo alignment computation, as it might have gone wrong. */
9766 compute_alignments ();
9768 /* We cannot reuse this one because it has been corrupted by the
9769 evil glat. */
9770 finish_bundle_states ();
9771 free (stops_p);
9772 stops_p = NULL;
9773 emit_insn_group_barriers (dump_file);
9775 ia64_final_schedule = 0;
9776 timevar_pop (TV_SCHED2);
9778 else
9779 emit_all_insn_group_barriers (dump_file);
9781 df_analyze ();
9783 /* A call must not be the last instruction in a function, so that the
9784 return address is still within the function and unwinding works
9785 properly. Note that IA-64 differs from dwarf2 on this point. */
9786 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9788 rtx_insn *insn;
9789 int saw_stop = 0;
9791 insn = get_last_insn ();
9792 if (! INSN_P (insn))
9793 insn = prev_active_insn (insn);
9794 if (insn)
9796 /* Skip over insns that expand to nothing. */
9797 while (NONJUMP_INSN_P (insn)
9798 && get_attr_empty (insn) == EMPTY_YES)
9800 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9801 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9802 saw_stop = 1;
9803 insn = prev_active_insn (insn);
9805 if (CALL_P (insn))
9807 if (! saw_stop)
9808 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9809 emit_insn (gen_break_f ());
9810 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9815 emit_predicate_relation_info ();
9817 if (flag_var_tracking)
9819 timevar_push (TV_VAR_TRACKING);
9820 variable_tracking_main ();
9821 timevar_pop (TV_VAR_TRACKING);
9823 df_finish_pass (false);
9826 /* Return true if REGNO is used by the epilogue. */
9828 int
9829 ia64_epilogue_uses (int regno)
9831 switch (regno)
9833 case R_GR (1):
9834 /* With a call to a function in another module, we will write a new
9835 value to "gp". After returning from such a call, we need to make
9836 sure the function restores the original gp-value, even if the
9837 function itself does not use the gp anymore. */
9838 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9840 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9841 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9842 /* For functions defined with the syscall_linkage attribute, all
9843 input registers are marked as live at all function exits. This
9844 prevents the register allocator from using the input registers,
9845 which in turn makes it possible to restart a system call after
9846 an interrupt without having to save/restore the input registers.
9847 This also prevents kernel data from leaking to application code. */
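/* Illustrative example (an assumption, not from the original file): a
   kernel entry point declared as
       long sys_foo (long, long) __attribute__ ((syscall_linkage));
   keeps in0-in7 live at every return of its body, so the incoming
   argument values survive an interrupted and restarted system call.  */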
9848 return lookup_attribute ("syscall_linkage",
9849 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9851 case R_BR (0):
9852 /* Conditional return patterns can't represent the use of `b0' as
9853 the return address, so we force the value live this way. */
9854 return 1;
9856 case AR_PFS_REGNUM:
9857 /* Likewise for ar.pfs, which is used by br.ret. */
9858 return 1;
9860 default:
9861 return 0;
9865 /* Return true if REGNO is used by the frame unwinder. */
9867 int
9868 ia64_eh_uses (int regno)
9870 unsigned int r;
9872 if (! reload_completed)
9873 return 0;
9875 if (regno == 0)
9876 return 0;
9878 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9879 if (regno == current_frame_info.r[r]
9880 || regno == emitted_frame_related_regs[r])
9881 return 1;
9883 return 0;
9886 /* Return true if this goes in small data/bss. */
9888 /* ??? We could also support own long data here, generating movl/add/ld8
9889 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9890 code faster because there is one less load. This would also cover
9891 incomplete types, which can't go in sdata/sbss. */
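/* Illustrative example (not part of the original sources): with the
   default small-data threshold a definition such as
       static int counter;
   lands in .sbss and is reached with a single gp-relative add
   (addl rN = @gprel(counter), gp) plus one load, avoiding the extra
   load through the GOT that out-of-range objects require.  */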
9893 static bool
9894 ia64_in_small_data_p (const_tree exp)
9896 if (TARGET_NO_SDATA)
9897 return false;
9899 /* We want to merge strings, so we never consider them small data. */
9900 if (TREE_CODE (exp) == STRING_CST)
9901 return false;
9903 /* Functions are never small data. */
9904 if (TREE_CODE (exp) == FUNCTION_DECL)
9905 return false;
9907 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9909 const char *section = DECL_SECTION_NAME (exp);
9911 if (strcmp (section, ".sdata") == 0
9912 || strncmp (section, ".sdata.", 7) == 0
9913 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9914 || strcmp (section, ".sbss") == 0
9915 || strncmp (section, ".sbss.", 6) == 0
9916 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9917 return true;
9919 else
9921 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9923 /* If this is an incomplete type with size 0, then we can't put it
9924 in sdata because it might be too big when completed. */
9925 if (size > 0 && size <= ia64_section_threshold)
9926 return true;
9929 return false;
9932 /* Output assembly directives for prologue regions. */
9934 /* True if the current basic block is the last block of the function. */
9936 static bool last_block;
9938 /* True if we need a copy_state command at the start of the next block. */
9940 static bool need_copy_state;
9942 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9943 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9944 #endif
9946 /* The function emits unwind directives for the start of an epilogue. */
9948 static void
9949 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9950 bool unwind, bool frame ATTRIBUTE_UNUSED)
9952 /* If this isn't the last block of the function, then we need to label the
9953 current state, and copy it back in at the start of the next block. */
9955 if (!last_block)
9957 if (unwind)
9958 fprintf (asm_out_file, "\t.label_state %d\n",
9959 ++cfun->machine->state_num);
9960 need_copy_state = true;
9963 if (unwind)
9964 fprintf (asm_out_file, "\t.restore sp\n");
9967 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
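/* For instance (illustrative only): a prologue adjustment
       (set (reg sp) (plus (reg sp) (const_int -64)))
   carrying a REG_CFA_ADJUST_CFA note emits "\t.fframe 64", while the
   matching epilogue restore (sp = sp + 64, or sp = fp when a frame
   pointer is in use) is routed through process_epilogue and emits
   "\t.restore sp".  */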
9969 static void
9970 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9971 bool unwind, bool frame)
9973 rtx dest = SET_DEST (pat);
9974 rtx src = SET_SRC (pat);
9976 if (dest == stack_pointer_rtx)
9978 if (GET_CODE (src) == PLUS)
9980 rtx op0 = XEXP (src, 0);
9981 rtx op1 = XEXP (src, 1);
9983 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9985 if (INTVAL (op1) < 0)
9987 gcc_assert (!frame_pointer_needed);
9988 if (unwind)
9989 fprintf (asm_out_file,
9990 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9991 -INTVAL (op1));
9993 else
9994 process_epilogue (asm_out_file, insn, unwind, frame);
9996 else
9998 gcc_assert (src == hard_frame_pointer_rtx);
9999 process_epilogue (asm_out_file, insn, unwind, frame);
10002 else if (dest == hard_frame_pointer_rtx)
10004 gcc_assert (src == stack_pointer_rtx);
10005 gcc_assert (frame_pointer_needed);
10007 if (unwind)
10008 fprintf (asm_out_file, "\t.vframe r%d\n",
10009 ia64_dbx_register_number (REGNO (dest)));
10011 else
10012 gcc_unreachable ();
10015 /* This function processes a SET pattern for REG_CFA_REGISTER. */
10017 static void
10018 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
10020 rtx dest = SET_DEST (pat);
10021 rtx src = SET_SRC (pat);
10022 int dest_regno = REGNO (dest);
10023 int src_regno;
10025 if (src == pc_rtx)
10027 /* Saving return address pointer. */
10028 if (unwind)
10029 fprintf (asm_out_file, "\t.save rp, r%d\n",
10030 ia64_dbx_register_number (dest_regno));
10031 return;
10034 src_regno = REGNO (src);
10036 switch (src_regno)
10038 case PR_REG (0):
10039 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
10040 if (unwind)
10041 fprintf (asm_out_file, "\t.save pr, r%d\n",
10042 ia64_dbx_register_number (dest_regno));
10043 break;
10045 case AR_UNAT_REGNUM:
10046 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
10047 if (unwind)
10048 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
10049 ia64_dbx_register_number (dest_regno));
10050 break;
10052 case AR_LC_REGNUM:
10053 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
10054 if (unwind)
10055 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
10056 ia64_dbx_register_number (dest_regno));
10057 break;
10059 default:
10060 /* Everything else should indicate being stored to memory. */
10061 gcc_unreachable ();
10065 /* This function processes a SET pattern for REG_CFA_OFFSET. */
10067 static void
10068 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
10070 rtx dest = SET_DEST (pat);
10071 rtx src = SET_SRC (pat);
10072 int src_regno = REGNO (src);
10073 const char *saveop;
10074 HOST_WIDE_INT off;
10075 rtx base;
10077 gcc_assert (MEM_P (dest));
10078 if (GET_CODE (XEXP (dest, 0)) == REG)
10080 base = XEXP (dest, 0);
10081 off = 0;
10083 else
10085 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
10086 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
10087 base = XEXP (XEXP (dest, 0), 0);
10088 off = INTVAL (XEXP (XEXP (dest, 0), 1));
10091 if (base == hard_frame_pointer_rtx)
10093 saveop = ".savepsp";
10094 off = - off;
10096 else
10098 gcc_assert (base == stack_pointer_rtx);
10099 saveop = ".savesp";
10102 src_regno = REGNO (src);
10103 switch (src_regno)
10105 case BR_REG (0):
10106 gcc_assert (!current_frame_info.r[reg_save_b0]);
10107 if (unwind)
10108 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
10109 saveop, off);
10110 break;
10112 case PR_REG (0):
10113 gcc_assert (!current_frame_info.r[reg_save_pr]);
10114 if (unwind)
10115 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
10116 saveop, off);
10117 break;
10119 case AR_LC_REGNUM:
10120 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
10121 if (unwind)
10122 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
10123 saveop, off);
10124 break;
10126 case AR_PFS_REGNUM:
10127 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
10128 if (unwind)
10129 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
10130 saveop, off);
10131 break;
10133 case AR_UNAT_REGNUM:
10134 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
10135 if (unwind)
10136 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
10137 saveop, off);
10138 break;
10140 case GR_REG (4):
10141 case GR_REG (5):
10142 case GR_REG (6):
10143 case GR_REG (7):
10144 if (unwind)
10145 fprintf (asm_out_file, "\t.save.g 0x%x\n",
10146 1 << (src_regno - GR_REG (4)));
10147 break;
10149 case BR_REG (1):
10150 case BR_REG (2):
10151 case BR_REG (3):
10152 case BR_REG (4):
10153 case BR_REG (5):
10154 if (unwind)
10155 fprintf (asm_out_file, "\t.save.b 0x%x\n",
10156 1 << (src_regno - BR_REG (1)));
10157 break;
10159 case FR_REG (2):
10160 case FR_REG (3):
10161 case FR_REG (4):
10162 case FR_REG (5):
10163 if (unwind)
10164 fprintf (asm_out_file, "\t.save.f 0x%x\n",
10165 1 << (src_regno - FR_REG (2)));
10166 break;
10168 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
10169 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10170 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10171 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10172 if (unwind)
10173 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10174 1 << (src_regno - FR_REG (12)));
10175 break;
10177 default:
10178 /* ??? For some reason we mark other general registers, even those
10179 we can't represent in the unwind info. Ignore them. */
10180 break;
10184 /* This function looks at a single insn and emits any directives
10185 required to unwind this insn. */
10187 static void
10188 ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
10190 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10191 bool frame = dwarf2out_do_frame ();
10192 rtx note, pat;
10193 bool handled_one;
10195 if (!unwind && !frame)
10196 return;
10198 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10200 last_block = NOTE_BASIC_BLOCK (insn)->next_bb
10201 == EXIT_BLOCK_PTR_FOR_FN (cfun);
10203 /* Restore unwind state from immediately before the epilogue. */
10204 if (need_copy_state)
10206 if (unwind)
10208 fprintf (asm_out_file, "\t.body\n");
10209 fprintf (asm_out_file, "\t.copy_state %d\n",
10210 cfun->machine->state_num);
10212 need_copy_state = false;
10216 if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10217 return;
10219 /* Look for the ALLOC insn. */
10220 if (INSN_CODE (insn) == CODE_FOR_alloc)
10222 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10223 int dest_regno = REGNO (dest);
10225 /* If this is the final destination for ar.pfs, then this must
10226 be the alloc in the prologue. */
10227 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10229 if (unwind)
10230 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10231 ia64_dbx_register_number (dest_regno));
10233 else
10235 /* This must be an alloc before a sibcall. We must drop the
10236 old frame info. The easiest way to drop the old frame
10237 info is to ensure we had a ".restore sp" directive
10238 followed by a new prologue. If the procedure doesn't
10239 have a memory-stack frame, we'll issue a dummy ".restore
10240 sp" now. */
10241 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10242 /* If we haven't done process_epilogue () yet, do it now. */
10243 process_epilogue (asm_out_file, insn, unwind, frame);
10244 if (unwind)
10245 fprintf (asm_out_file, "\t.prologue\n");
10247 return;
10250 handled_one = false;
10251 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10252 switch (REG_NOTE_KIND (note))
10254 case REG_CFA_ADJUST_CFA:
10255 pat = XEXP (note, 0);
10256 if (pat == NULL)
10257 pat = PATTERN (insn);
10258 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10259 handled_one = true;
10260 break;
10262 case REG_CFA_OFFSET:
10263 pat = XEXP (note, 0);
10264 if (pat == NULL)
10265 pat = PATTERN (insn);
10266 process_cfa_offset (asm_out_file, pat, unwind);
10267 handled_one = true;
10268 break;
10270 case REG_CFA_REGISTER:
10271 pat = XEXP (note, 0);
10272 if (pat == NULL)
10273 pat = PATTERN (insn);
10274 process_cfa_register (asm_out_file, pat, unwind);
10275 handled_one = true;
10276 break;
10278 case REG_FRAME_RELATED_EXPR:
10279 case REG_CFA_DEF_CFA:
10280 case REG_CFA_EXPRESSION:
10281 case REG_CFA_RESTORE:
10282 case REG_CFA_SET_VDRAP:
10283 /* Not used in the ia64 port. */
10284 gcc_unreachable ();
10286 default:
10287 /* Not a frame-related note. */
10288 break;
10291 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10292 explicit action to take. No guessing required. */
10293 gcc_assert (handled_one);
10296 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10298 static void
10299 ia64_asm_emit_except_personality (rtx personality)
10301 fputs ("\t.personality\t", asm_out_file);
10302 output_addr_const (asm_out_file, personality);
10303 fputc ('\n', asm_out_file);
10306 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10308 static void
10309 ia64_asm_init_sections (void)
10311 exception_section = get_unnamed_section (0, output_section_asm_op,
10312 "\t.handlerdata");
10315 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10317 static enum unwind_info_type
10318 ia64_debug_unwind_info (void)
10320 return UI_TARGET;
10323 enum ia64_builtins
10325 IA64_BUILTIN_BSP,
10326 IA64_BUILTIN_COPYSIGNQ,
10327 IA64_BUILTIN_FABSQ,
10328 IA64_BUILTIN_FLUSHRS,
10329 IA64_BUILTIN_INFQ,
10330 IA64_BUILTIN_HUGE_VALQ,
10331 IA64_BUILTIN_max
10334 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10336 void
10337 ia64_init_builtins (void)
10339 tree fpreg_type;
10340 tree float80_type;
10341 tree decl;
10343 /* The __fpreg type. */
10344 fpreg_type = make_node (REAL_TYPE);
10345 TYPE_PRECISION (fpreg_type) = 82;
10346 layout_type (fpreg_type);
10347 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10349 /* The __float80 type. */
10350 float80_type = make_node (REAL_TYPE);
10351 TYPE_PRECISION (float80_type) = 80;
10352 layout_type (float80_type);
10353 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10355 /* The __float128 type. */
10356 if (!TARGET_HPUX)
10358 tree ftype;
10359 tree float128_type = make_node (REAL_TYPE);
10361 TYPE_PRECISION (float128_type) = 128;
10362 layout_type (float128_type);
10363 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10365 /* TFmode support builtins. */
10366 ftype = build_function_type_list (float128_type, NULL_TREE);
10367 decl = add_builtin_function ("__builtin_infq", ftype,
10368 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10369 NULL, NULL_TREE);
10370 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10372 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10373 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10374 NULL, NULL_TREE);
10375 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10377 ftype = build_function_type_list (float128_type,
10378 float128_type,
10379 NULL_TREE);
10380 decl = add_builtin_function ("__builtin_fabsq", ftype,
10381 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10382 "__fabstf2", NULL_TREE);
10383 TREE_READONLY (decl) = 1;
10384 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10386 ftype = build_function_type_list (float128_type,
10387 float128_type,
10388 float128_type,
10389 NULL_TREE);
10390 decl = add_builtin_function ("__builtin_copysignq", ftype,
10391 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10392 "__copysigntf3", NULL_TREE);
10393 TREE_READONLY (decl) = 1;
10394 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10396 else
10397 /* Under HPUX, this is a synonym for "long double". */
10398 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10399 "__float128");
10401 /* Fwrite on VMS is non-standard. */
10402 #if TARGET_ABI_OPEN_VMS
10403 vms_patch_builtins ();
10404 #endif
10406 #define def_builtin(name, type, code) \
10407 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10408 NULL, NULL_TREE)
10410 decl = def_builtin ("__builtin_ia64_bsp",
10411 build_function_type_list (ptr_type_node, NULL_TREE),
10412 IA64_BUILTIN_BSP);
10413 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10415 decl = def_builtin ("__builtin_ia64_flushrs",
10416 build_function_type_list (void_type_node, NULL_TREE),
10417 IA64_BUILTIN_FLUSHRS);
10418 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10420 #undef def_builtin
10422 if (TARGET_HPUX)
10424 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10425 set_user_assembler_name (decl, "_Isfinite");
10426 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10427 set_user_assembler_name (decl, "_Isfinitef");
10428 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10429 set_user_assembler_name (decl, "_Isfinitef128");
10433 rtx
10434 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10435 enum machine_mode mode ATTRIBUTE_UNUSED,
10436 int ignore ATTRIBUTE_UNUSED)
10438 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10439 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10441 switch (fcode)
10443 case IA64_BUILTIN_BSP:
10444 if (! target || ! register_operand (target, DImode))
10445 target = gen_reg_rtx (DImode);
10446 emit_insn (gen_bsp_value (target));
10447 #ifdef POINTERS_EXTEND_UNSIGNED
10448 target = convert_memory_address (ptr_mode, target);
10449 #endif
10450 return target;
10452 case IA64_BUILTIN_FLUSHRS:
10453 emit_insn (gen_flushrs ());
10454 return const0_rtx;
10456 case IA64_BUILTIN_INFQ:
10457 case IA64_BUILTIN_HUGE_VALQ:
10459 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10460 REAL_VALUE_TYPE inf;
10461 rtx tmp;
10463 real_inf (&inf);
10464 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10466 tmp = validize_mem (force_const_mem (target_mode, tmp));
10468 if (target == 0)
10469 target = gen_reg_rtx (target_mode);
10471 emit_move_insn (target, tmp);
10472 return target;
10475 case IA64_BUILTIN_FABSQ:
10476 case IA64_BUILTIN_COPYSIGNQ:
10477 return expand_call (exp, target, ignore);
10479 default:
10480 gcc_unreachable ();
10483 return NULL_RTX;
10486 /* Return the ia64 builtin for CODE. */
10488 static tree
10489 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10491 if (code >= IA64_BUILTIN_max)
10492 return error_mark_node;
10494 return ia64_builtins[code];
10497 /* On HP-UX IA64, aggregate parameters are passed stored in the
10498 most significant bits of the stack slot. */
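/* For example (illustrative only): a 3-byte struct passed by value on
   HP-UX occupies the three most significant bytes of its 8-byte slot,
   hence the "upward" padding returned below for aggregates smaller
   than a word.  */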
10500 enum direction
10501 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10503 /* Exception to normal case for structures/unions/etc. */
10505 if (type && AGGREGATE_TYPE_P (type)
10506 && int_size_in_bytes (type) < UNITS_PER_WORD)
10507 return upward;
10509 /* Fall back to the default. */
10510 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10513 /* Emit text to declare externally defined variables and functions, because
10514 the Intel assembler does not support undefined externals. */
10516 void
10517 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10519 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10520 set in order to avoid putting out names that are never really
10521 used. */
10522 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10524 /* maybe_assemble_visibility will return 1 if the assembler
10525 visibility directive is output. */
10526 int need_visibility = ((*targetm.binds_local_p) (decl)
10527 && maybe_assemble_visibility (decl));
10529 /* GNU as does not need anything here, but the HP linker does
10530 need something for external functions. */
10531 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10532 && TREE_CODE (decl) == FUNCTION_DECL)
10533 (*targetm.asm_out.globalize_decl_name) (file, decl);
10534 else if (need_visibility && !TARGET_GNU_AS)
10535 (*targetm.asm_out.globalize_label) (file, name);
10539 /* Set SImode div/mod functions, since init_integral_libfuncs only
10540 initializes modes of word_mode and larger. Rename the TFmode libfuncs
10541 using the HPUX conventions. __divtf3 is used for XFmode. We need to
10542 keep it for backward compatibility. */
10544 static void
10545 ia64_init_libfuncs (void)
10547 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10548 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10549 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10550 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10552 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10553 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10554 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10555 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10556 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10558 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10559 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10560 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10561 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10562 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10563 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10565 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10566 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10567 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10568 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10569 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10571 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10572 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10573 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10574 /* HP-UX 11.23 libc does not have a function for unsigned
10575 SImode-to-TFmode conversion. */
10576 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10579 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10581 static void
10582 ia64_hpux_init_libfuncs (void)
10584 ia64_init_libfuncs ();
10586 /* The HP SI millicode division and mod functions expect DI arguments.
10587 By turning them off completely we avoid using both libgcc and the
10588 non-standard millicode routines and use the HP DI millicode routines
10589 instead. */
10591 set_optab_libfunc (sdiv_optab, SImode, 0);
10592 set_optab_libfunc (udiv_optab, SImode, 0);
10593 set_optab_libfunc (smod_optab, SImode, 0);
10594 set_optab_libfunc (umod_optab, SImode, 0);
10596 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10597 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10598 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10599 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10601 /* HP-UX libc has TF min/max/abs routines in it. */
10602 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10603 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10604 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10606 /* ia64_expand_compare uses this. */
10607 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10609 /* These should never be used. */
10610 set_optab_libfunc (eq_optab, TFmode, 0);
10611 set_optab_libfunc (ne_optab, TFmode, 0);
10612 set_optab_libfunc (gt_optab, TFmode, 0);
10613 set_optab_libfunc (ge_optab, TFmode, 0);
10614 set_optab_libfunc (lt_optab, TFmode, 0);
10615 set_optab_libfunc (le_optab, TFmode, 0);
10618 /* Rename the division and modulus functions in VMS. */
10620 static void
10621 ia64_vms_init_libfuncs (void)
10623 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10624 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10625 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10626 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10627 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10628 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10629 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10630 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10631 abort_libfunc = init_one_libfunc ("decc$abort");
10632 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10633 #ifdef MEM_LIBFUNCS_INIT
10634 MEM_LIBFUNCS_INIT;
10635 #endif
10638 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10639 the HPUX conventions. */
10641 static void
10642 ia64_sysv4_init_libfuncs (void)
10644 ia64_init_libfuncs ();
10646 /* These functions are not part of the HPUX TFmode interface. We
10647 use them instead of _U_Qfcmp, which doesn't work the way we
10648 expect. */
10649 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10650 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10651 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10652 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10653 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10654 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10656 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10657 glibc doesn't have them. */
10660 /* Use soft-fp. */
10662 static void
10663 ia64_soft_fp_init_libfuncs (void)
10667 static bool
10668 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10670 return (mode == SImode || mode == DImode);
10673 /* For HPUX, it is illegal to have relocations in shared segments. */
10675 static int
10676 ia64_hpux_reloc_rw_mask (void)
10678 return 3;
10681 /* For others, relax this so that relocations to local data go in
10682 read-only segments, but we still cannot allow global relocations
10683 in read-only segments. */
10685 static int
10686 ia64_reloc_rw_mask (void)
10688 return flag_pic ? 3 : 2;
10691 /* Return the section to use for X. The only special thing we do here
10692 is to honor small data. */
10694 static section *
10695 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10696 unsigned HOST_WIDE_INT align)
10698 if (GET_MODE_SIZE (mode) > 0
10699 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10700 && !TARGET_NO_SDATA)
10701 return sdata_section;
10702 else
10703 return default_elf_select_rtx_section (mode, x, align);
10706 static unsigned int
10707 ia64_section_type_flags (tree decl, const char *name, int reloc)
10709 unsigned int flags = 0;
10711 if (strcmp (name, ".sdata") == 0
10712 || strncmp (name, ".sdata.", 7) == 0
10713 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10714 || strncmp (name, ".sdata2.", 8) == 0
10715 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10716 || strcmp (name, ".sbss") == 0
10717 || strncmp (name, ".sbss.", 6) == 0
10718 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10719 flags = SECTION_SMALL;
10721 flags |= default_section_type_flags (decl, name, reloc);
10722 return flags;
10725 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10726 structure type and that the address of that type should be passed
10727 in out0, rather than in r8. */
10729 static bool
10730 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10732 tree ret_type = TREE_TYPE (fntype);
10734 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10735 as the structure return address parameter, if the return value
10736 type has a non-trivial copy constructor or destructor. It is not
10737 clear if this same convention should be used for other
10738 programming languages. Until G++ 3.4, we incorrectly used r8 for
10739 these return values. */
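/* For example (illustrative, not from the original file): given
       struct S { S (const S &); ~S (); int i; };
       S f (void);
   S is non-trivial for purposes of calls, so the return-slot address
   for f is passed in out0, and the "this" pointer of a virtual thunk
   returning S arrives in the second argument slot instead of the first
   (see ia64_output_mi_thunk).  */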
10740 return (abi_version_at_least (2)
10741 && ret_type
10742 && TYPE_MODE (ret_type) == BLKmode
10743 && TREE_ADDRESSABLE (ret_type)
10744 && strcmp (lang_hooks.name, "GNU C++") == 0);
10747 /* Output the assembler code for a thunk function. THUNK_DECL is the
10748 declaration for the thunk function itself, FUNCTION is the decl for
10749 the target function. DELTA is an immediate constant offset to be
10750 added to THIS. If VCALL_OFFSET is nonzero, the word at
10751 *(*this + vcall_offset) should be added to THIS. */
10753 static void
10754 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10755 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10756 tree function)
10758 rtx this_rtx, funexp;
10759 rtx_insn *insn;
10760 unsigned int this_parmno;
10761 unsigned int this_regno;
10762 rtx delta_rtx;
10764 reload_completed = 1;
10765 epilogue_completed = 1;
10767 /* Set things up as ia64_expand_prologue might. */
10768 last_scratch_gr_reg = 15;
10770 memset (&current_frame_info, 0, sizeof (current_frame_info));
10771 current_frame_info.spill_cfa_off = -16;
10772 current_frame_info.n_input_regs = 1;
10773 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10775 /* Mark the end of the (empty) prologue. */
10776 emit_note (NOTE_INSN_PROLOGUE_END);
10778 /* Figure out whether "this" will be the first parameter (the
10779 typical case) or the second parameter (as happens when the
10780 virtual function returns certain class objects). */
10781 this_parmno
10782 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10783 ? 1 : 0);
10784 this_regno = IN_REG (this_parmno);
10785 if (!TARGET_REG_NAMES)
10786 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10788 this_rtx = gen_rtx_REG (Pmode, this_regno);
10790 /* Apply the constant offset, if required. */
10791 delta_rtx = GEN_INT (delta);
10792 if (TARGET_ILP32)
10794 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10795 REG_POINTER (tmp) = 1;
10796 if (delta && satisfies_constraint_I (delta_rtx))
10798 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10799 delta = 0;
10801 else
10802 emit_insn (gen_ptr_extend (this_rtx, tmp));
10804 if (delta)
10806 if (!satisfies_constraint_I (delta_rtx))
10808 rtx tmp = gen_rtx_REG (Pmode, 2);
10809 emit_move_insn (tmp, delta_rtx);
10810 delta_rtx = tmp;
10812 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10815 /* Apply the offset from the vtable, if required. */
10816 if (vcall_offset)
10818 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10819 rtx tmp = gen_rtx_REG (Pmode, 2);
10821 if (TARGET_ILP32)
10823 rtx t = gen_rtx_REG (ptr_mode, 2);
10824 REG_POINTER (t) = 1;
10825 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10826 if (satisfies_constraint_I (vcall_offset_rtx))
10828 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10829 vcall_offset = 0;
10831 else
10832 emit_insn (gen_ptr_extend (tmp, t));
10834 else
10835 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10837 if (vcall_offset)
10839 if (!satisfies_constraint_J (vcall_offset_rtx))
10841 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10842 emit_move_insn (tmp2, vcall_offset_rtx);
10843 vcall_offset_rtx = tmp2;
10845 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10848 if (TARGET_ILP32)
10849 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10850 else
10851 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10853 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10856 /* Generate a tail call to the target function. */
10857 if (! TREE_USED (function))
10859 assemble_external (function);
10860 TREE_USED (function) = 1;
10862 funexp = XEXP (DECL_RTL (function), 0);
10863 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10864 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10865 insn = get_last_insn ();
10866 SIBLING_CALL_P (insn) = 1;
10868 /* Code generation for calls relies on splitting. */
10869 reload_completed = 1;
10870 epilogue_completed = 1;
10871 try_split (PATTERN (insn), insn, 0);
10873 emit_barrier ();
10875 /* Run just enough of rest_of_compilation to get the insns emitted.
10876 There's not really enough bulk here to make other passes such as
10877 instruction scheduling worth while. Note that use_thunk calls
10878 assemble_start_function and assemble_end_function. */
10880 emit_all_insn_group_barriers (NULL);
10881 insn = get_insns ();
10882 shorten_branches (insn);
10883 final_start_function (insn, file, 1);
10884 final (insn, file, 1);
10885 final_end_function ();
10887 reload_completed = 0;
10888 epilogue_completed = 0;
10891 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10893 static rtx
10894 ia64_struct_value_rtx (tree fntype,
10895 int incoming ATTRIBUTE_UNUSED)
10897 if (TARGET_ABI_OPEN_VMS ||
10898 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10899 return NULL_RTX;
10900 return gen_rtx_REG (Pmode, GR_REG (8));
10903 static bool
10904 ia64_scalar_mode_supported_p (enum machine_mode mode)
10906 switch (mode)
10908 case QImode:
10909 case HImode:
10910 case SImode:
10911 case DImode:
10912 case TImode:
10913 return true;
10915 case SFmode:
10916 case DFmode:
10917 case XFmode:
10918 case RFmode:
10919 return true;
10921 case TFmode:
10922 return true;
10924 default:
10925 return false;
10929 static bool
10930 ia64_vector_mode_supported_p (enum machine_mode mode)
10932 switch (mode)
10934 case V8QImode:
10935 case V4HImode:
10936 case V2SImode:
10937 return true;
10939 case V2SFmode:
10940 return true;
10942 default:
10943 return false;
10947 /* Implement TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P. */
10949 static bool
10950 ia64_libgcc_floating_mode_supported_p (enum machine_mode mode)
10952 switch (mode)
10954 case SFmode:
10955 case DFmode:
10956 return true;
10958 case XFmode:
10959 #ifdef IA64_NO_LIBGCC_XFMODE
10960 return false;
10961 #else
10962 return true;
10963 #endif
10965 case TFmode:
10966 #ifdef IA64_NO_LIBGCC_TFMODE
10967 return false;
10968 #else
10969 return true;
10970 #endif
10972 default:
10973 return false;
10977 /* Implement the FUNCTION_PROFILER macro. */
10979 void
10980 ia64_output_function_profiler (FILE *file, int labelno)
10982 bool indirect_call;
10984 /* If the function needs a static chain and the static chain
10985 register is r15, we use an indirect call so as to bypass
10986 the PLT stub in case the executable is dynamically linked,
10987 because the stub clobbers r15 as per 5.3.6 of the psABI.
10988 We don't need to do that in non canonical PIC mode. */
10990 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10992 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10993 indirect_call = true;
10995 else
10996 indirect_call = false;
10998 if (TARGET_GNU_AS)
10999 fputs ("\t.prologue 4, r40\n", file);
11000 else
11001 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
11002 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
11004 if (NO_PROFILE_COUNTERS)
11005 fputs ("\tmov out3 = r0\n", file);
11006 else
11008 char buf[20];
11009 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11011 if (TARGET_AUTO_PIC)
11012 fputs ("\tmovl out3 = @gprel(", file);
11013 else
11014 fputs ("\taddl out3 = @ltoff(", file);
11015 assemble_name (file, buf);
11016 if (TARGET_AUTO_PIC)
11017 fputs (")\n", file);
11018 else
11019 fputs ("), r1\n", file);
11022 if (indirect_call)
11023 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
11024 fputs ("\t;;\n", file);
11026 fputs ("\t.save rp, r42\n", file);
11027 fputs ("\tmov out2 = b0\n", file);
11028 if (indirect_call)
11029 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
11030 fputs ("\t.body\n", file);
11031 fputs ("\tmov out1 = r1\n", file);
11032 if (indirect_call)
11034 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
11035 fputs ("\tmov b6 = r16\n", file);
11036 fputs ("\tld8 r1 = [r14]\n", file);
11037 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
11039 else
11040 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
11043 static GTY(()) rtx mcount_func_rtx;
11044 static rtx
11045 gen_mcount_func_rtx (void)
11047 if (!mcount_func_rtx)
11048 mcount_func_rtx = init_one_libfunc ("_mcount");
11049 return mcount_func_rtx;
11052 void
11053 ia64_profile_hook (int labelno)
11055 rtx label, ip;
11057 if (NO_PROFILE_COUNTERS)
11058 label = const0_rtx;
11059 else
11061 char buf[30];
11062 const char *label_name;
11063 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11064 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
11065 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
11066 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
11068 ip = gen_reg_rtx (Pmode);
11069 emit_insn (gen_ip_value (ip));
11070 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
11071 VOIDmode, 3,
11072 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
11073 ip, Pmode,
11074 label, Pmode);
11077 /* Return the mangling of TYPE if it is an extended fundamental type. */
11079 static const char *
11080 ia64_mangle_type (const_tree type)
11082 type = TYPE_MAIN_VARIANT (type);
11084 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
11085 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
11086 return NULL;
11088 /* On HP-UX, "long double" is mangled as "e" so __float128 is
11089 mangled as "e". */
11090 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
11091 return "g";
11092 /* On HP-UX, "e" is not available as a mangling of __float80 so use
11093 an extended mangling. Elsewhere, "e" is available since long
11094 double is 80 bits. */
11095 if (TYPE_MODE (type) == XFmode)
11096 return TARGET_HPUX ? "u9__float80" : "e";
11097 if (TYPE_MODE (type) == RFmode)
11098 return "u7__fpreg";
11099 return NULL;
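/* Illustrative examples of the resulting mangling (assumptions, not in
   the original sources): on GNU/Linux "void f (__float80)" mangles as
   _Z1fe and "void f (__float128)" as _Z1fg, while on HP-UX the same
   __float80 parameter mangles as _Z1fu9__float80; __fpreg always uses
   the vendor-extended code u7__fpreg.  */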
11102 /* Return the diagnostic message string if conversion from FROMTYPE to
11103 TOTYPE is not allowed, NULL otherwise. */
11104 static const char *
11105 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
11107 /* Reject nontrivial conversion to or from __fpreg. */
11108 if (TYPE_MODE (fromtype) == RFmode
11109 && TYPE_MODE (totype) != RFmode
11110 && TYPE_MODE (totype) != VOIDmode)
11111 return N_("invalid conversion from %<__fpreg%>");
11112 if (TYPE_MODE (totype) == RFmode
11113 && TYPE_MODE (fromtype) != RFmode)
11114 return N_("invalid conversion to %<__fpreg%>");
11115 return NULL;
11118 /* Return the diagnostic message string if the unary operation OP is
11119 not permitted on TYPE, NULL otherwise. */
11120 static const char *
11121 ia64_invalid_unary_op (int op, const_tree type)
11123 /* Reject operations on __fpreg other than unary + or &. */
11124 if (TYPE_MODE (type) == RFmode
11125 && op != CONVERT_EXPR
11126 && op != ADDR_EXPR)
11127 return N_("invalid operation on %<__fpreg%>");
11128 return NULL;
11131 /* Return the diagnostic message string if the binary operation OP is
11132 not permitted on TYPE1 and TYPE2, NULL otherwise. */
11133 static const char *
11134 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
11136 /* Reject operations on __fpreg. */
11137 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
11138 return N_("invalid operation on %<__fpreg%>");
11139 return NULL;
11142 /* HP-UX version_id attribute.
11143 For object foo, if the version_id is set to 1234, put out an alias
11144 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
11145 other than an alias statement because it is an illegal symbol name. */
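/* Hypothetical usage (illustrative only):
       extern int foo __attribute__ ((version_id ("1234")));
   which, per the scheme described above, is announced to the HP-UX
   linker as '.alias foo "foo{1234}"'.  */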
11147 static tree
11148 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
11149 tree name ATTRIBUTE_UNUSED,
11150 tree args,
11151 int flags ATTRIBUTE_UNUSED,
11152 bool *no_add_attrs)
11154 tree arg = TREE_VALUE (args);
11156 if (TREE_CODE (arg) != STRING_CST)
11158 error("version attribute is not a string");
11159 *no_add_attrs = true;
11160 return NULL_TREE;
11162 return NULL_TREE;
11165 /* Target hook for c_mode_for_suffix. */
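/* For example (illustrative only), with GNU extensions enabled the
   literal 1.0q has TFmode (__float128 where available) and 1.0w has
   XFmode (__float80).  */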
11167 static enum machine_mode
11168 ia64_c_mode_for_suffix (char suffix)
11170 if (suffix == 'q')
11171 return TFmode;
11172 if (suffix == 'w')
11173 return XFmode;
11175 return VOIDmode;
11178 static GTY(()) rtx ia64_dconst_0_5_rtx;
11180 rtx
11181 ia64_dconst_0_5 (void)
11183 if (! ia64_dconst_0_5_rtx)
11185 REAL_VALUE_TYPE rv;
11186 real_from_string (&rv, "0.5");
11187 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11189 return ia64_dconst_0_5_rtx;
11192 static GTY(()) rtx ia64_dconst_0_375_rtx;
11194 rtx
11195 ia64_dconst_0_375 (void)
11197 if (! ia64_dconst_0_375_rtx)
11199 REAL_VALUE_TYPE rv;
11200 real_from_string (&rv, "0.375");
11201 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11203 return ia64_dconst_0_375_rtx;
11206 static enum machine_mode
11207 ia64_get_reg_raw_mode (int regno)
11209 if (FR_REGNO_P (regno))
11210 return XFmode;
11211 return default_get_reg_raw_mode (regno);
11214 /* Implement TARGET_MEMBER_TYPE_FORCES_BLK. ??? Might not be needed
11215 anymore. */
11217 bool
11218 ia64_member_type_forces_blk (const_tree, enum machine_mode mode)
11220 return TARGET_HPUX && mode == TFmode;
11223 /* Always default to .text section until HP-UX linker is fixed. */
11225 ATTRIBUTE_UNUSED static section *
11226 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11227 enum node_frequency freq ATTRIBUTE_UNUSED,
11228 bool startup ATTRIBUTE_UNUSED,
11229 bool exit ATTRIBUTE_UNUSED)
11231 return NULL;
11234 /* Construct (set target (vec_select op0 (parallel perm))) and
11235 return true if that's a valid instruction in the active ISA. */
11237 static bool
11238 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11240 rtx rperm[MAX_VECT_LEN], x;
11241 unsigned i;
11243 for (i = 0; i < nelt; ++i)
11244 rperm[i] = GEN_INT (perm[i]);
11246 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11247 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11248 x = gen_rtx_SET (VOIDmode, target, x);
11250 rtx_insn *insn = emit_insn (x);
11251 if (recog_memoized (insn) < 0)
11253 remove_insn (insn);
11254 return false;
11256 return true;
11259 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11261 static bool
11262 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11263 const unsigned char *perm, unsigned nelt)
11265 enum machine_mode v2mode;
11266 rtx x;
11268 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
11269 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11270 return expand_vselect (target, x, perm, nelt);
11273 /* Try to expand a no-op permutation. */
11275 static bool
11276 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11278 unsigned i, nelt = d->nelt;
11280 for (i = 0; i < nelt; ++i)
11281 if (d->perm[i] != i)
11282 return false;
11284 if (!d->testing_p)
11285 emit_move_insn (d->target, d->op0);
11287 return true;
11290 /* Try to expand D via a shrp instruction. */
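/* Illustrative example (not from the original sources): for V8QImode
   the two-operand selector {1,2,3,4,5,6,7,8} picks a contiguous window
   of the 16-byte concatenation of the operands, so it is emitted below
   as a single 64-bit shrp with a shift count of 8 bits.  */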
11292 static bool
11293 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11295 unsigned i, nelt = d->nelt, shift, mask;
11296 rtx tmp, hi, lo;
11298 /* ??? Don't force V2SFmode into the integer registers. */
11299 if (d->vmode == V2SFmode)
11300 return false;
11302 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11304 shift = d->perm[0];
11305 if (BYTES_BIG_ENDIAN && shift > nelt)
11306 return false;
11308 for (i = 1; i < nelt; ++i)
11309 if (d->perm[i] != ((shift + i) & mask))
11310 return false;
11312 if (d->testing_p)
11313 return true;
11315 hi = shift < nelt ? d->op1 : d->op0;
11316 lo = shift < nelt ? d->op0 : d->op1;
11318 shift %= nelt;
11320 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11322 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11323 gcc_assert (IN_RANGE (shift, 1, 63));
11325 /* Recall that big-endian elements are numbered starting at the top of
11326 the register. Ideally we'd have a shift-left-pair. But since we
11327 don't, convert to a shift the other direction. */
11328 if (BYTES_BIG_ENDIAN)
11329 shift = 64 - shift;
11331 tmp = gen_reg_rtx (DImode);
11332 hi = gen_lowpart (DImode, hi);
11333 lo = gen_lowpart (DImode, lo);
11334 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11336 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11337 return true;
11340 /* Try to instantiate D in a single instruction. */
11342 static bool
11343 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11345 unsigned i, nelt = d->nelt;
11346 unsigned char perm2[MAX_VECT_LEN];
11348 /* Try single-operand selections. */
11349 if (d->one_operand_p)
11351 if (expand_vec_perm_identity (d))
11352 return true;
11353 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11354 return true;
11357 /* Try two operand selections. */
11358 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11359 return true;
11361 /* Recognize interleave style patterns with reversed operands. */
11362 if (!d->one_operand_p)
11364 for (i = 0; i < nelt; ++i)
11366 unsigned e = d->perm[i];
11367 if (e >= nelt)
11368 e -= nelt;
11369 else
11370 e += nelt;
11371 perm2[i] = e;
11374 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11375 return true;
11378 if (expand_vec_perm_shrp (d))
11379 return true;
11381 /* ??? Look for deposit-like permutations where most of the result
11382 comes from one vector unchanged and the rest comes from a
11383 sequential hunk of the other vector. */
11385 return false;
11388 /* Pattern match broadcast permutations. */
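/* For instance (illustrative only): the V8QImode selector
   {3,3,3,3,3,3,3,3} is handled below by extracting byte 3 of op0 into
   a general register and replicating it with mux1_brcst_qi.  */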
11390 static bool
11391 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11393 unsigned i, elt, nelt = d->nelt;
11394 unsigned char perm2[2];
11395 rtx temp;
11396 bool ok;
11398 if (!d->one_operand_p)
11399 return false;
11401 elt = d->perm[0];
11402 for (i = 1; i < nelt; ++i)
11403 if (d->perm[i] != elt)
11404 return false;
11406 switch (d->vmode)
11408 case V2SImode:
11409 case V2SFmode:
11410 /* Implementable by interleave. */
11411 perm2[0] = elt;
11412 perm2[1] = elt + 2;
11413 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11414 gcc_assert (ok);
11415 break;
11417 case V8QImode:
11418 /* Implementable by extract + broadcast. */
11419 if (BYTES_BIG_ENDIAN)
11420 elt = 7 - elt;
11421 elt *= BITS_PER_UNIT;
11422 temp = gen_reg_rtx (DImode);
11423 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11424 GEN_INT (8), GEN_INT (elt)));
11425 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11426 break;
11428 case V4HImode:
11429 /* Should have been matched directly by vec_select. */
11430 default:
11431 gcc_unreachable ();
11434 return true;
11437 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11438 two vector permutation into a single vector permutation by using
11439 an interleave operation to merge the vectors. */
11441 static bool
11442 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11444 struct expand_vec_perm_d dremap, dfinal;
11445 unsigned char remap[2 * MAX_VECT_LEN];
11446 unsigned contents, i, nelt, nelt2;
11447 unsigned h0, h1, h2, h3;
11448 rtx_insn *seq;
11449 bool ok;
11451 if (d->one_operand_p)
11452 return false;
11454 nelt = d->nelt;
11455 nelt2 = nelt / 2;
11457 /* Examine from whence the elements come. */
11458 contents = 0;
11459 for (i = 0; i < nelt; ++i)
11460 contents |= 1u << d->perm[i];
11462 memset (remap, 0xff, sizeof (remap));
11463 dremap = *d;
11465 h0 = (1u << nelt2) - 1;
11466 h1 = h0 << nelt2;
11467 h2 = h0 << nelt;
11468 h3 = h0 << (nelt + nelt2);
11470 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11472 for (i = 0; i < nelt; ++i)
11474 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11475 remap[which] = i;
11476 dremap.perm[i] = which;
11479 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11481 for (i = 0; i < nelt; ++i)
11483 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11484 remap[which] = i;
11485 dremap.perm[i] = which;
11488 else if ((contents & 0x5555) == contents) /* mix even elements */
11490 for (i = 0; i < nelt; ++i)
11492 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11493 remap[which] = i;
11494 dremap.perm[i] = which;
11497 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11499 for (i = 0; i < nelt; ++i)
11501 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11502 remap[which] = i;
11503 dremap.perm[i] = which;
11506 else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
11508 unsigned shift = ctz_hwi (contents);
11509 for (i = 0; i < nelt; ++i)
11511 unsigned which = (i + shift) & (2 * nelt - 1);
11512 remap[which] = i;
11513 dremap.perm[i] = which;
11516 else
11517 return false;
11519 /* Use the remapping array set up above to move the elements from their
11520 swizzled locations into their final destinations. */
11521 dfinal = *d;
11522 for (i = 0; i < nelt; ++i)
11524 unsigned e = remap[d->perm[i]];
11525 gcc_assert (e < nelt);
11526 dfinal.perm[i] = e;
11528 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11529 dfinal.op1 = dfinal.op0;
11530 dfinal.one_operand_p = true;
11531 dremap.target = dfinal.op0;
11533 /* Test if the final remap can be done with a single insn. For V4HImode
11534 this *will* succeed. For V8QImode or V2SImode it may not. */
11535 start_sequence ();
11536 ok = expand_vec_perm_1 (&dfinal);
11537 seq = get_insns ();
11538 end_sequence ();
11539 if (!ok)
11540 return false;
11541 if (d->testing_p)
11542 return true;
11544 ok = expand_vec_perm_1 (&dremap);
11545 gcc_assert (ok);
11547 emit_insn (seq);
11548 return true;
11551 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11552 constant permutation via two mux2 and a merge. */
11554 static bool
11555 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11557 unsigned char perm2[4];
11558 rtx rmask[4];
11559 unsigned i;
11560 rtx t0, t1, mask, x;
11561 bool ok;
11563 if (d->vmode != V4HImode || d->one_operand_p)
11564 return false;
11565 if (d->testing_p)
11566 return true;
11568 for (i = 0; i < 4; ++i)
11570 perm2[i] = d->perm[i] & 3;
11571 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
11573 mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
11574 mask = force_reg (V4HImode, mask);
11576 t0 = gen_reg_rtx (V4HImode);
11577 t1 = gen_reg_rtx (V4HImode);
11579 ok = expand_vselect (t0, d->op0, perm2, 4);
11580 gcc_assert (ok);
11581 ok = expand_vselect (t1, d->op1, perm2, 4);
11582 gcc_assert (ok);
11584 x = gen_rtx_AND (V4HImode, mask, t0);
11585 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
11587 x = gen_rtx_NOT (V4HImode, mask);
11588 x = gen_rtx_AND (V4HImode, x, t1);
11589 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
11591 x = gen_rtx_IOR (V4HImode, t0, t1);
11592 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
11594 return true;

/* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  if (expand_vec_perm_1 (d))
    return true;
  if (expand_vec_perm_broadcast (d))
    return true;
  if (expand_vec_perm_interleave_2 (d))
    return true;
  if (expand_vec_perm_v4hi_5 (d))
    return true;
  return false;
}
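
/* The driver above tries the strategies in rough order of cost: a single
   insn, a broadcast, an interleave followed by a single insn, and finally
   the generic two-mux2-and-merge sequence for V4HImode.  The sketch below
   (a hypothetical helper kept under #if 0 and not part of the build; the
   name example_extract_even_v4hi is invented for illustration) shows how
   a caller in this file fills in the expand_vec_perm_d descriptor; it
   mirrors ia64_expand_vec_perm_even_odd further below with ODD == 0.  */
#if 0
static void
example_extract_even_v4hi (rtx target, rtx op0, rtx op1)
{
  struct expand_vec_perm_d d;
  unsigned int i;
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = V4HImode;
  d.nelt = 4;
  d.one_operand_p = false;
  d.testing_p = false;

  /* Select elements {0, 2, 4, 6} of the concatenation {op0, op1}.  */
  for (i = 0; i < 4; ++i)
    d.perm[i] = i * 2;

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
#endif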

bool
ia64_expand_vec_perm_const (rtx operands[4])
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  int i, nelt, which;
  rtx sel;

  d.target = operands[0];
  d.op0 = operands[1];
  d.op1 = operands[2];
  sel = operands[3];

  d.vmode = GET_MODE (d.target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
  gcc_assert (XVECLEN (sel, 0) == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = which = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);

      which |= (ei < nelt ? 1 : 2);
      d.perm[i] = ei;
      perm[i] = ei;
    }

  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      if (!rtx_equal_p (d.op0, d.op1))
        {
          d.one_operand_p = false;
          break;
        }

      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      for (i = 0; i < nelt; ++i)
        if (d.perm[i] >= nelt)
          d.perm[i] -= nelt;
      /* FALLTHRU */

    case 1:
      d.op1 = d.op0;
      d.one_operand_p = true;
      break;

    case 2:
      for (i = 0; i < nelt; ++i)
        d.perm[i] -= nelt;
      d.op0 = d.op1;
      d.one_operand_p = true;
      break;
    }

  if (ia64_expand_vec_perm_const_1 (&d))
    return true;

  /* If the mask says both arguments are needed, but they are the same,
     the above tried to expand with one_operand_p true.  If that didn't
     work, retry with one_operand_p false, as that's what we used in _ok.  */
  if (which == 3 && d.one_operand_p)
    {
      memcpy (d.perm, perm, sizeof (perm));
      d.one_operand_p = false;
      return ia64_expand_vec_perm_const_1 (&d);
    }

  return false;
}
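
/* Worked example (illustrative) of the WHICH classification above: for
   V2SImode (nelt == 2), a selector of {1, 2} references element 1 of the
   first operand and element 0 of the second, so WHICH ends up as 3 and
   both operands are kept unless they are the same rtx.  A selector of
   {3, 2} references only the second operand, so WHICH is 2, the indices
   are folded down to {1, 0}, op0 is replaced by op1, and the expansion
   proceeds as a one-operand permutation.  */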

/* Implement targetm.vectorize.vec_perm_const_ok.  */

static bool
ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                  const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Extract the values from the vector CST into the permutation
     array in D.  */
  memcpy (d.perm, sel, nelt);
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* For all elements from second vector, fold the elements to first.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to the vector type.  */
  d.one_operand_p = (which != 3);

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_operand_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = ia64_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
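
/* Note (illustrative): because d.testing_p is set, the expanders consult
   it and stop before committing a full expansion, and any RTL generated
   while probing lands in the sequence opened here, which is discarded by
   end_sequence, so this query has no effect on the insn stream.  The raw
   virtual registers merely stand in for operands that do not exist yet at
   query time.  This function is presumably wired up as
   TARGET_VECTORIZE_VEC_PERM_CONST_OK in the target hook definitions
   earlier in this file.  */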

void
ia64_expand_vec_setv2sf (rtx operands[3])
{
  struct expand_vec_perm_d d;
  unsigned int which;
  bool ok;

  d.target = operands[0];
  d.op0 = operands[0];
  d.op1 = gen_reg_rtx (V2SFmode);
  d.vmode = V2SFmode;
  d.nelt = 2;
  d.one_operand_p = false;
  d.testing_p = false;

  which = INTVAL (operands[2]);
  gcc_assert (which <= 1);
  d.perm[0] = 1 - which;
  d.perm[1] = which + 2;

  emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
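
/* Note (illustrative): vec_set is implemented here as a permutation.  The
   new scalar is first packed together with zero into a fresh V2SF register
   via fpack, and a two-operand constant permutation then merges that
   register with the original vector so that only the lane selected by
   operands[2] is replaced.  The gcc_assert reflects the expectation that
   such a two-element constant permutation is always expandable here.  */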

void
ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
{
  struct expand_vec_perm_d d;
  enum machine_mode vmode = GET_MODE (target);
  unsigned int i, nelt = GET_MODE_NUNITS (vmode);
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = vmode;
  d.nelt = nelt;
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
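
/* For V4HImode with ODD == 1 the loop above builds the selector
   {1, 3, 5, 7}, i.e. the odd elements of the concatenation {op0, op1}.
   The sketch below (a hypothetical helper kept under #if 0 and not part of
   the build; the name example_extract_odd_v4hi is invented for
   illustration) shows the same request routed through the rtx-level entry
   point ia64_expand_vec_perm_const, which expects the selector as a
   CONST_VECTOR in operands[3].  */
#if 0
static void
example_extract_odd_v4hi (rtx target, rtx op0, rtx op1)
{
  rtvec v = rtvec_alloc (4);
  rtx ops[4];
  unsigned int i;

  /* Selector {1, 3, 5, 7}: odd elements of the concatenation {op0, op1}.  */
  for (i = 0; i < 4; ++i)
    RTVEC_ELT (v, i) = GEN_INT (i * 2 + 1);

  ops[0] = target;
  ops[1] = op0;
  ops[2] = op1;
  ops[3] = gen_rtx_CONST_VECTOR (V4HImode, v);

  if (!ia64_expand_vec_perm_const (ops))
    gcc_unreachable ();
}
#endif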

#include "gt-ia64.h"